grsecurity-3.0-3.12.6-201312251834.patch
1 .|,
2 -*-
3 '/'\`
4 /`'o\
5 /#,o'`\
6 o/`"#,`\o
7 /`o``"#,\
8 o/#,`'o'`\o
9 /o`"#,`',o\
10 o`-._`"#_.-'o
11 _|"|_
12 \=%=/ hjw
13 """
14diff --git a/Documentation/dontdiff b/Documentation/dontdiff
15index b89a739..79768fb 100644
16--- a/Documentation/dontdiff
17+++ b/Documentation/dontdiff
18@@ -2,9 +2,11 @@
19 *.aux
20 *.bin
21 *.bz2
22+*.c.[012]*.*
23 *.cis
24 *.cpio
25 *.csp
26+*.dbg
27 *.dsp
28 *.dvi
29 *.elf
30@@ -14,6 +16,7 @@
31 *.gcov
32 *.gen.S
33 *.gif
34+*.gmo
35 *.grep
36 *.grp
37 *.gz
38@@ -48,14 +51,17 @@
39 *.tab.h
40 *.tex
41 *.ver
42+*.vim
43 *.xml
44 *.xz
45 *_MODULES
46+*_reg_safe.h
47 *_vga16.c
48 *~
49 \#*#
50 *.9
51-.*
52+.[^g]*
53+.gen*
54 .*.d
55 .mm
56 53c700_d.h
57@@ -69,9 +75,11 @@ Image
58 Module.markers
59 Module.symvers
60 PENDING
61+PERF*
62 SCCS
63 System.map*
64 TAGS
65+TRACEEVENT-CFLAGS
66 aconf
67 af_names.h
68 aic7*reg.h*
69@@ -80,6 +88,7 @@ aic7*seq.h*
70 aicasm
71 aicdb.h*
72 altivec*.c
73+ashldi3.S
74 asm-offsets.h
75 asm_offsets.h
76 autoconf.h*
77@@ -92,19 +101,24 @@ bounds.h
78 bsetup
79 btfixupprep
80 build
81+builtin-policy.h
82 bvmlinux
83 bzImage*
84 capability_names.h
85 capflags.c
86 classlist.h*
87+clut_vga16.c
88+common-cmds.h
89 comp*.log
90 compile.h*
91 conf
92 config
93 config-*
94 config_data.h*
95+config.c
96 config.mak
97 config.mak.autogen
98+config.tmp
99 conmakehash
100 consolemap_deftbl.c*
101 cpustr.h
102@@ -115,9 +129,11 @@ devlist.h*
103 dnotify_test
104 docproc
105 dslm
106+dtc-lexer.lex.c
107 elf2ecoff
108 elfconfig.h*
109 evergreen_reg_safe.h
110+exception_policy.conf
111 fixdep
112 flask.h
113 fore200e_mkfirm
114@@ -125,12 +141,15 @@ fore200e_pca_fw.c*
115 gconf
116 gconf.glade.h
117 gen-devlist
118+gen-kdb_cmds.c
119 gen_crc32table
120 gen_init_cpio
121 generated
122 genheaders
123 genksyms
124 *_gray256.c
125+hash
126+hid-example
127 hpet_example
128 hugepage-mmap
129 hugepage-shm
130@@ -145,14 +164,14 @@ int32.c
131 int4.c
132 int8.c
133 kallsyms
134-kconfig
135+kern_constants.h
136 keywords.c
137 ksym.c*
138 ksym.h*
139 kxgettext
140 lex.c
141 lex.*.c
142-linux
143+lib1funcs.S
144 logo_*.c
145 logo_*_clut224.c
146 logo_*_mono.c
147@@ -162,14 +181,15 @@ mach-types.h
148 machtypes.h
149 map
150 map_hugetlb
151-media
152 mconf
153+mdp
154 miboot*
155 mk_elfconfig
156 mkboot
157 mkbugboot
158 mkcpustr
159 mkdep
160+mkpiggy
161 mkprep
162 mkregtable
163 mktables
164@@ -185,6 +205,8 @@ oui.c*
165 page-types
166 parse.c
167 parse.h
168+parse-events*
169+pasyms.h
170 patches*
171 pca200e.bin
172 pca200e_ecd.bin2
173@@ -194,6 +216,7 @@ perf-archive
174 piggyback
175 piggy.gzip
176 piggy.S
177+pmu-*
178 pnmtologo
179 ppc_defs.h*
180 pss_boot.h
181@@ -203,7 +226,10 @@ r200_reg_safe.h
182 r300_reg_safe.h
183 r420_reg_safe.h
184 r600_reg_safe.h
185+realmode.lds
186+realmode.relocs
187 recordmcount
188+regdb.c
189 relocs
190 rlim_names.h
191 rn50_reg_safe.h
192@@ -213,8 +239,12 @@ series
193 setup
194 setup.bin
195 setup.elf
196+signing_key*
197+size_overflow_hash.h
198 sImage
199+slabinfo
200 sm_tbl*
201+sortextable
202 split-include
203 syscalltab.h
204 tables.c
205@@ -224,6 +254,7 @@ tftpboot.img
206 timeconst.h
207 times.h*
208 trix_boot.h
209+user_constants.h
210 utsrelease.h*
211 vdso-syms.lds
212 vdso.lds
213@@ -235,13 +266,17 @@ vdso32.lds
214 vdso32.so.dbg
215 vdso64.lds
216 vdso64.so.dbg
217+vdsox32.lds
218+vdsox32-syms.lds
219 version.h*
220 vmImage
221 vmlinux
222 vmlinux-*
223 vmlinux.aout
224 vmlinux.bin.all
225+vmlinux.bin.bz2
226 vmlinux.lds
227+vmlinux.relocs
228 vmlinuz
229 voffset.h
230 vsyscall.lds
231@@ -249,9 +284,12 @@ vsyscall_32.lds
232 wanxlfw.inc
233 uImage
234 unifdef
235+utsrelease.h
236 wakeup.bin
237 wakeup.elf
238 wakeup.lds
239+x509*
240 zImage*
241 zconf.hash.c
242+zconf.lex.c
243 zoffset.h
244diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
245index fcbb736..5508d8c 100644
246--- a/Documentation/kernel-parameters.txt
247+++ b/Documentation/kernel-parameters.txt
248@@ -1031,6 +1031,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
249 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
250 Default: 1024
251
252+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
253+ ignore grsecurity's /proc restrictions
254+
255+
256 hashdist= [KNL,NUMA] Large hashes allocated during boot
257 are distributed across NUMA nodes. Defaults on
258 for 64-bit NUMA, off otherwise.
259@@ -1999,6 +2003,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
260 noexec=on: enable non-executable mappings (default)
261 noexec=off: disable non-executable mappings
262
263+ nopcid [X86-64]
264+ Disable PCID (Process-Context IDentifier) even if it
265+ is supported by the processor.
266+
267 nosmap [X86]
268 Disable SMAP (Supervisor Mode Access Prevention)
269 even if it is supported by processor.
270@@ -2266,6 +2274,25 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
271 the specified number of seconds. This is to be used if
272 your oopses keep scrolling off the screen.
273
274+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
275+ virtualization environments that don't cope well with the
276+ expand down segment used by UDEREF on X86-32 or the frequent
277+ page table updates on X86-64.
278+
279+ pax_sanitize_slab=
280+ 0/1 to disable/enable slab object sanitization (enabled by
281+ default).
282+
283+	pax_softmode=	0/1 to disable/enable PaX softmode at boot time.
284+
285+ pax_extra_latent_entropy
286+ Enable a very simple form of latent entropy extraction
287+ from the first 4GB of memory as the bootmem allocator
288+ passes the memory pages to the buddy allocator.
289+
290+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
291+ when the processor supports PCID.
292+
293 pcbit= [HW,ISDN]
294
295 pcd. [PARIDE]
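
The parameters documented above are consumed at boot like any other kernel command-line flag. As a minimal sketch of the usual wiring (the handler and variable names here are illustrative, not lifted from this patch), a 0/1 switch such as pax_softmode= would be parsed through an __setup() handler:

	/* sketch: parsing a 0/1 boot parameter such as pax_softmode= */
	static int pax_softmode __read_mostly;

	static int __init setup_pax_softmode(char *str)
	{
		get_option(&str, &pax_softmode);	/* "0" disables, "1" enables */
		return 1;
	}
	__setup("pax_softmode=", setup_pax_softmode);

Booting with e.g. pax_softmode=1 grsec_proc_gid=1001 on the kernel command line then takes effect before init runs.
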
296diff --git a/Makefile b/Makefile
297index 2b23383..a66cff0 100644
298--- a/Makefile
299+++ b/Makefile
300@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
301
302 HOSTCC = gcc
303 HOSTCXX = g++
304-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
305-HOSTCXXFLAGS = -O2
306+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
307+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
308+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
309
310 # Decide whether to build built-in, modular, or both.
311 # Normally, just do built-in.
312@@ -414,8 +415,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
313 # Rules shared between *config targets and build targets
314
315 # Basic helpers built in scripts/
316-PHONY += scripts_basic
317-scripts_basic:
318+PHONY += scripts_basic gcc-plugins
319+scripts_basic: gcc-plugins
320 $(Q)$(MAKE) $(build)=scripts/basic
321 $(Q)rm -f .tmp_quiet_recordmcount
322
323@@ -576,6 +577,65 @@ else
324 KBUILD_CFLAGS += -O2
325 endif
326
327+ifndef DISABLE_PAX_PLUGINS
328+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
329+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
330+else
331+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
332+endif
333+ifneq ($(PLUGINCC),)
334+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
335+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
336+endif
337+ifdef CONFIG_PAX_MEMORY_STACKLEAK
338+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
339+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
340+endif
341+ifdef CONFIG_KALLOCSTAT_PLUGIN
342+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
343+endif
344+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
345+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
346+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
347+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
348+endif
349+ifdef CONFIG_CHECKER_PLUGIN
350+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
351+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
352+endif
353+endif
354+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
355+ifdef CONFIG_PAX_SIZE_OVERFLOW
356+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
357+endif
358+ifdef CONFIG_PAX_LATENT_ENTROPY
359+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
360+endif
361+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
362+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
363+endif
364+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
365+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
366+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
367+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
368+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
369+ifeq ($(KBUILD_EXTMOD),)
370+gcc-plugins:
371+ $(Q)$(MAKE) $(build)=tools/gcc
372+else
373+gcc-plugins: ;
374+endif
375+else
376+gcc-plugins:
377+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
378+	$(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)
379+else
380+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
381+endif
382+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
383+endif
384+endif
385+
386 include $(srctree)/arch/$(SRCARCH)/Makefile
387
388 ifdef CONFIG_READABLE_ASM
389@@ -733,7 +793,7 @@ export mod_sign_cmd
390
391
392 ifeq ($(KBUILD_EXTMOD),)
393-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
394+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
395
396 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
397 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
398@@ -782,6 +842,8 @@ endif
399
400 # The actual objects are generated when descending,
401 # make sure no implicit rule kicks in
402+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
403+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
404 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
405
406 # Handle descending into subdirectories listed in $(vmlinux-dirs)
407@@ -791,7 +853,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
408 # Error messages still appears in the original language
409
410 PHONY += $(vmlinux-dirs)
411-$(vmlinux-dirs): prepare scripts
412+$(vmlinux-dirs): gcc-plugins prepare scripts
413 $(Q)$(MAKE) $(build)=$@
414
415 define filechk_kernel.release
416@@ -838,6 +900,7 @@ prepare0: archprepare FORCE
417 $(Q)$(MAKE) $(build)=.
418
419 # All the preparing..
420+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
421 prepare: prepare0
422
423 # Generate some files
424@@ -945,6 +1008,8 @@ all: modules
425 # using awk while concatenating to the final file.
426
427 PHONY += modules
428+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
429+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
430 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
431 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
432 @$(kecho) ' Building modules, stage 2.';
433@@ -960,7 +1025,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
434
435 # Target to prepare building external modules
436 PHONY += modules_prepare
437-modules_prepare: prepare scripts
438+modules_prepare: gcc-plugins prepare scripts
439
440 # Target to install modules
441 PHONY += modules_install
442@@ -1026,7 +1091,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
443 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
444 signing_key.priv signing_key.x509 x509.genkey \
445 extra_certificates signing_key.x509.keyid \
446- signing_key.x509.signer
447+ signing_key.x509.signer tools/gcc/size_overflow_hash.h
448
449 # clean - Delete most, but leave enough to build external modules
450 #
451@@ -1066,6 +1131,7 @@ distclean: mrproper
452 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
453 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
454 -o -name '.*.rej' \
455+ -o -name '.*.rej' -o -name '*.so' \
456 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
457 -type f -print | xargs rm -f
458
459@@ -1227,6 +1293,8 @@ PHONY += $(module-dirs) modules
460 $(module-dirs): crmodverdir $(objtree)/Module.symvers
461 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
462
463+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
464+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
465 modules: $(module-dirs)
466 @$(kecho) ' Building modules, stage 2.';
467 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
468@@ -1366,17 +1434,21 @@ else
469 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
470 endif
471
472-%.s: %.c prepare scripts FORCE
473+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
474+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
475+%.s: %.c gcc-plugins prepare scripts FORCE
476 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
477 %.i: %.c prepare scripts FORCE
478 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
479-%.o: %.c prepare scripts FORCE
480+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
481+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
482+%.o: %.c gcc-plugins prepare scripts FORCE
483 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
484 %.lst: %.c prepare scripts FORCE
485 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
486-%.s: %.S prepare scripts FORCE
487+%.s: %.S gcc-plugins prepare scripts FORCE
488 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
489-%.o: %.S prepare scripts FORCE
490+%.o: %.S gcc-plugins prepare scripts FORCE
491 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
492 %.symtypes: %.c prepare scripts FORCE
493 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
494@@ -1386,11 +1458,15 @@ endif
495 $(cmd_crmodverdir)
496 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
497 $(build)=$(build-dir)
498-%/: prepare scripts FORCE
499+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
500+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
501+%/: gcc-plugins prepare scripts FORCE
502 $(cmd_crmodverdir)
503 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
504 $(build)=$(build-dir)
505-%.ko: prepare scripts FORCE
506+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
507+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
508+%.ko: gcc-plugins prepare scripts FORCE
509 $(cmd_crmodverdir)
510 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
511 $(build)=$(build-dir) $(@:.ko=.o)
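
Each -fplugin= option above loads a shared object into the compiler itself. A GCC plugin is a small piece of C built against the gcc-<ver>-plugin-dev headers; as a rough generic skeleton (not code from this patch), it must export a GPL marker and an init hook:

	/* minimal GCC plugin skeleton, illustrative only */
	#include "gcc-plugin.h"
	#include "plugin-version.h"

	int plugin_is_GPL_compatible;	/* required, or GCC refuses to load the .so */

	int plugin_init(struct plugin_name_args *plugin_info,
			struct plugin_gcc_version *version)
	{
		if (!plugin_default_version_check(version, &gcc_version))
			return 1;	/* built against a different GCC version */
		/* register passes, attributes and callbacks here */
		return 0;
	}

This is also why the PLUGINCC probe above switches on the compiler version: from gcc 4.8 onward the plugin headers are C++, so HOSTCXX has to build the plugins.
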
512diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
513index 78b03ef..da28a51 100644
514--- a/arch/alpha/include/asm/atomic.h
515+++ b/arch/alpha/include/asm/atomic.h
516@@ -292,6 +292,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
517 #define atomic_dec(v) atomic_sub(1,(v))
518 #define atomic64_dec(v) atomic64_sub(1,(v))
519
520+#define atomic64_read_unchecked(v) atomic64_read(v)
521+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
522+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
523+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
524+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
525+#define atomic64_inc_unchecked(v) atomic64_inc(v)
526+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
527+#define atomic64_dec_unchecked(v) atomic64_dec(v)
528+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
529+
530 #define smp_mb__before_atomic_dec() smp_mb()
531 #define smp_mb__after_atomic_dec() smp_mb()
532 #define smp_mb__before_atomic_inc() smp_mb()
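
Alpha carries no PAX_REFCOUNT instrumentation, so the hunk above simply aliases every *_unchecked operation to its plain counterpart. The split matters on architectures that do trap on overflow: the _unchecked variants are meant for counters where wraparound is expected and harmless. An illustrative (invented) use:

	/* illustrative: a stats counter that may legitimately wrap */
	static atomic64_unchecked_t rx_bytes = ATOMIC64_INIT(0);

	static inline void account_rx(long n)
	{
		atomic64_add_unchecked(n, &rx_bytes);	/* no overflow trap wanted */
	}
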
533diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
534index ad368a9..fbe0f25 100644
535--- a/arch/alpha/include/asm/cache.h
536+++ b/arch/alpha/include/asm/cache.h
537@@ -4,19 +4,19 @@
538 #ifndef __ARCH_ALPHA_CACHE_H
539 #define __ARCH_ALPHA_CACHE_H
540
541+#include <linux/const.h>
542
543 /* Bytes per L1 (data) cache line. */
544 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
545-# define L1_CACHE_BYTES 64
546 # define L1_CACHE_SHIFT 6
547 #else
548 /* Both EV4 and EV5 are write-through, read-allocate,
549 direct-mapped, physical.
550 */
551-# define L1_CACHE_BYTES 32
552 # define L1_CACHE_SHIFT 5
553 #endif
554
555+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
556 #define SMP_CACHE_BYTES L1_CACHE_BYTES
557
558 #endif
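
The _AC() macro now used for L1_CACHE_BYTES comes from <uapi/linux/const.h>: it pastes the UL suffix only when compiling C, so the same constant still assembles cleanly when the header is pulled into .S files. Its long-standing definition is essentially:

	/* from include/uapi/linux/const.h */
	#ifdef __ASSEMBLY__
	#define _AC(X,Y)	X
	#else
	#define __AC(X,Y)	(X##Y)
	#define _AC(X,Y)	__AC(X,Y)
	#endif
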
559diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
560index 968d999..d36b2df 100644
561--- a/arch/alpha/include/asm/elf.h
562+++ b/arch/alpha/include/asm/elf.h
563@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
564
565 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
566
567+#ifdef CONFIG_PAX_ASLR
568+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
569+
570+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
571+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
572+#endif
573+
574 /* $0 is set by ld.so to a pointer to a function which might be
575 registered using atexit. This provides a mean for the dynamic
576 linker to call DT_FINI functions for shared libraries that have
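
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN above give the number of random, page-granular bits PaX mixes into the mmap and stack bases. A back-of-the-envelope sketch with the 64-bit values, using pax_get_random_long(), which the patch defines elsewhere:

	/* sketch: 28 random page-granular bits on 64-bit alpha */
	static unsigned long pax_delta_mmap(void)
	{
		return (pax_get_random_long() & ((1UL << 28) - 1)) << PAGE_SHIFT;
		/* with alpha's 8 KiB pages, a 2^41-byte (2 TiB) randomization window */
	}
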
577diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
578index bc2a0da..8ad11ee 100644
579--- a/arch/alpha/include/asm/pgalloc.h
580+++ b/arch/alpha/include/asm/pgalloc.h
581@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
582 pgd_set(pgd, pmd);
583 }
584
585+static inline void
586+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
587+{
588+ pgd_populate(mm, pgd, pmd);
589+}
590+
591 extern pgd_t *pgd_alloc(struct mm_struct *mm);
592
593 static inline void
594diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
595index d8f9b7e..f6222fa 100644
596--- a/arch/alpha/include/asm/pgtable.h
597+++ b/arch/alpha/include/asm/pgtable.h
598@@ -102,6 +102,17 @@ struct vm_area_struct;
599 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
600 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
601 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
602+
603+#ifdef CONFIG_PAX_PAGEEXEC
604+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
605+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
606+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
607+#else
608+# define PAGE_SHARED_NOEXEC PAGE_SHARED
609+# define PAGE_COPY_NOEXEC PAGE_COPY
610+# define PAGE_READONLY_NOEXEC PAGE_READONLY
611+#endif
612+
613 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
614
615 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
616diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
617index 2fd00b7..cfd5069 100644
618--- a/arch/alpha/kernel/module.c
619+++ b/arch/alpha/kernel/module.c
620@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
621
622 /* The small sections were sorted to the end of the segment.
623 The following should definitely cover them. */
624- gp = (u64)me->module_core + me->core_size - 0x8000;
625+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
626 got = sechdrs[me->arch.gotsecindex].sh_addr;
627
628 for (i = 0; i < n; i++) {
629diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
630index 1402fcc..0b1abd2 100644
631--- a/arch/alpha/kernel/osf_sys.c
632+++ b/arch/alpha/kernel/osf_sys.c
633@@ -1298,10 +1298,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
634 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
635
636 static unsigned long
637-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
638- unsigned long limit)
639+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
640+ unsigned long limit, unsigned long flags)
641 {
642 struct vm_unmapped_area_info info;
643+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
644
645 info.flags = 0;
646 info.length = len;
647@@ -1309,6 +1310,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
648 info.high_limit = limit;
649 info.align_mask = 0;
650 info.align_offset = 0;
651+ info.threadstack_offset = offset;
652 return vm_unmapped_area(&info);
653 }
654
655@@ -1341,20 +1343,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
656 merely specific addresses, but regions of memory -- perhaps
657 this feature should be incorporated into all ports? */
658
659+#ifdef CONFIG_PAX_RANDMMAP
660+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
661+#endif
662+
663 if (addr) {
664- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
665+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
666 if (addr != (unsigned long) -ENOMEM)
667 return addr;
668 }
669
670 /* Next, try allocating at TASK_UNMAPPED_BASE. */
671- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
672- len, limit);
673+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
674+
675 if (addr != (unsigned long) -ENOMEM)
676 return addr;
677
678 /* Finally, try allocating in low memory. */
679- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
680+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
681
682 return addr;
683 }
684diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
685index 98838a0..b304fb4 100644
686--- a/arch/alpha/mm/fault.c
687+++ b/arch/alpha/mm/fault.c
688@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
689 __reload_thread(pcb);
690 }
691
692+#ifdef CONFIG_PAX_PAGEEXEC
693+/*
694+ * PaX: decide what to do with offenders (regs->pc = fault address)
695+ *
696+ * returns 1 when task should be killed
697+ * 2 when patched PLT trampoline was detected
698+ * 3 when unpatched PLT trampoline was detected
699+ */
700+static int pax_handle_fetch_fault(struct pt_regs *regs)
701+{
702+
703+#ifdef CONFIG_PAX_EMUPLT
704+ int err;
705+
706+ do { /* PaX: patched PLT emulation #1 */
707+ unsigned int ldah, ldq, jmp;
708+
709+ err = get_user(ldah, (unsigned int *)regs->pc);
710+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
711+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
712+
713+ if (err)
714+ break;
715+
716+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
717+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
718+ jmp == 0x6BFB0000U)
719+ {
720+ unsigned long r27, addr;
721+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
722+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
723+
724+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
725+ err = get_user(r27, (unsigned long *)addr);
726+ if (err)
727+ break;
728+
729+ regs->r27 = r27;
730+ regs->pc = r27;
731+ return 2;
732+ }
733+ } while (0);
734+
735+ do { /* PaX: patched PLT emulation #2 */
736+ unsigned int ldah, lda, br;
737+
738+ err = get_user(ldah, (unsigned int *)regs->pc);
739+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
740+ err |= get_user(br, (unsigned int *)(regs->pc+8));
741+
742+ if (err)
743+ break;
744+
745+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
746+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
747+ (br & 0xFFE00000U) == 0xC3E00000U)
748+ {
749+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
750+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
751+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
752+
753+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
754+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
755+ return 2;
756+ }
757+ } while (0);
758+
759+ do { /* PaX: unpatched PLT emulation */
760+ unsigned int br;
761+
762+ err = get_user(br, (unsigned int *)regs->pc);
763+
764+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
765+ unsigned int br2, ldq, nop, jmp;
766+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
767+
768+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
769+ err = get_user(br2, (unsigned int *)addr);
770+ err |= get_user(ldq, (unsigned int *)(addr+4));
771+ err |= get_user(nop, (unsigned int *)(addr+8));
772+ err |= get_user(jmp, (unsigned int *)(addr+12));
773+ err |= get_user(resolver, (unsigned long *)(addr+16));
774+
775+ if (err)
776+ break;
777+
778+ if (br2 == 0xC3600000U &&
779+ ldq == 0xA77B000CU &&
780+ nop == 0x47FF041FU &&
781+ jmp == 0x6B7B0000U)
782+ {
783+ regs->r28 = regs->pc+4;
784+ regs->r27 = addr+16;
785+ regs->pc = resolver;
786+ return 3;
787+ }
788+ }
789+ } while (0);
790+#endif
791+
792+ return 1;
793+}
794+
795+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
796+{
797+ unsigned long i;
798+
799+ printk(KERN_ERR "PAX: bytes at PC: ");
800+ for (i = 0; i < 5; i++) {
801+ unsigned int c;
802+ if (get_user(c, (unsigned int *)pc+i))
803+ printk(KERN_CONT "???????? ");
804+ else
805+ printk(KERN_CONT "%08x ", c);
806+ }
807+ printk("\n");
808+}
809+#endif
810
811 /*
812 * This routine handles page faults. It determines the address,
813@@ -133,8 +251,29 @@ retry:
814 good_area:
815 si_code = SEGV_ACCERR;
816 if (cause < 0) {
817- if (!(vma->vm_flags & VM_EXEC))
818+ if (!(vma->vm_flags & VM_EXEC)) {
819+
820+#ifdef CONFIG_PAX_PAGEEXEC
821+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
822+ goto bad_area;
823+
824+ up_read(&mm->mmap_sem);
825+ switch (pax_handle_fetch_fault(regs)) {
826+
827+#ifdef CONFIG_PAX_EMUPLT
828+ case 2:
829+ case 3:
830+ return;
831+#endif
832+
833+ }
834+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
835+ do_group_exit(SIGKILL);
836+#else
837 goto bad_area;
838+#endif
839+
840+ }
841 } else if (!cause) {
842 /* Allow reads even for write-only mappings */
843 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
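
To decode the magic constants matched by pax_handle_fetch_fault() above (my reading of the Alpha instruction encoding; worth double-checking against the ISA manual):

	/*
	 *  0x277B0000 & 0xFFFF0000  ->  ldah $27, hi($27)
	 *  0xA77B0000 & 0xFFFF0000  ->  ldq  $27, lo($27)
	 *  0x6BFB0000               ->  jmp  $31, ($27)
	 */

That is the canonical three-instruction PLT stub: EMUPLT recognizes it at fault time and performs the jump in software, so the PLT itself never needs to be executable under PAGEEXEC.
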
844diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
845index 1ad6fb6..9406b3d 100644
846--- a/arch/arm/Kconfig
847+++ b/arch/arm/Kconfig
848@@ -1832,7 +1832,7 @@ config ALIGNMENT_TRAP
849
850 config UACCESS_WITH_MEMCPY
851 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
852- depends on MMU
853+ depends on MMU && !PAX_MEMORY_UDEREF
854 default y if CPU_FEROCEON
855 help
856 Implement faster copy_to_user and clear_user methods for CPU
857@@ -2097,6 +2097,7 @@ config XIP_PHYS_ADDR
858 config KEXEC
859 bool "Kexec system call (EXPERIMENTAL)"
860 depends on (!SMP || PM_SLEEP_SMP)
861+ depends on !GRKERNSEC_KMEM
862 help
863 kexec is a system call that implements the ability to shutdown your
864 current kernel, and to start another kernel. It is like a reboot
865diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
866index da1c77d..2ee6056 100644
867--- a/arch/arm/include/asm/atomic.h
868+++ b/arch/arm/include/asm/atomic.h
869@@ -17,17 +17,35 @@
870 #include <asm/barrier.h>
871 #include <asm/cmpxchg.h>
872
873+#ifdef CONFIG_GENERIC_ATOMIC64
874+#include <asm-generic/atomic64.h>
875+#endif
876+
877 #define ATOMIC_INIT(i) { (i) }
878
879 #ifdef __KERNEL__
880
881+#define _ASM_EXTABLE(from, to) \
882+" .pushsection __ex_table,\"a\"\n"\
883+" .align 3\n" \
884+" .long " #from ", " #to"\n" \
885+" .popsection"
886+
887 /*
888 * On ARM, ordinary assignment (str instruction) doesn't clear the local
889 * strex/ldrex monitor on some implementations. The reason we can use it for
890 * atomic_set() is the clrex or dummy strex done on every exception return.
891 */
892 #define atomic_read(v) (*(volatile int *)&(v)->counter)
893+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
894+{
895+ return v->counter;
896+}
897 #define atomic_set(v,i) (((v)->counter) = (i))
898+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
899+{
900+ v->counter = i;
901+}
902
903 #if __LINUX_ARM_ARCH__ >= 6
904
905@@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
906 int result;
907
908 __asm__ __volatile__("@ atomic_add\n"
909+"1: ldrex %1, [%3]\n"
910+" adds %0, %1, %4\n"
911+
912+#ifdef CONFIG_PAX_REFCOUNT
913+" bvc 3f\n"
914+"2: bkpt 0xf103\n"
915+"3:\n"
916+#endif
917+
918+" strex %1, %0, [%3]\n"
919+" teq %1, #0\n"
920+" bne 1b"
921+
922+#ifdef CONFIG_PAX_REFCOUNT
923+"\n4:\n"
924+ _ASM_EXTABLE(2b, 4b)
925+#endif
926+
927+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
928+ : "r" (&v->counter), "Ir" (i)
929+ : "cc");
930+}
931+
932+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
933+{
934+ unsigned long tmp;
935+ int result;
936+
937+ __asm__ __volatile__("@ atomic_add_unchecked\n"
938 "1: ldrex %0, [%3]\n"
939 " add %0, %0, %4\n"
940 " strex %1, %0, [%3]\n"
941@@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
942 smp_mb();
943
944 __asm__ __volatile__("@ atomic_add_return\n"
945+"1: ldrex %1, [%3]\n"
946+" adds %0, %1, %4\n"
947+
948+#ifdef CONFIG_PAX_REFCOUNT
949+" bvc 3f\n"
950+" mov %0, %1\n"
951+"2: bkpt 0xf103\n"
952+"3:\n"
953+#endif
954+
955+" strex %1, %0, [%3]\n"
956+" teq %1, #0\n"
957+" bne 1b"
958+
959+#ifdef CONFIG_PAX_REFCOUNT
960+"\n4:\n"
961+ _ASM_EXTABLE(2b, 4b)
962+#endif
963+
964+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
965+ : "r" (&v->counter), "Ir" (i)
966+ : "cc");
967+
968+ smp_mb();
969+
970+ return result;
971+}
972+
973+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
974+{
975+ unsigned long tmp;
976+ int result;
977+
978+ smp_mb();
979+
980+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
981 "1: ldrex %0, [%3]\n"
982 " add %0, %0, %4\n"
983 " strex %1, %0, [%3]\n"
984@@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
985 int result;
986
987 __asm__ __volatile__("@ atomic_sub\n"
988+"1: ldrex %1, [%3]\n"
989+" subs %0, %1, %4\n"
990+
991+#ifdef CONFIG_PAX_REFCOUNT
992+" bvc 3f\n"
993+"2: bkpt 0xf103\n"
994+"3:\n"
995+#endif
996+
997+" strex %1, %0, [%3]\n"
998+" teq %1, #0\n"
999+" bne 1b"
1000+
1001+#ifdef CONFIG_PAX_REFCOUNT
1002+"\n4:\n"
1003+ _ASM_EXTABLE(2b, 4b)
1004+#endif
1005+
1006+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1007+ : "r" (&v->counter), "Ir" (i)
1008+ : "cc");
1009+}
1010+
1011+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1012+{
1013+ unsigned long tmp;
1014+ int result;
1015+
1016+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
1017 "1: ldrex %0, [%3]\n"
1018 " sub %0, %0, %4\n"
1019 " strex %1, %0, [%3]\n"
1020@@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1021 smp_mb();
1022
1023 __asm__ __volatile__("@ atomic_sub_return\n"
1024-"1: ldrex %0, [%3]\n"
1025-" sub %0, %0, %4\n"
1026+"1: ldrex %1, [%3]\n"
1027+" subs %0, %1, %4\n"
1028+
1029+#ifdef CONFIG_PAX_REFCOUNT
1030+" bvc 3f\n"
1031+" mov %0, %1\n"
1032+"2: bkpt 0xf103\n"
1033+"3:\n"
1034+#endif
1035+
1036 " strex %1, %0, [%3]\n"
1037 " teq %1, #0\n"
1038 " bne 1b"
1039+
1040+#ifdef CONFIG_PAX_REFCOUNT
1041+"\n4:\n"
1042+ _ASM_EXTABLE(2b, 4b)
1043+#endif
1044+
1045 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1046 : "r" (&v->counter), "Ir" (i)
1047 : "cc");
1048@@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1049 return oldval;
1050 }
1051
1052+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1053+{
1054+ unsigned long oldval, res;
1055+
1056+ smp_mb();
1057+
1058+ do {
1059+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1060+ "ldrex %1, [%3]\n"
1061+ "mov %0, #0\n"
1062+ "teq %1, %4\n"
1063+ "strexeq %0, %5, [%3]\n"
1064+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1065+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1066+ : "cc");
1067+ } while (res);
1068+
1069+ smp_mb();
1070+
1071+ return oldval;
1072+}
1073+
1074 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1075 {
1076 unsigned long tmp, tmp2;
1077@@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1078
1079 return val;
1080 }
1081+
1082+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1083+{
1084+ return atomic_add_return(i, v);
1085+}
1086+
1087 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1088+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1089+{
1090+ (void) atomic_add_return(i, v);
1091+}
1092
1093 static inline int atomic_sub_return(int i, atomic_t *v)
1094 {
1095@@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1096 return val;
1097 }
1098 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1099+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1100+{
1101+ (void) atomic_sub_return(i, v);
1102+}
1103
1104 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1105 {
1106@@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1107 return ret;
1108 }
1109
1110+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1111+{
1112+ return atomic_cmpxchg(v, old, new);
1113+}
1114+
1115 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1116 {
1117 unsigned long flags;
1118@@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1119 #endif /* __LINUX_ARM_ARCH__ */
1120
1121 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1122+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1123+{
1124+ return xchg(&v->counter, new);
1125+}
1126
1127 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1128 {
1129@@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1130 }
1131
1132 #define atomic_inc(v) atomic_add(1, v)
1133+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1134+{
1135+ atomic_add_unchecked(1, v);
1136+}
1137 #define atomic_dec(v) atomic_sub(1, v)
1138+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1139+{
1140+ atomic_sub_unchecked(1, v);
1141+}
1142
1143 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1144+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1145+{
1146+ return atomic_add_return_unchecked(1, v) == 0;
1147+}
1148 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1149 #define atomic_inc_return(v) (atomic_add_return(1, v))
1150+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1151+{
1152+ return atomic_add_return_unchecked(1, v);
1153+}
1154 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1155 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1156
1157@@ -241,6 +428,14 @@ typedef struct {
1158 u64 __aligned(8) counter;
1159 } atomic64_t;
1160
1161+#ifdef CONFIG_PAX_REFCOUNT
1162+typedef struct {
1163+ u64 __aligned(8) counter;
1164+} atomic64_unchecked_t;
1165+#else
1166+typedef atomic64_t atomic64_unchecked_t;
1167+#endif
1168+
1169 #define ATOMIC64_INIT(i) { (i) }
1170
1171 #ifdef CONFIG_ARM_LPAE
1172@@ -257,6 +452,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1173 return result;
1174 }
1175
1176+static inline u64 atomic64_read_unchecked(const atomic64_unchecked_t *v)
1177+{
1178+ u64 result;
1179+
1180+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1181+" ldrd %0, %H0, [%1]"
1182+ : "=&r" (result)
1183+ : "r" (&v->counter), "Qo" (v->counter)
1184+ );
1185+
1186+ return result;
1187+}
1188+
1189 static inline void atomic64_set(atomic64_t *v, u64 i)
1190 {
1191 __asm__ __volatile__("@ atomic64_set\n"
1192@@ -265,6 +473,15 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1193 : "r" (&v->counter), "r" (i)
1194 );
1195 }
1196+
1197+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1198+{
1199+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1200+" strd %2, %H2, [%1]"
1201+ : "=Qo" (v->counter)
1202+ : "r" (&v->counter), "r" (i)
1203+ );
1204+}
1205 #else
1206 static inline u64 atomic64_read(const atomic64_t *v)
1207 {
1208@@ -279,6 +496,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1209 return result;
1210 }
1211
1212+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1213+{
1214+ u64 result;
1215+
1216+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1217+" ldrexd %0, %H0, [%1]"
1218+ : "=&r" (result)
1219+ : "r" (&v->counter), "Qo" (v->counter)
1220+ );
1221+
1222+ return result;
1223+}
1224+
1225 static inline void atomic64_set(atomic64_t *v, u64 i)
1226 {
1227 u64 tmp;
1228@@ -292,6 +522,21 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1229 : "r" (&v->counter), "r" (i)
1230 : "cc");
1231 }
1232+
1233+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1234+{
1235+ u64 tmp;
1236+
1237+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1238+"1: ldrexd %0, %H0, [%2]\n"
1239+" strexd %0, %3, %H3, [%2]\n"
1240+" teq %0, #0\n"
1241+" bne 1b"
1242+ : "=&r" (tmp), "=Qo" (v->counter)
1243+ : "r" (&v->counter), "r" (i)
1244+ : "cc");
1245+}
1246+
1247 #endif
1248
1249 static inline void atomic64_add(u64 i, atomic64_t *v)
1250@@ -302,6 +547,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1251 __asm__ __volatile__("@ atomic64_add\n"
1252 "1: ldrexd %0, %H0, [%3]\n"
1253 " adds %0, %0, %4\n"
1254+" adcs %H0, %H0, %H4\n"
1255+
1256+#ifdef CONFIG_PAX_REFCOUNT
1257+" bvc 3f\n"
1258+"2: bkpt 0xf103\n"
1259+"3:\n"
1260+#endif
1261+
1262+" strexd %1, %0, %H0, [%3]\n"
1263+" teq %1, #0\n"
1264+" bne 1b"
1265+
1266+#ifdef CONFIG_PAX_REFCOUNT
1267+"\n4:\n"
1268+ _ASM_EXTABLE(2b, 4b)
1269+#endif
1270+
1271+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1272+ : "r" (&v->counter), "r" (i)
1273+ : "cc");
1274+}
1275+
1276+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1277+{
1278+ u64 result;
1279+ unsigned long tmp;
1280+
1281+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1282+"1: ldrexd %0, %H0, [%3]\n"
1283+" adds %0, %0, %4\n"
1284 " adc %H0, %H0, %H4\n"
1285 " strexd %1, %0, %H0, [%3]\n"
1286 " teq %1, #0\n"
1287@@ -313,12 +588,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1288
1289 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1290 {
1291- u64 result;
1292- unsigned long tmp;
1293+ u64 result, tmp;
1294
1295 smp_mb();
1296
1297 __asm__ __volatile__("@ atomic64_add_return\n"
1298+"1: ldrexd %1, %H1, [%3]\n"
1299+" adds %0, %1, %4\n"
1300+" adcs %H0, %H1, %H4\n"
1301+
1302+#ifdef CONFIG_PAX_REFCOUNT
1303+" bvc 3f\n"
1304+" mov %0, %1\n"
1305+" mov %H0, %H1\n"
1306+"2: bkpt 0xf103\n"
1307+"3:\n"
1308+#endif
1309+
1310+" strexd %1, %0, %H0, [%3]\n"
1311+" teq %1, #0\n"
1312+" bne 1b"
1313+
1314+#ifdef CONFIG_PAX_REFCOUNT
1315+"\n4:\n"
1316+ _ASM_EXTABLE(2b, 4b)
1317+#endif
1318+
1319+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1320+ : "r" (&v->counter), "r" (i)
1321+ : "cc");
1322+
1323+ smp_mb();
1324+
1325+ return result;
1326+}
1327+
1328+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1329+{
1330+ u64 result;
1331+ unsigned long tmp;
1332+
1333+ smp_mb();
1334+
1335+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1336 "1: ldrexd %0, %H0, [%3]\n"
1337 " adds %0, %0, %4\n"
1338 " adc %H0, %H0, %H4\n"
1339@@ -342,6 +654,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1340 __asm__ __volatile__("@ atomic64_sub\n"
1341 "1: ldrexd %0, %H0, [%3]\n"
1342 " subs %0, %0, %4\n"
1343+" sbcs %H0, %H0, %H4\n"
1344+
1345+#ifdef CONFIG_PAX_REFCOUNT
1346+" bvc 3f\n"
1347+"2: bkpt 0xf103\n"
1348+"3:\n"
1349+#endif
1350+
1351+" strexd %1, %0, %H0, [%3]\n"
1352+" teq %1, #0\n"
1353+" bne 1b"
1354+
1355+#ifdef CONFIG_PAX_REFCOUNT
1356+"\n4:\n"
1357+ _ASM_EXTABLE(2b, 4b)
1358+#endif
1359+
1360+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1361+ : "r" (&v->counter), "r" (i)
1362+ : "cc");
1363+}
1364+
1365+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1366+{
1367+ u64 result;
1368+ unsigned long tmp;
1369+
1370+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1371+"1: ldrexd %0, %H0, [%3]\n"
1372+" subs %0, %0, %4\n"
1373 " sbc %H0, %H0, %H4\n"
1374 " strexd %1, %0, %H0, [%3]\n"
1375 " teq %1, #0\n"
1376@@ -353,18 +695,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1377
1378 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1379 {
1380- u64 result;
1381- unsigned long tmp;
1382+ u64 result, tmp;
1383
1384 smp_mb();
1385
1386 __asm__ __volatile__("@ atomic64_sub_return\n"
1387-"1: ldrexd %0, %H0, [%3]\n"
1388-" subs %0, %0, %4\n"
1389-" sbc %H0, %H0, %H4\n"
1390+"1: ldrexd %1, %H1, [%3]\n"
1391+" subs %0, %1, %4\n"
1392+" sbcs %H0, %H1, %H4\n"
1393+
1394+#ifdef CONFIG_PAX_REFCOUNT
1395+" bvc 3f\n"
1396+" mov %0, %1\n"
1397+" mov %H0, %H1\n"
1398+"2: bkpt 0xf103\n"
1399+"3:\n"
1400+#endif
1401+
1402 " strexd %1, %0, %H0, [%3]\n"
1403 " teq %1, #0\n"
1404 " bne 1b"
1405+
1406+#ifdef CONFIG_PAX_REFCOUNT
1407+"\n4:\n"
1408+ _ASM_EXTABLE(2b, 4b)
1409+#endif
1410+
1411 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1412 : "r" (&v->counter), "r" (i)
1413 : "cc");
1414@@ -398,6 +754,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1415 return oldval;
1416 }
1417
1418+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1419+{
1420+ u64 oldval;
1421+ unsigned long res;
1422+
1423+ smp_mb();
1424+
1425+ do {
1426+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1427+ "ldrexd %1, %H1, [%3]\n"
1428+ "mov %0, #0\n"
1429+ "teq %1, %4\n"
1430+ "teqeq %H1, %H4\n"
1431+ "strexdeq %0, %5, %H5, [%3]"
1432+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1433+ : "r" (&ptr->counter), "r" (old), "r" (new)
1434+ : "cc");
1435+ } while (res);
1436+
1437+ smp_mb();
1438+
1439+ return oldval;
1440+}
1441+
1442 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1443 {
1444 u64 result;
1445@@ -421,21 +801,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1446
1447 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1448 {
1449- u64 result;
1450- unsigned long tmp;
1451+ u64 result, tmp;
1452
1453 smp_mb();
1454
1455 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1456-"1: ldrexd %0, %H0, [%3]\n"
1457-" subs %0, %0, #1\n"
1458-" sbc %H0, %H0, #0\n"
1459+"1: ldrexd %1, %H1, [%3]\n"
1460+" subs %0, %1, #1\n"
1461+" sbcs %H0, %H1, #0\n"
1462+
1463+#ifdef CONFIG_PAX_REFCOUNT
1464+" bvc 3f\n"
1465+" mov %0, %1\n"
1466+" mov %H0, %H1\n"
1467+"2: bkpt 0xf103\n"
1468+"3:\n"
1469+#endif
1470+
1471 " teq %H0, #0\n"
1472-" bmi 2f\n"
1473+" bmi 4f\n"
1474 " strexd %1, %0, %H0, [%3]\n"
1475 " teq %1, #0\n"
1476 " bne 1b\n"
1477-"2:"
1478+"4:\n"
1479+
1480+#ifdef CONFIG_PAX_REFCOUNT
1481+ _ASM_EXTABLE(2b, 4b)
1482+#endif
1483+
1484 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1485 : "r" (&v->counter)
1486 : "cc");
1487@@ -458,13 +851,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1488 " teq %0, %5\n"
1489 " teqeq %H0, %H5\n"
1490 " moveq %1, #0\n"
1491-" beq 2f\n"
1492+" beq 4f\n"
1493 " adds %0, %0, %6\n"
1494-" adc %H0, %H0, %H6\n"
1495+" adcs %H0, %H0, %H6\n"
1496+
1497+#ifdef CONFIG_PAX_REFCOUNT
1498+" bvc 3f\n"
1499+"2: bkpt 0xf103\n"
1500+"3:\n"
1501+#endif
1502+
1503 " strexd %2, %0, %H0, [%4]\n"
1504 " teq %2, #0\n"
1505 " bne 1b\n"
1506-"2:"
1507+"4:\n"
1508+
1509+#ifdef CONFIG_PAX_REFCOUNT
1510+ _ASM_EXTABLE(2b, 4b)
1511+#endif
1512+
1513 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1514 : "r" (&v->counter), "r" (u), "r" (a)
1515 : "cc");
1516@@ -477,10 +882,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1517
1518 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1519 #define atomic64_inc(v) atomic64_add(1LL, (v))
1520+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1521 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1522+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1523 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1524 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1525 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1526+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1527 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1528 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1529 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
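
The adds/bvc/bkpt sequence repeated throughout the hunk above is the core of PAX_REFCOUNT on ARM: the arithmetic uses the flag-setting adds/adcs forms, bvc skips the trap while the overflow (V) flag is clear, and on overflow the bkpt 0xf103 fires, with the _ASM_EXTABLE entry steering the fault to PaX's handler. In plain C the enforced invariant looks roughly like this (a sketch using a GCC 5+ builtin, not the mechanism the patch uses):

	/* C-level sketch of 'adds ... bvc 3f ... bkpt 0xf103' */
	static inline int refcount_checked_add(int a, int b)
	{
		int r;

		if (__builtin_add_overflow(a, b, &r))	/* mirrors the adds + bvc pair */
			__builtin_trap();		/* mirrors bkpt 0xf103 */
		return r;
	}
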
1530diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1531index 75fe66b..ba3dee4 100644
1532--- a/arch/arm/include/asm/cache.h
1533+++ b/arch/arm/include/asm/cache.h
1534@@ -4,8 +4,10 @@
1535 #ifndef __ASMARM_CACHE_H
1536 #define __ASMARM_CACHE_H
1537
1538+#include <linux/const.h>
1539+
1540 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1541-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1542+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1543
1544 /*
1545 * Memory returned by kmalloc() may be used for DMA, so we must make
1546@@ -24,5 +26,6 @@
1547 #endif
1548
1549 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1550+#define __read_only __attribute__ ((__section__(".data..read_only")))
1551
1552 #endif
1553diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1554index 15f2d5b..43ffa53 100644
1555--- a/arch/arm/include/asm/cacheflush.h
1556+++ b/arch/arm/include/asm/cacheflush.h
1557@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1558 void (*dma_unmap_area)(const void *, size_t, int);
1559
1560 void (*dma_flush_range)(const void *, const void *);
1561-};
1562+} __no_const;
1563
1564 /*
1565 * Select the calling method
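
__no_const, applied above, is an annotation for the constify plugin enabled earlier in the Makefile: structures made up of function pointers are normally forced into read-only memory, and __no_const exempts the ones that genuinely get written at runtime (cpu_cache is filled in during CPU probing). Its counterpart __do_const, used further down on struct dma_ops, forces constification. An illustrative contrast:

	/* fine to constify: set up once, at compile time (illustrative) */
	static const struct file_operations foo_fops = { /* ... */ };

	/* needs __no_const: members assigned while booting */
	struct cpu_cache_fns cpu_cache;
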
1566diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1567index 6dcc164..b14d917 100644
1568--- a/arch/arm/include/asm/checksum.h
1569+++ b/arch/arm/include/asm/checksum.h
1570@@ -37,7 +37,19 @@ __wsum
1571 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1572
1573 __wsum
1574-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1575+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1576+
1577+static inline __wsum
1578+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1579+{
1580+ __wsum ret;
1581+ pax_open_userland();
1582+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1583+ pax_close_userland();
1584+ return ret;
1585+}
1586+
1587+
1588
1589 /*
1590 * Fold a partial checksum without adding pseudo headers
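
The wrapper above shows the recurring PAX_MEMORY_UDEREF discipline: userland is unreachable (via the ARM domain register) except inside an explicit pax_open_userland()/pax_close_userland() bracket, much as SMAP's stac/clac works on x86. The same pattern brackets every raw user access, sketched:

	/* illustrative: any raw user copy must sit inside the bracket */
	static unsigned long copy_from_user_bracketed(void *dst,
			const void __user *src, unsigned long n)
	{
		unsigned long ret;

		pax_open_userland();
		ret = __copy_from_user(dst, src, n);	/* user pages reachable only here */
		pax_close_userland();
		return ret;
	}
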
1591diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1592index 4f009c1..466c59b 100644
1593--- a/arch/arm/include/asm/cmpxchg.h
1594+++ b/arch/arm/include/asm/cmpxchg.h
1595@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1596
1597 #define xchg(ptr,x) \
1598 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1599+#define xchg_unchecked(ptr,x) \
1600+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1601
1602 #include <asm-generic/cmpxchg-local.h>
1603
1604diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1605index 6ddbe44..b5e38b1 100644
1606--- a/arch/arm/include/asm/domain.h
1607+++ b/arch/arm/include/asm/domain.h
1608@@ -48,18 +48,37 @@
1609 * Domain types
1610 */
1611 #define DOMAIN_NOACCESS 0
1612-#define DOMAIN_CLIENT 1
1613 #ifdef CONFIG_CPU_USE_DOMAINS
1614+#define DOMAIN_USERCLIENT 1
1615+#define DOMAIN_KERNELCLIENT 1
1616 #define DOMAIN_MANAGER 3
1617+#define DOMAIN_VECTORS DOMAIN_USER
1618 #else
1619+
1620+#ifdef CONFIG_PAX_KERNEXEC
1621 #define DOMAIN_MANAGER 1
1622+#define DOMAIN_KERNEXEC 3
1623+#else
1624+#define DOMAIN_MANAGER 1
1625+#endif
1626+
1627+#ifdef CONFIG_PAX_MEMORY_UDEREF
1628+#define DOMAIN_USERCLIENT 0
1629+#define DOMAIN_UDEREF 1
1630+#define DOMAIN_VECTORS DOMAIN_KERNEL
1631+#else
1632+#define DOMAIN_USERCLIENT 1
1633+#define DOMAIN_VECTORS DOMAIN_USER
1634+#endif
1635+#define DOMAIN_KERNELCLIENT 1
1636+
1637 #endif
1638
1639 #define domain_val(dom,type) ((type) << (2*(dom)))
1640
1641 #ifndef __ASSEMBLY__
1642
1643-#ifdef CONFIG_CPU_USE_DOMAINS
1644+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1645 static inline void set_domain(unsigned val)
1646 {
1647 asm volatile(
1648@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1649 isb();
1650 }
1651
1652-#define modify_domain(dom,type) \
1653- do { \
1654- struct thread_info *thread = current_thread_info(); \
1655- unsigned int domain = thread->cpu_domain; \
1656- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1657- thread->cpu_domain = domain | domain_val(dom, type); \
1658- set_domain(thread->cpu_domain); \
1659- } while (0)
1660-
1661+extern void modify_domain(unsigned int dom, unsigned int type);
1662 #else
1663 static inline void set_domain(unsigned val) { }
1664 static inline void modify_domain(unsigned dom, unsigned type) { }
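
domain_val(dom, type) above packs a 2-bit access type into the DACR slot of a given domain. Note the UDEREF trick: DOMAIN_USERCLIENT becomes 0, i.e. NOACCESS, so the user domain is closed by default and pax_open_userland() must flip it open. An illustrative DACR composition (DOMAIN_USER/KERNEL/IO are the standard domain indices defined earlier in this header, outside the hunk):

	/* illustrative kernel-mode DACR value under UDEREF */
	unsigned dacr = domain_val(DOMAIN_USER,   DOMAIN_NOACCESS)     |
	                domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) |
	                domain_val(DOMAIN_IO,     DOMAIN_KERNELCLIENT);
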
1665diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1666index f4b46d3..abc9b2b 100644
1667--- a/arch/arm/include/asm/elf.h
1668+++ b/arch/arm/include/asm/elf.h
1669@@ -114,7 +114,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1670 the loader. We need to make sure that it is out of the way of the program
1671 that it will "exec", and that there is sufficient room for the brk. */
1672
1673-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1674+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1675+
1676+#ifdef CONFIG_PAX_ASLR
1677+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1678+
1679+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1680+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1681+#endif
1682
1683 /* When the program starts, a1 contains a pointer to a function to be
1684 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1685@@ -124,10 +131,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1686 extern void elf_set_personality(const struct elf32_hdr *);
1687 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1688
1689-struct mm_struct;
1690-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1691-#define arch_randomize_brk arch_randomize_brk
1692-
1693 #ifdef CONFIG_MMU
1694 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1695 struct linux_binprm;
1696diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1697index de53547..52b9a28 100644
1698--- a/arch/arm/include/asm/fncpy.h
1699+++ b/arch/arm/include/asm/fncpy.h
1700@@ -81,7 +81,9 @@
1701 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1702 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1703 \
1704+ pax_open_kernel(); \
1705 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1706+ pax_close_kernel(); \
1707 flush_icache_range((unsigned long)(dest_buf), \
1708 (unsigned long)(dest_buf) + (size)); \
1709 \
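
pax_open_kernel()/pax_close_kernel() is the KERNEXEC-side twin of the userland bracket seen elsewhere in the patch: it briefly makes read-only kernel memory writable so fncpy() can plant the relocated function. The discipline, sketched:

	/* illustrative: writes to KERNEXEC-protected memory need the bracket */
	static void write_ro_kernel(void *ro_dest, const void *src, size_t size)
	{
		pax_open_kernel();
		memcpy(ro_dest, src, size);	/* would fault outside the bracket */
		pax_close_kernel();
	}
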
1710diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1711index e42cf59..7b94b8f 100644
1712--- a/arch/arm/include/asm/futex.h
1713+++ b/arch/arm/include/asm/futex.h
1714@@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1715 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1716 return -EFAULT;
1717
1718+ pax_open_userland();
1719+
1720 smp_mb();
1721 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1722 "1: ldrex %1, [%4]\n"
1723@@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1724 : "cc", "memory");
1725 smp_mb();
1726
1727+ pax_close_userland();
1728+
1729 *uval = val;
1730 return ret;
1731 }
1732@@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1733 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1734 return -EFAULT;
1735
1736+ pax_open_userland();
1737+
1738 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1739 "1: " TUSER(ldr) " %1, [%4]\n"
1740 " teq %1, %2\n"
1741@@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1742 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1743 : "cc", "memory");
1744
1745+ pax_close_userland();
1746+
1747 *uval = val;
1748 return ret;
1749 }
1750@@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1751 return -EFAULT;
1752
1753 pagefault_disable(); /* implies preempt_disable() */
1754+ pax_open_userland();
1755
1756 switch (op) {
1757 case FUTEX_OP_SET:
1758@@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1759 ret = -ENOSYS;
1760 }
1761
1762+ pax_close_userland();
1763 pagefault_enable(); /* subsumes preempt_enable() */
1764
1765 if (!ret) {
1766diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1767index 83eb2f7..ed77159 100644
1768--- a/arch/arm/include/asm/kmap_types.h
1769+++ b/arch/arm/include/asm/kmap_types.h
1770@@ -4,6 +4,6 @@
1771 /*
1772 * This is the "bare minimum". AIO seems to require this.
1773 */
1774-#define KM_TYPE_NR 16
1775+#define KM_TYPE_NR 17
1776
1777 #endif
1778diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1779index 9e614a1..3302cca 100644
1780--- a/arch/arm/include/asm/mach/dma.h
1781+++ b/arch/arm/include/asm/mach/dma.h
1782@@ -22,7 +22,7 @@ struct dma_ops {
1783 int (*residue)(unsigned int, dma_t *); /* optional */
1784 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1785 const char *type;
1786-};
1787+} __do_const;
1788
1789 struct dma_struct {
1790 void *addr; /* single DMA address */
1791diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1792index 2fe141f..192dc01 100644
1793--- a/arch/arm/include/asm/mach/map.h
1794+++ b/arch/arm/include/asm/mach/map.h
1795@@ -27,13 +27,16 @@ struct map_desc {
1796 #define MT_MINICLEAN 6
1797 #define MT_LOW_VECTORS 7
1798 #define MT_HIGH_VECTORS 8
1799-#define MT_MEMORY 9
1800+#define MT_MEMORY_RWX 9
1801 #define MT_ROM 10
1802-#define MT_MEMORY_NONCACHED 11
1803+#define MT_MEMORY_NONCACHED_RX 11
1804 #define MT_MEMORY_DTCM 12
1805 #define MT_MEMORY_ITCM 13
1806 #define MT_MEMORY_SO 14
1807 #define MT_MEMORY_DMA_READY 15
1808+#define MT_MEMORY_RW 16
1809+#define MT_MEMORY_RX 17
1810+#define MT_MEMORY_NONCACHED_RW 18
1811
1812 #ifdef CONFIG_MMU
1813 extern void iotable_init(struct map_desc *, int);
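
The renames above split the old catch-all MT_MEMORY into permission-separated mapping types: MT_MEMORY_RWX for the legacy writable-and-executable case, MT_MEMORY_RW for kernel data (never executable) and MT_MEMORY_RX for kernel text (never writable), plus matching noncached variants. This is the page-table side of KERNEXEC's W^X policy for the kernel image itself.
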
1814diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1815index f94784f..9a09a4a 100644
1816--- a/arch/arm/include/asm/outercache.h
1817+++ b/arch/arm/include/asm/outercache.h
1818@@ -35,7 +35,7 @@ struct outer_cache_fns {
1819 #endif
1820 void (*set_debug)(unsigned long);
1821 void (*resume)(void);
1822-};
1823+} __no_const;
1824
1825 extern struct outer_cache_fns outer_cache;
1826
1827diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1828index 4355f0e..c229913 100644
1829--- a/arch/arm/include/asm/page.h
1830+++ b/arch/arm/include/asm/page.h
1831@@ -114,7 +114,7 @@ struct cpu_user_fns {
1832 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1833 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1834 unsigned long vaddr, struct vm_area_struct *vma);
1835-};
1836+} __no_const;
1837
1838 #ifdef MULTI_USER
1839 extern struct cpu_user_fns cpu_user;
1840diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1841index 943504f..c37a730 100644
1842--- a/arch/arm/include/asm/pgalloc.h
1843+++ b/arch/arm/include/asm/pgalloc.h
1844@@ -17,6 +17,7 @@
1845 #include <asm/processor.h>
1846 #include <asm/cacheflush.h>
1847 #include <asm/tlbflush.h>
1848+#include <asm/system_info.h>
1849
1850 #define check_pgt_cache() do { } while (0)
1851
1852@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1853 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1854 }
1855
1856+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1857+{
1858+ pud_populate(mm, pud, pmd);
1859+}
1860+
1861 #else /* !CONFIG_ARM_LPAE */
1862
1863 /*
1864@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1865 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1866 #define pmd_free(mm, pmd) do { } while (0)
1867 #define pud_populate(mm,pmd,pte) BUG()
1868+#define pud_populate_kernel(mm,pmd,pte) BUG()
1869
1870 #endif /* CONFIG_ARM_LPAE */
1871
1872@@ -126,6 +133,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1873 __free_page(pte);
1874 }
1875
1876+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1877+{
1878+#ifdef CONFIG_ARM_LPAE
1879+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1880+#else
1881+ if (addr & SECTION_SIZE)
1882+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1883+ else
1884+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1885+#endif
1886+ flush_pmd_entry(pmdp);
1887+}
1888+
1889 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1890 pmdval_t prot)
1891 {
1892@@ -155,7 +175,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
1893 static inline void
1894 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
1895 {
1896- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
1897+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
1898 }
1899 #define pmd_pgtable(pmd) pmd_page(pmd)
1900
1901diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1902index 5cfba15..f415e1a 100644
1903--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1904+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1905@@ -20,12 +20,15 @@
1906 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
1907 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
1908 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
1909+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
1910 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
1911 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
1912 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
1913+
1914 /*
1915 * - section
1916 */
1917+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1918 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1919 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1920 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1921@@ -37,6 +40,7 @@
1922 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1923 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1924 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1925+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1926
1927 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1928 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1929@@ -66,6 +70,7 @@
1930 * - extended small page/tiny page
1931 */
1932 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1933+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1934 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1935 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1936 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1937diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1938index f97ee02..cc9fe9e 100644
1939--- a/arch/arm/include/asm/pgtable-2level.h
1940+++ b/arch/arm/include/asm/pgtable-2level.h
1941@@ -126,6 +126,9 @@
1942 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1943 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1944
1945+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1946+#define L_PTE_PXN (_AT(pteval_t, 0))
1947+
1948 /*
1949 * These are the memory types, defined to be compatible with
1950 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
1951diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1952index 626989f..9d67a33 100644
1953--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1954+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1955@@ -75,6 +75,7 @@
1956 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1957 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
1958 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
1959+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1960 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
1961
1962 /*
1963diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1964index 5689c18..eea12f9 100644
1965--- a/arch/arm/include/asm/pgtable-3level.h
1966+++ b/arch/arm/include/asm/pgtable-3level.h
1967@@ -82,6 +82,7 @@
1968 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
1969 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1970 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1971+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1972 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1973 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
1974 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
1975@@ -95,6 +96,7 @@
1976 /*
1977 * To be used in assembly code with the upper page attributes.
1978 */
1979+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1980 #define L_PTE_XN_HIGH (1 << (54 - 32))
1981 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1982
1983diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1984index 1571d12..b8a9b43 100644
1985--- a/arch/arm/include/asm/pgtable.h
1986+++ b/arch/arm/include/asm/pgtable.h
1987@@ -33,6 +33,9 @@
1988 #include <asm/pgtable-2level.h>
1989 #endif
1990
1991+#define ktla_ktva(addr) (addr)
1992+#define ktva_ktla(addr) (addr)
1993+
1994 /*
1995 * Just any arbitrary offset to the start of the vmalloc VM area: the
1996 * current 8MB value just means that there will be an 8MB "hole" after the
1997@@ -48,6 +51,9 @@
1998 #define LIBRARY_TEXT_START 0x0c000000
1999
2000 #ifndef __ASSEMBLY__
2001+extern pteval_t __supported_pte_mask;
2002+extern pmdval_t __supported_pmd_mask;
2003+
2004 extern void __pte_error(const char *file, int line, pte_t);
2005 extern void __pmd_error(const char *file, int line, pmd_t);
2006 extern void __pgd_error(const char *file, int line, pgd_t);
2007@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2008 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2009 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2010
2011+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2012+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2013+
2014+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2015+#include <asm/domain.h>
2016+#include <linux/thread_info.h>
2017+#include <linux/preempt.h>
2018+
2019+static inline int test_domain(int domain, int domaintype)
2020+{
2021+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2022+}
2023+#endif
2024+
2025+#ifdef CONFIG_PAX_KERNEXEC
2026+static inline unsigned long pax_open_kernel(void) {
2027+#ifdef CONFIG_ARM_LPAE
2028+ /* TODO */
2029+#else
2030+ preempt_disable();
2031+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2032+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2033+#endif
2034+ return 0;
2035+}
2036+
2037+static inline unsigned long pax_close_kernel(void) {
2038+#ifdef CONFIG_ARM_LPAE
2039+ /* TODO */
2040+#else
2041+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2042+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2043+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2044+ preempt_enable_no_resched();
2045+#endif
2046+ return 0;
2047+}
2048+#else
2049+static inline unsigned long pax_open_kernel(void) { return 0; }
2050+static inline unsigned long pax_close_kernel(void) { return 0; }
2051+#endif
2052+
2053 /*
2054 * This is the lowest virtual address we can permit any user space
2055 * mapping to be mapped at. This is particularly important for
2056@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2057 /*
2058 * The pgprot_* and protection_map entries will be fixed up in runtime
2059 * to include the cachable and bufferable bits based on memory policy,
2060- * as well as any architecture dependent bits like global/ASID and SMP
2061- * shared mapping bits.
2062+ * as well as any architecture dependent bits like global/ASID, PXN,
2063+ * and SMP shared mapping bits.
2064 */
2065 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2066
2067@@ -260,7 +308,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2068 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2069 {
2070 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2071- L_PTE_NONE | L_PTE_VALID;
2072+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2073 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2074 return pte;
2075 }
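
pax_open_kernel()/pax_close_kernel() above work by retyping DOMAIN_KERNEL in the Domain Access Control Register: each of the 16 domains owns a 2-bit DACR field, where "manager" bypasses page-table permissions and "client" enforces them. A rough model of the field update, using the standard architectural type values (the helper itself is illustrative, not from the patch):

enum dacr_type { DACR_NOACCESS = 0, DACR_CLIENT = 1, DACR_MANAGER = 3 };

static unsigned long dacr_set(unsigned long dacr, int domain, enum dacr_type type)
{
        dacr &= ~(3UL << (2 * domain));         /* clear the 2-bit field */
        return dacr | ((unsigned long)type << (2 * domain));
}

Flipping the kernel domain between the two types is what briefly lifts write protection inside an open/close window; the BUG_ON()s assert the window is never entered or left twice.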
2076diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
2077index 5324c11..bcae5f0 100644
2078--- a/arch/arm/include/asm/proc-fns.h
2079+++ b/arch/arm/include/asm/proc-fns.h
2080@@ -75,7 +75,7 @@ extern struct processor {
2081 unsigned int suspend_size;
2082 void (*do_suspend)(void *);
2083 void (*do_resume)(void *);
2084-} processor;
2085+} __do_const processor;
2086
2087 #ifndef MULTI_CPU
2088 extern void cpu_proc_init(void);
2089diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2090index c4ae171..ea0c0c2 100644
2091--- a/arch/arm/include/asm/psci.h
2092+++ b/arch/arm/include/asm/psci.h
2093@@ -29,7 +29,7 @@ struct psci_operations {
2094 int (*cpu_off)(struct psci_power_state state);
2095 int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
2096 int (*migrate)(unsigned long cpuid);
2097-};
2098+} __no_const;
2099
2100 extern struct psci_operations psci_ops;
2101 extern struct smp_operations psci_smp_ops;
2102diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2103index a8cae71c..65dd797 100644
2104--- a/arch/arm/include/asm/smp.h
2105+++ b/arch/arm/include/asm/smp.h
2106@@ -110,7 +110,7 @@ struct smp_operations {
2107 int (*cpu_disable)(unsigned int cpu);
2108 #endif
2109 #endif
2110-};
2111+} __no_const;
2112
2113 /*
2114 * set platform specific SMP operations
2115diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2116index df5e13d..97efb82 100644
2117--- a/arch/arm/include/asm/thread_info.h
2118+++ b/arch/arm/include/asm/thread_info.h
2119@@ -88,9 +88,9 @@ struct thread_info {
2120 .flags = 0, \
2121 .preempt_count = INIT_PREEMPT_COUNT, \
2122 .addr_limit = KERNEL_DS, \
2123- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2124- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2125- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2126+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2127+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2128+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2129 .restart_block = { \
2130 .fn = do_no_restart_syscall, \
2131 }, \
2132@@ -163,7 +163,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2133 #define TIF_SYSCALL_AUDIT 9
2134 #define TIF_SYSCALL_TRACEPOINT 10
2135 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2136-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2137+/* must stay within 8 bits of TIF_SYSCALL_TRACE so the combined mask
2138+ * still encodes as an ARM flexible second operand (rotated 8-bit immediate)
2139+ */
2140+#define TIF_GRSEC_SETXID 12
2141+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2142 #define TIF_USING_IWMMXT 17
2143 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2144 #define TIF_RESTORE_SIGMASK 20
2145@@ -176,10 +180,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2146 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2147 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2148 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2149+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2150
2151 /* Checks for any syscall work in entry-common.S */
2152 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2153- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2154+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2155
2156 /*
2157 * Change these and you break ASM code in entry-common.S
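
The comment on TIF_GRSEC_SETXID is an instruction-encoding constraint: an ARM flexible second operand is an 8-bit value rotated by an even amount, so every flag tested in one tst against _TIF_SYSCALL_WORK must fit inside a single such window. A compile-time restatement of the constraint, with bit positions from this hunk and TIF_SYSCALL_TRACE assumed to be bit 8 as on this kernel (the assert is illustrative):

#include <assert.h>

enum { TIF_SYSCALL_TRACE = 8, TIF_GRSEC_SETXID = 12 };

static_assert(TIF_GRSEC_SETXID - TIF_SYSCALL_TRACE < 8,
              "flag falls outside the 8-bit immediate window");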
2158diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2159index 72abdc5..9eba222 100644
2160--- a/arch/arm/include/asm/uaccess.h
2161+++ b/arch/arm/include/asm/uaccess.h
2162@@ -18,6 +18,7 @@
2163 #include <asm/domain.h>
2164 #include <asm/unified.h>
2165 #include <asm/compiler.h>
2166+#include <asm/pgtable.h>
2167
2168 #if __LINUX_ARM_ARCH__ < 6
2169 #include <asm-generic/uaccess-unaligned.h>
2170@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2171 static inline void set_fs(mm_segment_t fs)
2172 {
2173 current_thread_info()->addr_limit = fs;
2174- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2175+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2176 }
2177
2178 #define segment_eq(a,b) ((a) == (b))
2179
2180+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2181+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2182+
2183+static inline void pax_open_userland(void)
2184+{
2185+
2186+#ifdef CONFIG_PAX_MEMORY_UDEREF
2187+ if (segment_eq(get_fs(), USER_DS)) {
2188+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2189+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2190+ }
2191+#endif
2192+
2193+}
2194+
2195+static inline void pax_close_userland(void)
2196+{
2197+
2198+#ifdef CONFIG_PAX_MEMORY_UDEREF
2199+ if (segment_eq(get_fs(), USER_DS)) {
2200+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2201+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2202+ }
2203+#endif
2204+
2205+}
2206+
2207 #define __addr_ok(addr) ({ \
2208 unsigned long flag; \
2209 __asm__("cmp %2, %0; movlo %0, #0" \
2210@@ -150,8 +178,12 @@ extern int __get_user_4(void *);
2211
2212 #define get_user(x,p) \
2213 ({ \
2214+ int __e; \
2215 might_fault(); \
2216- __get_user_check(x,p); \
2217+ pax_open_userland(); \
2218+ __e = __get_user_check(x,p); \
2219+ pax_close_userland(); \
2220+ __e; \
2221 })
2222
2223 extern int __put_user_1(void *, unsigned int);
2224@@ -195,8 +227,12 @@ extern int __put_user_8(void *, unsigned long long);
2225
2226 #define put_user(x,p) \
2227 ({ \
2228+ int __e; \
2229 might_fault(); \
2230- __put_user_check(x,p); \
2231+ pax_open_userland(); \
2232+ __e = __put_user_check(x,p); \
2233+ pax_close_userland(); \
2234+ __e; \
2235 })
2236
2237 #else /* CONFIG_MMU */
2238@@ -237,13 +273,17 @@ static inline void set_fs(mm_segment_t fs)
2239 #define __get_user(x,ptr) \
2240 ({ \
2241 long __gu_err = 0; \
2242+ pax_open_userland(); \
2243 __get_user_err((x),(ptr),__gu_err); \
2244+ pax_close_userland(); \
2245 __gu_err; \
2246 })
2247
2248 #define __get_user_error(x,ptr,err) \
2249 ({ \
2250+ pax_open_userland(); \
2251 __get_user_err((x),(ptr),err); \
2252+ pax_close_userland(); \
2253 (void) 0; \
2254 })
2255
2256@@ -319,13 +359,17 @@ do { \
2257 #define __put_user(x,ptr) \
2258 ({ \
2259 long __pu_err = 0; \
2260+ pax_open_userland(); \
2261 __put_user_err((x),(ptr),__pu_err); \
2262+ pax_close_userland(); \
2263 __pu_err; \
2264 })
2265
2266 #define __put_user_error(x,ptr,err) \
2267 ({ \
2268+ pax_open_userland(); \
2269 __put_user_err((x),(ptr),err); \
2270+ pax_close_userland(); \
2271 (void) 0; \
2272 })
2273
2274@@ -425,11 +469,44 @@ do { \
2275
2276
2277 #ifdef CONFIG_MMU
2278-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2279-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2280+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2281+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2282+
2283+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2284+{
2285+ unsigned long ret;
2286+
2287+ check_object_size(to, n, false);
2288+ pax_open_userland();
2289+ ret = ___copy_from_user(to, from, n);
2290+ pax_close_userland();
2291+ return ret;
2292+}
2293+
2294+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2295+{
2296+ unsigned long ret;
2297+
2298+ check_object_size(from, n, true);
2299+ pax_open_userland();
2300+ ret = ___copy_to_user(to, from, n);
2301+ pax_close_userland();
2302+ return ret;
2303+}
2304+
2305 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2306-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2307+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2308 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2309+
2310+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2311+{
2312+ unsigned long ret;
2313+ pax_open_userland();
2314+ ret = ___clear_user(addr, n);
2315+ pax_close_userland();
2316+ return ret;
2317+}
2318+
2319 #else
2320 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2321 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2322@@ -438,6 +515,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2323
2324 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2325 {
2326+ if ((long)n < 0)
2327+ return n;
2328+
2329 if (access_ok(VERIFY_READ, from, n))
2330 n = __copy_from_user(to, from, n);
2331 else /* security hole - plug it */
2332@@ -447,6 +527,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2333
2334 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2335 {
2336+ if ((long)n < 0)
2337+ return n;
2338+
2339 if (access_ok(VERIFY_WRITE, to, n))
2340 n = __copy_to_user(to, from, n);
2341 return n;
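
The (long)n < 0 guard added to copy_from_user()/copy_to_user() rejects sizes that went "negative" through unchecked arithmetic before access_ok() ever sees them; on 32-bit ARM such a value is really an enormous unsigned length. A standalone toy demonstrating the idea (illustrative, not from the patch):

#include <stdio.h>

int main(void)
{
        unsigned long n = 16UL - 32UL;  /* e.g. len1 - len2 with len2 > len1 */

        if ((long)n < 0)                /* top bit set: clearly a bogus size */
                printf("rejected bogus size %lu\n", n);
        return 0;
}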
2342diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2343index 5af0ed1..cea83883 100644
2344--- a/arch/arm/include/uapi/asm/ptrace.h
2345+++ b/arch/arm/include/uapi/asm/ptrace.h
2346@@ -92,7 +92,7 @@
2347 * ARMv7 groups of PSR bits
2348 */
2349 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2350-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2351+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2352 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2353 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2354
2355diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2356index 60d3b73..e5a0f22 100644
2357--- a/arch/arm/kernel/armksyms.c
2358+++ b/arch/arm/kernel/armksyms.c
2359@@ -53,7 +53,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2360
2361 /* networking */
2362 EXPORT_SYMBOL(csum_partial);
2363-EXPORT_SYMBOL(csum_partial_copy_from_user);
2364+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2365 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2366 EXPORT_SYMBOL(__csum_ipv6_magic);
2367
2368@@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero);
2369 #ifdef CONFIG_MMU
2370 EXPORT_SYMBOL(copy_page);
2371
2372-EXPORT_SYMBOL(__copy_from_user);
2373-EXPORT_SYMBOL(__copy_to_user);
2374-EXPORT_SYMBOL(__clear_user);
2375+EXPORT_SYMBOL(___copy_from_user);
2376+EXPORT_SYMBOL(___copy_to_user);
2377+EXPORT_SYMBOL(___clear_user);
2378
2379 EXPORT_SYMBOL(__get_user_1);
2380 EXPORT_SYMBOL(__get_user_2);
2381diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2382index ec3e5cf..b450ee3 100644
2383--- a/arch/arm/kernel/entry-armv.S
2384+++ b/arch/arm/kernel/entry-armv.S
2385@@ -47,6 +47,87 @@
2386 9997:
2387 .endm
2388
2389+ .macro pax_enter_kernel
2390+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2391+ @ make aligned space for saved DACR
2392+ sub sp, sp, #8
2393+ @ save regs
2394+ stmdb sp!, {r1, r2}
2395+ @ read DACR from cpu_domain into r1
2396+ mov r2, sp
2397+ @ assume 8K pages, since we have to split the immediate in two
2398+ bic r2, r2, #(0x1fc0)
2399+ bic r2, r2, #(0x3f)
2400+ ldr r1, [r2, #TI_CPU_DOMAIN]
2401+ @ store old DACR on stack
2402+ str r1, [sp, #8]
2403+#ifdef CONFIG_PAX_KERNEXEC
2404+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2405+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2406+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2407+#endif
2408+#ifdef CONFIG_PAX_MEMORY_UDEREF
2409+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2410+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2411+#endif
2412+ @ write r1 to current_thread_info()->cpu_domain
2413+ str r1, [r2, #TI_CPU_DOMAIN]
2414+ @ write r1 to DACR
2415+ mcr p15, 0, r1, c3, c0, 0
2416+ @ instruction sync
2417+ instr_sync
2418+ @ restore regs
2419+ ldmia sp!, {r1, r2}
2420+#endif
2421+ .endm
2422+
2423+ .macro pax_open_userland
2424+#ifdef CONFIG_PAX_MEMORY_UDEREF
2425+ @ save regs
2426+ stmdb sp!, {r0, r1}
2427+ @ read DACR from cpu_domain into r1
2428+ mov r0, sp
2429+ @ assume 8K pages, since we have to split the immediate in two
2430+ bic r0, r0, #(0x1fc0)
2431+ bic r0, r0, #(0x3f)
2432+ ldr r1, [r0, #TI_CPU_DOMAIN]
2433+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2434+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2435+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2436+ @ write r1 to current_thread_info()->cpu_domain
2437+ str r1, [r0, #TI_CPU_DOMAIN]
2438+ @ write r1 to DACR
2439+ mcr p15, 0, r1, c3, c0, 0
2440+ @ instruction sync
2441+ instr_sync
2442+ @ restore regs
2443+ ldmia sp!, {r0, r1}
2444+#endif
2445+ .endm
2446+
2447+ .macro pax_close_userland
2448+#ifdef CONFIG_PAX_MEMORY_UDEREF
2449+ @ save regs
2450+ stmdb sp!, {r0, r1}
2451+ @ read DACR from cpu_domain into r1
2452+ mov r0, sp
2453+ @ assume 8K pages, since we have to split the immediate in two
2454+ bic r0, r0, #(0x1fc0)
2455+ bic r0, r0, #(0x3f)
2456+ ldr r1, [r0, #TI_CPU_DOMAIN]
2457+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2458+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2459+ @ write r1 to current_thread_info()->cpu_domain
2460+ str r1, [r0, #TI_CPU_DOMAIN]
2461+ @ write r1 to DACR
2462+ mcr p15, 0, r1, c3, c0, 0
2463+ @ instruction sync
2464+ instr_sync
2465+ @ restore regs
2466+ ldmia sp!, {r0, r1}
2467+#endif
2468+ .endm
2469+
2470 .macro pabt_helper
2471 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2472 #ifdef MULTI_PABORT
2473@@ -89,11 +170,15 @@
2474 * Invalid mode handlers
2475 */
2476 .macro inv_entry, reason
2477+
2478+ pax_enter_kernel
2479+
2480 sub sp, sp, #S_FRAME_SIZE
2481 ARM( stmib sp, {r1 - lr} )
2482 THUMB( stmia sp, {r0 - r12} )
2483 THUMB( str sp, [sp, #S_SP] )
2484 THUMB( str lr, [sp, #S_LR] )
2485+
2486 mov r1, #\reason
2487 .endm
2488
2489@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2490 .macro svc_entry, stack_hole=0
2491 UNWIND(.fnstart )
2492 UNWIND(.save {r0 - pc} )
2493+
2494+ pax_enter_kernel
2495+
2496 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2497+
2498 #ifdef CONFIG_THUMB2_KERNEL
2499 SPFIX( str r0, [sp] ) @ temporarily saved
2500 SPFIX( mov r0, sp )
2501@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2502 ldmia r0, {r3 - r5}
2503 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2504 mov r6, #-1 @ "" "" "" ""
2505+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2506+ @ offset sp by 8 as done in pax_enter_kernel
2507+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2508+#else
2509 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2510+#endif
2511 SPFIX( addeq r2, r2, #4 )
2512 str r3, [sp, #-4]! @ save the "real" r0 copied
2513 @ from the exception stack
2514@@ -317,6 +411,9 @@ ENDPROC(__pabt_svc)
2515 .macro usr_entry
2516 UNWIND(.fnstart )
2517 UNWIND(.cantunwind ) @ don't unwind the user space
2518+
2519+ pax_enter_kernel_user
2520+
2521 sub sp, sp, #S_FRAME_SIZE
2522 ARM( stmib sp, {r1 - r12} )
2523 THUMB( stmia sp, {r0 - r12} )
2524@@ -416,7 +513,9 @@ __und_usr:
2525 tst r3, #PSR_T_BIT @ Thumb mode?
2526 bne __und_usr_thumb
2527 sub r4, r2, #4 @ ARM instr at LR - 4
2528+ pax_open_userland
2529 1: ldrt r0, [r4]
2530+ pax_close_userland
2531 #ifdef CONFIG_CPU_ENDIAN_BE8
2532 rev r0, r0 @ little endian instruction
2533 #endif
2534@@ -451,10 +550,14 @@ __und_usr_thumb:
2535 */
2536 .arch armv6t2
2537 #endif
2538+ pax_open_userland
2539 2: ldrht r5, [r4]
2540+ pax_close_userland
2541 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2542 blo __und_usr_fault_16 @ 16bit undefined instruction
2543+ pax_open_userland
2544 3: ldrht r0, [r2]
2545+ pax_close_userland
2546 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2547 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2548 orr r0, r0, r5, lsl #16
2549@@ -483,7 +586,8 @@ ENDPROC(__und_usr)
2550 */
2551 .pushsection .fixup, "ax"
2552 .align 2
2553-4: mov pc, r9
2554+4: pax_close_userland
2555+ mov pc, r9
2556 .popsection
2557 .pushsection __ex_table,"a"
2558 .long 1b, 4b
2559@@ -693,7 +797,7 @@ ENTRY(__switch_to)
2560 THUMB( str lr, [ip], #4 )
2561 ldr r4, [r2, #TI_TP_VALUE]
2562 ldr r5, [r2, #TI_TP_VALUE + 4]
2563-#ifdef CONFIG_CPU_USE_DOMAINS
2564+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2565 ldr r6, [r2, #TI_CPU_DOMAIN]
2566 #endif
2567 switch_tls r1, r4, r5, r3, r7
2568@@ -702,7 +806,7 @@ ENTRY(__switch_to)
2569 ldr r8, =__stack_chk_guard
2570 ldr r7, [r7, #TSK_STACK_CANARY]
2571 #endif
2572-#ifdef CONFIG_CPU_USE_DOMAINS
2573+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2574 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2575 #endif
2576 mov r5, r0
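
The repeated bic pair in these macros is current_thread_info() by hand: with 8K two-page kernel stacks, clearing the low 13 bits of sp yields the thread_info base, and 0x1fc0 | 0x3f == 0x1fff is split across two instructions only because ARM cannot encode that immediate in one. The same computation in C, under the same 8K-stack assumption:

#define THREAD_SIZE     8192UL          /* assumed, per the macro comments */

struct thread_info;

static inline struct thread_info *ti_from_sp(unsigned long sp)
{
        return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}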
2577diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2578index bc6bd96..bd026cb 100644
2579--- a/arch/arm/kernel/entry-common.S
2580+++ b/arch/arm/kernel/entry-common.S
2581@@ -10,18 +10,46 @@
2582
2583 #include <asm/unistd.h>
2584 #include <asm/ftrace.h>
2585+#include <asm/domain.h>
2586 #include <asm/unwind.h>
2587
2588+#include "entry-header.S"
2589+
2590 #ifdef CONFIG_NEED_RET_TO_USER
2591 #include <mach/entry-macro.S>
2592 #else
2593 .macro arch_ret_to_user, tmp1, tmp2
2594+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2595+ @ save regs
2596+ stmdb sp!, {r1, r2}
2597+ @ read DACR from cpu_domain into r1
2598+ mov r2, sp
2599+ @ assume 8K pages, since we have to split the immediate in two
2600+ bic r2, r2, #(0x1fc0)
2601+ bic r2, r2, #(0x3f)
2602+ ldr r1, [r2, #TI_CPU_DOMAIN]
2603+#ifdef CONFIG_PAX_KERNEXEC
2604+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2605+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2606+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2607+#endif
2608+#ifdef CONFIG_PAX_MEMORY_UDEREF
2609+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2610+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2611+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2612+#endif
2613+ @ write r1 to current_thread_info()->cpu_domain
2614+ str r1, [r2, #TI_CPU_DOMAIN]
2615+ @ write r1 to DACR
2616+ mcr p15, 0, r1, c3, c0, 0
2617+ @ instruction sync
2618+ instr_sync
2619+ @ restore regs
2620+ ldmia sp!, {r1, r2}
2621+#endif
2622 .endm
2623 #endif
2624
2625-#include "entry-header.S"
2626-
2627-
2628 .align 5
2629 /*
2630 * This is the fast syscall return path. We do as little as
2631@@ -413,6 +441,12 @@ ENTRY(vector_swi)
2632 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2633 #endif
2634
2635+ /*
2636+ * doing this here avoids the performance hit of wrapping the code above,
2637+ * which directly dereferences userland to parse the SWI instruction
2638+ */
2639+ pax_enter_kernel_user
2640+
2641 adr tbl, sys_call_table @ load syscall table pointer
2642
2643 #if defined(CONFIG_OABI_COMPAT)
2644diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2645index 39f89fb..d612bd9 100644
2646--- a/arch/arm/kernel/entry-header.S
2647+++ b/arch/arm/kernel/entry-header.S
2648@@ -184,6 +184,60 @@
2649 msr cpsr_c, \rtemp @ switch back to the SVC mode
2650 .endm
2651
2652+ .macro pax_enter_kernel_user
2653+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2654+ @ save regs
2655+ stmdb sp!, {r0, r1}
2656+ @ read DACR from cpu_domain into r1
2657+ mov r0, sp
2658+ @ assume 8K pages, since we have to split the immediate in two
2659+ bic r0, r0, #(0x1fc0)
2660+ bic r0, r0, #(0x3f)
2661+ ldr r1, [r0, #TI_CPU_DOMAIN]
2662+#ifdef CONFIG_PAX_MEMORY_UDEREF
2663+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2664+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2665+#endif
2666+#ifdef CONFIG_PAX_KERNEXEC
2667+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2668+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2669+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2670+#endif
2671+ @ write r1 to current_thread_info()->cpu_domain
2672+ str r1, [r0, #TI_CPU_DOMAIN]
2673+ @ write r1 to DACR
2674+ mcr p15, 0, r1, c3, c0, 0
2675+ @ instruction sync
2676+ instr_sync
2677+ @ restore regs
2678+ ldmia sp!, {r0, r1}
2679+#endif
2680+ .endm
2681+
2682+ .macro pax_exit_kernel
2683+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2684+ @ save regs
2685+ stmdb sp!, {r0, r1}
2686+ @ read old DACR from stack into r1
2687+ ldr r1, [sp, #(8 + S_SP)]
2688+ sub r1, r1, #8
2689+ ldr r1, [r1]
2690+
2691+ @ write r1 to current_thread_info()->cpu_domain
2692+ mov r0, sp
2693+ @ assume 8K pages, since we have to split the immediate in two
2694+ bic r0, r0, #(0x1fc0)
2695+ bic r0, r0, #(0x3f)
2696+ str r1, [r0, #TI_CPU_DOMAIN]
2697+ @ write r1 to DACR
2698+ mcr p15, 0, r1, c3, c0, 0
2699+ @ instruction sync
2700+ instr_sync
2701+ @ restore regs
2702+ ldmia sp!, {r0, r1}
2703+#endif
2704+ .endm
2705+
2706 #ifndef CONFIG_THUMB2_KERNEL
2707 .macro svc_exit, rpsr, irq = 0
2708 .if \irq != 0
2709@@ -203,6 +257,9 @@
2710 blne trace_hardirqs_off
2711 #endif
2712 .endif
2713+
2714+ pax_exit_kernel
2715+
2716 msr spsr_cxsf, \rpsr
2717 #if defined(CONFIG_CPU_V6)
2718 ldr r0, [sp]
2719@@ -266,6 +323,9 @@
2720 blne trace_hardirqs_off
2721 #endif
2722 .endif
2723+
2724+ pax_exit_kernel
2725+
2726 ldr lr, [sp, #S_SP] @ top of the stack
2727 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2728 clrex @ clear the exclusive monitor
2729diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2730index 918875d..cd5fa27 100644
2731--- a/arch/arm/kernel/fiq.c
2732+++ b/arch/arm/kernel/fiq.c
2733@@ -87,7 +87,10 @@ void set_fiq_handler(void *start, unsigned int length)
2734 void *base = vectors_page;
2735 unsigned offset = FIQ_OFFSET;
2736
2737+ pax_open_kernel();
2738 memcpy(base + offset, start, length);
2739+ pax_close_kernel();
2740+
2741 if (!cache_is_vipt_nonaliasing())
2742 flush_icache_range((unsigned long)base + offset, offset +
2743 length);
2744diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2745index 476de57..4857a76 100644
2746--- a/arch/arm/kernel/head.S
2747+++ b/arch/arm/kernel/head.S
2748@@ -52,7 +52,9 @@
2749 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
2750
2751 .macro pgtbl, rd, phys
2752- add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
2753+ mov \rd, #TEXT_OFFSET
2754+ sub \rd, #PG_DIR_SIZE
2755+ add \rd, \rd, \phys
2756 .endm
2757
2758 /*
2759@@ -432,7 +434,7 @@ __enable_mmu:
2760 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2761 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2762 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2763- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2764+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2765 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2766 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2767 #endif
2768diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2769index 084dc88..fce4e68 100644
2770--- a/arch/arm/kernel/module.c
2771+++ b/arch/arm/kernel/module.c
2772@@ -37,12 +37,39 @@
2773 #endif
2774
2775 #ifdef CONFIG_MMU
2776-void *module_alloc(unsigned long size)
2777+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2778 {
2779+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2780+ return NULL;
2781 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2782- GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
2783+ GFP_KERNEL, prot, -1,
2784 __builtin_return_address(0));
2785 }
2786+
2787+void *module_alloc(unsigned long size)
2788+{
2789+
2790+#ifdef CONFIG_PAX_KERNEXEC
2791+ return __module_alloc(size, PAGE_KERNEL);
2792+#else
2793+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2794+#endif
2795+
2796+}
2797+
2798+#ifdef CONFIG_PAX_KERNEXEC
2799+void module_free_exec(struct module *mod, void *module_region)
2800+{
2801+ module_free(mod, module_region);
2802+}
2803+EXPORT_SYMBOL(module_free_exec);
2804+
2805+void *module_alloc_exec(unsigned long size)
2806+{
2807+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2808+}
2809+EXPORT_SYMBOL(module_alloc_exec);
2810+#endif
2811 #endif
2812
2813 int
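
The module_alloc() split above applies a W^X policy to modules: under KERNEXEC, ordinary module memory is allocated PAGE_KERNEL (non-executable) and executable text goes through module_alloc_exec() instead. A userspace analogue of the same discipline, sketched with POSIX mmap()/mprotect() (illustrative, not from the patch):

#include <string.h>
#include <sys/mman.h>

void *alloc_code(const void *image, size_t len)
{
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return NULL;
        memcpy(p, image, len);          /* populate while writable... */
        if (mprotect(p, len, PROT_READ | PROT_EXEC)) {  /* ...then lock to r-x */
                munmap(p, len);
                return NULL;
        }
        return p;                       /* never writable and executable at once */
}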
2814diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2815index 07314af..c46655c 100644
2816--- a/arch/arm/kernel/patch.c
2817+++ b/arch/arm/kernel/patch.c
2818@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2819 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
2820 int size;
2821
2822+ pax_open_kernel();
2823 if (thumb2 && __opcode_is_thumb16(insn)) {
2824 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
2825 size = sizeof(u16);
2826@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2827 *(u32 *)addr = insn;
2828 size = sizeof(u32);
2829 }
2830+ pax_close_kernel();
2831
2832 flush_icache_range((uintptr_t)(addr),
2833 (uintptr_t)(addr) + size);
2834diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2835index 92f7b15..7048500 100644
2836--- a/arch/arm/kernel/process.c
2837+++ b/arch/arm/kernel/process.c
2838@@ -217,6 +217,7 @@ void machine_power_off(void)
2839
2840 if (pm_power_off)
2841 pm_power_off();
2842+ BUG();
2843 }
2844
2845 /*
2846@@ -230,7 +231,7 @@ void machine_power_off(void)
2847 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2848 * to use. Implementing such co-ordination would be essentially impossible.
2849 */
2850-void machine_restart(char *cmd)
2851+__noreturn void machine_restart(char *cmd)
2852 {
2853 local_irq_disable();
2854 smp_send_stop();
2855@@ -253,8 +254,8 @@ void __show_regs(struct pt_regs *regs)
2856
2857 show_regs_print_info(KERN_DEFAULT);
2858
2859- print_symbol("PC is at %s\n", instruction_pointer(regs));
2860- print_symbol("LR is at %s\n", regs->ARM_lr);
2861+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2862+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
2863 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2864 "sp : %08lx ip : %08lx fp : %08lx\n",
2865 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2866@@ -425,12 +426,6 @@ unsigned long get_wchan(struct task_struct *p)
2867 return 0;
2868 }
2869
2870-unsigned long arch_randomize_brk(struct mm_struct *mm)
2871-{
2872- unsigned long range_end = mm->brk + 0x02000000;
2873- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2874-}
2875-
2876 #ifdef CONFIG_MMU
2877 #ifdef CONFIG_KUSER_HELPERS
2878 /*
2879@@ -446,7 +441,7 @@ static struct vm_area_struct gate_vma = {
2880
2881 static int __init gate_vma_init(void)
2882 {
2883- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2884+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2885 return 0;
2886 }
2887 arch_initcall(gate_vma_init);
2888@@ -472,41 +467,16 @@ int in_gate_area_no_mm(unsigned long addr)
2889
2890 const char *arch_vma_name(struct vm_area_struct *vma)
2891 {
2892- return is_gate_vma(vma) ? "[vectors]" :
2893- (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
2894- "[sigpage]" : NULL;
2895+ return is_gate_vma(vma) ? "[vectors]" : NULL;
2896 }
2897
2898-static struct page *signal_page;
2899-extern struct page *get_signal_page(void);
2900-
2901 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2902 {
2903 struct mm_struct *mm = current->mm;
2904- unsigned long addr;
2905- int ret;
2906-
2907- if (!signal_page)
2908- signal_page = get_signal_page();
2909- if (!signal_page)
2910- return -ENOMEM;
2911
2912 down_write(&mm->mmap_sem);
2913- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
2914- if (IS_ERR_VALUE(addr)) {
2915- ret = addr;
2916- goto up_fail;
2917- }
2918-
2919- ret = install_special_mapping(mm, addr, PAGE_SIZE,
2920- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
2921- &signal_page);
2922-
2923- if (ret == 0)
2924- mm->context.sigpage = addr;
2925-
2926- up_fail:
2927+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
2928 up_write(&mm->mmap_sem);
2929- return ret;
2930+ return 0;
2931 }
2932 #endif
2933diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2934index 4693188..4596c5e 100644
2935--- a/arch/arm/kernel/psci.c
2936+++ b/arch/arm/kernel/psci.c
2937@@ -24,7 +24,7 @@
2938 #include <asm/opcodes-virt.h>
2939 #include <asm/psci.h>
2940
2941-struct psci_operations psci_ops;
2942+struct psci_operations psci_ops __read_only;
2943
2944 static int (*invoke_psci_fn)(u32, u32, u32, u32);
2945
2946diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
2947index 0dd3b79..e018f64 100644
2948--- a/arch/arm/kernel/ptrace.c
2949+++ b/arch/arm/kernel/ptrace.c
2950@@ -929,10 +929,19 @@ static int tracehook_report_syscall(struct pt_regs *regs,
2951 return current_thread_info()->syscall;
2952 }
2953
2954+#ifdef CONFIG_GRKERNSEC_SETXID
2955+extern void gr_delayed_cred_worker(void);
2956+#endif
2957+
2958 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
2959 {
2960 current_thread_info()->syscall = scno;
2961
2962+#ifdef CONFIG_GRKERNSEC_SETXID
2963+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
2964+ gr_delayed_cred_worker();
2965+#endif
2966+
2967 /* Do the secure computing check first; failures should be fast. */
2968 if (secure_computing(scno) == -1)
2969 return -1;
2970diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
2971index 0e1e2b3..c0e821d 100644
2972--- a/arch/arm/kernel/setup.c
2973+++ b/arch/arm/kernel/setup.c
2974@@ -98,21 +98,23 @@ EXPORT_SYMBOL(system_serial_high);
2975 unsigned int elf_hwcap __read_mostly;
2976 EXPORT_SYMBOL(elf_hwcap);
2977
2978+pteval_t __supported_pte_mask __read_only;
2979+pmdval_t __supported_pmd_mask __read_only;
2980
2981 #ifdef MULTI_CPU
2982-struct processor processor __read_mostly;
2983+struct processor processor;
2984 #endif
2985 #ifdef MULTI_TLB
2986-struct cpu_tlb_fns cpu_tlb __read_mostly;
2987+struct cpu_tlb_fns cpu_tlb __read_only;
2988 #endif
2989 #ifdef MULTI_USER
2990-struct cpu_user_fns cpu_user __read_mostly;
2991+struct cpu_user_fns cpu_user __read_only;
2992 #endif
2993 #ifdef MULTI_CACHE
2994-struct cpu_cache_fns cpu_cache __read_mostly;
2995+struct cpu_cache_fns cpu_cache __read_only;
2996 #endif
2997 #ifdef CONFIG_OUTER_CACHE
2998-struct outer_cache_fns outer_cache __read_mostly;
2999+struct outer_cache_fns outer_cache __read_only;
3000 EXPORT_SYMBOL(outer_cache);
3001 #endif
3002
3003@@ -245,9 +247,13 @@ static int __get_cpu_architecture(void)
3004 asm("mrc p15, 0, %0, c0, c1, 4"
3005 : "=r" (mmfr0));
3006 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3007- (mmfr0 & 0x000000f0) >= 0x00000030)
3008+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3009 cpu_arch = CPU_ARCH_ARMv7;
3010- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3011+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3012+ __supported_pte_mask |= L_PTE_PXN;
3013+ __supported_pmd_mask |= PMD_PXNTABLE;
3014+ }
3015+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3016 (mmfr0 & 0x000000f0) == 0x00000020)
3017 cpu_arch = CPU_ARCH_ARMv6;
3018 else
3019@@ -571,7 +577,7 @@ static void __init setup_processor(void)
3020 __cpu_architecture = __get_cpu_architecture();
3021
3022 #ifdef MULTI_CPU
3023- processor = *list->proc;
3024+ memcpy((void *)&processor, list->proc, sizeof processor);
3025 #endif
3026 #ifdef MULTI_TLB
3027 cpu_tlb = *list->tlb;
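
The setup.c hunk gates PXN support on the VMSA field (bits [3:0]) of ID_MMFR0, turning on L_PTE_PXN and PMD_PXNTABLE only for the field values it accepts. The same test extracted as a helper (logic copied from the hunk; the name is illustrative):

static int vmsa_supports_pxn(unsigned int mmfr0)
{
        unsigned int vmsa = mmfr0 & 0xf;        /* ID_MMFR0.VMSA */

        return vmsa == 4 || vmsa == 5;          /* the values accepted above */
}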
3028diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3029index ab33042..11248a8 100644
3030--- a/arch/arm/kernel/signal.c
3031+++ b/arch/arm/kernel/signal.c
3032@@ -45,8 +45,6 @@ static const unsigned long sigreturn_codes[7] = {
3033 MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
3034 };
3035
3036-static unsigned long signal_return_offset;
3037-
3038 #ifdef CONFIG_CRUNCH
3039 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3040 {
3041@@ -411,8 +409,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3042 * except when the MPU has protected the vectors
3043 * page from PL0
3044 */
3045- retcode = mm->context.sigpage + signal_return_offset +
3046- (idx << 2) + thumb;
3047+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3048 } else
3049 #endif
3050 {
3051@@ -616,33 +613,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3052 } while (thread_flags & _TIF_WORK_MASK);
3053 return 0;
3054 }
3055-
3056-struct page *get_signal_page(void)
3057-{
3058- unsigned long ptr;
3059- unsigned offset;
3060- struct page *page;
3061- void *addr;
3062-
3063- page = alloc_pages(GFP_KERNEL, 0);
3064-
3065- if (!page)
3066- return NULL;
3067-
3068- addr = page_address(page);
3069-
3070- /* Give the signal return code some randomness */
3071- offset = 0x200 + (get_random_int() & 0x7fc);
3072- signal_return_offset = offset;
3073-
3074- /*
3075- * Copy signal return handlers into the vector page, and
3076- * set sigreturn to be a pointer to these.
3077- */
3078- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3079-
3080- ptr = (unsigned long)addr + offset;
3081- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3082-
3083- return page;
3084-}
3085diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3086index 72024ea..ae302dd 100644
3087--- a/arch/arm/kernel/smp.c
3088+++ b/arch/arm/kernel/smp.c
3089@@ -70,7 +70,7 @@ enum ipi_msg_type {
3090
3091 static DECLARE_COMPLETION(cpu_running);
3092
3093-static struct smp_operations smp_ops;
3094+static struct smp_operations smp_ops __read_only;
3095
3096 void __init smp_set_ops(struct smp_operations *ops)
3097 {
3098diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3099index 65ed63f..430c478 100644
3100--- a/arch/arm/kernel/traps.c
3101+++ b/arch/arm/kernel/traps.c
3102@@ -55,7 +55,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3103 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3104 {
3105 #ifdef CONFIG_KALLSYMS
3106- printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3107+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3108 #else
3109 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3110 #endif
3111@@ -257,6 +257,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3112 static int die_owner = -1;
3113 static unsigned int die_nest_count;
3114
3115+extern void gr_handle_kernel_exploit(void);
3116+
3117 static unsigned long oops_begin(void)
3118 {
3119 int cpu;
3120@@ -299,6 +301,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3121 panic("Fatal exception in interrupt");
3122 if (panic_on_oops)
3123 panic("Fatal exception");
3124+
3125+ gr_handle_kernel_exploit();
3126+
3127 if (signr)
3128 do_exit(signr);
3129 }
3130@@ -629,7 +634,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
3131 * The user helper at 0xffff0fe0 must be used instead.
3132 * (see entry-armv.S for details)
3133 */
3134+ pax_open_kernel();
3135 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
3136+ pax_close_kernel();
3137 }
3138 return 0;
3139
3140@@ -886,7 +893,11 @@ void __init early_trap_init(void *vectors_base)
3141 kuser_init(vectors_base);
3142
3143 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3144- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3145+
3146+#ifndef CONFIG_PAX_MEMORY_UDEREF
3147+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3148+#endif
3149+
3150 #else /* ifndef CONFIG_CPU_V7M */
3151 /*
3152 * on V7-M there is no need to copy the vector table to a dedicated
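
The traps.c change shows the open/close pair from pgtable.h in action: with KERNEXEC the vector page is mapped read-only, so the single word store for TLS emulation is bracketed the same way fiq.c and patch.c are earlier in this patch. The shape of the pattern, with a placeholder standing in for the 0xffff0ff0 slot:

static void write_ro_word(unsigned int *slot, unsigned int value)
{
        pax_open_kernel();      /* retype DOMAIN_KERNEL so the store is allowed */
        *slot = value;          /* e.g. the TLS slot written in the hunk */
        pax_close_kernel();     /* restore enforcement */
}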
3153diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3154index 7bcee5c..e2f3249 100644
3155--- a/arch/arm/kernel/vmlinux.lds.S
3156+++ b/arch/arm/kernel/vmlinux.lds.S
3157@@ -8,7 +8,11 @@
3158 #include <asm/thread_info.h>
3159 #include <asm/memory.h>
3160 #include <asm/page.h>
3161-
3162+
3163+#ifdef CONFIG_PAX_KERNEXEC
3164+#include <asm/pgtable.h>
3165+#endif
3166+
3167 #define PROC_INFO \
3168 . = ALIGN(4); \
3169 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3170@@ -34,7 +38,7 @@
3171 #endif
3172
3173 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3174- defined(CONFIG_GENERIC_BUG)
3175+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3176 #define ARM_EXIT_KEEP(x) x
3177 #define ARM_EXIT_DISCARD(x)
3178 #else
3179@@ -90,6 +94,11 @@ SECTIONS
3180 _text = .;
3181 HEAD_TEXT
3182 }
3183+
3184+#ifdef CONFIG_PAX_KERNEXEC
3185+ . = ALIGN(1<<SECTION_SHIFT);
3186+#endif
3187+
3188 .text : { /* Real text segment */
3189 _stext = .; /* Text and read-only data */
3190 __exception_text_start = .;
3191@@ -112,6 +121,8 @@ SECTIONS
3192 ARM_CPU_KEEP(PROC_INFO)
3193 }
3194
3195+ _etext = .; /* End of text section */
3196+
3197 RO_DATA(PAGE_SIZE)
3198
3199 . = ALIGN(4);
3200@@ -142,7 +153,9 @@ SECTIONS
3201
3202 NOTES
3203
3204- _etext = .; /* End of text and rodata section */
3205+#ifdef CONFIG_PAX_KERNEXEC
3206+ . = ALIGN(1<<SECTION_SHIFT);
3207+#endif
3208
3209 #ifndef CONFIG_XIP_KERNEL
3210 . = ALIGN(PAGE_SIZE);
3211@@ -220,6 +233,11 @@ SECTIONS
3212 . = PAGE_OFFSET + TEXT_OFFSET;
3213 #else
3214 __init_end = .;
3215+
3216+#ifdef CONFIG_PAX_KERNEXEC
3217+ . = ALIGN(1<<SECTION_SHIFT);
3218+#endif
3219+
3220 . = ALIGN(THREAD_SIZE);
3221 __data_loc = .;
3222 #endif
3223diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3224index 9c697db..115237f 100644
3225--- a/arch/arm/kvm/arm.c
3226+++ b/arch/arm/kvm/arm.c
3227@@ -56,7 +56,7 @@ static unsigned long hyp_default_vectors;
3228 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3229
3230 /* The VMID used in the VTTBR */
3231-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3232+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3233 static u8 kvm_next_vmid;
3234 static DEFINE_SPINLOCK(kvm_vmid_lock);
3235
3236@@ -396,7 +396,7 @@ void force_vm_exit(const cpumask_t *mask)
3237 */
3238 static bool need_new_vmid_gen(struct kvm *kvm)
3239 {
3240- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3241+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3242 }
3243
3244 /**
3245@@ -429,7 +429,7 @@ static void update_vttbr(struct kvm *kvm)
3246
3247 /* First user of a new VMID generation? */
3248 if (unlikely(kvm_next_vmid == 0)) {
3249- atomic64_inc(&kvm_vmid_gen);
3250+ atomic64_inc_unchecked(&kvm_vmid_gen);
3251 kvm_next_vmid = 1;
3252
3253 /*
3254@@ -446,7 +446,7 @@ static void update_vttbr(struct kvm *kvm)
3255 kvm_call_hyp(__kvm_flush_vm_context);
3256 }
3257
3258- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3259+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3260 kvm->arch.vmid = kvm_next_vmid;
3261 kvm_next_vmid++;
3262
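
atomic64_unchecked_t exists because PAX_REFCOUNT instruments ordinary atomics to trap on overflow; counters that are allowed to wrap, like this VMID generation counter, are moved to *_unchecked variants that skip the instrumentation. A minimal model of the split (illustrative; the real implementation is per-architecture assembly):

typedef struct { long long counter; } atomic64_unchecked_t;

static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
{
        return v->counter;                      /* no overflow check */
}

static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
{
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_SEQ_CST);
}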
3263diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3264index 14a0d98..7771a7d 100644
3265--- a/arch/arm/lib/clear_user.S
3266+++ b/arch/arm/lib/clear_user.S
3267@@ -12,14 +12,14 @@
3268
3269 .text
3270
3271-/* Prototype: int __clear_user(void *addr, size_t sz)
3272+/* Prototype: int ___clear_user(void *addr, size_t sz)
3273 * Purpose : clear some user memory
3274 * Params : addr - user memory address to clear
3275 * : sz - number of bytes to clear
3276 * Returns : number of bytes NOT cleared
3277 */
3278 ENTRY(__clear_user_std)
3279-WEAK(__clear_user)
3280+WEAK(___clear_user)
3281 stmfd sp!, {r1, lr}
3282 mov r2, #0
3283 cmp r1, #4
3284@@ -44,7 +44,7 @@ WEAK(__clear_user)
3285 USER( strnebt r2, [r0])
3286 mov r0, #0
3287 ldmfd sp!, {r1, pc}
3288-ENDPROC(__clear_user)
3289+ENDPROC(___clear_user)
3290 ENDPROC(__clear_user_std)
3291
3292 .pushsection .fixup,"ax"
3293diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3294index 66a477a..bee61d3 100644
3295--- a/arch/arm/lib/copy_from_user.S
3296+++ b/arch/arm/lib/copy_from_user.S
3297@@ -16,7 +16,7 @@
3298 /*
3299 * Prototype:
3300 *
3301- * size_t __copy_from_user(void *to, const void *from, size_t n)
3302+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3303 *
3304 * Purpose:
3305 *
3306@@ -84,11 +84,11 @@
3307
3308 .text
3309
3310-ENTRY(__copy_from_user)
3311+ENTRY(___copy_from_user)
3312
3313 #include "copy_template.S"
3314
3315-ENDPROC(__copy_from_user)
3316+ENDPROC(___copy_from_user)
3317
3318 .pushsection .fixup,"ax"
3319 .align 0
3320diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3321index 6ee2f67..d1cce76 100644
3322--- a/arch/arm/lib/copy_page.S
3323+++ b/arch/arm/lib/copy_page.S
3324@@ -10,6 +10,7 @@
3325 * ASM optimised string functions
3326 */
3327 #include <linux/linkage.h>
3328+#include <linux/const.h>
3329 #include <asm/assembler.h>
3330 #include <asm/asm-offsets.h>
3331 #include <asm/cache.h>
3332diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3333index d066df6..df28194 100644
3334--- a/arch/arm/lib/copy_to_user.S
3335+++ b/arch/arm/lib/copy_to_user.S
3336@@ -16,7 +16,7 @@
3337 /*
3338 * Prototype:
3339 *
3340- * size_t __copy_to_user(void *to, const void *from, size_t n)
3341+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3342 *
3343 * Purpose:
3344 *
3345@@ -88,11 +88,11 @@
3346 .text
3347
3348 ENTRY(__copy_to_user_std)
3349-WEAK(__copy_to_user)
3350+WEAK(___copy_to_user)
3351
3352 #include "copy_template.S"
3353
3354-ENDPROC(__copy_to_user)
3355+ENDPROC(___copy_to_user)
3356 ENDPROC(__copy_to_user_std)
3357
3358 .pushsection .fixup,"ax"
3359diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3360index 7d08b43..f7ca7ea 100644
3361--- a/arch/arm/lib/csumpartialcopyuser.S
3362+++ b/arch/arm/lib/csumpartialcopyuser.S
3363@@ -57,8 +57,8 @@
3364 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3365 */
3366
3367-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3368-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3369+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3370+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3371
3372 #include "csumpartialcopygeneric.S"
3373
3374diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3375index 5306de3..aed6d03 100644
3376--- a/arch/arm/lib/delay.c
3377+++ b/arch/arm/lib/delay.c
3378@@ -28,7 +28,7 @@
3379 /*
3380 * Default to the loop-based delay implementation.
3381 */
3382-struct arm_delay_ops arm_delay_ops = {
3383+struct arm_delay_ops arm_delay_ops __read_only = {
3384 .delay = __loop_delay,
3385 .const_udelay = __loop_const_udelay,
3386 .udelay = __loop_udelay,
3387diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3388index 025f742..a9e5b3b 100644
3389--- a/arch/arm/lib/uaccess_with_memcpy.c
3390+++ b/arch/arm/lib/uaccess_with_memcpy.c
3391@@ -104,7 +104,7 @@ out:
3392 }
3393
3394 unsigned long
3395-__copy_to_user(void __user *to, const void *from, unsigned long n)
3396+___copy_to_user(void __user *to, const void *from, unsigned long n)
3397 {
3398 /*
3399 * This test is stubbed out of the main function above to keep
3400@@ -155,7 +155,7 @@ out:
3401 return n;
3402 }
3403
3404-unsigned long __clear_user(void __user *addr, unsigned long n)
3405+unsigned long ___clear_user(void __user *addr, unsigned long n)
3406 {
3407 /* See rationale for this in __copy_to_user() above. */
3408 if (n < 64)
3409diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3410index 1767611..d2e7e24 100644
3411--- a/arch/arm/mach-kirkwood/common.c
3412+++ b/arch/arm/mach-kirkwood/common.c
3413@@ -156,7 +156,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3414 clk_gate_ops.disable(hw);
3415 }
3416
3417-static struct clk_ops clk_gate_fn_ops;
3418+static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3419+{
3420+ return clk_gate_ops.is_enabled(hw);
3421+}
3422+
3423+static struct clk_ops clk_gate_fn_ops = {
3424+ .enable = clk_gate_fn_enable,
3425+ .disable = clk_gate_fn_disable,
3426+ .is_enabled = clk_gate_fn_is_enabled,
3427+};
3428
3429 static struct clk __init *clk_register_gate_fn(struct device *dev,
3430 const char *name,
3431@@ -190,14 +199,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
3432 gate_fn->fn_en = fn_en;
3433 gate_fn->fn_dis = fn_dis;
3434
3435- /* ops is the gate ops, but with our enable/disable functions */
3436- if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
3437- clk_gate_fn_ops.disable != clk_gate_fn_disable) {
3438- clk_gate_fn_ops = clk_gate_ops;
3439- clk_gate_fn_ops.enable = clk_gate_fn_enable;
3440- clk_gate_fn_ops.disable = clk_gate_fn_disable;
3441- }
3442-
3443 clk = clk_register(dev, &gate_fn->gate.hw);
3444
3445 if (IS_ERR(clk))
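
The kirkwood change is the constification pattern used throughout this patch: rather than copying clk_gate_ops at runtime and overwriting two callbacks, which forces the ops table to stay writable, every member is spelled out in a static initializer so the structure can live in read-only memory. Reduced to its essentials (types and names are illustrative):

struct ops {
        int  (*enable)(void);
        void (*disable)(void);
};

static int  my_enable(void)  { return 0; }
static void my_disable(void) { }

static const struct ops my_ops = {      /* compile-time init, placeable in .rodata */
        .enable  = my_enable,
        .disable = my_disable,
};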
3446diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3447index 827d1500..2885dc6 100644
3448--- a/arch/arm/mach-omap2/board-n8x0.c
3449+++ b/arch/arm/mach-omap2/board-n8x0.c
3450@@ -627,7 +627,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3451 }
3452 #endif
3453
3454-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3455+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3456 .late_init = n8x0_menelaus_late_init,
3457 };
3458
3459diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
3460index 579697a..1d5a3b2 100644
3461--- a/arch/arm/mach-omap2/gpmc.c
3462+++ b/arch/arm/mach-omap2/gpmc.c
3463@@ -148,7 +148,6 @@ struct omap3_gpmc_regs {
3464 };
3465
3466 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
3467-static struct irq_chip gpmc_irq_chip;
3468 static int gpmc_irq_start;
3469
3470 static struct resource gpmc_mem_root;
3471@@ -716,6 +715,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
3472
3473 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
3474
3475+static struct irq_chip gpmc_irq_chip = {
3476+ .name = "gpmc",
3477+ .irq_startup = gpmc_irq_noop_ret,
3478+ .irq_enable = gpmc_irq_enable,
3479+ .irq_disable = gpmc_irq_disable,
3480+ .irq_shutdown = gpmc_irq_noop,
3481+ .irq_ack = gpmc_irq_noop,
3482+ .irq_mask = gpmc_irq_noop,
3483+ .irq_unmask = gpmc_irq_noop,
3484+
3485+};
3486+
3487 static int gpmc_setup_irq(void)
3488 {
3489 int i;
3490@@ -730,15 +741,6 @@ static int gpmc_setup_irq(void)
3491 return gpmc_irq_start;
3492 }
3493
3494- gpmc_irq_chip.name = "gpmc";
3495- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
3496- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
3497- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
3498- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
3499- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
3500- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
3501- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
3502-
3503 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
3504 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
3505
3506diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3507index f991016..145ebeb 100644
3508--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3509+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3510@@ -84,7 +84,7 @@ struct cpu_pm_ops {
3511 int (*finish_suspend)(unsigned long cpu_state);
3512 void (*resume)(void);
3513 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3514-};
3515+} __no_const;
3516
3517 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3518 static struct powerdomain *mpuss_pd;
3519@@ -102,7 +102,7 @@ static void dummy_cpu_resume(void)
3520 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3521 {}
3522
3523-struct cpu_pm_ops omap_pm_ops = {
3524+static struct cpu_pm_ops omap_pm_ops __read_only = {
3525 .finish_suspend = default_finish_suspend,
3526 .resume = dummy_cpu_resume,
3527 .scu_prepare = dummy_scu_prepare,
3528diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3529index 813c615..ce467c6 100644
3530--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3531+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3532@@ -339,7 +339,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3533 return NOTIFY_OK;
3534 }
3535
3536-static struct notifier_block __refdata irq_hotplug_notifier = {
3537+static struct notifier_block irq_hotplug_notifier = {
3538 .notifier_call = irq_cpu_hotplug_notify,
3539 };
3540
3541diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3542index 53f0735..5b54eb6 100644
3543--- a/arch/arm/mach-omap2/omap_device.c
3544+++ b/arch/arm/mach-omap2/omap_device.c
3545@@ -504,7 +504,7 @@ void omap_device_delete(struct omap_device *od)
3546 struct platform_device __init *omap_device_build(const char *pdev_name,
3547 int pdev_id,
3548 struct omap_hwmod *oh,
3549- void *pdata, int pdata_len)
3550+ const void *pdata, int pdata_len)
3551 {
3552 struct omap_hwmod *ohs[] = { oh };
3553
3554@@ -532,7 +532,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3555 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3556 int pdev_id,
3557 struct omap_hwmod **ohs,
3558- int oh_cnt, void *pdata,
3559+ int oh_cnt, const void *pdata,
3560 int pdata_len)
3561 {
3562 int ret = -ENOMEM;
3563diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3564index 17ca1ae..beba869 100644
3565--- a/arch/arm/mach-omap2/omap_device.h
3566+++ b/arch/arm/mach-omap2/omap_device.h
3567@@ -71,12 +71,12 @@ int omap_device_idle(struct platform_device *pdev);
3568 /* Core code interface */
3569
3570 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3571- struct omap_hwmod *oh, void *pdata,
3572+ struct omap_hwmod *oh, const void *pdata,
3573 int pdata_len);
3574
3575 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3576 struct omap_hwmod **oh, int oh_cnt,
3577- void *pdata, int pdata_len);
3578+ const void *pdata, int pdata_len);
3579
3580 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3581 struct omap_hwmod **ohs, int oh_cnt);
3582diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3583index 832adb1..49b62c4 100644
3584--- a/arch/arm/mach-omap2/omap_hwmod.c
3585+++ b/arch/arm/mach-omap2/omap_hwmod.c
3586@@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops {
3587 int (*init_clkdm)(struct omap_hwmod *oh);
3588 void (*update_context_lost)(struct omap_hwmod *oh);
3589 int (*get_context_lost)(struct omap_hwmod *oh);
3590-};
3591+} __no_const;
3592
3593 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3594-static struct omap_hwmod_soc_ops soc_ops;
3595+static struct omap_hwmod_soc_ops soc_ops __read_only;
3596
3597 /* omap_hwmod_list contains all registered struct omap_hwmods */
3598 static LIST_HEAD(omap_hwmod_list);
3599diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3600index d15c7bb..b2d1f0c 100644
3601--- a/arch/arm/mach-omap2/wd_timer.c
3602+++ b/arch/arm/mach-omap2/wd_timer.c
3603@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3604 struct omap_hwmod *oh;
3605 char *oh_name = "wd_timer2";
3606 char *dev_name = "omap_wdt";
3607- struct omap_wd_timer_platform_data pdata;
3608+ static struct omap_wd_timer_platform_data pdata = {
3609+ .read_reset_sources = prm_read_reset_sources
3610+ };
3611
3612 if (!cpu_class_is_omap2() || of_have_populated_dt())
3613 return 0;
3614@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3615 return -EINVAL;
3616 }
3617
3618- pdata.read_reset_sources = prm_read_reset_sources;
3619-
3620 pdev = omap_device_build(dev_name, id, oh, &pdata,
3621 sizeof(struct omap_wd_timer_platform_data));
3622 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3623diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3624index b82dcae..44ee5b6 100644
3625--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3626+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3627@@ -180,7 +180,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3628 bool entered_lp2 = false;
3629
3630 if (tegra_pending_sgi())
3631- ACCESS_ONCE(abort_flag) = true;
3632+ ACCESS_ONCE_RW(abort_flag) = true;
3633
3634 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3635
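ACCESS_ONCE_RW in the tegra hunk is the PaX counterpart of the stock ACCESS_ONCE: since the constify plugin may make such variables const at the C level, a write through plain ACCESS_ONCE would no longer compile, so the _RW variant keeps the non-const cast for the few places that legitimately write. Paraphrasing the two macros as this patch defines them elsewhere:

        #define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x)) /* reads  */
        #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))       /* writes */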
3636diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3637index 656324a..0beba28 100644
3638--- a/arch/arm/mach-ux500/setup.h
3639+++ b/arch/arm/mach-ux500/setup.h
3640@@ -40,13 +40,6 @@ extern void ux500_timer_init(void);
3641 .type = MT_DEVICE, \
3642 }
3643
3644-#define __MEM_DEV_DESC(x, sz) { \
3645- .virtual = IO_ADDRESS(x), \
3646- .pfn = __phys_to_pfn(x), \
3647- .length = sz, \
3648- .type = MT_MEMORY, \
3649-}
3650-
3651 extern struct smp_operations ux500_smp_ops;
3652 extern void ux500_cpu_die(unsigned int cpu);
3653
3654diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3655index cd2c88e..4dd9b67 100644
3656--- a/arch/arm/mm/Kconfig
3657+++ b/arch/arm/mm/Kconfig
3658@@ -446,7 +446,7 @@ config CPU_32v5
3659
3660 config CPU_32v6
3661 bool
3662- select CPU_USE_DOMAINS if CPU_V6 && MMU
3663+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3664 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3665
3666 config CPU_32v6K
3667@@ -601,6 +601,7 @@ config CPU_CP15_MPU
3668
3669 config CPU_USE_DOMAINS
3670 bool
3671+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3672 help
3673 This option enables or disables the use of domain switching
3674 via the set_fs() function.
3675@@ -800,6 +801,7 @@ config NEED_KUSER_HELPERS
3676 config KUSER_HELPERS
3677 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3678 default y
3679+ depends on !(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND
3680 help
3681 Warning: disabling this option may break user programs.
3682
3683@@ -812,7 +814,7 @@ config KUSER_HELPERS
3684 See Documentation/arm/kernel_user_helpers.txt for details.
3685
3686 However, the fixed address nature of these helpers can be used
3687- by ROP (return orientated programming) authors when creating
3688+ by ROP (Return Oriented Programming) authors when creating
3689 exploits.
3690
3691 If all of the binaries and libraries which run on your platform
3692diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3693index 6f4585b..7b6f52b 100644
3694--- a/arch/arm/mm/alignment.c
3695+++ b/arch/arm/mm/alignment.c
3696@@ -211,10 +211,12 @@ union offset_union {
3697 #define __get16_unaligned_check(ins,val,addr) \
3698 do { \
3699 unsigned int err = 0, v, a = addr; \
3700+ pax_open_userland(); \
3701 __get8_unaligned_check(ins,v,a,err); \
3702 val = v << ((BE) ? 8 : 0); \
3703 __get8_unaligned_check(ins,v,a,err); \
3704 val |= v << ((BE) ? 0 : 8); \
3705+ pax_close_userland(); \
3706 if (err) \
3707 goto fault; \
3708 } while (0)
3709@@ -228,6 +230,7 @@ union offset_union {
3710 #define __get32_unaligned_check(ins,val,addr) \
3711 do { \
3712 unsigned int err = 0, v, a = addr; \
3713+ pax_open_userland(); \
3714 __get8_unaligned_check(ins,v,a,err); \
3715 val = v << ((BE) ? 24 : 0); \
3716 __get8_unaligned_check(ins,v,a,err); \
3717@@ -236,6 +239,7 @@ union offset_union {
3718 val |= v << ((BE) ? 8 : 16); \
3719 __get8_unaligned_check(ins,v,a,err); \
3720 val |= v << ((BE) ? 0 : 24); \
3721+ pax_close_userland(); \
3722 if (err) \
3723 goto fault; \
3724 } while (0)
3725@@ -249,6 +253,7 @@ union offset_union {
3726 #define __put16_unaligned_check(ins,val,addr) \
3727 do { \
3728 unsigned int err = 0, v = val, a = addr; \
3729+ pax_open_userland(); \
3730 __asm__( FIRST_BYTE_16 \
3731 ARM( "1: "ins" %1, [%2], #1\n" ) \
3732 THUMB( "1: "ins" %1, [%2]\n" ) \
3733@@ -268,6 +273,7 @@ union offset_union {
3734 " .popsection\n" \
3735 : "=r" (err), "=&r" (v), "=&r" (a) \
3736 : "0" (err), "1" (v), "2" (a)); \
3737+ pax_close_userland(); \
3738 if (err) \
3739 goto fault; \
3740 } while (0)
3741@@ -281,6 +287,7 @@ union offset_union {
3742 #define __put32_unaligned_check(ins,val,addr) \
3743 do { \
3744 unsigned int err = 0, v = val, a = addr; \
3745+ pax_open_userland(); \
3746 __asm__( FIRST_BYTE_32 \
3747 ARM( "1: "ins" %1, [%2], #1\n" ) \
3748 THUMB( "1: "ins" %1, [%2]\n" ) \
3749@@ -310,6 +317,7 @@ union offset_union {
3750 " .popsection\n" \
3751 : "=r" (err), "=&r" (v), "=&r" (a) \
3752 : "0" (err), "1" (v), "2" (a)); \
3753+ pax_close_userland(); \
3754 if (err) \
3755 goto fault; \
3756 } while (0)
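The alignment.c changes bracket every byte-wise userland access with pax_open_userland()/pax_close_userland(). Under UDEREF, user mappings live in an ARM domain the kernel normally has no access to, and these helpers briefly grant client access to that domain and then revoke it. In terms of the modify_domain() helper added to mmu.c further down, a plausible spelling (the macro bodies here are illustrative; the real helpers and access values are defined elsewhere in the patch):

        /* illustrative only */
        #define pax_open_userland()  modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT)
        #define pax_close_userland() modify_domain(DOMAIN_USER, DOMAIN_NOACCESS)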
3757diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3758index 447da6f..77a5057 100644
3759--- a/arch/arm/mm/cache-l2x0.c
3760+++ b/arch/arm/mm/cache-l2x0.c
3761@@ -45,7 +45,7 @@ struct l2x0_of_data {
3762 void (*setup)(const struct device_node *, u32 *, u32 *);
3763 void (*save)(void);
3764 struct outer_cache_fns outer_cache;
3765-};
3766+} __do_const;
3767
3768 static bool of_init = false;
3769
3770diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3771index 84e6f77..0b52f31 100644
3772--- a/arch/arm/mm/context.c
3773+++ b/arch/arm/mm/context.c
3774@@ -43,7 +43,7 @@
3775 #define NUM_USER_ASIDS ASID_FIRST_VERSION
3776
3777 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3778-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3779+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3780 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
3781
3782 static DEFINE_PER_CPU(atomic64_t, active_asids);
3783@@ -180,7 +180,7 @@ static int is_reserved_asid(u64 asid)
3784 static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3785 {
3786 u64 asid = atomic64_read(&mm->context.id);
3787- u64 generation = atomic64_read(&asid_generation);
3788+ u64 generation = atomic64_read_unchecked(&asid_generation);
3789
3790 if (asid != 0 && is_reserved_asid(asid)) {
3791 /*
3792@@ -198,7 +198,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3793 */
3794 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3795 if (asid == NUM_USER_ASIDS) {
3796- generation = atomic64_add_return(ASID_FIRST_VERSION,
3797+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
3798 &asid_generation);
3799 flush_context(cpu);
3800 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3801@@ -227,14 +227,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
3802 cpu_set_reserved_ttbr0();
3803
3804 asid = atomic64_read(&mm->context.id);
3805- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
3806+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
3807 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
3808 goto switch_mm_fastpath;
3809
3810 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
3811 /* Check that our ASID belongs to the current generation. */
3812 asid = atomic64_read(&mm->context.id);
3813- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
3814+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
3815 asid = new_context(mm, cpu);
3816 atomic64_set(&mm->context.id, asid);
3817 }
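asid_generation becomes atomic64_unchecked_t because, under PAX_REFCOUNT, the plain atomic64_t operations trap on signed overflow; the ASID generation counter is one of the few counters where wrapping is intentional, so it moves to the _unchecked family (which the frv and ia64 hunks below simply alias to the plain ops on architectures without REFCOUNT). A C-level illustration of the checked/unchecked split, using the modern __builtin_add_overflow for brevity and abort() standing in for pax_report_refcount_overflow():

        #include <stdint.h>
        #include <stdlib.h>

        typedef struct { volatile int64_t counter; } atomic64_sketch_t;

        /* checked flavour: overflow is treated as a bug or attack */
        static int64_t add_return_checked(int64_t i, atomic64_sketch_t *v)
        {
                int64_t res;
                if (__builtin_add_overflow(v->counter, i, &res))
                        abort();        /* PaX would report and recover */
                v->counter = res;
                return res;
        }

        /* unchecked flavour: wraparound is expected and harmless here */
        static int64_t add_return_unchecked(int64_t i, atomic64_sketch_t *v)
        {
                v->counter += i;        /* not truly atomic; sketch only */
                return v->counter;
        }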
3818diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3819index eb8830a..5360ce7 100644
3820--- a/arch/arm/mm/fault.c
3821+++ b/arch/arm/mm/fault.c
3822@@ -25,6 +25,7 @@
3823 #include <asm/system_misc.h>
3824 #include <asm/system_info.h>
3825 #include <asm/tlbflush.h>
3826+#include <asm/sections.h>
3827
3828 #include "fault.h"
3829
3830@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3831 if (fixup_exception(regs))
3832 return;
3833
3834+#ifdef CONFIG_PAX_MEMORY_UDEREF
3835+ if (addr < TASK_SIZE) {
3836+ if (current->signal->curr_ip)
3837+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3838+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3839+ else
3840+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3841+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3842+ }
3843+#endif
3844+
3845+#ifdef CONFIG_PAX_KERNEXEC
3846+ if ((fsr & FSR_WRITE) &&
3847+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3848+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3849+ {
3850+ if (current->signal->curr_ip)
3851+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3852+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3853+ else
3854+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3855+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3856+ }
3857+#endif
3858+
3859 /*
3860 * No handler, we'll have to terminate things with extreme prejudice.
3861 */
3862@@ -174,6 +200,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3863 }
3864 #endif
3865
3866+#ifdef CONFIG_PAX_PAGEEXEC
3867+ if (fsr & FSR_LNX_PF) {
3868+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3869+ do_group_exit(SIGKILL);
3870+ }
3871+#endif
3872+
3873 tsk->thread.address = addr;
3874 tsk->thread.error_code = fsr;
3875 tsk->thread.trap_no = 14;
3876@@ -401,6 +434,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3877 }
3878 #endif /* CONFIG_MMU */
3879
3880+#ifdef CONFIG_PAX_PAGEEXEC
3881+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3882+{
3883+ long i;
3884+
3885+ printk(KERN_ERR "PAX: bytes at PC: ");
3886+ for (i = 0; i < 20; i++) {
3887+ unsigned char c;
3888+ if (get_user(c, (__force unsigned char __user *)pc+i))
3889+ printk(KERN_CONT "?? ");
3890+ else
3891+ printk(KERN_CONT "%02x ", c);
3892+ }
3893+ printk("\n");
3894+
3895+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3896+ for (i = -1; i < 20; i++) {
3897+ unsigned long c;
3898+ if (get_user(c, (__force unsigned long __user *)sp+i))
3899+ printk(KERN_CONT "???????? ");
3900+ else
3901+ printk(KERN_CONT "%08lx ", c);
3902+ }
3903+ printk("\n");
3904+}
3905+#endif
3906+
3907 /*
3908 * First Level Translation Fault Handler
3909 *
3910@@ -548,9 +608,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3911 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3912 struct siginfo info;
3913
3914+#ifdef CONFIG_PAX_MEMORY_UDEREF
3915+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3916+ if (current->signal->curr_ip)
3917+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3918+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3919+ else
3920+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3921+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3922+ goto die;
3923+ }
3924+#endif
3925+
3926 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
3927 return;
3928
3929+die:
3930 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
3931 inf->name, fsr, addr);
3932
3933@@ -574,15 +647,98 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
3934 ifsr_info[nr].name = name;
3935 }
3936
3937+asmlinkage int sys_sigreturn(struct pt_regs *regs);
3938+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
3939+
3940 asmlinkage void __exception
3941 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
3942 {
3943 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
3944 struct siginfo info;
3945+ unsigned long pc = instruction_pointer(regs);
3946+
3947+ if (user_mode(regs)) {
3948+ unsigned long sigpage = current->mm->context.sigpage;
3949+
3950+ if (sigpage <= pc && pc < sigpage + 7*4) {
3951+ if (pc < sigpage + 3*4)
3952+ sys_sigreturn(regs);
3953+ else
3954+ sys_rt_sigreturn(regs);
3955+ return;
3956+ }
3957+ if (pc == 0xffff0f60UL) {
3958+ /*
3959+ * PaX: __kuser_cmpxchg64 emulation
3960+ */
3961+ // TODO
3962+ //regs->ARM_pc = regs->ARM_lr;
3963+ //return;
3964+ }
3965+ if (pc == 0xffff0fa0UL) {
3966+ /*
3967+ * PaX: __kuser_memory_barrier emulation
3968+ */
3969+ // dmb(); implied by the exception
3970+ regs->ARM_pc = regs->ARM_lr;
3971+ return;
3972+ }
3973+ if (pc == 0xffff0fc0UL) {
3974+ /*
3975+ * PaX: __kuser_cmpxchg emulation
3976+ */
3977+ // TODO
3978+ //long new;
3979+ //int op;
3980+
3981+ //op = FUTEX_OP_SET << 28;
3982+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
3983+ //regs->ARM_r0 = old != new;
3984+ //regs->ARM_pc = regs->ARM_lr;
3985+ //return;
3986+ }
3987+ if (pc == 0xffff0fe0UL) {
3988+ /*
3989+ * PaX: __kuser_get_tls emulation
3990+ */
3991+ regs->ARM_r0 = current_thread_info()->tp_value[0];
3992+ regs->ARM_pc = regs->ARM_lr;
3993+ return;
3994+ }
3995+ }
3996+
3997+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3998+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
3999+ if (current->signal->curr_ip)
4000+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4001+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4002+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4003+ else
4004+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4005+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4006+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4007+ goto die;
4008+ }
4009+#endif
4010+
4011+#ifdef CONFIG_PAX_REFCOUNT
4012+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4013+ unsigned int bkpt;
4014+
4015+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4016+ current->thread.error_code = ifsr;
4017+ current->thread.trap_no = 0;
4018+ pax_report_refcount_overflow(regs);
4019+ fixup_exception(regs);
4020+ return;
4021+ }
4022+ }
4023+#endif
4024
4025 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4026 return;
4027
4028+die:
4029 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4030 inf->name, ifsr, addr);
4031
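The do_PrefetchAbort() additions serve two purposes: they emulate a handful of fixed-address vector-page entry points (the sigreturn trampolines plus the __kuser_* helpers) once the vector page is no longer executable from userland, and they turn execution faults in kernel or non-executable memory into PaX reports. The magic addresses follow the documented kuser helper layout; as a small reference table:

        /* fixed entry points in the vectors page, per
         * Documentation/arm/kernel_user_helpers.txt */
        static const struct { unsigned long pc; const char *helper; } kuser[] = {
                { 0xffff0f60UL, "__kuser_cmpxchg64"      },
                { 0xffff0fa0UL, "__kuser_memory_barrier" },
                { 0xffff0fc0UL, "__kuser_cmpxchg"        },
                { 0xffff0fe0UL, "__kuser_get_tls"        },
        };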
4032diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4033index cf08bdf..772656c 100644
4034--- a/arch/arm/mm/fault.h
4035+++ b/arch/arm/mm/fault.h
4036@@ -3,6 +3,7 @@
4037
4038 /*
4039 * Fault status register encodings. We steal bit 31 for our own purposes.
4040+ * Set when the FSR value is from an instruction fault.
4041 */
4042 #define FSR_LNX_PF (1 << 31)
4043 #define FSR_WRITE (1 << 11)
4044@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4045 }
4046 #endif
4047
4048+/* valid for LPAE and !LPAE */
4049+static inline int is_xn_fault(unsigned int fsr)
4050+{
4051+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4052+}
4053+
4054+static inline int is_domain_fault(unsigned int fsr)
4055+{
4056+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4057+}
4058+
4059 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4060 unsigned long search_exception_table(unsigned long addr);
4061
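The two fault.h predicates are pure mask arithmetic on the status value returned by fsr_fs(): (fs & 0x3c) == 0xc accepts 0x0c-0x0f, the range where both translation-table formats keep their permission-fault encodings (LPAE encodes permission faults as 0b0011ll), while (fs & 0xd) == 0x9 accepts 0x09 and 0x0b - the short-descriptor section and page domain faults - plus their bit-4 variants. A few lines of C enumerate exactly which codes each mask matches:

        #include <stdio.h>

        int main(void)
        {
                for (unsigned fs = 0; fs < 32; fs++) {
                        if ((fs & 0x3c) == 0xc)
                                printf("xn-class fault: 0x%02x\n", fs);
                        if ((fs & 0xd) == 0x9)
                                printf("domain fault:   0x%02x\n", fs);
                }
                return 0;
        }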
4062diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4063index 18ec4c5..479bb6a 100644
4064--- a/arch/arm/mm/init.c
4065+++ b/arch/arm/mm/init.c
4066@@ -30,6 +30,8 @@
4067 #include <asm/setup.h>
4068 #include <asm/tlb.h>
4069 #include <asm/fixmap.h>
4070+#include <asm/system_info.h>
4071+#include <asm/cp15.h>
4072
4073 #include <asm/mach/arch.h>
4074 #include <asm/mach/map.h>
4075@@ -684,7 +686,46 @@ void free_initmem(void)
4076 {
4077 #ifdef CONFIG_HAVE_TCM
4078 extern char __tcm_start, __tcm_end;
4079+#endif
4080
4081+#ifdef CONFIG_PAX_KERNEXEC
4082+ unsigned long addr;
4083+ pgd_t *pgd;
4084+ pud_t *pud;
4085+ pmd_t *pmd;
4086+ int cpu_arch = cpu_architecture();
4087+ unsigned int cr = get_cr();
4088+
4089+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4090+ /* make page tables, etc. before .text NX */
4091+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4092+ pgd = pgd_offset_k(addr);
4093+ pud = pud_offset(pgd, addr);
4094+ pmd = pmd_offset(pud, addr);
4095+ __section_update(pmd, addr, PMD_SECT_XN);
4096+ }
4097+ /* make init NX */
4098+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4099+ pgd = pgd_offset_k(addr);
4100+ pud = pud_offset(pgd, addr);
4101+ pmd = pmd_offset(pud, addr);
4102+ __section_update(pmd, addr, PMD_SECT_XN);
4103+ }
4104+ /* make kernel code/rodata RX */
4105+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4106+ pgd = pgd_offset_k(addr);
4107+ pud = pud_offset(pgd, addr);
4108+ pmd = pmd_offset(pud, addr);
4109+#ifdef CONFIG_ARM_LPAE
4110+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4111+#else
4112+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4113+#endif
4114+ }
4115+ }
4116+#endif
4117+
4118+#ifdef CONFIG_HAVE_TCM
4119 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4120 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4121 #endif
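The free_initmem() pass above walks the section mappings and tightens permissions once init code is gone: everything below _stext and the freed init range become non-executable, and the text/rodata range loses write access. It relies on a __section_update() helper defined elsewhere in this patch; a hedged sketch of what such a helper must do on a section-mapped PMD:

        /* sketch only - the real __section_update() lives in the pgtable
         * header changes of this patch */
        static inline void section_update_sketch(pmd_t *pmd, unsigned long addr,
                                                 pmdval_t prot)
        {
                *pmd = __pmd(pmd_val(*pmd) | prot);  /* e.g. PMD_SECT_XN */
                flush_pmd_entry(pmd);                /* clean the PTW cache */
                local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
        }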
4122diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4123index f123d6e..04bf569 100644
4124--- a/arch/arm/mm/ioremap.c
4125+++ b/arch/arm/mm/ioremap.c
4126@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4127 unsigned int mtype;
4128
4129 if (cached)
4130- mtype = MT_MEMORY;
4131+ mtype = MT_MEMORY_RX;
4132 else
4133- mtype = MT_MEMORY_NONCACHED;
4134+ mtype = MT_MEMORY_NONCACHED_RX;
4135
4136 return __arm_ioremap_caller(phys_addr, size, mtype,
4137 __builtin_return_address(0));
4138diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4139index 304661d..53a6b19 100644
4140--- a/arch/arm/mm/mmap.c
4141+++ b/arch/arm/mm/mmap.c
4142@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4143 struct vm_area_struct *vma;
4144 int do_align = 0;
4145 int aliasing = cache_is_vipt_aliasing();
4146+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4147 struct vm_unmapped_area_info info;
4148
4149 /*
4150@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4151 if (len > TASK_SIZE)
4152 return -ENOMEM;
4153
4154+#ifdef CONFIG_PAX_RANDMMAP
4155+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4156+#endif
4157+
4158 if (addr) {
4159 if (do_align)
4160 addr = COLOUR_ALIGN(addr, pgoff);
4161@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4162 addr = PAGE_ALIGN(addr);
4163
4164 vma = find_vma(mm, addr);
4165- if (TASK_SIZE - len >= addr &&
4166- (!vma || addr + len <= vma->vm_start))
4167+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4168 return addr;
4169 }
4170
4171@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4172 info.high_limit = TASK_SIZE;
4173 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4174 info.align_offset = pgoff << PAGE_SHIFT;
4175+ info.threadstack_offset = offset;
4176 return vm_unmapped_area(&info);
4177 }
4178
4179@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4180 unsigned long addr = addr0;
4181 int do_align = 0;
4182 int aliasing = cache_is_vipt_aliasing();
4183+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4184 struct vm_unmapped_area_info info;
4185
4186 /*
4187@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4188 return addr;
4189 }
4190
4191+#ifdef CONFIG_PAX_RANDMMAP
4192+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4193+#endif
4194+
4195 /* requesting a specific address */
4196 if (addr) {
4197 if (do_align)
4198@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4199 else
4200 addr = PAGE_ALIGN(addr);
4201 vma = find_vma(mm, addr);
4202- if (TASK_SIZE - len >= addr &&
4203- (!vma || addr + len <= vma->vm_start))
4204+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4205 return addr;
4206 }
4207
4208@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4209 info.high_limit = mm->mmap_base;
4210 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4211 info.align_offset = pgoff << PAGE_SHIFT;
4212+ info.threadstack_offset = offset;
4213 addr = vm_unmapped_area(&info);
4214
4215 /*
4216@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4217 {
4218 unsigned long random_factor = 0UL;
4219
4220+#ifdef CONFIG_PAX_RANDMMAP
4221+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4222+#endif
4223+
4224 /* 8 bits of randomness in 20 address space bits */
4225 if ((current->flags & PF_RANDOMIZE) &&
4226 !(current->personality & ADDR_NO_RANDOMIZE))
4227@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4228
4229 if (mmap_is_legacy()) {
4230 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4231+
4232+#ifdef CONFIG_PAX_RANDMMAP
4233+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4234+ mm->mmap_base += mm->delta_mmap;
4235+#endif
4236+
4237 mm->get_unmapped_area = arch_get_unmapped_area;
4238 } else {
4239 mm->mmap_base = mmap_base(random_factor);
4240+
4241+#ifdef CONFIG_PAX_RANDMMAP
4242+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4243+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4244+#endif
4245+
4246 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4247 }
4248 }
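Two things happen in the mmap.c hunks: the bare "#ifdef CONFIG_PAX_RANDMMAP / if (!(mm->pax_flags & MF_PAX_RANDMMAP)) / #endif" guards make the whole following if (addr) block conditional, so address hints are ignored entirely when randomization is active, and the open-coded vma gap test is replaced by check_heap_stack_gap() with a per-call gr_rand_threadstack_offset() cushion. The helper itself is defined elsewhere in this patch; a minimal sketch of the contract the callers rely on:

        #include <linux/mm.h>

        /* sketch: the candidate hole must fit below the next VMA with an
         * extra 'offset' bytes (the randomized thread-stack cushion) spare */
        static int check_gap_sketch(const struct vm_area_struct *vma,
                                    unsigned long addr, unsigned long len,
                                    unsigned long offset)
        {
                if (!vma)
                        return 1;       /* nothing mapped above us */
                return addr + len + offset <= vma->vm_start;
        }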
4249diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4250index b1d17ee..7a6f4d3 100644
4251--- a/arch/arm/mm/mmu.c
4252+++ b/arch/arm/mm/mmu.c
4253@@ -36,6 +36,22 @@
4254 #include "mm.h"
4255 #include "tcm.h"
4256
4257+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4258+void modify_domain(unsigned int dom, unsigned int type)
4259+{
4260+ struct thread_info *thread = current_thread_info();
4261+ unsigned int domain = thread->cpu_domain;
4262+ /*
4263+ * DOMAIN_MANAGER might be defined to some other value,
4264+ * use the arch-defined constant
4265+ */
4266+ domain &= ~domain_val(dom, 3);
4267+ thread->cpu_domain = domain | domain_val(dom, type);
4268+ set_domain(thread->cpu_domain);
4269+}
4270+EXPORT_SYMBOL(modify_domain);
4271+#endif
4272+
4273 /*
4274 * empty_zero_page is a special page that is used for
4275 * zero-initialized data and COW.
4276@@ -228,10 +244,18 @@ __setup("noalign", noalign_setup);
4277
4278 #endif /* ifdef CONFIG_CPU_CP15 / else */
4279
4280-#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
4281+#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY
4282 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4283
4284-static struct mem_type mem_types[] = {
4285+#ifdef CONFIG_PAX_KERNEXEC
4286+#define L_PTE_KERNEXEC L_PTE_RDONLY
4287+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4288+#else
4289+#define L_PTE_KERNEXEC L_PTE_DIRTY
4290+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4291+#endif
4292+
4293+static struct mem_type mem_types[] __read_only = {
4294 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4295 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4296 L_PTE_SHARED,
4297@@ -260,16 +284,16 @@ static struct mem_type mem_types[] = {
4298 [MT_UNCACHED] = {
4299 .prot_pte = PROT_PTE_DEVICE,
4300 .prot_l1 = PMD_TYPE_TABLE,
4301- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4302+ .prot_sect = PROT_SECT_DEVICE,
4303 .domain = DOMAIN_IO,
4304 },
4305 [MT_CACHECLEAN] = {
4306- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4307+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4308 .domain = DOMAIN_KERNEL,
4309 },
4310 #ifndef CONFIG_ARM_LPAE
4311 [MT_MINICLEAN] = {
4312- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4313+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_RDONLY,
4314 .domain = DOMAIN_KERNEL,
4315 },
4316 #endif
4317@@ -277,36 +301,54 @@ static struct mem_type mem_types[] = {
4318 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4319 L_PTE_RDONLY,
4320 .prot_l1 = PMD_TYPE_TABLE,
4321- .domain = DOMAIN_USER,
4322+ .domain = DOMAIN_VECTORS,
4323 },
4324 [MT_HIGH_VECTORS] = {
4325 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4326 L_PTE_USER | L_PTE_RDONLY,
4327 .prot_l1 = PMD_TYPE_TABLE,
4328- .domain = DOMAIN_USER,
4329+ .domain = DOMAIN_VECTORS,
4330 },
4331- [MT_MEMORY] = {
4332+ [MT_MEMORY_RWX] = {
4333 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4334 .prot_l1 = PMD_TYPE_TABLE,
4335 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4336 .domain = DOMAIN_KERNEL,
4337 },
4338+ [MT_MEMORY_RW] = {
4339+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4340+ .prot_l1 = PMD_TYPE_TABLE,
4341+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4342+ .domain = DOMAIN_KERNEL,
4343+ },
4344+ [MT_MEMORY_RX] = {
4345+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4346+ .prot_l1 = PMD_TYPE_TABLE,
4347+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4348+ .domain = DOMAIN_KERNEL,
4349+ },
4350 [MT_ROM] = {
4351- .prot_sect = PMD_TYPE_SECT,
4352+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4353 .domain = DOMAIN_KERNEL,
4354 },
4355- [MT_MEMORY_NONCACHED] = {
4356+ [MT_MEMORY_NONCACHED_RW] = {
4357 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4358 L_PTE_MT_BUFFERABLE,
4359 .prot_l1 = PMD_TYPE_TABLE,
4360 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4361 .domain = DOMAIN_KERNEL,
4362 },
4363+ [MT_MEMORY_NONCACHED_RX] = {
4364+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4365+ L_PTE_MT_BUFFERABLE,
4366+ .prot_l1 = PMD_TYPE_TABLE,
4367+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4368+ .domain = DOMAIN_KERNEL,
4369+ },
4370 [MT_MEMORY_DTCM] = {
4371- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4372- L_PTE_XN,
4373+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4374 .prot_l1 = PMD_TYPE_TABLE,
4375- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4376+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4377 .domain = DOMAIN_KERNEL,
4378 },
4379 [MT_MEMORY_ITCM] = {
4380@@ -316,10 +358,10 @@ static struct mem_type mem_types[] = {
4381 },
4382 [MT_MEMORY_SO] = {
4383 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4384- L_PTE_MT_UNCACHED | L_PTE_XN,
4385+ L_PTE_MT_UNCACHED,
4386 .prot_l1 = PMD_TYPE_TABLE,
4387 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
4388- PMD_SECT_UNCACHED | PMD_SECT_XN,
4389+ PMD_SECT_UNCACHED,
4390 .domain = DOMAIN_KERNEL,
4391 },
4392 [MT_MEMORY_DMA_READY] = {
4393@@ -405,9 +447,35 @@ static void __init build_mem_type_table(void)
4394 * to prevent speculative instruction fetches.
4395 */
4396 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
4397+ mem_types[MT_DEVICE].prot_pte |= L_PTE_XN;
4398 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
4399+ mem_types[MT_DEVICE_NONSHARED].prot_pte |= L_PTE_XN;
4400 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
4401+ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_XN;
4402 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
4403+ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_XN;
4404+
4405+ /* Mark other regions on ARMv6+ as execute-never */
4406+
4407+#ifdef CONFIG_PAX_KERNEXEC
4408+ mem_types[MT_UNCACHED].prot_sect |= PMD_SECT_XN;
4409+ mem_types[MT_UNCACHED].prot_pte |= L_PTE_XN;
4410+ mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_XN;
4411+ mem_types[MT_CACHECLEAN].prot_pte |= L_PTE_XN;
4412+#ifndef CONFIG_ARM_LPAE
4413+ mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_XN;
4414+ mem_types[MT_MINICLEAN].prot_pte |= L_PTE_XN;
4415+#endif
4416+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
4417+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_XN;
4418+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_XN;
4419+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_XN;
4420+ mem_types[MT_MEMORY_DTCM].prot_sect |= PMD_SECT_XN;
4421+ mem_types[MT_MEMORY_DTCM].prot_pte |= L_PTE_XN;
4422+#endif
4423+
4424+ mem_types[MT_MEMORY_SO].prot_sect |= PMD_SECT_XN;
4425+ mem_types[MT_MEMORY_SO].prot_pte |= L_PTE_XN;
4426 }
4427 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4428 /*
4429@@ -468,6 +536,9 @@ static void __init build_mem_type_table(void)
4430 * from SVC mode and no access from userspace.
4431 */
4432 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4433+#ifdef CONFIG_PAX_KERNEXEC
4434+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4435+#endif
4436 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4437 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4438 #endif
4439@@ -485,11 +556,17 @@ static void __init build_mem_type_table(void)
4440 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4441 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4442 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4443- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
4444- mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
4445+ mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4446+ mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4447+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4448+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4449+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4450+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4451 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4452- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
4453- mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
4454+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_S;
4455+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_SHARED;
4456+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_S;
4457+ mem_types[MT_MEMORY_NONCACHED_RX].prot_pte |= L_PTE_SHARED;
4458 }
4459 }
4460
4461@@ -500,15 +577,20 @@ static void __init build_mem_type_table(void)
4462 if (cpu_arch >= CPU_ARCH_ARMv6) {
4463 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4464 /* Non-cacheable Normal is XCB = 001 */
4465- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4466+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4467+ PMD_SECT_BUFFERED;
4468+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4469 PMD_SECT_BUFFERED;
4470 } else {
4471 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4472- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4473+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4474+ PMD_SECT_TEX(1);
4475+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4476 PMD_SECT_TEX(1);
4477 }
4478 } else {
4479- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4480+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_BUFFERABLE;
4481+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_BUFFERABLE;
4482 }
4483
4484 #ifdef CONFIG_ARM_LPAE
4485@@ -524,6 +606,8 @@ static void __init build_mem_type_table(void)
4486 vecs_pgprot |= PTE_EXT_AF;
4487 #endif
4488
4489+ user_pgprot |= __supported_pte_mask;
4490+
4491 for (i = 0; i < 16; i++) {
4492 pteval_t v = pgprot_val(protection_map[i]);
4493 protection_map[i] = __pgprot(v | user_pgprot);
4494@@ -541,10 +625,15 @@ static void __init build_mem_type_table(void)
4495
4496 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4497 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4498- mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
4499- mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
4500+ mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4501+ mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4502+ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4503+ mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4504+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4505+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4506 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4507- mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
4508+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ecc_mask;
4509+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= ecc_mask;
4510 mem_types[MT_ROM].prot_sect |= cp->pmd;
4511
4512 switch (cp->pmd) {
4513@@ -1186,18 +1275,15 @@ void __init arm_mm_memblock_reserve(void)
4514 * called function. This means you can't use any function or debugging
4515 * method which may touch any device, otherwise the kernel _will_ crash.
4516 */
4517+
4518+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4519+
4520 static void __init devicemaps_init(const struct machine_desc *mdesc)
4521 {
4522 struct map_desc map;
4523 unsigned long addr;
4524- void *vectors;
4525
4526- /*
4527- * Allocate the vector page early.
4528- */
4529- vectors = early_alloc(PAGE_SIZE * 2);
4530-
4531- early_trap_init(vectors);
4532+ early_trap_init(&vectors);
4533
4534 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4535 pmd_clear(pmd_off_k(addr));
4536@@ -1237,7 +1323,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4537 * location (0xffff0000). If we aren't using high-vectors, also
4538 * create a mapping at the low-vectors virtual address.
4539 */
4540- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4541+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4542 map.virtual = 0xffff0000;
4543 map.length = PAGE_SIZE;
4544 #ifdef CONFIG_KUSER_HELPERS
4545@@ -1309,8 +1395,39 @@ static void __init map_lowmem(void)
4546 map.pfn = __phys_to_pfn(start);
4547 map.virtual = __phys_to_virt(start);
4548 map.length = end - start;
4549- map.type = MT_MEMORY;
4550
4551+#ifdef CONFIG_PAX_KERNEXEC
4552+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4553+ struct map_desc kernel;
4554+ struct map_desc initmap;
4555+
4556+ /* when freeing initmem we will make this RW */
4557+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4558+ initmap.virtual = (unsigned long)__init_begin;
4559+ initmap.length = _sdata - __init_begin;
4560+ initmap.type = MT_MEMORY_RWX;
4561+ create_mapping(&initmap);
4562+
4563+ /* when freeing initmem we will make this RX */
4564+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4565+ kernel.virtual = (unsigned long)_stext;
4566+ kernel.length = __init_begin - _stext;
4567+ kernel.type = MT_MEMORY_RWX;
4568+ create_mapping(&kernel);
4569+
4570+ if (map.virtual < (unsigned long)_stext) {
4571+ map.length = (unsigned long)_stext - map.virtual;
4572+ map.type = MT_MEMORY_RWX;
4573+ create_mapping(&map);
4574+ }
4575+
4576+ map.pfn = __phys_to_pfn(__pa(_sdata));
4577+ map.virtual = (unsigned long)_sdata;
4578+ map.length = end - __pa(_sdata);
4579+ }
4580+#endif
4581+
4582+ map.type = MT_MEMORY_RW;
4583 create_mapping(&map);
4584 }
4585 }
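The mmu.c rework replaces the single writable-and-executable MT_MEMORY type with an RWX/RW/RX triple (and splits the noncached type the same way), so map_lowmem() can map kernel text initially as MT_MEMORY_RWX and free_initmem() can later tighten it while everything else is RW and non-executable from the start. Read off the map_lowmem() hunk above, the intended lowmem layout under KERNEXEC looks roughly like this (sketch):

        enum perm_sketch { RWX, RW, RX };

        static const struct {
                const char *region;
                enum perm_sketch initial, after_free_initmem;
        } kernexec_layout[] = {
                { "PAGE_OFFSET .. _stext",    RWX, RW }, /* page tables etc., XN'd */
                { "_stext .. __init_begin",   RWX, RX }, /* text + rodata */
                { "__init_begin .. _sdata",   RWX, RW }, /* initmem, freed + XN'd */
                { "_sdata .. end of lowmem",  RW,  RW }, /* data, XN from boot */
        };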
4586diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4587index a5bc92d..0bb4730 100644
4588--- a/arch/arm/plat-omap/sram.c
4589+++ b/arch/arm/plat-omap/sram.c
4590@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4591 * Looks like we need to preserve some bootloader code at the
4592 * beginning of SRAM for jumping to flash for reboot to work...
4593 */
4594+ pax_open_kernel();
4595 memset_io(omap_sram_base + omap_sram_skip, 0,
4596 omap_sram_size - omap_sram_skip);
4597+ pax_close_kernel();
4598 }
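omap_map_sram() writes to SRAM that KERNEXEC may have mapped read-only, so the memset_io() gets wrapped in pax_open_kernel()/pax_close_kernel(), which briefly lift and then restore kernel write protection. On ARM this can be expressed through the modify_domain() helper from the mmu.c hunk above; the exact access values are defined in other parts of the patch, so take this as a hypothetical spelling:

        /* hypothetical; real helpers and domain values live elsewhere */
        #define pax_open_kernel_sketch()  modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER)
        #define pax_close_kernel_sketch() modify_domain(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT)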
4599diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4600index ce6d763..cfea917 100644
4601--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4602+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4603@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4604 int (*started)(unsigned ch);
4605 int (*flush)(unsigned ch);
4606 int (*stop)(unsigned ch);
4607-};
4608+} __no_const;
4609
4610 extern void *samsung_dmadev_get_ops(void);
4611 extern void *s3c_dma_get_ops(void);
4612diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4613index c3a58a1..78fbf54 100644
4614--- a/arch/avr32/include/asm/cache.h
4615+++ b/arch/avr32/include/asm/cache.h
4616@@ -1,8 +1,10 @@
4617 #ifndef __ASM_AVR32_CACHE_H
4618 #define __ASM_AVR32_CACHE_H
4619
4620+#include <linux/const.h>
4621+
4622 #define L1_CACHE_SHIFT 5
4623-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4624+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4625
4626 /*
4627 * Memory returned by kmalloc() may be used for DMA, so we must make
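The cache.h hunks in this section (avr32, blackfin, cris, frv, hexagon, ia64) all apply the same two-part fix: include <linux/const.h> and define L1_CACHE_BYTES via _AC(1,UL) instead of a bare (1 << L1_CACHE_SHIFT). In C the constant then has unsigned long type, so cache-line masks and the size_overflow-instrumented arithmetic don't mix a signed int into pointer-sized math, while assembly users of the header still see a plain 1. _AC() comes from include/uapi/linux/const.h (paraphrased):

        #ifdef __ASSEMBLY__
        #define _AC(X, Y)  X            /* assembler: no C suffixes allowed */
        #else
        #define __AC(X, Y) (X##Y)
        #define _AC(X, Y)  __AC(X, Y)   /* C: paste the UL/ULL suffix on */
        #endif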
4628diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4629index d232888..87c8df1 100644
4630--- a/arch/avr32/include/asm/elf.h
4631+++ b/arch/avr32/include/asm/elf.h
4632@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4633 the loader. We need to make sure that it is out of the way of the program
4634 that it will "exec", and that there is sufficient room for the brk. */
4635
4636-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4637+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4638
4639+#ifdef CONFIG_PAX_ASLR
4640+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4641+
4642+#define PAX_DELTA_MMAP_LEN 15
4643+#define PAX_DELTA_STACK_LEN 15
4644+#endif
4645
4646 /* This yields a mask that user programs can use to figure out what
4647 instruction set this CPU supports. This could be done in user space,
4648diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4649index 479330b..53717a8 100644
4650--- a/arch/avr32/include/asm/kmap_types.h
4651+++ b/arch/avr32/include/asm/kmap_types.h
4652@@ -2,9 +2,9 @@
4653 #define __ASM_AVR32_KMAP_TYPES_H
4654
4655 #ifdef CONFIG_DEBUG_HIGHMEM
4656-# define KM_TYPE_NR 29
4657+# define KM_TYPE_NR 30
4658 #else
4659-# define KM_TYPE_NR 14
4660+# define KM_TYPE_NR 15
4661 #endif
4662
4663 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4664diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4665index 0eca933..eb78c7b 100644
4666--- a/arch/avr32/mm/fault.c
4667+++ b/arch/avr32/mm/fault.c
4668@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4669
4670 int exception_trace = 1;
4671
4672+#ifdef CONFIG_PAX_PAGEEXEC
4673+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4674+{
4675+ unsigned long i;
4676+
4677+ printk(KERN_ERR "PAX: bytes at PC: ");
4678+ for (i = 0; i < 20; i++) {
4679+ unsigned char c;
4680+ if (get_user(c, (unsigned char *)pc+i))
4681+ printk(KERN_CONT "?? ");
4682+ else
4683+ printk(KERN_CONT "%02x ", c);
4684+ }
4685+ printk("\n");
4686+}
4687+#endif
4688+
4689 /*
4690 * This routine handles page faults. It determines the address and the
4691 * problem, and then passes it off to one of the appropriate routines.
4692@@ -176,6 +193,16 @@ bad_area:
4693 up_read(&mm->mmap_sem);
4694
4695 if (user_mode(regs)) {
4696+
4697+#ifdef CONFIG_PAX_PAGEEXEC
4698+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4699+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4700+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4701+ do_group_exit(SIGKILL);
4702+ }
4703+ }
4704+#endif
4705+
4706 if (exception_trace && printk_ratelimit())
4707 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4708 "sp %08lx ecr %lu\n",
4709diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4710index 568885a..f8008df 100644
4711--- a/arch/blackfin/include/asm/cache.h
4712+++ b/arch/blackfin/include/asm/cache.h
4713@@ -7,6 +7,7 @@
4714 #ifndef __ARCH_BLACKFIN_CACHE_H
4715 #define __ARCH_BLACKFIN_CACHE_H
4716
4717+#include <linux/const.h>
4718 #include <linux/linkage.h> /* for asmlinkage */
4719
4720 /*
4721@@ -14,7 +15,7 @@
4722 * Blackfin loads 32 bytes for cache
4723 */
4724 #define L1_CACHE_SHIFT 5
4725-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4726+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4727 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4728
4729 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4730diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4731index aea2718..3639a60 100644
4732--- a/arch/cris/include/arch-v10/arch/cache.h
4733+++ b/arch/cris/include/arch-v10/arch/cache.h
4734@@ -1,8 +1,9 @@
4735 #ifndef _ASM_ARCH_CACHE_H
4736 #define _ASM_ARCH_CACHE_H
4737
4738+#include <linux/const.h>
4739 /* Etrax 100LX have 32-byte cache-lines. */
4740-#define L1_CACHE_BYTES 32
4741 #define L1_CACHE_SHIFT 5
4742+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4743
4744 #endif /* _ASM_ARCH_CACHE_H */
4745diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4746index 7caf25d..ee65ac5 100644
4747--- a/arch/cris/include/arch-v32/arch/cache.h
4748+++ b/arch/cris/include/arch-v32/arch/cache.h
4749@@ -1,11 +1,12 @@
4750 #ifndef _ASM_CRIS_ARCH_CACHE_H
4751 #define _ASM_CRIS_ARCH_CACHE_H
4752
4753+#include <linux/const.h>
4754 #include <arch/hwregs/dma.h>
4755
4756 /* A cache-line is 32 bytes. */
4757-#define L1_CACHE_BYTES 32
4758 #define L1_CACHE_SHIFT 5
4759+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4760
4761 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4762
4763diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
4764index b86329d..6709906 100644
4765--- a/arch/frv/include/asm/atomic.h
4766+++ b/arch/frv/include/asm/atomic.h
4767@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
4768 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
4769 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
4770
4771+#define atomic64_read_unchecked(v) atomic64_read(v)
4772+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4773+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4774+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4775+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4776+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4777+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4778+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4779+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4780+
4781 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
4782 {
4783 int c, old;
4784diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
4785index 2797163..c2a401d 100644
4786--- a/arch/frv/include/asm/cache.h
4787+++ b/arch/frv/include/asm/cache.h
4788@@ -12,10 +12,11 @@
4789 #ifndef __ASM_CACHE_H
4790 #define __ASM_CACHE_H
4791
4792+#include <linux/const.h>
4793
4794 /* bytes per L1 cache line */
4795 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
4796-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4797+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4798
4799 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4800 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4801diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
4802index 43901f2..0d8b865 100644
4803--- a/arch/frv/include/asm/kmap_types.h
4804+++ b/arch/frv/include/asm/kmap_types.h
4805@@ -2,6 +2,6 @@
4806 #ifndef _ASM_KMAP_TYPES_H
4807 #define _ASM_KMAP_TYPES_H
4808
4809-#define KM_TYPE_NR 17
4810+#define KM_TYPE_NR 18
4811
4812 #endif
4813diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
4814index 836f147..4cf23f5 100644
4815--- a/arch/frv/mm/elf-fdpic.c
4816+++ b/arch/frv/mm/elf-fdpic.c
4817@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4818 {
4819 struct vm_area_struct *vma;
4820 struct vm_unmapped_area_info info;
4821+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4822
4823 if (len > TASK_SIZE)
4824 return -ENOMEM;
4825@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4826 if (addr) {
4827 addr = PAGE_ALIGN(addr);
4828 vma = find_vma(current->mm, addr);
4829- if (TASK_SIZE - len >= addr &&
4830- (!vma || addr + len <= vma->vm_start))
4831+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4832 goto success;
4833 }
4834
4835@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4836 info.high_limit = (current->mm->start_stack - 0x00200000);
4837 info.align_mask = 0;
4838 info.align_offset = 0;
4839+ info.threadstack_offset = offset;
4840 addr = vm_unmapped_area(&info);
4841 if (!(addr & ~PAGE_MASK))
4842 goto success;
4843diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
4844index f4ca594..adc72fd6 100644
4845--- a/arch/hexagon/include/asm/cache.h
4846+++ b/arch/hexagon/include/asm/cache.h
4847@@ -21,9 +21,11 @@
4848 #ifndef __ASM_CACHE_H
4849 #define __ASM_CACHE_H
4850
4851+#include <linux/const.h>
4852+
4853 /* Bytes per L1 cache line */
4854-#define L1_CACHE_SHIFT (5)
4855-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4856+#define L1_CACHE_SHIFT 5
4857+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4858
4859 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
4860 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
4861diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
4862index 7740ab1..17fa8c5 100644
4863--- a/arch/ia64/Kconfig
4864+++ b/arch/ia64/Kconfig
4865@@ -554,6 +554,7 @@ source "drivers/sn/Kconfig"
4866 config KEXEC
4867 bool "kexec system call"
4868 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
4869+ depends on !GRKERNSEC_KMEM
4870 help
4871 kexec is a system call that implements the ability to shutdown your
4872 current kernel, and to start another kernel. It is like a reboot
4873diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
4874index 6e6fe18..a6ae668 100644
4875--- a/arch/ia64/include/asm/atomic.h
4876+++ b/arch/ia64/include/asm/atomic.h
4877@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
4878 #define atomic64_inc(v) atomic64_add(1, (v))
4879 #define atomic64_dec(v) atomic64_sub(1, (v))
4880
4881+#define atomic64_read_unchecked(v) atomic64_read(v)
4882+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4883+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4884+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4885+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4886+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4887+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4888+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4889+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4890+
4891 /* Atomic operations are already serializing */
4892 #define smp_mb__before_atomic_dec() barrier()
4893 #define smp_mb__after_atomic_dec() barrier()
4894diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
4895index 988254a..e1ee885 100644
4896--- a/arch/ia64/include/asm/cache.h
4897+++ b/arch/ia64/include/asm/cache.h
4898@@ -1,6 +1,7 @@
4899 #ifndef _ASM_IA64_CACHE_H
4900 #define _ASM_IA64_CACHE_H
4901
4902+#include <linux/const.h>
4903
4904 /*
4905 * Copyright (C) 1998-2000 Hewlett-Packard Co
4906@@ -9,7 +10,7 @@
4907
4908 /* Bytes per L1 (data) cache line. */
4909 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
4910-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4911+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4912
4913 #ifdef CONFIG_SMP
4914 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
4915diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
4916index 5a83c5c..4d7f553 100644
4917--- a/arch/ia64/include/asm/elf.h
4918+++ b/arch/ia64/include/asm/elf.h
4919@@ -42,6 +42,13 @@
4920 */
4921 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
4922
4923+#ifdef CONFIG_PAX_ASLR
4924+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
4925+
4926+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4927+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4928+#endif
4929+
4930 #define PT_IA_64_UNWIND 0x70000001
4931
4932 /* IA-64 relocations: */
4933diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
4934index 96a8d92..617a1cf 100644
4935--- a/arch/ia64/include/asm/pgalloc.h
4936+++ b/arch/ia64/include/asm/pgalloc.h
4937@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4938 pgd_val(*pgd_entry) = __pa(pud);
4939 }
4940
4941+static inline void
4942+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4943+{
4944+ pgd_populate(mm, pgd_entry, pud);
4945+}
4946+
4947 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
4948 {
4949 return quicklist_alloc(0, GFP_KERNEL, NULL);
4950@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4951 pud_val(*pud_entry) = __pa(pmd);
4952 }
4953
4954+static inline void
4955+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4956+{
4957+ pud_populate(mm, pud_entry, pmd);
4958+}
4959+
4960 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
4961 {
4962 return quicklist_alloc(0, GFP_KERNEL, NULL);
4963diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
4964index 7935115..c0eca6a 100644
4965--- a/arch/ia64/include/asm/pgtable.h
4966+++ b/arch/ia64/include/asm/pgtable.h
4967@@ -12,7 +12,7 @@
4968 * David Mosberger-Tang <davidm@hpl.hp.com>
4969 */
4970
4971-
4972+#include <linux/const.h>
4973 #include <asm/mman.h>
4974 #include <asm/page.h>
4975 #include <asm/processor.h>
4976@@ -142,6 +142,17 @@
4977 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4978 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4979 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
4980+
4981+#ifdef CONFIG_PAX_PAGEEXEC
4982+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
4983+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4984+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4985+#else
4986+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4987+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4988+# define PAGE_COPY_NOEXEC PAGE_COPY
4989+#endif
4990+
4991 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
4992 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
4993 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
4994diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
4995index 45698cd..e8e2dbc 100644
4996--- a/arch/ia64/include/asm/spinlock.h
4997+++ b/arch/ia64/include/asm/spinlock.h
4998@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
4999 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5000
5001 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5002- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5003+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5004 }
5005
5006 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5007diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5008index 449c8c0..18965fb 100644
5009--- a/arch/ia64/include/asm/uaccess.h
5010+++ b/arch/ia64/include/asm/uaccess.h
5011@@ -240,12 +240,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5012 static inline unsigned long
5013 __copy_to_user (void __user *to, const void *from, unsigned long count)
5014 {
5015+ if (count > INT_MAX)
5016+ return count;
5017+
5018+ if (!__builtin_constant_p(count))
5019+ check_object_size(from, count, true);
5020+
5021 return __copy_user(to, (__force void __user *) from, count);
5022 }
5023
5024 static inline unsigned long
5025 __copy_from_user (void *to, const void __user *from, unsigned long count)
5026 {
5027+ if (count > INT_MAX)
5028+ return count;
5029+
5030+ if (!__builtin_constant_p(count))
5031+ check_object_size(to, count, false);
5032+
5033 return __copy_user((__force void __user *) to, from, count);
5034 }
5035
5036@@ -255,10 +267,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5037 ({ \
5038 void __user *__cu_to = (to); \
5039 const void *__cu_from = (from); \
5040- long __cu_len = (n); \
5041+ unsigned long __cu_len = (n); \
5042 \
5043- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5044+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5045+ if (!__builtin_constant_p(n)) \
5046+ check_object_size(__cu_from, __cu_len, true); \
5047 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5048+ } \
5049 __cu_len; \
5050 })
5051
5052@@ -266,11 +281,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5053 ({ \
5054 void *__cu_to = (to); \
5055 const void __user *__cu_from = (from); \
5056- long __cu_len = (n); \
5057+ unsigned long __cu_len = (n); \
5058 \
5059 __chk_user_ptr(__cu_from); \
5060- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5061+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5062+ if (!__builtin_constant_p(n)) \
5063+ check_object_size(__cu_to, __cu_len, false); \
5064 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5065+ } \
5066 __cu_len; \
5067 })
5068
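The guards added above do two things: sizes larger than INT_MAX are rejected outright, catching negative lengths that were converted to huge unsigned values, and non-constant sizes go through check_object_size() so PAX_USERCOPY can verify the kernel object's bounds. A hedged sketch of the bug class the INT_MAX check stops (broken_read and its bounds are invented for illustration):

static long broken_read(void __user *buf, long len)
{
	char kbuf[64] = { 0 };

	if (len > 64)			/* signed check: passes for len < 0 ... */
		return -EINVAL;
	/* ... but len converts to a huge unsigned long at the copy; with
	 * the new guard, __copy_to_user() fails cleanly and copies nothing. */
	if (__copy_to_user(buf, kbuf, len))
		return -EFAULT;
	return len;
}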
5069diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5070index 24603be..948052d 100644
5071--- a/arch/ia64/kernel/module.c
5072+++ b/arch/ia64/kernel/module.c
5073@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
5074 void
5075 module_free (struct module *mod, void *module_region)
5076 {
5077- if (mod && mod->arch.init_unw_table &&
5078- module_region == mod->module_init) {
5079+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
5080 unw_remove_unwind_table(mod->arch.init_unw_table);
5081 mod->arch.init_unw_table = NULL;
5082 }
5083@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5084 }
5085
5086 static inline int
5087+in_init_rx (const struct module *mod, uint64_t addr)
5088+{
5089+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5090+}
5091+
5092+static inline int
5093+in_init_rw (const struct module *mod, uint64_t addr)
5094+{
5095+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5096+}
5097+
5098+static inline int
5099 in_init (const struct module *mod, uint64_t addr)
5100 {
5101- return addr - (uint64_t) mod->module_init < mod->init_size;
5102+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5103+}
5104+
5105+static inline int
5106+in_core_rx (const struct module *mod, uint64_t addr)
5107+{
5108+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5109+}
5110+
5111+static inline int
5112+in_core_rw (const struct module *mod, uint64_t addr)
5113+{
5114+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5115 }
5116
5117 static inline int
5118 in_core (const struct module *mod, uint64_t addr)
5119 {
5120- return addr - (uint64_t) mod->module_core < mod->core_size;
5121+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5122 }
5123
5124 static inline int
5125@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5126 break;
5127
5128 case RV_BDREL:
5129- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5130+ if (in_init_rx(mod, val))
5131+ val -= (uint64_t) mod->module_init_rx;
5132+ else if (in_init_rw(mod, val))
5133+ val -= (uint64_t) mod->module_init_rw;
5134+ else if (in_core_rx(mod, val))
5135+ val -= (uint64_t) mod->module_core_rx;
5136+ else if (in_core_rw(mod, val))
5137+ val -= (uint64_t) mod->module_core_rw;
5138 break;
5139
5140 case RV_LTV:
5141@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5142 * addresses have been selected...
5143 */
5144 uint64_t gp;
5145- if (mod->core_size > MAX_LTOFF)
5146+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5147 /*
5148 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5149 * at the end of the module.
5150 */
5151- gp = mod->core_size - MAX_LTOFF / 2;
5152+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5153 else
5154- gp = mod->core_size / 2;
5155- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5156+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5157+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5158 mod->arch.gp = gp;
5159 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5160 }
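All of the in_init_rx()/in_init_rw()/in_core_rx()/in_core_rw() helpers lean on one idiom: `addr - base < size` evaluated in unsigned arithmetic tests both bounds at once, because an addr below base wraps around to a value that can never be smaller than size. A standalone sketch:

#include <stdint.h>

/* base <= addr < base + size, in a single compare: if addr < base, the
 * unsigned subtraction wraps to a huge value and the test fails. */
static inline int in_range(uint64_t addr, uint64_t base, uint64_t size)
{
	return addr - base < size;
}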
5161diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5162index ab33328..f39506c 100644
5163--- a/arch/ia64/kernel/palinfo.c
5164+++ b/arch/ia64/kernel/palinfo.c
5165@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5166 return NOTIFY_OK;
5167 }
5168
5169-static struct notifier_block __refdata palinfo_cpu_notifier =
5170+static struct notifier_block palinfo_cpu_notifier =
5171 {
5172 .notifier_call = palinfo_cpu_callback,
5173 .priority = 0,
5174diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5175index 41e33f8..65180b2a 100644
5176--- a/arch/ia64/kernel/sys_ia64.c
5177+++ b/arch/ia64/kernel/sys_ia64.c
5178@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5179 unsigned long align_mask = 0;
5180 struct mm_struct *mm = current->mm;
5181 struct vm_unmapped_area_info info;
5182+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5183
5184 if (len > RGN_MAP_LIMIT)
5185 return -ENOMEM;
5186@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5187 if (REGION_NUMBER(addr) == RGN_HPAGE)
5188 addr = 0;
5189 #endif
5190+
5191+#ifdef CONFIG_PAX_RANDMMAP
5192+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5193+ addr = mm->free_area_cache;
5194+ else
5195+#endif
5196+
5197 if (!addr)
5198 addr = TASK_UNMAPPED_BASE;
5199
5200@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5201 info.high_limit = TASK_SIZE;
5202 info.align_mask = align_mask;
5203 info.align_offset = 0;
5204+ info.threadstack_offset = offset;
5205 return vm_unmapped_area(&info);
5206 }
5207
5208diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5209index 0ccb28f..8992469 100644
5210--- a/arch/ia64/kernel/vmlinux.lds.S
5211+++ b/arch/ia64/kernel/vmlinux.lds.S
5212@@ -198,7 +198,7 @@ SECTIONS {
5213 /* Per-cpu data: */
5214 . = ALIGN(PERCPU_PAGE_SIZE);
5215 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5216- __phys_per_cpu_start = __per_cpu_load;
5217+ __phys_per_cpu_start = per_cpu_load;
5218 /*
5219 * ensure percpu data fits
5220 * into percpu page size
5221diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5222index 7225dad..2a7c8256 100644
5223--- a/arch/ia64/mm/fault.c
5224+++ b/arch/ia64/mm/fault.c
5225@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5226 return pte_present(pte);
5227 }
5228
5229+#ifdef CONFIG_PAX_PAGEEXEC
5230+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5231+{
5232+ unsigned long i;
5233+
5234+ printk(KERN_ERR "PAX: bytes at PC: ");
5235+ for (i = 0; i < 8; i++) {
5236+ unsigned int c;
5237+ if (get_user(c, (unsigned int *)pc+i))
5238+ printk(KERN_CONT "???????? ");
5239+ else
5240+ printk(KERN_CONT "%08x ", c);
5241+ }
5242+ printk("\n");
5243+}
5244+#endif
5245+
5246 # define VM_READ_BIT 0
5247 # define VM_WRITE_BIT 1
5248 # define VM_EXEC_BIT 2
5249@@ -151,8 +168,21 @@ retry:
5250 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5251 goto bad_area;
5252
5253- if ((vma->vm_flags & mask) != mask)
5254+ if ((vma->vm_flags & mask) != mask) {
5255+
5256+#ifdef CONFIG_PAX_PAGEEXEC
5257+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5258+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5259+ goto bad_area;
5260+
5261+ up_read(&mm->mmap_sem);
5262+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5263+ do_group_exit(SIGKILL);
5264+ }
5265+#endif
5266+
5267 goto bad_area;
5268+ }
5269
5270 /*
5271 * If for any reason at all we couldn't handle the fault, make
5272diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5273index 68232db..6ca80af 100644
5274--- a/arch/ia64/mm/hugetlbpage.c
5275+++ b/arch/ia64/mm/hugetlbpage.c
5276@@ -154,6 +154,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5277 unsigned long pgoff, unsigned long flags)
5278 {
5279 struct vm_unmapped_area_info info;
5280+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5281
5282 if (len > RGN_MAP_LIMIT)
5283 return -ENOMEM;
5284@@ -177,6 +178,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5285 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5286 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5287 info.align_offset = 0;
5288+ info.threadstack_offset = offset;
5289 return vm_unmapped_area(&info);
5290 }
5291
5292diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5293index b6f7f43..c04320d 100644
5294--- a/arch/ia64/mm/init.c
5295+++ b/arch/ia64/mm/init.c
5296@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5297 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5298 vma->vm_end = vma->vm_start + PAGE_SIZE;
5299 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5300+
5301+#ifdef CONFIG_PAX_PAGEEXEC
5302+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5303+ vma->vm_flags &= ~VM_EXEC;
5304+
5305+#ifdef CONFIG_PAX_MPROTECT
5306+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5307+ vma->vm_flags &= ~VM_MAYEXEC;
5308+#endif
5309+
5310+ }
5311+#endif
5312+
5313 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5314 down_write(&current->mm->mmap_sem);
5315 if (insert_vm_struct(current->mm, vma)) {
5316diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5317index 40b3ee9..8c2c112 100644
5318--- a/arch/m32r/include/asm/cache.h
5319+++ b/arch/m32r/include/asm/cache.h
5320@@ -1,8 +1,10 @@
5321 #ifndef _ASM_M32R_CACHE_H
5322 #define _ASM_M32R_CACHE_H
5323
5324+#include <linux/const.h>
5325+
5326 /* L1 cache line size */
5327 #define L1_CACHE_SHIFT 4
5328-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5329+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5330
5331 #endif /* _ASM_M32R_CACHE_H */
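The _AC(1,UL) form comes from include/linux/const.h and lets one definition serve both C and assembly: under __ASSEMBLY__ the suffix is dropped, in C it is token-pasted on, so L1_CACHE_BYTES becomes an unsigned long constant instead of a plain int. Simplified sketch of the machinery (not the verbatim header):

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* assembler: bare constant, no suffix */
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)	/* C: pastes to 1UL */
#endif

#define L1_CACHE_SHIFT	4
#define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)	/* usable from .S and .c */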
5332diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5333index 82abd15..d95ae5d 100644
5334--- a/arch/m32r/lib/usercopy.c
5335+++ b/arch/m32r/lib/usercopy.c
5336@@ -14,6 +14,9 @@
5337 unsigned long
5338 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5339 {
5340+ if ((long)n < 0)
5341+ return n;
5342+
5343 prefetch(from);
5344 if (access_ok(VERIFY_WRITE, to, n))
5345 __copy_user(to,from,n);
5346@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5347 unsigned long
5348 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5349 {
5350+ if ((long)n < 0)
5351+ return n;
5352+
5353 prefetchw(to);
5354 if (access_ok(VERIFY_READ, from, n))
5355 __copy_user_zeroing(to,from,n);
5356diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5357index 0395c51..5f26031 100644
5358--- a/arch/m68k/include/asm/cache.h
5359+++ b/arch/m68k/include/asm/cache.h
5360@@ -4,9 +4,11 @@
5361 #ifndef __ARCH_M68K_CACHE_H
5362 #define __ARCH_M68K_CACHE_H
5363
5364+#include <linux/const.h>
5365+
5366 /* bytes per L1 cache line */
5367 #define L1_CACHE_SHIFT 4
5368-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5369+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5370
5371 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5372
5373diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5374index 0424315..defcca9 100644
5375--- a/arch/metag/mm/hugetlbpage.c
5376+++ b/arch/metag/mm/hugetlbpage.c
5377@@ -205,6 +205,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5378 info.high_limit = TASK_SIZE;
5379 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5380 info.align_offset = 0;
5381+ info.threadstack_offset = 0;
5382 return vm_unmapped_area(&info);
5383 }
5384
5385diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5386index 4efe96a..60e8699 100644
5387--- a/arch/microblaze/include/asm/cache.h
5388+++ b/arch/microblaze/include/asm/cache.h
5389@@ -13,11 +13,12 @@
5390 #ifndef _ASM_MICROBLAZE_CACHE_H
5391 #define _ASM_MICROBLAZE_CACHE_H
5392
5393+#include <linux/const.h>
5394 #include <asm/registers.h>
5395
5396 #define L1_CACHE_SHIFT 5
5397 /* word-granular cache in microblaze */
5398-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5399+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5400
5401 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5402
5403diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5404index f75ab4a..adc6968 100644
5405--- a/arch/mips/Kconfig
5406+++ b/arch/mips/Kconfig
5407@@ -2283,6 +2283,7 @@ source "kernel/Kconfig.preempt"
5408
5409 config KEXEC
5410 bool "Kexec system call"
5411+ depends on !GRKERNSEC_KMEM
5412 help
5413 kexec is a system call that implements the ability to shutdown your
5414 current kernel, and to start another kernel. It is like a reboot
5415diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5416index 08b6079..8b554d2 100644
5417--- a/arch/mips/include/asm/atomic.h
5418+++ b/arch/mips/include/asm/atomic.h
5419@@ -21,15 +21,39 @@
5420 #include <asm/cmpxchg.h>
5421 #include <asm/war.h>
5422
5423+#ifdef CONFIG_GENERIC_ATOMIC64
5424+#include <asm-generic/atomic64.h>
5425+#endif
5426+
5427 #define ATOMIC_INIT(i) { (i) }
5428
5429+#ifdef CONFIG_64BIT
5430+#define _ASM_EXTABLE(from, to) \
5431+" .section __ex_table,\"a\"\n" \
5432+" .dword " #from ", " #to"\n" \
5433+" .previous\n"
5434+#else
5435+#define _ASM_EXTABLE(from, to) \
5436+" .section __ex_table,\"a\"\n" \
5437+" .word " #from ", " #to"\n" \
5438+" .previous\n"
5439+#endif
5440+
5441 /*
5442 * atomic_read - read atomic variable
5443 * @v: pointer of type atomic_t
5444 *
5445 * Atomically reads the value of @v.
5446 */
5447-#define atomic_read(v) (*(volatile int *)&(v)->counter)
5448+static inline int atomic_read(const atomic_t *v)
5449+{
5450+ return (*(volatile const int *) &v->counter);
5451+}
5452+
5453+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5454+{
5455+ return (*(volatile const int *) &v->counter);
5456+}
5457
5458 /*
5459 * atomic_set - set atomic variable
5460@@ -38,7 +62,15 @@
5461 *
5462 * Atomically sets the value of @v to @i.
5463 */
5464-#define atomic_set(v, i) ((v)->counter = (i))
5465+static inline void atomic_set(atomic_t *v, int i)
5466+{
5467+ v->counter = i;
5468+}
5469+
5470+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5471+{
5472+ v->counter = i;
5473+}
5474
5475 /*
5476 * atomic_add - add integer to atomic variable
5477@@ -47,7 +79,67 @@
5478 *
5479 * Atomically adds @i to @v.
5480 */
5481-static __inline__ void atomic_add(int i, atomic_t * v)
5482+static __inline__ void atomic_add(int i, atomic_t *v)
5483+{
5484+ int temp;
5485+
5486+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5487+ __asm__ __volatile__(
5488+ " .set mips3 \n"
5489+ "1: ll %0, %1 # atomic_add \n"
5490+#ifdef CONFIG_PAX_REFCOUNT
5491+ /* Exception on overflow. */
5492+ "2: add %0, %2 \n"
5493+#else
5494+ " addu %0, %2 \n"
5495+#endif
5496+ " sc %0, %1 \n"
5497+ " beqzl %0, 1b \n"
5498+#ifdef CONFIG_PAX_REFCOUNT
5499+ "3: \n"
5500+ _ASM_EXTABLE(2b, 3b)
5501+#endif
5502+ " .set mips0 \n"
5503+ : "=&r" (temp), "+m" (v->counter)
5504+ : "Ir" (i));
5505+ } else if (kernel_uses_llsc) {
5506+ __asm__ __volatile__(
5507+ " .set mips3 \n"
5508+ "1: ll %0, %1 # atomic_add \n"
5509+#ifdef CONFIG_PAX_REFCOUNT
5510+ /* Exception on overflow. */
5511+ "2: add %0, %2 \n"
5512+#else
5513+ " addu %0, %2 \n"
5514+#endif
5515+ " sc %0, %1 \n"
5516+ " beqz %0, 1b \n"
5517+#ifdef CONFIG_PAX_REFCOUNT
5518+ "3: \n"
5519+ _ASM_EXTABLE(2b, 3b)
5520+#endif
5521+ " .set mips0 \n"
5522+ : "=&r" (temp), "+m" (v->counter)
5523+ : "Ir" (i));
5524+ } else {
5525+ unsigned long flags;
5526+
5527+ raw_local_irq_save(flags);
5528+ __asm__ __volatile__(
5529+#ifdef CONFIG_PAX_REFCOUNT
5530+ /* Exception on overflow. */
5531+ "1: add %0, %1 \n"
5532+ "2: \n"
5533+ _ASM_EXTABLE(1b, 2b)
5534+#else
5535+ " addu %0, %1 \n"
5536+#endif
5537+ : "+r" (v->counter) : "Ir" (i));
5538+ raw_local_irq_restore(flags);
5539+ }
5540+}
5541+
5542+static __inline__ void atomic_add_unchecked(int i, atomic_unchecked_t *v)
5543 {
5544 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5545 int temp;
5546@@ -90,7 +182,67 @@ static __inline__ void atomic_add(int i, atomic_t * v)
5547 *
5548 * Atomically subtracts @i from @v.
5549 */
5550-static __inline__ void atomic_sub(int i, atomic_t * v)
5551+static __inline__ void atomic_sub(int i, atomic_t *v)
5552+{
5553+ int temp;
5554+
5555+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5556+ __asm__ __volatile__(
5557+ " .set mips3 \n"
5558+ "1: ll %0, %1 # atomic64_sub \n"
5559+#ifdef CONFIG_PAX_REFCOUNT
5560+ /* Exception on overflow. */
5561+ "2: sub %0, %2 \n"
5562+#else
5563+ " subu %0, %2 \n"
5564+#endif
5565+ " sc %0, %1 \n"
5566+ " beqzl %0, 1b \n"
5567+#ifdef CONFIG_PAX_REFCOUNT
5568+ "3: \n"
5569+ _ASM_EXTABLE(2b, 3b)
5570+#endif
5571+ " .set mips0 \n"
5572+ : "=&r" (temp), "+m" (v->counter)
5573+ : "Ir" (i));
5574+ } else if (kernel_uses_llsc) {
5575+ __asm__ __volatile__(
5576+ " .set mips3 \n"
5577+ "1: ll %0, %1 # atomic64_sub \n"
5578+#ifdef CONFIG_PAX_REFCOUNT
5579+ /* Exception on overflow. */
5580+ "2: sub %0, %2 \n"
5581+#else
5582+ " subu %0, %2 \n"
5583+#endif
5584+ " sc %0, %1 \n"
5585+ " beqz %0, 1b \n"
5586+#ifdef CONFIG_PAX_REFCOUNT
5587+ "3: \n"
5588+ _ASM_EXTABLE(2b, 3b)
5589+#endif
5590+ " .set mips0 \n"
5591+ : "=&r" (temp), "+m" (v->counter)
5592+ : "Ir" (i));
5593+ } else {
5594+ unsigned long flags;
5595+
5596+ raw_local_irq_save(flags);
5597+ __asm__ __volatile__(
5598+#ifdef CONFIG_PAX_REFCOUNT
5599+ /* Exception on overflow. */
5600+ "1: sub %0, %1 \n"
5601+ "2: \n"
5602+ _ASM_EXTABLE(1b, 2b)
5603+#else
5604+ " subu %0, %1 \n"
5605+#endif
5606+ : "+r" (v->counter) : "Ir" (i));
5607+ raw_local_irq_restore(flags);
5608+ }
5609+}
5610+
5611+static __inline__ void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
5612 {
5613 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5614 int temp;
5615@@ -129,7 +281,93 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
5616 /*
5617 * Same as above, but return the result value
5618 */
5619-static __inline__ int atomic_add_return(int i, atomic_t * v)
5620+static __inline__ int atomic_add_return(int i, atomic_t *v)
5621+{
5622+ int result;
5623+ int temp;
5624+
5625+ smp_mb__before_llsc();
5626+
5627+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5628+ __asm__ __volatile__(
5629+ " .set mips3 \n"
5630+ "1: ll %1, %2 # atomic_add_return \n"
5631+#ifdef CONFIG_PAX_REFCOUNT
5632+ "2: add %0, %1, %3 \n"
5633+#else
5634+ " addu %0, %1, %3 \n"
5635+#endif
5636+ " sc %0, %2 \n"
5637+ " beqzl %0, 1b \n"
5638+#ifdef CONFIG_PAX_REFCOUNT
5639+ " b 4f \n"
5640+ " .set noreorder \n"
5641+ "3: b 5f \n"
5642+ " move %0, %1 \n"
5643+ " .set reorder \n"
5644+ _ASM_EXTABLE(2b, 3b)
5645+#endif
5646+ "4: addu %0, %1, %3 \n"
5647+#ifdef CONFIG_PAX_REFCOUNT
5648+ "5: \n"
5649+#endif
5650+ " .set mips0 \n"
5651+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5652+ : "Ir" (i));
5653+ } else if (kernel_uses_llsc) {
5654+ __asm__ __volatile__(
5655+ " .set mips3 \n"
5656+ "1: ll %1, %2 # atomic_add_return \n"
5657+#ifdef CONFIG_PAX_REFCOUNT
5658+ "2: add %0, %1, %3 \n"
5659+#else
5660+ " addu %0, %1, %3 \n"
5661+#endif
5662+ " sc %0, %2 \n"
5663+ " bnez %0, 4f \n"
5664+ " b 1b \n"
5665+#ifdef CONFIG_PAX_REFCOUNT
5666+ " .set noreorder \n"
5667+ "3: b 5f \n"
5668+ " move %0, %1 \n"
5669+ " .set reorder \n"
5670+ _ASM_EXTABLE(2b, 3b)
5671+#endif
5672+ "4: addu %0, %1, %3 \n"
5673+#ifdef CONFIG_PAX_REFCOUNT
5674+ "5: \n"
5675+#endif
5676+ " .set mips0 \n"
5677+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5678+ : "Ir" (i));
5679+ } else {
5680+ unsigned long flags;
5681+
5682+ raw_local_irq_save(flags);
5683+ __asm__ __volatile__(
5684+ " lw %0, %1 \n"
5685+#ifdef CONFIG_PAX_REFCOUNT
5686+ /* Exception on overflow. */
5687+ "1: add %0, %2 \n"
5688+#else
5689+ " addu %0, %2 \n"
5690+#endif
5691+ " sw %0, %1 \n"
5692+#ifdef CONFIG_PAX_REFCOUNT
5693+ /* Note: Dest reg is not modified on overflow */
5694+ "2: \n"
5695+ _ASM_EXTABLE(1b, 2b)
5696+#endif
5697+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
5698+ raw_local_irq_restore(flags);
5699+ }
5700+
5701+ smp_llsc_mb();
5702+
5703+ return result;
5704+}
5705+
5706+static __inline__ int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5707 {
5708 int result;
5709
5710@@ -178,7 +416,93 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
5711 return result;
5712 }
5713
5714-static __inline__ int atomic_sub_return(int i, atomic_t * v)
5715+static __inline__ int atomic_sub_return(int i, atomic_t *v)
5716+{
5717+ int result;
5718+ int temp;
5719+
5720+ smp_mb__before_llsc();
5721+
5722+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5723+ __asm__ __volatile__(
5724+ " .set mips3 \n"
5725+ "1: ll %1, %2 # atomic_sub_return \n"
5726+#ifdef CONFIG_PAX_REFCOUNT
5727+ "2: sub %0, %1, %3 \n"
5728+#else
5729+ " subu %0, %1, %3 \n"
5730+#endif
5731+ " sc %0, %2 \n"
5732+ " beqzl %0, 1b \n"
5733+#ifdef CONFIG_PAX_REFCOUNT
5734+ " b 4f \n"
5735+ " .set noreorder \n"
5736+ "3: b 5f \n"
5737+ " move %0, %1 \n"
5738+ " .set reorder \n"
5739+ _ASM_EXTABLE(2b, 3b)
5740+#endif
5741+ "4: subu %0, %1, %3 \n"
5742+#ifdef CONFIG_PAX_REFCOUNT
5743+ "5: \n"
5744+#endif
5745+ " .set mips0 \n"
5746+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
5747+ : "Ir" (i), "m" (v->counter)
5748+ : "memory");
5749+ } else if (kernel_uses_llsc) {
5750+ __asm__ __volatile__(
5751+ " .set mips3 \n"
5752+ "1: ll %1, %2 # atomic_sub_return \n"
5753+#ifdef CONFIG_PAX_REFCOUNT
5754+ "2: sub %0, %1, %3 \n"
5755+#else
5756+ " subu %0, %1, %3 \n"
5757+#endif
5758+ " sc %0, %2 \n"
5759+ " bnez %0, 4f \n"
5760+ " b 1b \n"
5761+#ifdef CONFIG_PAX_REFCOUNT
5762+ " .set noreorder \n"
5763+ "3: b 5f \n"
5764+ " move %0, %1 \n"
5765+ " .set reorder \n"
5766+ _ASM_EXTABLE(2b, 3b)
5767+#endif
5768+ "4: subu %0, %1, %3 \n"
5769+#ifdef CONFIG_PAX_REFCOUNT
5770+ "5: \n"
5771+#endif
5772+ " .set mips0 \n"
5773+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5774+ : "Ir" (i));
5775+ } else {
5776+ unsigned long flags;
5777+
5778+ raw_local_irq_save(flags);
5779+ __asm__ __volatile__(
5780+ " lw %0, %1 \n"
5781+#ifdef CONFIG_PAX_REFCOUNT
5782+ /* Exception on overflow. */
5783+ "1: sub %0, %2 \n"
5784+#else
5785+ " subu %0, %2 \n"
5786+#endif
5787+ " sw %0, %1 \n"
5788+#ifdef CONFIG_PAX_REFCOUNT
5789+ /* Note: Dest reg is not modified on overflow */
5790+ "2: \n"
5791+ _ASM_EXTABLE(1b, 2b)
5792+#endif
5793+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
5794+ raw_local_irq_restore(flags);
5795+ }
5796+
5797+ smp_llsc_mb();
5798+
5799+ return result;
5800+}
5801+static __inline__ int atomic_sub_return_unchecked(int i, atomic_unchecked_t *v)
5802 {
5803 int result;
5804
5805@@ -238,7 +562,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
5806 * Atomically test @v and subtract @i if @v is greater or equal than @i.
5807 * The function returns the old value of @v minus @i.
5808 */
5809-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5810+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
5811 {
5812 int result;
5813
5814@@ -295,8 +619,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5815 return result;
5816 }
5817
5818-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5819-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
5820+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
5821+{
5822+ return cmpxchg(&v->counter, old, new);
5823+}
5824+
5825+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
5826+ int new)
5827+{
5828+ return cmpxchg(&(v->counter), old, new);
5829+}
5830+
5831+static inline int atomic_xchg(atomic_t *v, int new)
5832+{
5833+ return xchg(&v->counter, new);
5834+}
5835+
5836+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5837+{
5838+ return xchg(&(v->counter), new);
5839+}
5840
5841 /**
5842 * __atomic_add_unless - add unless the number is a given value
5843@@ -324,6 +666,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5844
5845 #define atomic_dec_return(v) atomic_sub_return(1, (v))
5846 #define atomic_inc_return(v) atomic_add_return(1, (v))
5847+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5848+{
5849+ return atomic_add_return_unchecked(1, v);
5850+}
5851
5852 /*
5853 * atomic_sub_and_test - subtract value from variable and test result
5854@@ -345,6 +691,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5855 * other cases.
5856 */
5857 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5858+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5859+{
5860+ return atomic_add_return_unchecked(1, v) == 0;
5861+}
5862
5863 /*
5864 * atomic_dec_and_test - decrement by 1 and test
5865@@ -369,6 +719,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5866 * Atomically increments @v by 1.
5867 */
5868 #define atomic_inc(v) atomic_add(1, (v))
5869+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
5870+{
5871+ atomic_add_unchecked(1, v);
5872+}
5873
5874 /*
5875 * atomic_dec - decrement and test
5876@@ -377,6 +731,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5877 * Atomically decrements @v by 1.
5878 */
5879 #define atomic_dec(v) atomic_sub(1, (v))
5880+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
5881+{
5882+ atomic_sub_unchecked(1, v);
5883+}
5884
5885 /*
5886 * atomic_add_negative - add and test if negative
5887@@ -398,14 +756,30 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5888 * @v: pointer of type atomic64_t
5889 *
5890 */
5891-#define atomic64_read(v) (*(volatile long *)&(v)->counter)
5892+static inline long atomic64_read(const atomic64_t *v)
5893+{
5894+ return (*(volatile const long *) &v->counter);
5895+}
5896+
5897+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5898+{
5899+ return (*(volatile const long *) &v->counter);
5900+}
5901
5902 /*
5903 * atomic64_set - set atomic variable
5904 * @v: pointer of type atomic64_t
5905 * @i: required value
5906 */
5907-#define atomic64_set(v, i) ((v)->counter = (i))
5908+static inline void atomic64_set(atomic64_t *v, long i)
5909+{
5910+ v->counter = i;
5911+}
5912+
5913+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5914+{
5915+ v->counter = i;
5916+}
5917
5918 /*
5919 * atomic64_add - add integer to atomic variable
5920@@ -414,7 +788,66 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5921 *
5922 * Atomically adds @i to @v.
5923 */
5924-static __inline__ void atomic64_add(long i, atomic64_t * v)
5925+static __inline__ void atomic64_add(long i, atomic64_t *v)
5926+{
5927+ long temp;
5928+
5929+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5930+ __asm__ __volatile__(
5931+ " .set mips3 \n"
5932+ "1: lld %0, %1 # atomic64_add \n"
5933+#ifdef CONFIG_PAX_REFCOUNT
5934+ /* Exception on overflow. */
5935+ "2: dadd %0, %2 \n"
5936+#else
5937+ " daddu %0, %2 \n"
5938+#endif
5939+ " scd %0, %1 \n"
5940+ " beqzl %0, 1b \n"
5941+#ifdef CONFIG_PAX_REFCOUNT
5942+ "3: \n"
5943+ _ASM_EXTABLE(2b, 3b)
5944+#endif
5945+ " .set mips0 \n"
5946+ : "=&r" (temp), "+m" (v->counter)
5947+ : "Ir" (i));
5948+ } else if (kernel_uses_llsc) {
5949+ __asm__ __volatile__(
5950+ " .set mips3 \n"
5951+ "1: lld %0, %1 # atomic64_add \n"
5952+#ifdef CONFIG_PAX_REFCOUNT
5953+ /* Exception on overflow. */
5954+ "2: dadd %0, %2 \n"
5955+#else
5956+ " daddu %0, %2 \n"
5957+#endif
5958+ " scd %0, %1 \n"
5959+ " beqz %0, 1b \n"
5960+#ifdef CONFIG_PAX_REFCOUNT
5961+ "3: \n"
5962+ _ASM_EXTABLE(2b, 3b)
5963+#endif
5964+ " .set mips0 \n"
5965+ : "=&r" (temp), "+m" (v->counter)
5966+ : "Ir" (i));
5967+ } else {
5968+ unsigned long flags;
5969+
5970+ raw_local_irq_save(flags);
5971+ __asm__ __volatile__(
5972+#ifdef CONFIG_PAX_REFCOUNT
5973+ /* Exception on overflow. */
5974+ "1: dadd %0, %1 \n"
5975+ "2: \n"
5976+ _ASM_EXTABLE(1b, 2b)
5977+#else
5978+ " daddu %0, %1 \n"
5979+#endif
5980+ : "+r" (v->counter) : "Ir" (i));
5981+ raw_local_irq_restore(flags);
5982+ }
5983+}
5984+static __inline__ void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
5985 {
5986 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5987 long temp;
5988@@ -457,7 +890,67 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
5989 *
5990 * Atomically subtracts @i from @v.
5991 */
5992-static __inline__ void atomic64_sub(long i, atomic64_t * v)
5993+static __inline__ void atomic64_sub(long i, atomic64_t *v)
5994+{
5995+ long temp;
5996+
5997+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5998+ __asm__ __volatile__(
5999+ " .set mips3 \n"
6000+ "1: lld %0, %1 # atomic64_sub \n"
6001+#ifdef CONFIG_PAX_REFCOUNT
6002+ /* Exception on overflow. */
6003+ "2: dsub %0, %2 \n"
6004+#else
6005+ " dsubu %0, %2 \n"
6006+#endif
6007+ " scd %0, %1 \n"
6008+ " beqzl %0, 1b \n"
6009+#ifdef CONFIG_PAX_REFCOUNT
6010+ "3: \n"
6011+ _ASM_EXTABLE(2b, 3b)
6012+#endif
6013+ " .set mips0 \n"
6014+ : "=&r" (temp), "+m" (v->counter)
6015+ : "Ir" (i));
6016+ } else if (kernel_uses_llsc) {
6017+ __asm__ __volatile__(
6018+ " .set mips3 \n"
6019+ "1: lld %0, %1 # atomic64_sub \n"
6020+#ifdef CONFIG_PAX_REFCOUNT
6021+ /* Exception on overflow. */
6022+ "2: dsub %0, %2 \n"
6023+#else
6024+ " dsubu %0, %2 \n"
6025+#endif
6026+ " scd %0, %1 \n"
6027+ " beqz %0, 1b \n"
6028+#ifdef CONFIG_PAX_REFCOUNT
6029+ "3: \n"
6030+ _ASM_EXTABLE(2b, 3b)
6031+#endif
6032+ " .set mips0 \n"
6033+ : "=&r" (temp), "+m" (v->counter)
6034+ : "Ir" (i));
6035+ } else {
6036+ unsigned long flags;
6037+
6038+ raw_local_irq_save(flags);
6039+ __asm__ __volatile__(
6040+#ifdef CONFIG_PAX_REFCOUNT
6041+ /* Exception on overflow. */
6042+ "1: dsub %0, %1 \n"
6043+ "2: \n"
6044+ _ASM_EXTABLE(1b, 2b)
6045+#else
6046+ " dsubu %0, %1 \n"
6047+#endif
6048+ : "+r" (v->counter) : "Ir" (i));
6049+ raw_local_irq_restore(flags);
6050+ }
6051+}
6052+
6053+static __inline__ void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6054 {
6055 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6056 long temp;
6057@@ -496,7 +989,93 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
6058 /*
6059 * Same as above, but return the result value
6060 */
6061-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6062+static __inline__ long atomic64_add_return(long i, atomic64_t *v)
6063+{
6064+ long result;
6065+ long temp;
6066+
6067+ smp_mb__before_llsc();
6068+
6069+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6070+ __asm__ __volatile__(
6071+ " .set mips3 \n"
6072+ "1: lld %1, %2 # atomic64_add_return \n"
6073+#ifdef CONFIG_PAX_REFCOUNT
6074+ "2: dadd %0, %1, %3 \n"
6075+#else
6076+ " daddu %0, %1, %3 \n"
6077+#endif
6078+ " scd %0, %2 \n"
6079+ " beqzl %0, 1b \n"
6080+#ifdef CONFIG_PAX_REFCOUNT
6081+ " b 4f \n"
6082+ " .set noreorder \n"
6083+ "3: b 5f \n"
6084+ " move %0, %1 \n"
6085+ " .set reorder \n"
6086+ _ASM_EXTABLE(2b, 3b)
6087+#endif
6088+ "4: daddu %0, %1, %3 \n"
6089+#ifdef CONFIG_PAX_REFCOUNT
6090+ "5: \n"
6091+#endif
6092+ " .set mips0 \n"
6093+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6094+ : "Ir" (i));
6095+ } else if (kernel_uses_llsc) {
6096+ __asm__ __volatile__(
6097+ " .set mips3 \n"
6098+ "1: lld %1, %2 # atomic64_add_return \n"
6099+#ifdef CONFIG_PAX_REFCOUNT
6100+ "2: dadd %0, %1, %3 \n"
6101+#else
6102+ " daddu %0, %1, %3 \n"
6103+#endif
6104+ " scd %0, %2 \n"
6105+ " bnez %0, 4f \n"
6106+ " b 1b \n"
6107+#ifdef CONFIG_PAX_REFCOUNT
6108+ " .set noreorder \n"
6109+ "3: b 5f \n"
6110+ " move %0, %1 \n"
6111+ " .set reorder \n"
6112+ _ASM_EXTABLE(2b, 3b)
6113+#endif
6114+ "4: daddu %0, %1, %3 \n"
6115+#ifdef CONFIG_PAX_REFCOUNT
6116+ "5: \n"
6117+#endif
6118+ " .set mips0 \n"
6119+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6120+ : "Ir" (i), "m" (v->counter)
6121+ : "memory");
6122+ } else {
6123+ unsigned long flags;
6124+
6125+ raw_local_irq_save(flags);
6126+ __asm__ __volatile__(
6127+ " ld %0, %1 \n"
6128+#ifdef CONFIG_PAX_REFCOUNT
6129+ /* Exception on overflow. */
6130+ "1: dadd %0, %2 \n"
6131+#else
6132+ " daddu %0, %2 \n"
6133+#endif
6134+ " sd %0, %1 \n"
6135+#ifdef CONFIG_PAX_REFCOUNT
6136+ /* Note: Dest reg is not modified on overflow */
6137+ "2: \n"
6138+ _ASM_EXTABLE(1b, 2b)
6139+#endif
6140+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6141+ raw_local_irq_restore(flags);
6142+ }
6143+
6144+ smp_llsc_mb();
6145+
6146+ return result;
6147+}
6148+static __inline__ long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6149 {
6150 long result;
6151
6152@@ -546,7 +1125,97 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6153 return result;
6154 }
6155
6156-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6157+static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
6158+{
6159+ long result;
6160+ long temp;
6161+
6162+ smp_mb__before_llsc();
6163+
6164+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6167+ __asm__ __volatile__(
6168+ " .set mips3 \n"
6169+ "1: lld %1, %2 # atomic64_sub_return \n"
6170+#ifdef CONFIG_PAX_REFCOUNT
6171+ "2: dsub %0, %1, %3 \n"
6172+#else
6173+ " dsubu %0, %1, %3 \n"
6174+#endif
6175+ " scd %0, %2 \n"
6176+ " beqzl %0, 1b \n"
6177+#ifdef CONFIG_PAX_REFCOUNT
6178+ " b 4f \n"
6179+ " .set noreorder \n"
6180+ "3: b 5f \n"
6181+ " move %0, %1 \n"
6182+ " .set reorder \n"
6183+ _ASM_EXTABLE(2b, 3b)
6184+#endif
6185+ "4: dsubu %0, %1, %3 \n"
6186+#ifdef CONFIG_PAX_REFCOUNT
6187+ "5: \n"
6188+#endif
6189+ " .set mips0 \n"
6190+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6191+ : "Ir" (i), "m" (v->counter)
6192+ : "memory");
6193+ } else if (kernel_uses_llsc) {
6194+ __asm__ __volatile__(
6195+ " .set mips3 \n"
6196+ "1: lld %1, %2 # atomic64_sub_return \n"
6197+#ifdef CONFIG_PAX_REFCOUNT
6198+ "2: dsub %0, %1, %3 \n"
6199+#else
6200+ " dsubu %0, %1, %3 \n"
6201+#endif
6202+ " scd %0, %2 \n"
6203+ " bnez %0, 4f \n"
6204+ " b 1b \n"
6205+#ifdef CONFIG_PAX_REFCOUNT
6206+ " .set noreorder \n"
6207+ "3: b 5f \n"
6208+ " move %0, %1 \n"
6209+ " .set reorder \n"
6210+ _ASM_EXTABLE(2b, 3b)
6211+#endif
6212+ "4: dsubu %0, %1, %3 \n"
6213+#ifdef CONFIG_PAX_REFCOUNT
6214+ "5: \n"
6215+#endif
6216+ " .set mips0 \n"
6217+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6218+ : "Ir" (i), "m" (v->counter)
6219+ : "memory");
6220+ } else {
6221+ unsigned long flags;
6222+
6223+ raw_local_irq_save(flags);
6224+ __asm__ __volatile__(
6225+ " ld %0, %1 \n"
6226+#ifdef CONFIG_PAX_REFCOUNT
6227+ /* Exception on overflow. */
6228+ "1: dsub %0, %2 \n"
6229+#else
6230+ " dsubu %0, %2 \n"
6231+#endif
6232+ " sd %0, %1 \n"
6233+#ifdef CONFIG_PAX_REFCOUNT
6234+ /* Note: Dest reg is not modified on overflow */
6235+ "2: \n"
6236+ _ASM_EXTABLE(1b, 2b)
6237+#endif
6238+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6239+ raw_local_irq_restore(flags);
6240+ }
6241+
6242+ smp_llsc_mb();
6243+
6244+ return result;
6245+}
6246+
6247+static __inline__ long atomic64_sub_return_unchecked(long i, atomic64_unchecked_t *v)
6248 {
6249 long result;
6250
6251@@ -605,7 +1274,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6252 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6253 * The function returns the old value of @v minus @i.
6254 */
6255-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6256+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6257 {
6258 long result;
6259
6260@@ -662,9 +1331,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6261 return result;
6262 }
6263
6264-#define atomic64_cmpxchg(v, o, n) \
6265- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6266-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6267+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6268+{
6269+ return cmpxchg(&v->counter, old, new);
6270+}
6271+
6272+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6273+ long new)
6274+{
6275+ return cmpxchg(&(v->counter), old, new);
6276+}
6277+
6278+static inline long atomic64_xchg(atomic64_t *v, long new)
6279+{
6280+ return xchg(&v->counter, new);
6281+}
6282+
6283+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6284+{
6285+ return xchg(&(v->counter), new);
6286+}
6287
6288 /**
6289 * atomic64_add_unless - add unless the number is a given value
6290@@ -694,6 +1380,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6291
6292 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6293 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6294+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6295
6296 /*
6297 * atomic64_sub_and_test - subtract value from variable and test result
6298@@ -715,6 +1402,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6299 * other cases.
6300 */
6301 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6302+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
6303
6304 /*
6305 * atomic64_dec_and_test - decrement by 1 and test
6306@@ -739,6 +1427,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6307 * Atomically increments @v by 1.
6308 */
6309 #define atomic64_inc(v) atomic64_add(1, (v))
6310+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6311
6312 /*
6313 * atomic64_dec - decrement and test
6314@@ -747,6 +1436,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6315 * Atomically decrements @v by 1.
6316 */
6317 #define atomic64_dec(v) atomic64_sub(1, (v))
6318+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6319
6320 /*
6321 * atomic64_add_negative - add and test if negative
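The pattern running through this whole file: every atomic_* operation gains an atomic_*_unchecked twin. With PAX_REFCOUNT enabled, the checked ops use the trapping add/sub (and dadd/dsub) instructions, whose overflow exception is routed through the _ASM_EXTABLE entries, while the unchecked ops keep the wrapping addu/subu forms for counters where wrap-around is harmless. A usage sketch, assuming the API above (obj_refcount and rx_packets are invented names):

static atomic_t obj_refcount = ATOMIC_INIT(1);		/* must never wrap */
static atomic_unchecked_t rx_packets = ATOMIC_INIT(0);	/* may wrap freely */

static void example(void)
{
	atomic_inc(&obj_refcount);		/* overflow traps -> do_ov() fixup */
	atomic_inc_unchecked(&rx_packets);	/* overflow wraps silently */
}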
6322diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6323index b4db69f..8f3b093 100644
6324--- a/arch/mips/include/asm/cache.h
6325+++ b/arch/mips/include/asm/cache.h
6326@@ -9,10 +9,11 @@
6327 #ifndef _ASM_CACHE_H
6328 #define _ASM_CACHE_H
6329
6330+#include <linux/const.h>
6331 #include <kmalloc.h>
6332
6333 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6334-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6335+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6336
6337 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6338 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6339diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6340index cf3ae24..238d22f 100644
6341--- a/arch/mips/include/asm/elf.h
6342+++ b/arch/mips/include/asm/elf.h
6343@@ -372,13 +372,16 @@ extern const char *__elf_platform;
6344 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6345 #endif
6346
6347+#ifdef CONFIG_PAX_ASLR
6348+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6349+
6350+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6351+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6352+#endif
6353+
6354 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6355 struct linux_binprm;
6356 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6357 int uses_interp);
6358
6359-struct mm_struct;
6360-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6361-#define arch_randomize_brk arch_randomize_brk
6362-
6363 #endif /* _ASM_ELF_H */
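PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN are bit counts, not byte sizes. Worked example, assuming 4 KB pages (PAGE_SHIFT == 12): a 32-bit task gets 27 - 12 = 15 random bits, so the base moves within a 1 << 27 = 128 MB window; a 64-bit task gets 36 - 12 = 24 bits, a 64 GB window. A sketch of how such a delta is typically derived (delta_mmap() is illustrative, not the PaX function):

static unsigned long delta_mmap(unsigned long rand, int is_32bit)
{
	unsigned int bits = (is_32bit ? 27 : 36) - 12;	/* PAX_DELTA_MMAP_LEN */

	return (rand & ((1UL << bits) - 1)) << 12;	/* page-aligned offset */
}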
6364diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6365index c1f6afa..38cc6e9 100644
6366--- a/arch/mips/include/asm/exec.h
6367+++ b/arch/mips/include/asm/exec.h
6368@@ -12,6 +12,6 @@
6369 #ifndef _ASM_EXEC_H
6370 #define _ASM_EXEC_H
6371
6372-extern unsigned long arch_align_stack(unsigned long sp);
6373+#define arch_align_stack(x) ((x) & ~0xfUL)
6374
6375 #endif /* _ASM_EXEC_H */
6376diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6377index d44622c..64990d2 100644
6378--- a/arch/mips/include/asm/local.h
6379+++ b/arch/mips/include/asm/local.h
6380@@ -12,15 +12,25 @@ typedef struct
6381 atomic_long_t a;
6382 } local_t;
6383
6384+typedef struct {
6385+ atomic_long_unchecked_t a;
6386+} local_unchecked_t;
6387+
6388 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6389
6390 #define local_read(l) atomic_long_read(&(l)->a)
6391+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6392 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6393+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6394
6395 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6396+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6397 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6398+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6399 #define local_inc(l) atomic_long_inc(&(l)->a)
6400+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6401 #define local_dec(l) atomic_long_dec(&(l)->a)
6402+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6403
6404 /*
6405 * Same as above, but return the result value
6406@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6407 return result;
6408 }
6409
6410+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6411+{
6412+ unsigned long result;
6413+
6414+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6415+ unsigned long temp;
6416+
6417+ __asm__ __volatile__(
6418+ " .set mips3 \n"
6419+ "1:" __LL "%1, %2 # local_add_return \n"
6420+ " addu %0, %1, %3 \n"
6421+ __SC "%0, %2 \n"
6422+ " beqzl %0, 1b \n"
6423+ " addu %0, %1, %3 \n"
6424+ " .set mips0 \n"
6425+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6426+ : "Ir" (i), "m" (l->a.counter)
6427+ : "memory");
6428+ } else if (kernel_uses_llsc) {
6429+ unsigned long temp;
6430+
6431+ __asm__ __volatile__(
6432+ " .set mips3 \n"
6433+ "1:" __LL "%1, %2 # local_add_return \n"
6434+ " addu %0, %1, %3 \n"
6435+ __SC "%0, %2 \n"
6436+ " beqz %0, 1b \n"
6437+ " addu %0, %1, %3 \n"
6438+ " .set mips0 \n"
6439+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6440+ : "Ir" (i), "m" (l->a.counter)
6441+ : "memory");
6442+ } else {
6443+ unsigned long flags;
6444+
6445+ local_irq_save(flags);
6446+ result = l->a.counter;
6447+ result += i;
6448+ l->a.counter = result;
6449+ local_irq_restore(flags);
6450+ }
6451+
6452+ return result;
6453+}
6454+
6455 static __inline__ long local_sub_return(long i, local_t * l)
6456 {
6457 unsigned long result;
6458@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6459
6460 #define local_cmpxchg(l, o, n) \
6461 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6462+#define local_cmpxchg_unchecked(l, o, n) \
6463+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6464 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6465
6466 /**
6467diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6468index f6be474..12ad554 100644
6469--- a/arch/mips/include/asm/page.h
6470+++ b/arch/mips/include/asm/page.h
6471@@ -95,7 +95,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6472 #ifdef CONFIG_CPU_MIPS32
6473 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6474 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6475- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6476+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6477 #else
6478 typedef struct { unsigned long long pte; } pte_t;
6479 #define pte_val(x) ((x).pte)
6480diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6481index 881d18b..cea38bc 100644
6482--- a/arch/mips/include/asm/pgalloc.h
6483+++ b/arch/mips/include/asm/pgalloc.h
6484@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6485 {
6486 set_pud(pud, __pud((unsigned long)pmd));
6487 }
6488+
6489+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6490+{
6491+ pud_populate(mm, pud, pmd);
6492+}
6493 #endif
6494
6495 /*
6496diff --git a/arch/mips/include/asm/smtc_proc.h b/arch/mips/include/asm/smtc_proc.h
6497index 25da651..ae2a259 100644
6498--- a/arch/mips/include/asm/smtc_proc.h
6499+++ b/arch/mips/include/asm/smtc_proc.h
6500@@ -18,6 +18,6 @@ extern struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
6501
6502 /* Count of number of recoveries of "stolen" FPU access rights on 34K */
6503
6504-extern atomic_t smtc_fpu_recoveries;
6505+extern atomic_unchecked_t smtc_fpu_recoveries;
6506
6507 #endif /* __ASM_SMTC_PROC_H */
6508diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6509index 61215a3..213ee0e 100644
6510--- a/arch/mips/include/asm/thread_info.h
6511+++ b/arch/mips/include/asm/thread_info.h
6512@@ -116,6 +116,8 @@ static inline struct thread_info *current_thread_info(void)
6513 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
6514 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
6515 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
6516+/* li takes a 32bit immediate */
6517+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
6518 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
6519
6520 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
6521@@ -132,20 +134,18 @@ static inline struct thread_info *current_thread_info(void)
6522 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
6523 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
6524 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
6525+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6526
6527-#define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6528- _TIF_SYSCALL_AUDIT)
6529+#define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
6530
6531 /* work to do in syscall_trace_leave() */
6532-#define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6533- _TIF_SYSCALL_AUDIT)
6534+#define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
6535
6536 /* work to do on interrupt/exception return */
6537 #define _TIF_WORK_MASK \
6538 (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
6539 /* work to do on any return to u-space */
6540-#define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6541- _TIF_WORK_SYSCALL_EXIT)
6542+#define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT | _TIF_GRSEC_SETXID)
6543
6544 #endif /* __KERNEL__ */
6545
6546diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6547index 1188e00..41cf144 100644
6548--- a/arch/mips/kernel/binfmt_elfn32.c
6549+++ b/arch/mips/kernel/binfmt_elfn32.c
6550@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6551 #undef ELF_ET_DYN_BASE
6552 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6553
6554+#ifdef CONFIG_PAX_ASLR
6555+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6556+
6557+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6558+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6559+#endif
6560+
6561 #include <asm/processor.h>
6562 #include <linux/module.h>
6563 #include <linux/elfcore.h>
6564diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
6565index 202e581..689ca79 100644
6566--- a/arch/mips/kernel/binfmt_elfo32.c
6567+++ b/arch/mips/kernel/binfmt_elfo32.c
6568@@ -56,6 +56,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6569 #undef ELF_ET_DYN_BASE
6570 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6571
6572+#ifdef CONFIG_PAX_ASLR
6573+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6574+
6575+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6576+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6577+#endif
6578+
6579 #include <asm/processor.h>
6580
6581 /*
6582diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
6583index d1fea7a..45602ea 100644
6584--- a/arch/mips/kernel/irq.c
6585+++ b/arch/mips/kernel/irq.c
6586@@ -77,17 +77,17 @@ void ack_bad_irq(unsigned int irq)
6587 printk("unexpected IRQ # %d\n", irq);
6588 }
6589
6590-atomic_t irq_err_count;
6591+atomic_unchecked_t irq_err_count;
6592
6593 int arch_show_interrupts(struct seq_file *p, int prec)
6594 {
6595- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
6596+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
6597 return 0;
6598 }
6599
6600 asmlinkage void spurious_interrupt(void)
6601 {
6602- atomic_inc(&irq_err_count);
6603+ atomic_inc_unchecked(&irq_err_count);
6604 }
6605
6606 void __init init_IRQ(void)
6607diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
6608index ddc7610..8c58f17 100644
6609--- a/arch/mips/kernel/process.c
6610+++ b/arch/mips/kernel/process.c
6611@@ -566,15 +566,3 @@ unsigned long get_wchan(struct task_struct *task)
6612 out:
6613 return pc;
6614 }
6615-
6616-/*
6617- * Don't forget that the stack pointer must be aligned on a 8 bytes
6618- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
6619- */
6620-unsigned long arch_align_stack(unsigned long sp)
6621-{
6622- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6623- sp -= get_random_int() & ~PAGE_MASK;
6624-
6625- return sp & ALMASK;
6626-}
6627diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
6628index 8ae1ebe..1bcbf47 100644
6629--- a/arch/mips/kernel/ptrace.c
6630+++ b/arch/mips/kernel/ptrace.c
6631@@ -529,6 +529,10 @@ static inline int audit_arch(void)
6632 return arch;
6633 }
6634
6635+#ifdef CONFIG_GRKERNSEC_SETXID
6636+extern void gr_delayed_cred_worker(void);
6637+#endif
6638+
6639 /*
6640 * Notification of system call entry/exit
6641 * - triggered by current->work.syscall_trace
6642@@ -540,6 +544,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
6643 /* do the secure computing check first */
6644 secure_computing_strict(regs->regs[2]);
6645
6646+#ifdef CONFIG_GRKERNSEC_SETXID
6647+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6648+ gr_delayed_cred_worker();
6649+#endif
6650+
6651 if (!(current->ptrace & PT_PTRACED))
6652 goto out;
6653
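TIF_GRSEC_SETXID and gr_delayed_cred_worker() give grsecurity its "setuid applies to every thread" guarantee: the thread changing credentials flags its siblings, and each sibling applies the new creds at its next syscall boundary, which is what the hook above checks. A hedged sketch of the producer side (illustrative, not the grsecurity code):

static void mark_threads_for_cred_update(struct task_struct *leader)
{
	struct task_struct *t = leader;

	do {
		set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
	} while_each_thread(leader, t);
}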
6654diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c
6655index c10aa84..9ec2e60 100644
6656--- a/arch/mips/kernel/smtc-proc.c
6657+++ b/arch/mips/kernel/smtc-proc.c
6658@@ -31,7 +31,7 @@ unsigned long selfipis[NR_CPUS];
6659
6660 struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
6661
6662-atomic_t smtc_fpu_recoveries;
6663+atomic_unchecked_t smtc_fpu_recoveries;
6664
6665 static int smtc_proc_show(struct seq_file *m, void *v)
6666 {
6667@@ -48,7 +48,7 @@ static int smtc_proc_show(struct seq_file *m, void *v)
6668 for(i = 0; i < NR_CPUS; i++)
6669 seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
6670 seq_printf(m, "%d Recoveries of \"stolen\" FPU\n",
6671- atomic_read(&smtc_fpu_recoveries));
6672+ atomic_read_unchecked(&smtc_fpu_recoveries));
6673 return 0;
6674 }
6675
6676@@ -73,7 +73,7 @@ void init_smtc_stats(void)
6677 smtc_cpu_stats[i].selfipis = 0;
6678 }
6679
6680- atomic_set(&smtc_fpu_recoveries, 0);
6681+ atomic_set_unchecked(&smtc_fpu_recoveries, 0);
6682
6683 proc_create("smtc", 0444, NULL, &smtc_proc_fops);
6684 }
6685diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
6686index dfc1b91..11a2c07 100644
6687--- a/arch/mips/kernel/smtc.c
6688+++ b/arch/mips/kernel/smtc.c
6689@@ -1359,7 +1359,7 @@ void smtc_soft_dump(void)
6690 }
6691 smtc_ipi_qdump();
6692 printk("%d Recoveries of \"stolen\" FPU\n",
6693- atomic_read(&smtc_fpu_recoveries));
6694+ atomic_read_unchecked(&smtc_fpu_recoveries));
6695 }
6696
6697
6698diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
6699index 84536bf..79caa4d 100644
6700--- a/arch/mips/kernel/sync-r4k.c
6701+++ b/arch/mips/kernel/sync-r4k.c
6702@@ -21,8 +21,8 @@
6703 #include <asm/mipsregs.h>
6704
6705 static atomic_t count_start_flag = ATOMIC_INIT(0);
6706-static atomic_t count_count_start = ATOMIC_INIT(0);
6707-static atomic_t count_count_stop = ATOMIC_INIT(0);
6708+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
6709+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
6710 static atomic_t count_reference = ATOMIC_INIT(0);
6711
6712 #define COUNTON 100
6713@@ -69,13 +69,13 @@ void synchronise_count_master(int cpu)
6714
6715 for (i = 0; i < NR_LOOPS; i++) {
6716 /* slaves loop on '!= 2' */
6717- while (atomic_read(&count_count_start) != 1)
6718+ while (atomic_read_unchecked(&count_count_start) != 1)
6719 mb();
6720- atomic_set(&count_count_stop, 0);
6721+ atomic_set_unchecked(&count_count_stop, 0);
6722 smp_wmb();
6723
6724 /* this lets the slaves write their count register */
6725- atomic_inc(&count_count_start);
6726+ atomic_inc_unchecked(&count_count_start);
6727
6728 /*
6729 * Everyone initialises count in the last loop:
6730@@ -86,11 +86,11 @@ void synchronise_count_master(int cpu)
6731 /*
6732 * Wait for all slaves to leave the synchronization point:
6733 */
6734- while (atomic_read(&count_count_stop) != 1)
6735+ while (atomic_read_unchecked(&count_count_stop) != 1)
6736 mb();
6737- atomic_set(&count_count_start, 0);
6738+ atomic_set_unchecked(&count_count_start, 0);
6739 smp_wmb();
6740- atomic_inc(&count_count_stop);
6741+ atomic_inc_unchecked(&count_count_stop);
6742 }
6743 /* Arrange for an interrupt in a short while */
6744 write_c0_compare(read_c0_count() + COUNTON);
6745@@ -131,8 +131,8 @@ void synchronise_count_slave(int cpu)
6746 initcount = atomic_read(&count_reference);
6747
6748 for (i = 0; i < NR_LOOPS; i++) {
6749- atomic_inc(&count_count_start);
6750- while (atomic_read(&count_count_start) != 2)
6751+ atomic_inc_unchecked(&count_count_start);
6752+ while (atomic_read_unchecked(&count_count_start) != 2)
6753 mb();
6754
6755 /*
6756@@ -141,8 +141,8 @@ void synchronise_count_slave(int cpu)
6757 if (i == NR_LOOPS-1)
6758 write_c0_count(initcount);
6759
6760- atomic_inc(&count_count_stop);
6761- while (atomic_read(&count_count_stop) != 2)
6762+ atomic_inc_unchecked(&count_count_stop);
6763+ while (atomic_read_unchecked(&count_count_stop) != 2)
6764 mb();
6765 }
6766 /* Arrange for an interrupt in a short while */
6767diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
6768index 524841f..3eef41e 100644
6769--- a/arch/mips/kernel/traps.c
6770+++ b/arch/mips/kernel/traps.c
6771@@ -684,7 +684,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
6772 siginfo_t info;
6773
6774 prev_state = exception_enter();
6775- die_if_kernel("Integer overflow", regs);
6776+ if (unlikely(!user_mode(regs))) {
6777+
6778+#ifdef CONFIG_PAX_REFCOUNT
6779+ if (fixup_exception(regs)) {
6780+ pax_report_refcount_overflow(regs);
6781+ exception_exit(prev_state);
6782+ return;
6783+ }
6784+#endif
6785+
6786+ die("Integer overflow", regs);
6787+ }
6788
6789 info.si_code = FPE_INTOVF;
6790 info.si_signo = SIGFPE;
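This hunk is the landing pad for the trapping add/dadd instructions in atomic.h: the overflow exception arrives here in kernel mode, fixup_exception() looks the faulting EPC up in the __ex_table entries emitted by _ASM_EXTABLE and rewinds execution to the fixup label, so the counter saturates instead of wrapping. A simplified sketch of what one table entry and its lookup amount to (field names follow the common kernel pattern, not necessarily the MIPS layout):

struct extable_entry_sketch {
	unsigned long insn;	/* address of the trapping add/dadd */
	unsigned long fixup;	/* the "2:"/"3:" label after it */
};

static int fixup_sketch(struct pt_regs *regs,
			const struct extable_entry_sketch *e)
{
	if (regs->cp0_epc != e->insn)
		return 0;		/* fault not covered by this entry */
	regs->cp0_epc = e->fixup;	/* resume past the trapping insn */
	return 1;
}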
6791diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
6792index becc42b..9e43d4b 100644
6793--- a/arch/mips/mm/fault.c
6794+++ b/arch/mips/mm/fault.c
6795@@ -28,6 +28,23 @@
6796 #include <asm/highmem.h> /* For VMALLOC_END */
6797 #include <linux/kdebug.h>
6798
6799+#ifdef CONFIG_PAX_PAGEEXEC
6800+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6801+{
6802+ unsigned long i;
6803+
6804+ printk(KERN_ERR "PAX: bytes at PC: ");
6805+ for (i = 0; i < 5; i++) {
6806+ unsigned int c;
6807+ if (get_user(c, (unsigned int *)pc+i))
6808+ printk(KERN_CONT "???????? ");
6809+ else
6810+ printk(KERN_CONT "%08x ", c);
6811+ }
6812+ printk("\n");
6813+}
6814+#endif
6815+
6816 /*
6817 * This routine handles page faults. It determines the address,
6818 * and the problem, and then passes it off to one of the appropriate
6819@@ -199,6 +216,14 @@ bad_area:
6820 bad_area_nosemaphore:
6821 /* User mode accesses just cause a SIGSEGV */
6822 if (user_mode(regs)) {
6823+
6824+#ifdef CONFIG_PAX_PAGEEXEC
6825+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
6826+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
6827+ do_group_exit(SIGKILL);
6828+ }
6829+#endif
6830+
6831 tsk->thread.cp0_badvaddr = address;
6832 tsk->thread.error_code = write;
6833 #if 0
6834diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
6835index f1baadd..8537544 100644
6836--- a/arch/mips/mm/mmap.c
6837+++ b/arch/mips/mm/mmap.c
6838@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6839 struct vm_area_struct *vma;
6840 unsigned long addr = addr0;
6841 int do_color_align;
6842+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6843 struct vm_unmapped_area_info info;
6844
6845 if (unlikely(len > TASK_SIZE))
6846@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6847 do_color_align = 1;
6848
6849 /* requesting a specific address */
6850+
6851+#ifdef CONFIG_PAX_RANDMMAP
6852+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
6853+#endif
6854+
6855 if (addr) {
6856 if (do_color_align)
6857 addr = COLOUR_ALIGN(addr, pgoff);
6858@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6859 addr = PAGE_ALIGN(addr);
6860
6861 vma = find_vma(mm, addr);
6862- if (TASK_SIZE - len >= addr &&
6863- (!vma || addr + len <= vma->vm_start))
6864+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
6865 return addr;
6866 }
6867
6868 info.length = len;
6869 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
6870 info.align_offset = pgoff << PAGE_SHIFT;
6871+ info.threadstack_offset = offset;
6872
6873 if (dir == DOWN) {
6874 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
6875@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6876 {
6877 unsigned long random_factor = 0UL;
6878
6879+#ifdef CONFIG_PAX_RANDMMAP
6880+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6881+#endif
6882+
6883 if (current->flags & PF_RANDOMIZE) {
6884 random_factor = get_random_int();
6885 random_factor = random_factor << PAGE_SHIFT;
6886@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6887
6888 if (mmap_is_legacy()) {
6889 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
6890+
6891+#ifdef CONFIG_PAX_RANDMMAP
6892+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6893+ mm->mmap_base += mm->delta_mmap;
6894+#endif
6895+
6896 mm->get_unmapped_area = arch_get_unmapped_area;
6897 } else {
6898 mm->mmap_base = mmap_base(random_factor);
6899+
6900+#ifdef CONFIG_PAX_RANDMMAP
6901+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6902+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6903+#endif
6904+
6905 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6906 }
6907 }
6908
6909-static inline unsigned long brk_rnd(void)
6910-{
6911- unsigned long rnd = get_random_int();
6912-
6913- rnd = rnd << PAGE_SHIFT;
6914- /* 8MB for 32bit, 256MB for 64bit */
6915- if (TASK_IS_32BIT_ADDR)
6916- rnd = rnd & 0x7ffffful;
6917- else
6918- rnd = rnd & 0xffffffful;
6919-
6920- return rnd;
6921-}
6922-
6923-unsigned long arch_randomize_brk(struct mm_struct *mm)
6924-{
6925- unsigned long base = mm->brk;
6926- unsigned long ret;
6927-
6928- ret = PAGE_ALIGN(base + brk_rnd());
6929-
6930- if (ret < mm->brk)
6931- return mm->brk;
6932-
6933- return ret;
6934-}
6935-
6936 int __virt_addr_valid(const volatile void *kaddr)
6937 {
6938 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
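Both hunks in this mmap.c follow the same shape: ignore the caller's address hint when MF_PAX_RANDMMAP is set, then bias the chosen base by per-exec random deltas (delta_mmap for the bottom-up layout, delta_mmap + delta_stack for top-down). A hedged arithmetic sketch of the base selection; the helper and parameters are stand-ins, only the delta arithmetic mirrors the patch:

/* Illustrative only: how the patched arch_pick_mmap_layout() biases the base. */
static unsigned long pick_mmap_base(int legacy, int randmmap,
				    unsigned long bottom_base, unsigned long top_base,
				    unsigned long delta_mmap, unsigned long delta_stack)
{
	if (legacy)	/* bottom-up search: push the start of the mmap area up */
		return bottom_base + (randmmap ? delta_mmap : 0);
	/* top-down search: pull the base down, leaving room below the stack */
	return top_base - (randmmap ? delta_mmap + delta_stack : 0);
}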
6939diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
6940index a2358b4..7cead4f 100644
6941--- a/arch/mips/sgi-ip27/ip27-nmi.c
6942+++ b/arch/mips/sgi-ip27/ip27-nmi.c
6943@@ -187,9 +187,9 @@ void
6944 cont_nmi_dump(void)
6945 {
6946 #ifndef REAL_NMI_SIGNAL
6947- static atomic_t nmied_cpus = ATOMIC_INIT(0);
6948+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
6949
6950- atomic_inc(&nmied_cpus);
6951+ atomic_inc_unchecked(&nmied_cpus);
6952 #endif
6953 /*
6954 * Only allow 1 cpu to proceed
6955@@ -233,7 +233,7 @@ cont_nmi_dump(void)
6956 udelay(10000);
6957 }
6958 #else
6959- while (atomic_read(&nmied_cpus) != num_online_cpus());
6960+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
6961 #endif
6962
6963 /*
6964diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
6965index 967d144..db12197 100644
6966--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
6967+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
6968@@ -11,12 +11,14 @@
6969 #ifndef _ASM_PROC_CACHE_H
6970 #define _ASM_PROC_CACHE_H
6971
6972+#include <linux/const.h>
6973+
6974 /* L1 cache */
6975
6976 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
6977 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
6978-#define L1_CACHE_BYTES 16 /* bytes per entry */
6979 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
6980+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
6981 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
6982
6983 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
6984diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
6985index bcb5df2..84fabd2 100644
6986--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
6987+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
6988@@ -16,13 +16,15 @@
6989 #ifndef _ASM_PROC_CACHE_H
6990 #define _ASM_PROC_CACHE_H
6991
6992+#include <linux/const.h>
6993+
6994 /*
6995 * L1 cache
6996 */
6997 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
6998 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
6999-#define L1_CACHE_BYTES 32 /* bytes per entry */
7000 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7001+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7002 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7003
7004 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7005diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7006index 4ce7a01..449202a 100644
7007--- a/arch/openrisc/include/asm/cache.h
7008+++ b/arch/openrisc/include/asm/cache.h
7009@@ -19,11 +19,13 @@
7010 #ifndef __ASM_OPENRISC_CACHE_H
7011 #define __ASM_OPENRISC_CACHE_H
7012
7013+#include <linux/const.h>
7014+
7015 /* FIXME: How can we replace these with values from the CPU...
7016 * they shouldn't be hard-coded!
7017 */
7018
7019-#define L1_CACHE_BYTES 16
7020 #define L1_CACHE_SHIFT 4
7021+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7022
7023 #endif /* __ASM_OPENRISC_CACHE_H */
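Every cache.h touched by this series gets the same two-line change: L1_CACHE_BYTES stops being an independent literal and is derived from L1_CACHE_SHIFT through _AC(1,UL), so the two can no longer drift apart and the constant is unsigned long in C while remaining usable from assembly. A self-contained sketch of the idiom (the _AC definition mirrors <uapi/linux/const.h>):

/* _AC() appends the UL suffix only when compiling C, so the same header
 * works in .S files, and BYTES is always exactly 1 << SHIFT. */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)
#endif

#define L1_CACHE_SHIFT	4
#define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)	/* 16UL, not a plain int 16 */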
7024diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7025index 472886c..00e7df9 100644
7026--- a/arch/parisc/include/asm/atomic.h
7027+++ b/arch/parisc/include/asm/atomic.h
7028@@ -252,6 +252,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7029 return dec;
7030 }
7031
7032+#define atomic64_read_unchecked(v) atomic64_read(v)
7033+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7034+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7035+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7036+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7037+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7038+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7039+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7040+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7041+
7042 #endif /* !CONFIG_64BIT */
7043
7044
7045diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7046index 47f11c7..3420df2 100644
7047--- a/arch/parisc/include/asm/cache.h
7048+++ b/arch/parisc/include/asm/cache.h
7049@@ -5,6 +5,7 @@
7050 #ifndef __ARCH_PARISC_CACHE_H
7051 #define __ARCH_PARISC_CACHE_H
7052
7053+#include <linux/const.h>
7054
7055 /*
7056 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
7057@@ -15,13 +16,13 @@
7058 * just ruin performance.
7059 */
7060 #ifdef CONFIG_PA20
7061-#define L1_CACHE_BYTES 64
7062 #define L1_CACHE_SHIFT 6
7063 #else
7064-#define L1_CACHE_BYTES 32
7065 #define L1_CACHE_SHIFT 5
7066 #endif
7067
7068+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7069+
7070 #ifndef __ASSEMBLY__
7071
7072 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7073diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
7074index ad2b503..bdf1651 100644
7075--- a/arch/parisc/include/asm/elf.h
7076+++ b/arch/parisc/include/asm/elf.h
7077@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
7078
7079 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
7080
7081+#ifdef CONFIG_PAX_ASLR
7082+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7083+
7084+#define PAX_DELTA_MMAP_LEN 16
7085+#define PAX_DELTA_STACK_LEN 16
7086+#endif
7087+
7088 /* This yields a mask that user programs can use to figure out what
7089 instruction set this CPU supports. This could be done in user space,
7090 but it's not easy, and we've already done it here. */
7091diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
7092index fc987a1..6e068ef 100644
7093--- a/arch/parisc/include/asm/pgalloc.h
7094+++ b/arch/parisc/include/asm/pgalloc.h
7095@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7096 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
7097 }
7098
7099+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7100+{
7101+ pgd_populate(mm, pgd, pmd);
7102+}
7103+
7104 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
7105 {
7106 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
7107@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7108 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
7109 #define pmd_free(mm, x) do { } while (0)
7110 #define pgd_populate(mm, pmd, pte) BUG()
7111+#define pgd_populate_kernel(mm, pmd, pte) BUG()
7112
7113 #endif
7114
7115diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
7116index 34899b5..02dd060 100644
7117--- a/arch/parisc/include/asm/pgtable.h
7118+++ b/arch/parisc/include/asm/pgtable.h
7119@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
7120 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
7121 #define PAGE_COPY PAGE_EXECREAD
7122 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
7123+
7124+#ifdef CONFIG_PAX_PAGEEXEC
7125+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
7126+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7127+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7128+#else
7129+# define PAGE_SHARED_NOEXEC PAGE_SHARED
7130+# define PAGE_COPY_NOEXEC PAGE_COPY
7131+# define PAGE_READONLY_NOEXEC PAGE_READONLY
7132+#endif
7133+
7134 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
7135 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
7136 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
7137diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
7138index e0a8235..ce2f1e1 100644
7139--- a/arch/parisc/include/asm/uaccess.h
7140+++ b/arch/parisc/include/asm/uaccess.h
7141@@ -245,10 +245,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
7142 const void __user *from,
7143 unsigned long n)
7144 {
7145- int sz = __compiletime_object_size(to);
7146+ size_t sz = __compiletime_object_size(to);
7147 int ret = -EFAULT;
7148
7149- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
7150+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
7151 ret = __copy_from_user(to, from, n);
7152 else
7153 copy_from_user_overflow();
7154diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
7155index 2a625fb..9908930 100644
7156--- a/arch/parisc/kernel/module.c
7157+++ b/arch/parisc/kernel/module.c
7158@@ -98,16 +98,38 @@
7159
7160 /* three functions to determine where in the module core
7161 * or init pieces the location is */
7162+static inline int in_init_rx(struct module *me, void *loc)
7163+{
7164+ return (loc >= me->module_init_rx &&
7165+ loc < (me->module_init_rx + me->init_size_rx));
7166+}
7167+
7168+static inline int in_init_rw(struct module *me, void *loc)
7169+{
7170+ return (loc >= me->module_init_rw &&
7171+ loc < (me->module_init_rw + me->init_size_rw));
7172+}
7173+
7174 static inline int in_init(struct module *me, void *loc)
7175 {
7176- return (loc >= me->module_init &&
7177- loc <= (me->module_init + me->init_size));
7178+ return in_init_rx(me, loc) || in_init_rw(me, loc);
7179+}
7180+
7181+static inline int in_core_rx(struct module *me, void *loc)
7182+{
7183+ return (loc >= me->module_core_rx &&
7184+ loc < (me->module_core_rx + me->core_size_rx));
7185+}
7186+
7187+static inline int in_core_rw(struct module *me, void *loc)
7188+{
7189+ return (loc >= me->module_core_rw &&
7190+ loc < (me->module_core_rw + me->core_size_rw));
7191 }
7192
7193 static inline int in_core(struct module *me, void *loc)
7194 {
7195- return (loc >= me->module_core &&
7196- loc <= (me->module_core + me->core_size));
7197+ return in_core_rx(me, loc) || in_core_rw(me, loc);
7198 }
7199
7200 static inline int in_local(struct module *me, void *loc)
7201@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
7202 }
7203
7204 /* align things a bit */
7205- me->core_size = ALIGN(me->core_size, 16);
7206- me->arch.got_offset = me->core_size;
7207- me->core_size += gots * sizeof(struct got_entry);
7208+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7209+ me->arch.got_offset = me->core_size_rw;
7210+ me->core_size_rw += gots * sizeof(struct got_entry);
7211
7212- me->core_size = ALIGN(me->core_size, 16);
7213- me->arch.fdesc_offset = me->core_size;
7214- me->core_size += fdescs * sizeof(Elf_Fdesc);
7215+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7216+ me->arch.fdesc_offset = me->core_size_rw;
7217+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7218
7219 me->arch.got_max = gots;
7220 me->arch.fdesc_max = fdescs;
7221@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7222
7223 BUG_ON(value == 0);
7224
7225- got = me->module_core + me->arch.got_offset;
7226+ got = me->module_core_rw + me->arch.got_offset;
7227 for (i = 0; got[i].addr; i++)
7228 if (got[i].addr == value)
7229 goto out;
7230@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7231 #ifdef CONFIG_64BIT
7232 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7233 {
7234- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7235+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7236
7237 if (!value) {
7238 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7239@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7240
7241 /* Create new one */
7242 fdesc->addr = value;
7243- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7244+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7245 return (Elf_Addr)fdesc;
7246 }
7247 #endif /* CONFIG_64BIT */
7248@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
7249
7250 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7251 end = table + sechdrs[me->arch.unwind_section].sh_size;
7252- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7253+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7254
7255 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7256 me->arch.unwind_section, table, end, gp);
7257diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7258index 0d3a9d4..44975d0 100644
7259--- a/arch/parisc/kernel/sys_parisc.c
7260+++ b/arch/parisc/kernel/sys_parisc.c
7261@@ -33,9 +33,11 @@
7262 #include <linux/utsname.h>
7263 #include <linux/personality.h>
7264
7265-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
7266+static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
7267+ unsigned long flags)
7268 {
7269 struct vm_unmapped_area_info info;
7270+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7271
7272 info.flags = 0;
7273 info.length = len;
7274@@ -43,6 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
7275 info.high_limit = TASK_SIZE;
7276 info.align_mask = 0;
7277 info.align_offset = 0;
7278+ info.threadstack_offset = offset;
7279 return vm_unmapped_area(&info);
7280 }
7281
7282@@ -69,15 +72,17 @@ static unsigned long shared_align_offset(struct file *filp, unsigned long pgoff)
7283 }
7284
7285 static unsigned long get_shared_area(struct file *filp, unsigned long addr,
7286- unsigned long len, unsigned long pgoff)
7287+ unsigned long len, unsigned long pgoff, unsigned long flags)
7288 {
7289 struct vm_unmapped_area_info info;
7290+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7291
7292 info.flags = 0;
7293 info.length = len;
7294 info.low_limit = PAGE_ALIGN(addr);
7295 info.high_limit = TASK_SIZE;
7296 info.align_mask = PAGE_MASK & (SHMLBA - 1);
7297+ info.threadstack_offset = offset;
7298 info.align_offset = shared_align_offset(filp, pgoff);
7299 return vm_unmapped_area(&info);
7300 }
7301@@ -93,13 +98,20 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7302 return -EINVAL;
7303 return addr;
7304 }
7305- if (!addr)
7306+ if (!addr) {
7307 addr = TASK_UNMAPPED_BASE;
7308
7309+#ifdef CONFIG_PAX_RANDMMAP
7310+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
7311+ addr += current->mm->delta_mmap;
7312+#endif
7313+
7314+ }
7315+
7316 if (filp || (flags & MAP_SHARED))
7317- addr = get_shared_area(filp, addr, len, pgoff);
7318+ addr = get_shared_area(filp, addr, len, pgoff, flags);
7319 else
7320- addr = get_unshared_area(addr, len);
7321+ addr = get_unshared_area(filp, addr, len, flags);
7322
7323 return addr;
7324 }
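Here both parisc area lookups start feeding a gr_rand_threadstack_offset() value through the search descriptor, the same plumbing added to the MIPS path earlier. A sketch of the extended descriptor, with field names borrowed from struct vm_unmapped_area_info; the struct name and the final field's comment are illustrative:

/* Sketch: the search record carries a per-call random offset so the gap
 * check can keep a randomized cushion between a mapping and a thread stack. */
struct unmapped_area_request {
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
	unsigned long threadstack_offset;	/* new: from gr_rand_threadstack_offset() */
};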
7325diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7326index 1cd1d0c..44ec918 100644
7327--- a/arch/parisc/kernel/traps.c
7328+++ b/arch/parisc/kernel/traps.c
7329@@ -722,9 +722,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7330
7331 down_read(&current->mm->mmap_sem);
7332 vma = find_vma(current->mm,regs->iaoq[0]);
7333- if (vma && (regs->iaoq[0] >= vma->vm_start)
7334- && (vma->vm_flags & VM_EXEC)) {
7335-
7336+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7337 fault_address = regs->iaoq[0];
7338 fault_space = regs->iasq[0];
7339
7340diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7341index 0293588..3b229aa 100644
7342--- a/arch/parisc/mm/fault.c
7343+++ b/arch/parisc/mm/fault.c
7344@@ -15,6 +15,7 @@
7345 #include <linux/sched.h>
7346 #include <linux/interrupt.h>
7347 #include <linux/module.h>
7348+#include <linux/unistd.h>
7349
7350 #include <asm/uaccess.h>
7351 #include <asm/traps.h>
7352@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
7353 static unsigned long
7354 parisc_acctyp(unsigned long code, unsigned int inst)
7355 {
7356- if (code == 6 || code == 16)
7357+ if (code == 6 || code == 7 || code == 16)
7358 return VM_EXEC;
7359
7360 switch (inst & 0xf0000000) {
7361@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
7362 }
7363 #endif
7364
7365+#ifdef CONFIG_PAX_PAGEEXEC
7366+/*
7367+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
7368+ *
7369+ * returns 1 when task should be killed
7370+ * 2 when rt_sigreturn trampoline was detected
7371+ * 3 when unpatched PLT trampoline was detected
7372+ */
7373+static int pax_handle_fetch_fault(struct pt_regs *regs)
7374+{
7375+
7376+#ifdef CONFIG_PAX_EMUPLT
7377+ int err;
7378+
7379+ do { /* PaX: unpatched PLT emulation */
7380+ unsigned int bl, depwi;
7381+
7382+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
7383+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
7384+
7385+ if (err)
7386+ break;
7387+
7388+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
7389+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
7390+
7391+ err = get_user(ldw, (unsigned int *)addr);
7392+ err |= get_user(bv, (unsigned int *)(addr+4));
7393+ err |= get_user(ldw2, (unsigned int *)(addr+8));
7394+
7395+ if (err)
7396+ break;
7397+
7398+ if (ldw == 0x0E801096U &&
7399+ bv == 0xEAC0C000U &&
7400+ ldw2 == 0x0E881095U)
7401+ {
7402+ unsigned int resolver, map;
7403+
7404+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
7405+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
7406+ if (err)
7407+ break;
7408+
7409+ regs->gr[20] = instruction_pointer(regs)+8;
7410+ regs->gr[21] = map;
7411+ regs->gr[22] = resolver;
7412+ regs->iaoq[0] = resolver | 3UL;
7413+ regs->iaoq[1] = regs->iaoq[0] + 4;
7414+ return 3;
7415+ }
7416+ }
7417+ } while (0);
7418+#endif
7419+
7420+#ifdef CONFIG_PAX_EMUTRAMP
7421+
7422+#ifndef CONFIG_PAX_EMUSIGRT
7423+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
7424+ return 1;
7425+#endif
7426+
7427+ do { /* PaX: rt_sigreturn emulation */
7428+ unsigned int ldi1, ldi2, bel, nop;
7429+
7430+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
7431+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
7432+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
7433+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
7434+
7435+ if (err)
7436+ break;
7437+
7438+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
7439+ ldi2 == 0x3414015AU &&
7440+ bel == 0xE4008200U &&
7441+ nop == 0x08000240U)
7442+ {
7443+ regs->gr[25] = (ldi1 & 2) >> 1;
7444+ regs->gr[20] = __NR_rt_sigreturn;
7445+ regs->gr[31] = regs->iaoq[1] + 16;
7446+ regs->sr[0] = regs->iasq[1];
7447+ regs->iaoq[0] = 0x100UL;
7448+ regs->iaoq[1] = regs->iaoq[0] + 4;
7449+ regs->iasq[0] = regs->sr[2];
7450+ regs->iasq[1] = regs->sr[2];
7451+ return 2;
7452+ }
7453+ } while (0);
7454+#endif
7455+
7456+ return 1;
7457+}
7458+
7459+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7460+{
7461+ unsigned long i;
7462+
7463+ printk(KERN_ERR "PAX: bytes at PC: ");
7464+ for (i = 0; i < 5; i++) {
7465+ unsigned int c;
7466+ if (get_user(c, (unsigned int *)pc+i))
7467+ printk(KERN_CONT "???????? ");
7468+ else
7469+ printk(KERN_CONT "%08x ", c);
7470+ }
7471+ printk("\n");
7472+}
7473+#endif
7474+
7475 int fixup_exception(struct pt_regs *regs)
7476 {
7477 const struct exception_table_entry *fix;
7478@@ -204,8 +315,33 @@ retry:
7479
7480 good_area:
7481
7482- if ((vma->vm_flags & acc_type) != acc_type)
7483+ if ((vma->vm_flags & acc_type) != acc_type) {
7484+
7485+#ifdef CONFIG_PAX_PAGEEXEC
7486+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
7487+ (address & ~3UL) == instruction_pointer(regs))
7488+ {
7489+ up_read(&mm->mmap_sem);
7490+ switch (pax_handle_fetch_fault(regs)) {
7491+
7492+#ifdef CONFIG_PAX_EMUPLT
7493+ case 3:
7494+ return;
7495+#endif
7496+
7497+#ifdef CONFIG_PAX_EMUTRAMP
7498+ case 2:
7499+ return;
7500+#endif
7501+
7502+ }
7503+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
7504+ do_group_exit(SIGKILL);
7505+ }
7506+#endif
7507+
7508 goto bad_area;
7509+ }
7510
7511 /*
7512 * If for any reason at all we couldn't handle the fault, make
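The emulation helpers above all follow one idiom: fetch a handful of instruction words at the faulting PC with get_user(), bail out on any fault, and only act when every word matches a known fixed encoding. The skeleton, reduced to two words; the opcode constants are the bl/depwi pair from the PLT case above, and the fetch callback stands in for get_user():

/* Skeleton of the match-at-PC idiom; a partial read means "not a match". */
static int matches_trampoline(const unsigned int *pc,
			      int (*fetch)(unsigned int *out, const unsigned int *addr))
{
	unsigned int w0, w1;

	if (fetch(&w0, pc) || fetch(&w1, pc + 1))
		return 0;		/* unreadable or faulting: treat as no match */
	return w0 == 0xEA9F1FDDU && w1 == 0xD6801C1EU;
}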
7513diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
7514index 38f3b7e..7e485c0 100644
7515--- a/arch/powerpc/Kconfig
7516+++ b/arch/powerpc/Kconfig
7517@@ -378,6 +378,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
7518 config KEXEC
7519 bool "kexec system call"
7520 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
7521+ depends on !GRKERNSEC_KMEM
7522 help
7523 kexec is a system call that implements the ability to shutdown your
7524 current kernel, and to start another kernel. It is like a reboot
7525diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
7526index e3b1d41..8e81edf 100644
7527--- a/arch/powerpc/include/asm/atomic.h
7528+++ b/arch/powerpc/include/asm/atomic.h
7529@@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
7530 return t1;
7531 }
7532
7533+#define atomic64_read_unchecked(v) atomic64_read(v)
7534+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7535+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7536+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7537+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7538+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7539+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7540+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7541+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7542+
7543 #endif /* __powerpc64__ */
7544
7545 #endif /* __KERNEL__ */
7546diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
7547index 9e495c9..b6878e5 100644
7548--- a/arch/powerpc/include/asm/cache.h
7549+++ b/arch/powerpc/include/asm/cache.h
7550@@ -3,6 +3,7 @@
7551
7552 #ifdef __KERNEL__
7553
7554+#include <linux/const.h>
7555
7556 /* bytes per L1 cache line */
7557 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
7558@@ -22,7 +23,7 @@
7559 #define L1_CACHE_SHIFT 7
7560 #endif
7561
7562-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7563+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7564
7565 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7566
7567diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
7568index cc0655a..13eac2e 100644
7569--- a/arch/powerpc/include/asm/elf.h
7570+++ b/arch/powerpc/include/asm/elf.h
7571@@ -28,8 +28,19 @@
7572 the loader. We need to make sure that it is out of the way of the program
7573 that it will "exec", and that there is sufficient room for the brk. */
7574
7575-extern unsigned long randomize_et_dyn(unsigned long base);
7576-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
7577+#define ELF_ET_DYN_BASE (0x20000000)
7578+
7579+#ifdef CONFIG_PAX_ASLR
7580+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
7581+
7582+#ifdef __powerpc64__
7583+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
7584+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
7585+#else
7586+#define PAX_DELTA_MMAP_LEN 15
7587+#define PAX_DELTA_STACK_LEN 15
7588+#endif
7589+#endif
7590
7591 /*
7592 * Our registers are always unsigned longs, whether we're a 32 bit
7593@@ -123,10 +134,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
7594 (0x7ff >> (PAGE_SHIFT - 12)) : \
7595 (0x3ffff >> (PAGE_SHIFT - 12)))
7596
7597-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7598-#define arch_randomize_brk arch_randomize_brk
7599-
7600-
7601 #ifdef CONFIG_SPU_BASE
7602 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
7603 #define NT_SPU 1
7604diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
7605index 8196e9c..d83a9f3 100644
7606--- a/arch/powerpc/include/asm/exec.h
7607+++ b/arch/powerpc/include/asm/exec.h
7608@@ -4,6 +4,6 @@
7609 #ifndef _ASM_POWERPC_EXEC_H
7610 #define _ASM_POWERPC_EXEC_H
7611
7612-extern unsigned long arch_align_stack(unsigned long sp);
7613+#define arch_align_stack(x) ((x) & ~0xfUL)
7614
7615 #endif /* _ASM_POWERPC_EXEC_H */
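With the randomized helper gone from process.c (see the removal later in this patch), arch_align_stack() collapses to a pure 16-byte alignment mask. For reference, the mask form in isolation:

/* ~0xfUL clears the low four bits, rounding sp down to a 16-byte boundary,
 * e.g. 0x7fff1237 -> 0x7fff1230. */
static inline unsigned long align_stack_16(unsigned long sp)
{
	return sp & ~0xfUL;
}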
7616diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
7617index 5acabbd..7ea14fa 100644
7618--- a/arch/powerpc/include/asm/kmap_types.h
7619+++ b/arch/powerpc/include/asm/kmap_types.h
7620@@ -10,7 +10,7 @@
7621 * 2 of the License, or (at your option) any later version.
7622 */
7623
7624-#define KM_TYPE_NR 16
7625+#define KM_TYPE_NR 17
7626
7627 #endif /* __KERNEL__ */
7628 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
7629diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
7630index 8565c25..2865190 100644
7631--- a/arch/powerpc/include/asm/mman.h
7632+++ b/arch/powerpc/include/asm/mman.h
7633@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
7634 }
7635 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
7636
7637-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
7638+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
7639 {
7640 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
7641 }
7642diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
7643index b9f4262..dcf04f7 100644
7644--- a/arch/powerpc/include/asm/page.h
7645+++ b/arch/powerpc/include/asm/page.h
7646@@ -230,8 +230,9 @@ extern long long virt_phys_offset;
7647 * and needs to be executable. This means the whole heap ends
7648 * up being executable.
7649 */
7650-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
7651- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7652+#define VM_DATA_DEFAULT_FLAGS32 \
7653+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
7654+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7655
7656 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
7657 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7658@@ -259,6 +260,9 @@ extern long long virt_phys_offset;
7659 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
7660 #endif
7661
7662+#define ktla_ktva(addr) (addr)
7663+#define ktva_ktla(addr) (addr)
7664+
7665 #ifndef CONFIG_PPC_BOOK3S_64
7666 /*
7667 * Use the top bit of the higher-level page table entries to indicate whether
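The VM_DATA_DEFAULT_FLAGS32 rewrite above stops handing every 32-bit data mapping VM_EXEC unconditionally: executability is now derived from the READ_IMPLIES_EXEC personality bit, as on the other architectures hardened in this patch. A sketch of the resulting computation; the flag values are the kernel's usual ones, the function itself is illustrative:

/* Sketch: data mappings default to RW and gain VM_EXEC only when the
 * process personality explicitly requests read-implies-exec semantics. */
#define VM_READ			0x00000001UL
#define VM_WRITE		0x00000002UL
#define VM_EXEC			0x00000004UL
#define READ_IMPLIES_EXEC	0x0400000UL

static unsigned long data_default_flags32(unsigned long personality)
{
	unsigned long flags = VM_READ | VM_WRITE;

	if (personality & READ_IMPLIES_EXEC)
		flags |= VM_EXEC;
	return flags;
}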
7668diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
7669index 88693ce..ac6f9ab 100644
7670--- a/arch/powerpc/include/asm/page_64.h
7671+++ b/arch/powerpc/include/asm/page_64.h
7672@@ -153,15 +153,18 @@ do { \
7673 * stack by default, so in the absence of a PT_GNU_STACK program header
7674 * we turn execute permission off.
7675 */
7676-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
7677- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7678+#define VM_STACK_DEFAULT_FLAGS32 \
7679+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
7680+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7681
7682 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
7683 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7684
7685+#ifndef CONFIG_PAX_PAGEEXEC
7686 #define VM_STACK_DEFAULT_FLAGS \
7687 (is_32bit_task() ? \
7688 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
7689+#endif
7690
7691 #include <asm-generic/getorder.h>
7692
7693diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
7694index 256d6f8..b0166a7 100644
7695--- a/arch/powerpc/include/asm/pgalloc-64.h
7696+++ b/arch/powerpc/include/asm/pgalloc-64.h
7697@@ -53,6 +53,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
7698 #ifndef CONFIG_PPC_64K_PAGES
7699
7700 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
7701+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
7702
7703 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
7704 {
7705@@ -70,6 +71,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
7706 pud_set(pud, (unsigned long)pmd);
7707 }
7708
7709+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
7710+{
7711+ pud_populate(mm, pud, pmd);
7712+}
7713+
7714 #define pmd_populate(mm, pmd, pte_page) \
7715 pmd_populate_kernel(mm, pmd, page_address(pte_page))
7716 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
7717@@ -169,6 +175,7 @@ extern void __tlb_remove_table(void *_table);
7718 #endif
7719
7720 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
7721+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
7722
7723 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
7724 pte_t *pte)
7725diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
7726index 7d6eacf..14c0240 100644
7727--- a/arch/powerpc/include/asm/pgtable.h
7728+++ b/arch/powerpc/include/asm/pgtable.h
7729@@ -2,6 +2,7 @@
7730 #define _ASM_POWERPC_PGTABLE_H
7731 #ifdef __KERNEL__
7732
7733+#include <linux/const.h>
7734 #ifndef __ASSEMBLY__
7735 #include <asm/processor.h> /* For TASK_SIZE */
7736 #include <asm/mmu.h>
7737diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
7738index 4aad413..85d86bf 100644
7739--- a/arch/powerpc/include/asm/pte-hash32.h
7740+++ b/arch/powerpc/include/asm/pte-hash32.h
7741@@ -21,6 +21,7 @@
7742 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
7743 #define _PAGE_USER 0x004 /* usermode access allowed */
7744 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
7745+#define _PAGE_EXEC _PAGE_GUARDED
7746 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
7747 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
7748 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
7749diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
7750index 10d1ef0..8f83abc 100644
7751--- a/arch/powerpc/include/asm/reg.h
7752+++ b/arch/powerpc/include/asm/reg.h
7753@@ -234,6 +234,7 @@
7754 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
7755 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
7756 #define DSISR_NOHPTE 0x40000000 /* no translation found */
7757+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
7758 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
7759 #define DSISR_ISSTORE 0x02000000 /* access was a store */
7760 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
7761diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
7762index 98da78e..dc68271 100644
7763--- a/arch/powerpc/include/asm/smp.h
7764+++ b/arch/powerpc/include/asm/smp.h
7765@@ -50,7 +50,7 @@ struct smp_ops_t {
7766 int (*cpu_disable)(void);
7767 void (*cpu_die)(unsigned int nr);
7768 int (*cpu_bootable)(unsigned int nr);
7769-};
7770+} __no_const;
7771
7772 extern void smp_send_debugger_break(void);
7773 extern void start_secondary_resume(void);
7774diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
7775index ba7b197..d292e26 100644
7776--- a/arch/powerpc/include/asm/thread_info.h
7777+++ b/arch/powerpc/include/asm/thread_info.h
7778@@ -93,7 +93,6 @@ static inline struct thread_info *current_thread_info(void)
7779 #define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
7780 TIF_NEED_RESCHED */
7781 #define TIF_32BIT 4 /* 32 bit binary */
7782-#define TIF_PERFMON_WORK 5 /* work for pfm_handle_work() */
7783 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
7784 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
7785 #define TIF_SINGLESTEP 8 /* singlestepping active */
7786@@ -107,6 +106,9 @@ static inline struct thread_info *current_thread_info(void)
7787 #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
7788 for stack store? */
7789 #define TIF_MEMDIE 17 /* is terminating due to OOM killer */
7790+#define TIF_PERFMON_WORK 18 /* work for pfm_handle_work() */
7791+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
7792+#define TIF_GRSEC_SETXID 5 /* update credentials on syscall entry/exit */
7793
7794 /* as above, but as bit values */
7795 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
7796@@ -126,9 +128,10 @@ static inline struct thread_info *current_thread_info(void)
7797 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
7798 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
7799 #define _TIF_NOHZ (1<<TIF_NOHZ)
7800+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
7801 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
7802 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
7803- _TIF_NOHZ)
7804+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
7805
7806 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
7807 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
7808diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
7809index 9485b43..4718d50 100644
7810--- a/arch/powerpc/include/asm/uaccess.h
7811+++ b/arch/powerpc/include/asm/uaccess.h
7812@@ -318,52 +318,6 @@ do { \
7813 extern unsigned long __copy_tofrom_user(void __user *to,
7814 const void __user *from, unsigned long size);
7815
7816-#ifndef __powerpc64__
7817-
7818-static inline unsigned long copy_from_user(void *to,
7819- const void __user *from, unsigned long n)
7820-{
7821- unsigned long over;
7822-
7823- if (access_ok(VERIFY_READ, from, n))
7824- return __copy_tofrom_user((__force void __user *)to, from, n);
7825- if ((unsigned long)from < TASK_SIZE) {
7826- over = (unsigned long)from + n - TASK_SIZE;
7827- return __copy_tofrom_user((__force void __user *)to, from,
7828- n - over) + over;
7829- }
7830- return n;
7831-}
7832-
7833-static inline unsigned long copy_to_user(void __user *to,
7834- const void *from, unsigned long n)
7835-{
7836- unsigned long over;
7837-
7838- if (access_ok(VERIFY_WRITE, to, n))
7839- return __copy_tofrom_user(to, (__force void __user *)from, n);
7840- if ((unsigned long)to < TASK_SIZE) {
7841- over = (unsigned long)to + n - TASK_SIZE;
7842- return __copy_tofrom_user(to, (__force void __user *)from,
7843- n - over) + over;
7844- }
7845- return n;
7846-}
7847-
7848-#else /* __powerpc64__ */
7849-
7850-#define __copy_in_user(to, from, size) \
7851- __copy_tofrom_user((to), (from), (size))
7852-
7853-extern unsigned long copy_from_user(void *to, const void __user *from,
7854- unsigned long n);
7855-extern unsigned long copy_to_user(void __user *to, const void *from,
7856- unsigned long n);
7857-extern unsigned long copy_in_user(void __user *to, const void __user *from,
7858- unsigned long n);
7859-
7860-#endif /* __powerpc64__ */
7861-
7862 static inline unsigned long __copy_from_user_inatomic(void *to,
7863 const void __user *from, unsigned long n)
7864 {
7865@@ -387,6 +341,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
7866 if (ret == 0)
7867 return 0;
7868 }
7869+
7870+ if (!__builtin_constant_p(n))
7871+ check_object_size(to, n, false);
7872+
7873 return __copy_tofrom_user((__force void __user *)to, from, n);
7874 }
7875
7876@@ -413,6 +371,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
7877 if (ret == 0)
7878 return 0;
7879 }
7880+
7881+ if (!__builtin_constant_p(n))
7882+ check_object_size(from, n, true);
7883+
7884 return __copy_tofrom_user(to, (__force const void __user *)from, n);
7885 }
7886
7887@@ -430,6 +392,92 @@ static inline unsigned long __copy_to_user(void __user *to,
7888 return __copy_to_user_inatomic(to, from, size);
7889 }
7890
7891+#ifndef __powerpc64__
7892+
7893+static inline unsigned long __must_check copy_from_user(void *to,
7894+ const void __user *from, unsigned long n)
7895+{
7896+ unsigned long over;
7897+
7898+ if ((long)n < 0)
7899+ return n;
7900+
7901+ if (access_ok(VERIFY_READ, from, n)) {
7902+ if (!__builtin_constant_p(n))
7903+ check_object_size(to, n, false);
7904+ return __copy_tofrom_user((__force void __user *)to, from, n);
7905+ }
7906+ if ((unsigned long)from < TASK_SIZE) {
7907+ over = (unsigned long)from + n - TASK_SIZE;
7908+ if (!__builtin_constant_p(n - over))
7909+ check_object_size(to, n - over, false);
7910+ return __copy_tofrom_user((__force void __user *)to, from,
7911+ n - over) + over;
7912+ }
7913+ return n;
7914+}
7915+
7916+static inline unsigned long __must_check copy_to_user(void __user *to,
7917+ const void *from, unsigned long n)
7918+{
7919+ unsigned long over;
7920+
7921+ if ((long)n < 0)
7922+ return n;
7923+
7924+ if (access_ok(VERIFY_WRITE, to, n)) {
7925+ if (!__builtin_constant_p(n))
7926+ check_object_size(from, n, true);
7927+ return __copy_tofrom_user(to, (__force void __user *)from, n);
7928+ }
7929+ if ((unsigned long)to < TASK_SIZE) {
7930+ over = (unsigned long)to + n - TASK_SIZE;
7931+ if (!__builtin_constant_p(n - over))
7932+ check_object_size(from, n - over, true);
7933+ return __copy_tofrom_user(to, (__force void __user *)from,
7934+ n - over) + over;
7935+ }
7936+ return n;
7937+}
7938+
7939+#else /* __powerpc64__ */
7940+
7941+#define __copy_in_user(to, from, size) \
7942+ __copy_tofrom_user((to), (from), (size))
7943+
7944+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
7945+{
7946+ if ((long)n < 0 || n > INT_MAX)
7947+ return n;
7948+
7949+ if (!__builtin_constant_p(n))
7950+ check_object_size(to, n, false);
7951+
7952+ if (likely(access_ok(VERIFY_READ, from, n)))
7953+ n = __copy_from_user(to, from, n);
7954+ else
7955+ memset(to, 0, n);
7956+ return n;
7957+}
7958+
7959+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
7960+{
7961+ if ((long)n < 0 || n > INT_MAX)
7962+ return n;
7963+
7964+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
7965+ if (!__builtin_constant_p(n))
7966+ check_object_size(from, n, true);
7967+ n = __copy_to_user(to, from, n);
7968+ }
7969+ return n;
7970+}
7971+
7972+extern unsigned long copy_in_user(void __user *to, const void __user *from,
7973+ unsigned long n);
7974+
7975+#endif /* __powerpc64__ */
7976+
7977 extern unsigned long __clear_user(void __user *addr, unsigned long size);
7978
7979 static inline unsigned long clear_user(void __user *addr, unsigned long size)
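The uaccess rework above pulls copy_from_user/copy_to_user inline and layers three guards over the raw copier: refuse lengths with the sign bit set, bounds-check the kernel-side object via check_object_size() whenever the length is not a compile-time constant, and zero the destination if access_ok() rejects the range so stale kernel memory can never leak. A condensed sketch of the copy_from_user shape; the helpers named here are the kernel's, the wrapper name is not:

/* Condensed sketch of the hardened wrapper added above. */
static inline unsigned long hardened_copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	if ((long)n < 0)			/* absurd size: report all n bytes uncopied */
		return n;

	if (!__builtin_constant_p(n))
		check_object_size(to, n, false);	/* PAX_USERCOPY object bounds check */

	if (likely(access_ok(VERIFY_READ, from, n)))
		return __copy_from_user(to, from, n);

	memset(to, 0, n);			/* failed access: scrub the destination */
	return n;
}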
7980diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
7981index 2d06704..1616f1b 100644
7982--- a/arch/powerpc/kernel/exceptions-64e.S
7983+++ b/arch/powerpc/kernel/exceptions-64e.S
7984@@ -757,6 +757,7 @@ storage_fault_common:
7985 std r14,_DAR(r1)
7986 std r15,_DSISR(r1)
7987 addi r3,r1,STACK_FRAME_OVERHEAD
7988+ bl .save_nvgprs
7989 mr r4,r14
7990 mr r5,r15
7991 ld r14,PACA_EXGEN+EX_R14(r13)
7992@@ -765,8 +766,7 @@ storage_fault_common:
7993 cmpdi r3,0
7994 bne- 1f
7995 b .ret_from_except_lite
7996-1: bl .save_nvgprs
7997- mr r5,r3
7998+1: mr r5,r3
7999 addi r3,r1,STACK_FRAME_OVERHEAD
8000 ld r4,_DAR(r1)
8001 bl .bad_page_fault
8002diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
8003index 3a9ed6a..b534681 100644
8004--- a/arch/powerpc/kernel/exceptions-64s.S
8005+++ b/arch/powerpc/kernel/exceptions-64s.S
8006@@ -1364,10 +1364,10 @@ handle_page_fault:
8007 11: ld r4,_DAR(r1)
8008 ld r5,_DSISR(r1)
8009 addi r3,r1,STACK_FRAME_OVERHEAD
8010+ bl .save_nvgprs
8011 bl .do_page_fault
8012 cmpdi r3,0
8013 beq+ 12f
8014- bl .save_nvgprs
8015 mr r5,r3
8016 addi r3,r1,STACK_FRAME_OVERHEAD
8017 lwz r4,_DAR(r1)
8018diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
8019index 2e3200c..72095ce 100644
8020--- a/arch/powerpc/kernel/module_32.c
8021+++ b/arch/powerpc/kernel/module_32.c
8022@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
8023 me->arch.core_plt_section = i;
8024 }
8025 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
8026- printk("Module doesn't contain .plt or .init.plt sections.\n");
8027+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
8028 return -ENOEXEC;
8029 }
8030
8031@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
8032
8033 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
8034 /* Init, or core PLT? */
8035- if (location >= mod->module_core
8036- && location < mod->module_core + mod->core_size)
8037+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
8038+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
8039 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
8040- else
8041+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
8042+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
8043 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
8044+ else {
8045+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
8046+ return ~0UL;
8047+ }
8048
8049 /* Find this entry, or if that fails, the next avail. entry */
8050 while (entry->jump[0]) {
8051diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
8052index 96d2fdf..f6d10c8 100644
8053--- a/arch/powerpc/kernel/process.c
8054+++ b/arch/powerpc/kernel/process.c
8055@@ -886,8 +886,8 @@ void show_regs(struct pt_regs * regs)
8056 * Lookup NIP late so we have the best change of getting the
8057 * above info out without failing
8058 */
8059- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
8060- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
8061+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
8062+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
8063 #endif
8064 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
8065 printk("PACATMSCRATCH [%llx]\n", get_paca()->tm_scratch);
8066@@ -1352,10 +1352,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8067 newsp = stack[0];
8068 ip = stack[STACK_FRAME_LR_SAVE];
8069 if (!firstframe || ip != lr) {
8070- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
8071+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
8072 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8073 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
8074- printk(" (%pS)",
8075+ printk(" (%pA)",
8076 (void *)current->ret_stack[curr_frame].ret);
8077 curr_frame--;
8078 }
8079@@ -1375,7 +1375,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8080 struct pt_regs *regs = (struct pt_regs *)
8081 (sp + STACK_FRAME_OVERHEAD);
8082 lr = regs->link;
8083- printk("--- Exception: %lx at %pS\n LR = %pS\n",
8084+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
8085 regs->trap, (void *)regs->nip, (void *)lr);
8086 firstframe = 1;
8087 }
8088@@ -1411,58 +1411,3 @@ void notrace __ppc64_runlatch_off(void)
8089 mtspr(SPRN_CTRLT, ctrl);
8090 }
8091 #endif /* CONFIG_PPC64 */
8092-
8093-unsigned long arch_align_stack(unsigned long sp)
8094-{
8095- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8096- sp -= get_random_int() & ~PAGE_MASK;
8097- return sp & ~0xf;
8098-}
8099-
8100-static inline unsigned long brk_rnd(void)
8101-{
8102- unsigned long rnd = 0;
8103-
8104- /* 8MB for 32bit, 1GB for 64bit */
8105- if (is_32bit_task())
8106- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
8107- else
8108- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
8109-
8110- return rnd << PAGE_SHIFT;
8111-}
8112-
8113-unsigned long arch_randomize_brk(struct mm_struct *mm)
8114-{
8115- unsigned long base = mm->brk;
8116- unsigned long ret;
8117-
8118-#ifdef CONFIG_PPC_STD_MMU_64
8119- /*
8120- * If we are using 1TB segments and we are allowed to randomise
8121- * the heap, we can put it above 1TB so it is backed by a 1TB
8122- * segment. Otherwise the heap will be in the bottom 1TB
8123- * which always uses 256MB segments and this may result in a
8124- * performance penalty.
8125- */
8126- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
8127- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
8128-#endif
8129-
8130- ret = PAGE_ALIGN(base + brk_rnd());
8131-
8132- if (ret < mm->brk)
8133- return mm->brk;
8134-
8135- return ret;
8136-}
8137-
8138-unsigned long randomize_et_dyn(unsigned long base)
8139-{
8140- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
8141-
8142- if (ret < base)
8143- return base;
8144-
8145- return ret;
8146-}
8147diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
8148index 9a0d24c..e7fbedf 100644
8149--- a/arch/powerpc/kernel/ptrace.c
8150+++ b/arch/powerpc/kernel/ptrace.c
8151@@ -1761,6 +1761,10 @@ long arch_ptrace(struct task_struct *child, long request,
8152 return ret;
8153 }
8154
8155+#ifdef CONFIG_GRKERNSEC_SETXID
8156+extern void gr_delayed_cred_worker(void);
8157+#endif
8158+
8159 /*
8160 * We must return the syscall number to actually look up in the table.
8161 * This can be -1L to skip running any syscall at all.
8162@@ -1773,6 +1777,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
8163
8164 secure_computing_strict(regs->gpr[0]);
8165
8166+#ifdef CONFIG_GRKERNSEC_SETXID
8167+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8168+ gr_delayed_cred_worker();
8169+#endif
8170+
8171 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
8172 tracehook_report_syscall_entry(regs))
8173 /*
8174@@ -1807,6 +1816,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
8175 {
8176 int step;
8177
8178+#ifdef CONFIG_GRKERNSEC_SETXID
8179+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8180+ gr_delayed_cred_worker();
8181+#endif
8182+
8183 audit_syscall_exit(regs);
8184
8185 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
8186diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
8187index fea2dba..a779f6b 100644
8188--- a/arch/powerpc/kernel/signal_32.c
8189+++ b/arch/powerpc/kernel/signal_32.c
8190@@ -1002,7 +1002,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
8191 /* Save user registers on the stack */
8192 frame = &rt_sf->uc.uc_mcontext;
8193 addr = frame;
8194- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
8195+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
8196 sigret = 0;
8197 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
8198 } else {
8199diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
8200index 1e7ba88..17afb1b 100644
8201--- a/arch/powerpc/kernel/signal_64.c
8202+++ b/arch/powerpc/kernel/signal_64.c
8203@@ -763,7 +763,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
8204 #endif
8205
8206 /* Set up to return from userspace. */
8207- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
8208+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
8209 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
8210 } else {
8211 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
8212diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
8213index f783c93..619baf1 100644
8214--- a/arch/powerpc/kernel/traps.c
8215+++ b/arch/powerpc/kernel/traps.c
8216@@ -142,6 +142,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
8217 return flags;
8218 }
8219
8220+extern void gr_handle_kernel_exploit(void);
8221+
8222 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
8223 int signr)
8224 {
8225@@ -191,6 +193,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
8226 panic("Fatal exception in interrupt");
8227 if (panic_on_oops)
8228 panic("Fatal exception");
8229+
8230+ gr_handle_kernel_exploit();
8231+
8232 do_exit(signr);
8233 }
8234
8235diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
8236index 1d9c926..25f4741 100644
8237--- a/arch/powerpc/kernel/vdso.c
8238+++ b/arch/powerpc/kernel/vdso.c
8239@@ -34,6 +34,7 @@
8240 #include <asm/firmware.h>
8241 #include <asm/vdso.h>
8242 #include <asm/vdso_datapage.h>
8243+#include <asm/mman.h>
8244
8245 #include "setup.h"
8246
8247@@ -222,7 +223,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
8248 vdso_base = VDSO32_MBASE;
8249 #endif
8250
8251- current->mm->context.vdso_base = 0;
8252+ current->mm->context.vdso_base = ~0UL;
8253
8254 /* vDSO has a problem and was disabled, just don't "enable" it for the
8255 * process
8256@@ -242,7 +243,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
8257 vdso_base = get_unmapped_area(NULL, vdso_base,
8258 (vdso_pages << PAGE_SHIFT) +
8259 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
8260- 0, 0);
8261+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
8262 if (IS_ERR_VALUE(vdso_base)) {
8263 rc = vdso_base;
8264 goto fail_mmapsem;
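Once RANDMMAP may place the vDSO at low addresses, 0 becomes a legal mapping address, so the patch moves the "no vDSO" marker to ~0UL; the signal_32.c and signal_64.c hunks above test the same sentinel before using the vDSO sigreturn trampoline. The pattern in isolation (names are illustrative):

/* Sketch: an all-ones sentinel marks "no vDSO" now that 0 is a valid base. */
#define VDSO_UNMAPPED	(~0UL)

static unsigned long sigtramp_address(unsigned long vdso_base,
				      unsigned long sigtramp_offset)
{
	if (vdso_base != VDSO_UNMAPPED)
		return vdso_base + sigtramp_offset;	/* vDSO trampoline */
	return 0;	/* caller sets up an on-stack trampoline instead */
}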
8265diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
8266index 5eea6f3..5d10396 100644
8267--- a/arch/powerpc/lib/usercopy_64.c
8268+++ b/arch/powerpc/lib/usercopy_64.c
8269@@ -9,22 +9,6 @@
8270 #include <linux/module.h>
8271 #include <asm/uaccess.h>
8272
8273-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
8274-{
8275- if (likely(access_ok(VERIFY_READ, from, n)))
8276- n = __copy_from_user(to, from, n);
8277- else
8278- memset(to, 0, n);
8279- return n;
8280-}
8281-
8282-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
8283-{
8284- if (likely(access_ok(VERIFY_WRITE, to, n)))
8285- n = __copy_to_user(to, from, n);
8286- return n;
8287-}
8288-
8289 unsigned long copy_in_user(void __user *to, const void __user *from,
8290 unsigned long n)
8291 {
8292@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
8293 return n;
8294 }
8295
8296-EXPORT_SYMBOL(copy_from_user);
8297-EXPORT_SYMBOL(copy_to_user);
8298 EXPORT_SYMBOL(copy_in_user);
8299
8300diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
8301index 51ab9e7..7d3c78b 100644
8302--- a/arch/powerpc/mm/fault.c
8303+++ b/arch/powerpc/mm/fault.c
8304@@ -33,6 +33,10 @@
8305 #include <linux/magic.h>
8306 #include <linux/ratelimit.h>
8307 #include <linux/context_tracking.h>
8308+#include <linux/slab.h>
8309+#include <linux/pagemap.h>
8310+#include <linux/compiler.h>
8311+#include <linux/unistd.h>
8312
8313 #include <asm/firmware.h>
8314 #include <asm/page.h>
8315@@ -69,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
8316 }
8317 #endif
8318
8319+#ifdef CONFIG_PAX_PAGEEXEC
8320+/*
8321+ * PaX: decide what to do with offenders (regs->nip = fault address)
8322+ *
8323+ * returns 1 when task should be killed
8324+ */
8325+static int pax_handle_fetch_fault(struct pt_regs *regs)
8326+{
8327+ return 1;
8328+}
8329+
8330+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
8331+{
8332+ unsigned long i;
8333+
8334+ printk(KERN_ERR "PAX: bytes at PC: ");
8335+ for (i = 0; i < 5; i++) {
8336+ unsigned int c;
8337+ if (get_user(c, (unsigned int __user *)pc+i))
8338+ printk(KERN_CONT "???????? ");
8339+ else
8340+ printk(KERN_CONT "%08x ", c);
8341+ }
8342+ printk("\n");
8343+}
8344+#endif
8345+
8346 /*
8347 * Check whether the instruction at regs->nip is a store using
8348 * an update addressing form which will update r1.
8349@@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
8350 * indicate errors in DSISR but can validly be set in SRR1.
8351 */
8352 if (trap == 0x400)
8353- error_code &= 0x48200000;
8354+ error_code &= 0x58200000;
8355 else
8356 is_write = error_code & DSISR_ISSTORE;
8357 #else
8358@@ -378,7 +409,7 @@ good_area:
8359 * "undefined". Of those that can be set, this is the only
8360 * one which seems bad.
8361 */
8362- if (error_code & 0x10000000)
8363+ if (error_code & DSISR_GUARDED)
8364 /* Guarded storage error. */
8365 goto bad_area;
8366 #endif /* CONFIG_8xx */
8367@@ -393,7 +424,7 @@ good_area:
8368 * processors use the same I/D cache coherency mechanism
8369 * as embedded.
8370 */
8371- if (error_code & DSISR_PROTFAULT)
8372+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
8373 goto bad_area;
8374 #endif /* CONFIG_PPC_STD_MMU */
8375
8376@@ -483,6 +514,23 @@ bad_area:
8377 bad_area_nosemaphore:
8378 /* User mode accesses cause a SIGSEGV */
8379 if (user_mode(regs)) {
8380+
8381+#ifdef CONFIG_PAX_PAGEEXEC
8382+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
8383+#ifdef CONFIG_PPC_STD_MMU
8384+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
8385+#else
8386+ if (is_exec && regs->nip == address) {
8387+#endif
8388+ switch (pax_handle_fetch_fault(regs)) {
8389+ }
8390+
8391+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
8392+ do_group_exit(SIGKILL);
8393+ }
8394+ }
8395+#endif
8396+
8397 _exception(SIGSEGV, regs, code, address);
8398 goto bail;
8399 }
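Two related fault.c changes above widen which DSISR/SRR1 bits are honoured: instruction-storage faults keep bit 0x10000000 (DSISR_GUARDED, newly named in reg.h earlier), and guarded-storage errors are folded into the protection-fault path so PAGEEXEC sees fetches from non-executable (guarded) pages. The mask arithmetic, spelled out:

/* 0x48200000 = NOHPTE | PROTFAULT | 0x00200000; the patch widens it to
 * 0x58200000 by keeping DSISR_GUARDED as well. */
#define DSISR_NOHPTE	0x40000000UL
#define DSISR_GUARDED	0x10000000UL
#define DSISR_PROTFAULT	0x08000000UL

static unsigned long isi_error_mask(void)
{
	return DSISR_NOHPTE | DSISR_GUARDED | DSISR_PROTFAULT | 0x00200000UL;	/* == 0x58200000 */
}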
8400diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
8401index cb8bdbe..d770680 100644
8402--- a/arch/powerpc/mm/mmap.c
8403+++ b/arch/powerpc/mm/mmap.c
8404@@ -57,6 +57,10 @@ static unsigned long mmap_rnd(void)
8405 {
8406 unsigned long rnd = 0;
8407
8408+#ifdef CONFIG_PAX_RANDMMAP
8409+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
8410+#endif
8411+
8412 if (current->flags & PF_RANDOMIZE) {
8413 /* 8MB for 32bit, 1GB for 64bit */
8414 if (is_32bit_task())
8415@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8416 */
8417 if (mmap_is_legacy()) {
8418 mm->mmap_base = TASK_UNMAPPED_BASE;
8419+
8420+#ifdef CONFIG_PAX_RANDMMAP
8421+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8422+ mm->mmap_base += mm->delta_mmap;
8423+#endif
8424+
8425 mm->get_unmapped_area = arch_get_unmapped_area;
8426 } else {
8427 mm->mmap_base = mmap_base();
8428+
8429+#ifdef CONFIG_PAX_RANDMMAP
8430+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8431+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8432+#endif
8433+
8434 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8435 }
8436 }
8437diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
8438index 7ce9cf3..a964087 100644
8439--- a/arch/powerpc/mm/slice.c
8440+++ b/arch/powerpc/mm/slice.c
8441@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
8442 if ((mm->task_size - len) < addr)
8443 return 0;
8444 vma = find_vma(mm, addr);
8445- return (!vma || (addr + len) <= vma->vm_start);
8446+ return check_heap_stack_gap(vma, addr, len, 0);
8447 }
8448
8449 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
8450@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
8451 info.align_offset = 0;
8452
8453 addr = TASK_UNMAPPED_BASE;
8454+
8455+#ifdef CONFIG_PAX_RANDMMAP
8456+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8457+ addr += mm->delta_mmap;
8458+#endif
8459+
8460 while (addr < TASK_SIZE) {
8461 info.low_limit = addr;
8462 if (!slice_scan_available(addr, available, 1, &addr))
8463@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
8464 if (fixed && addr > (mm->task_size - len))
8465 return -EINVAL;
8466
8467+#ifdef CONFIG_PAX_RANDMMAP
8468+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
8469+ addr = 0;
8470+#endif
8471+
8472 /* If hint, make sure it matches our alignment restrictions */
8473 if (!fixed && addr) {
8474 addr = _ALIGN_UP(addr, 1ul << pshift);
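
slice_area_is_free() above, like the sh and sparc get_unmapped_area hunks later in this section, swaps the open-coded "range ends before the next vma" test for check_heap_stack_gap(). The helper's body is not part of this section; the sketch below shows the old test and an assumed shape of the new one, in which a guard gap plus the caller's per-thread offset is additionally required below a downward-growing (stack) vma. Flag value and guard parameter are assumptions for the sketch.

struct vma_sketch {
    unsigned long vm_start;
    unsigned long vm_flags;
};

#define VM_GROWSDOWN 0x0100UL   /* assumed flag value */

/* the open-coded test the hunks delete: range ends before the next vma */
static int range_is_free(const struct vma_sketch *next,
                         unsigned long addr, unsigned long len)
{
    return !next || addr + len <= next->vm_start;
}

/* assumed shape of the replacement: same test, but keep an extra guard
 * gap (plus the per-thread random offset) below a stack-like vma */
static int check_gap_sketch(const struct vma_sketch *next, unsigned long addr,
                            unsigned long len, unsigned long offset,
                            unsigned long guard_gap)
{
    if (!next)
        return 1;
    if (next->vm_flags & VM_GROWSDOWN)
        return addr + len + guard_gap + offset <= next->vm_start;
    return addr + len <= next->vm_start;
}
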
8475diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
8476index 9098692..3d54cd1 100644
8477--- a/arch/powerpc/platforms/cell/spufs/file.c
8478+++ b/arch/powerpc/platforms/cell/spufs/file.c
8479@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
8480 return VM_FAULT_NOPAGE;
8481 }
8482
8483-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
8484+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
8485 unsigned long address,
8486- void *buf, int len, int write)
8487+ void *buf, size_t len, int write)
8488 {
8489 struct spu_context *ctx = vma->vm_file->private_data;
8490 unsigned long offset = address - vma->vm_start;
8491diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
8492index c797832..ce575c8 100644
8493--- a/arch/s390/include/asm/atomic.h
8494+++ b/arch/s390/include/asm/atomic.h
8495@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
8496 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
8497 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8498
8499+#define atomic64_read_unchecked(v) atomic64_read(v)
8500+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
8501+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
8502+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
8503+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
8504+#define atomic64_inc_unchecked(v) atomic64_inc(v)
8505+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
8506+#define atomic64_dec_unchecked(v) atomic64_dec(v)
8507+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
8508+
8509 #define smp_mb__before_atomic_dec() smp_mb()
8510 #define smp_mb__after_atomic_dec() smp_mb()
8511 #define smp_mb__before_atomic_inc() smp_mb()
8512diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
8513index 4d7ccac..d03d0ad 100644
8514--- a/arch/s390/include/asm/cache.h
8515+++ b/arch/s390/include/asm/cache.h
8516@@ -9,8 +9,10 @@
8517 #ifndef __ARCH_S390_CACHE_H
8518 #define __ARCH_S390_CACHE_H
8519
8520-#define L1_CACHE_BYTES 256
8521+#include <linux/const.h>
8522+
8523 #define L1_CACHE_SHIFT 8
8524+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8525 #define NET_SKB_PAD 32
8526
8527 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
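
This hunk, and the identical ones for the score, sh and sparc cache headers below, derive L1_CACHE_BYTES from L1_CACHE_SHIFT through _AC() so that one definition serves both C and assembler includes. The mechanism, restated in miniature from include/uapi/linux/const.h:

/* restated from <uapi/linux/const.h> */
#ifdef __ASSEMBLY__
#define _AC(X, Y)  X            /* assembler: type suffixes are illegal */
#else
#define __AC(X, Y) (X##Y)
#define _AC(X, Y)  __AC(X, Y)   /* C: paste the UL suffix on */
#endif

#define L1_CACHE_SHIFT 8
#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* 256UL in C, 256 in asm */
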
8528diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
8529index 78f4f87..598ce39 100644
8530--- a/arch/s390/include/asm/elf.h
8531+++ b/arch/s390/include/asm/elf.h
8532@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
8533 the loader. We need to make sure that it is out of the way of the program
8534 that it will "exec", and that there is sufficient room for the brk. */
8535
8536-extern unsigned long randomize_et_dyn(unsigned long base);
8537-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
8538+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
8539+
8540+#ifdef CONFIG_PAX_ASLR
8541+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
8542+
8543+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
8544+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
8545+#endif
8546
8547 /* This yields a mask that user programs can use to figure out what
8548 instruction set this CPU supports. */
8549@@ -222,9 +228,6 @@ struct linux_binprm;
8550 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
8551 int arch_setup_additional_pages(struct linux_binprm *, int);
8552
8553-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8554-#define arch_randomize_brk arch_randomize_brk
8555-
8556 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
8557
8558 #endif
8559diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
8560index c4a93d6..4d2a9b4 100644
8561--- a/arch/s390/include/asm/exec.h
8562+++ b/arch/s390/include/asm/exec.h
8563@@ -7,6 +7,6 @@
8564 #ifndef __ASM_EXEC_H
8565 #define __ASM_EXEC_H
8566
8567-extern unsigned long arch_align_stack(unsigned long sp);
8568+#define arch_align_stack(x) ((x) & ~0xfUL)
8569
8570 #endif /* __ASM_EXEC_H */
8571diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
8572index 9c33ed4..e40cbef 100644
8573--- a/arch/s390/include/asm/uaccess.h
8574+++ b/arch/s390/include/asm/uaccess.h
8575@@ -252,6 +252,10 @@ static inline unsigned long __must_check
8576 copy_to_user(void __user *to, const void *from, unsigned long n)
8577 {
8578 might_fault();
8579+
8580+ if ((long)n < 0)
8581+ return n;
8582+
8583 return __copy_to_user(to, from, n);
8584 }
8585
8586@@ -275,6 +279,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
8587 static inline unsigned long __must_check
8588 __copy_from_user(void *to, const void __user *from, unsigned long n)
8589 {
8590+ if ((long)n < 0)
8591+ return n;
8592+
8593 if (__builtin_constant_p(n) && (n <= 256))
8594 return uaccess.copy_from_user_small(n, from, to);
8595 else
8596@@ -306,10 +313,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
8597 static inline unsigned long __must_check
8598 copy_from_user(void *to, const void __user *from, unsigned long n)
8599 {
8600- unsigned int sz = __compiletime_object_size(to);
8601+ size_t sz = __compiletime_object_size(to);
8602
8603 might_fault();
8604- if (unlikely(sz != -1 && sz < n)) {
8605+
8606+ if ((long)n < 0)
8607+ return n;
8608+
8609+ if (unlikely(sz != (size_t)-1 && sz < n)) {
8610 copy_from_user_overflow();
8611 return n;
8612 }
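
The guard added to all three s390 copy helpers above rejects any length whose sign bit is set before the copy primitive runs; since these helpers return the number of bytes not copied, returning n reports total failure. (The same hunk also widens the __compiletime_object_size() result to size_t so the "unknown size" sentinel of -1 compares correctly.) A user-space sketch of the sign-bit check, with local names rather than the kernel's:

#include <string.h>

/* returns the number of bytes NOT copied, as the kernel helpers do */
static unsigned long copy_checked(void *to, const void *from, unsigned long n)
{
    /* an underflowed length (e.g. len - hdrlen with hdrlen > len) shows
     * up as a huge unsigned value with its sign bit set; fail the whole
     * copy instead of letting it run for gigabytes */
    if ((long)n < 0)
        return n;

    memcpy(to, from, n);
    return 0;
}
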
8613diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
8614index 7845e15..59c4353 100644
8615--- a/arch/s390/kernel/module.c
8616+++ b/arch/s390/kernel/module.c
8617@@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
8618
8619 /* Increase core size by size of got & plt and set start
8620 offsets for got and plt. */
8621- me->core_size = ALIGN(me->core_size, 4);
8622- me->arch.got_offset = me->core_size;
8623- me->core_size += me->arch.got_size;
8624- me->arch.plt_offset = me->core_size;
8625- me->core_size += me->arch.plt_size;
8626+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
8627+ me->arch.got_offset = me->core_size_rw;
8628+ me->core_size_rw += me->arch.got_size;
8629+ me->arch.plt_offset = me->core_size_rx;
8630+ me->core_size_rx += me->arch.plt_size;
8631 return 0;
8632 }
8633
8634@@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8635 if (info->got_initialized == 0) {
8636 Elf_Addr *gotent;
8637
8638- gotent = me->module_core + me->arch.got_offset +
8639+ gotent = me->module_core_rw + me->arch.got_offset +
8640 info->got_offset;
8641 *gotent = val;
8642 info->got_initialized = 1;
8643@@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8644 rc = apply_rela_bits(loc, val, 0, 64, 0);
8645 else if (r_type == R_390_GOTENT ||
8646 r_type == R_390_GOTPLTENT) {
8647- val += (Elf_Addr) me->module_core - loc;
8648+ val += (Elf_Addr) me->module_core_rw - loc;
8649 rc = apply_rela_bits(loc, val, 1, 32, 1);
8650 }
8651 break;
8652@@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8653 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
8654 if (info->plt_initialized == 0) {
8655 unsigned int *ip;
8656- ip = me->module_core + me->arch.plt_offset +
8657+ ip = me->module_core_rx + me->arch.plt_offset +
8658 info->plt_offset;
8659 #ifndef CONFIG_64BIT
8660 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
8661@@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8662 val - loc + 0xffffUL < 0x1ffffeUL) ||
8663 (r_type == R_390_PLT32DBL &&
8664 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
8665- val = (Elf_Addr) me->module_core +
8666+ val = (Elf_Addr) me->module_core_rx +
8667 me->arch.plt_offset +
8668 info->plt_offset;
8669 val += rela->r_addend - loc;
8670@@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8671 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
8672 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
8673 val = val + rela->r_addend -
8674- ((Elf_Addr) me->module_core + me->arch.got_offset);
8675+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
8676 if (r_type == R_390_GOTOFF16)
8677 rc = apply_rela_bits(loc, val, 0, 16, 0);
8678 else if (r_type == R_390_GOTOFF32)
8679@@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8680 break;
8681 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
8682 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
8683- val = (Elf_Addr) me->module_core + me->arch.got_offset +
8684+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
8685 rela->r_addend - loc;
8686 if (r_type == R_390_GOTPC)
8687 rc = apply_rela_bits(loc, val, 1, 32, 0);
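
The module.c hunks above split the single module core region into separate RW and RX allocations: GOT slots, which the loader writes at relocation time, are sized into and addressed off the RW side, while PLT trampolines, which only execute, use the RX side. A simplified sketch of the bookkeeping follows; the field names mirror the patch, but the struct itself is a stand-in for struct module, not the kernel's definition.

/* simplified stand-in for the relevant struct module fields */
struct mod_layout {
    unsigned long core_size_rw;   /* writable: data, GOT  */
    unsigned long core_size_rx;   /* executable: text, PLT */
    unsigned long got_offset;     /* inside the RW region */
    unsigned long plt_offset;     /* inside the RX region */
};

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

static void reserve_got_plt(struct mod_layout *m,
                            unsigned long got_size, unsigned long plt_size)
{
    m->core_size_rw  = ALIGN_UP(m->core_size_rw, 4);
    m->got_offset    = m->core_size_rw;   /* GOT lives in RW memory */
    m->core_size_rw += got_size;

    m->plt_offset    = m->core_size_rx;   /* PLT lives in RX memory */
    m->core_size_rx += plt_size;
}
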
8688diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
8689index c5dbb33..b41f4ee 100644
8690--- a/arch/s390/kernel/process.c
8691+++ b/arch/s390/kernel/process.c
8692@@ -237,39 +237,3 @@ unsigned long get_wchan(struct task_struct *p)
8693 }
8694 return 0;
8695 }
8696-
8697-unsigned long arch_align_stack(unsigned long sp)
8698-{
8699- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8700- sp -= get_random_int() & ~PAGE_MASK;
8701- return sp & ~0xf;
8702-}
8703-
8704-static inline unsigned long brk_rnd(void)
8705-{
8706- /* 8MB for 32bit, 1GB for 64bit */
8707- if (is_32bit_task())
8708- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
8709- else
8710- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
8711-}
8712-
8713-unsigned long arch_randomize_brk(struct mm_struct *mm)
8714-{
8715- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
8716-
8717- if (ret < mm->brk)
8718- return mm->brk;
8719- return ret;
8720-}
8721-
8722-unsigned long randomize_et_dyn(unsigned long base)
8723-{
8724- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
8725-
8726- if (!(current->flags & PF_RANDOMIZE))
8727- return base;
8728- if (ret < base)
8729- return base;
8730- return ret;
8731-}
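
The deleted helpers above randomized the stack, brk and ET_DYN base per exec; with PAX_ASLR the delta definitions in the elf.h hunk earlier take over, and arch_align_stack degenerates to plain 16-byte alignment. The removed windows are easy to verify: at 4 KiB pages an 11-bit random page count spans exactly 8 MiB and an 18-bit count spans exactly 1 GiB, matching the "8MB for 32bit, 1GB for 64bit" comment. In miniature:

#include <stdio.h>

#define PAGE_SHIFT 12   /* 4 KiB pages */

int main(void)
{
    /* mask 0x7ff admits 0x800 page offsets; 0x3ffff admits 0x40000 */
    unsigned long w32 = (0x7ffUL + 1) << PAGE_SHIFT;
    unsigned long w64 = (0x3ffffUL + 1) << PAGE_SHIFT;

    printf("32-bit brk window: %lu MiB\n", w32 >> 20);  /* 8 */
    printf("64-bit brk window: %lu GiB\n", w64 >> 30);  /* 1 */
    return 0;
}
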
8732diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
8733index 4002329..99b67cb 100644
8734--- a/arch/s390/mm/mmap.c
8735+++ b/arch/s390/mm/mmap.c
8736@@ -90,9 +90,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8737 */
8738 if (mmap_is_legacy()) {
8739 mm->mmap_base = TASK_UNMAPPED_BASE;
8740+
8741+#ifdef CONFIG_PAX_RANDMMAP
8742+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8743+ mm->mmap_base += mm->delta_mmap;
8744+#endif
8745+
8746 mm->get_unmapped_area = arch_get_unmapped_area;
8747 } else {
8748 mm->mmap_base = mmap_base();
8749+
8750+#ifdef CONFIG_PAX_RANDMMAP
8751+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8752+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8753+#endif
8754+
8755 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8756 }
8757 }
8758@@ -173,9 +185,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8759 */
8760 if (mmap_is_legacy()) {
8761 mm->mmap_base = TASK_UNMAPPED_BASE;
8762+
8763+#ifdef CONFIG_PAX_RANDMMAP
8764+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8765+ mm->mmap_base += mm->delta_mmap;
8766+#endif
8767+
8768 mm->get_unmapped_area = s390_get_unmapped_area;
8769 } else {
8770 mm->mmap_base = mmap_base();
8771+
8772+#ifdef CONFIG_PAX_RANDMMAP
8773+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8774+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8775+#endif
8776+
8777 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
8778 }
8779 }
8780diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
8781index ae3d59f..f65f075 100644
8782--- a/arch/score/include/asm/cache.h
8783+++ b/arch/score/include/asm/cache.h
8784@@ -1,7 +1,9 @@
8785 #ifndef _ASM_SCORE_CACHE_H
8786 #define _ASM_SCORE_CACHE_H
8787
8788+#include <linux/const.h>
8789+
8790 #define L1_CACHE_SHIFT 4
8791-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8792+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8793
8794 #endif /* _ASM_SCORE_CACHE_H */
8795diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
8796index f9f3cd5..58ff438 100644
8797--- a/arch/score/include/asm/exec.h
8798+++ b/arch/score/include/asm/exec.h
8799@@ -1,6 +1,6 @@
8800 #ifndef _ASM_SCORE_EXEC_H
8801 #define _ASM_SCORE_EXEC_H
8802
8803-extern unsigned long arch_align_stack(unsigned long sp);
8804+#define arch_align_stack(x) (x)
8805
8806 #endif /* _ASM_SCORE_EXEC_H */
8807diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
8808index a1519ad3..e8ac1ff 100644
8809--- a/arch/score/kernel/process.c
8810+++ b/arch/score/kernel/process.c
8811@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
8812
8813 return task_pt_regs(task)->cp0_epc;
8814 }
8815-
8816-unsigned long arch_align_stack(unsigned long sp)
8817-{
8818- return sp;
8819-}
8820diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
8821index ef9e555..331bd29 100644
8822--- a/arch/sh/include/asm/cache.h
8823+++ b/arch/sh/include/asm/cache.h
8824@@ -9,10 +9,11 @@
8825 #define __ASM_SH_CACHE_H
8826 #ifdef __KERNEL__
8827
8828+#include <linux/const.h>
8829 #include <linux/init.h>
8830 #include <cpu/cache.h>
8831
8832-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8833+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8834
8835 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8836
8837diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
8838index 6777177..cb5e44f 100644
8839--- a/arch/sh/mm/mmap.c
8840+++ b/arch/sh/mm/mmap.c
8841@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8842 struct mm_struct *mm = current->mm;
8843 struct vm_area_struct *vma;
8844 int do_colour_align;
8845+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8846 struct vm_unmapped_area_info info;
8847
8848 if (flags & MAP_FIXED) {
8849@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8850 if (filp || (flags & MAP_SHARED))
8851 do_colour_align = 1;
8852
8853+#ifdef CONFIG_PAX_RANDMMAP
8854+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8855+#endif
8856+
8857 if (addr) {
8858 if (do_colour_align)
8859 addr = COLOUR_ALIGN(addr, pgoff);
8860@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8861 addr = PAGE_ALIGN(addr);
8862
8863 vma = find_vma(mm, addr);
8864- if (TASK_SIZE - len >= addr &&
8865- (!vma || addr + len <= vma->vm_start))
8866+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8867 return addr;
8868 }
8869
8870 info.flags = 0;
8871 info.length = len;
8872- info.low_limit = TASK_UNMAPPED_BASE;
8873+ info.low_limit = mm->mmap_base;
8874 info.high_limit = TASK_SIZE;
8875 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
8876 info.align_offset = pgoff << PAGE_SHIFT;
8877@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8878 struct mm_struct *mm = current->mm;
8879 unsigned long addr = addr0;
8880 int do_colour_align;
8881+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8882 struct vm_unmapped_area_info info;
8883
8884 if (flags & MAP_FIXED) {
8885@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8886 if (filp || (flags & MAP_SHARED))
8887 do_colour_align = 1;
8888
8889+#ifdef CONFIG_PAX_RANDMMAP
8890+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8891+#endif
8892+
8893 /* requesting a specific address */
8894 if (addr) {
8895 if (do_colour_align)
8896@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8897 addr = PAGE_ALIGN(addr);
8898
8899 vma = find_vma(mm, addr);
8900- if (TASK_SIZE - len >= addr &&
8901- (!vma || addr + len <= vma->vm_start))
8902+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8903 return addr;
8904 }
8905
8906@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8907 VM_BUG_ON(addr != -ENOMEM);
8908 info.flags = 0;
8909 info.low_limit = TASK_UNMAPPED_BASE;
8910+
8911+#ifdef CONFIG_PAX_RANDMMAP
8912+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8913+ info.low_limit += mm->delta_mmap;
8914+#endif
8915+
8916 info.high_limit = TASK_SIZE;
8917 addr = vm_unmapped_area(&info);
8918 }
8919diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
8920index be56a24..443328f 100644
8921--- a/arch/sparc/include/asm/atomic_64.h
8922+++ b/arch/sparc/include/asm/atomic_64.h
8923@@ -14,18 +14,40 @@
8924 #define ATOMIC64_INIT(i) { (i) }
8925
8926 #define atomic_read(v) (*(volatile int *)&(v)->counter)
8927+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8928+{
8929+ return v->counter;
8930+}
8931 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
8932+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
8933+{
8934+ return v->counter;
8935+}
8936
8937 #define atomic_set(v, i) (((v)->counter) = i)
8938+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8939+{
8940+ v->counter = i;
8941+}
8942 #define atomic64_set(v, i) (((v)->counter) = i)
8943+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
8944+{
8945+ v->counter = i;
8946+}
8947
8948 extern void atomic_add(int, atomic_t *);
8949+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
8950 extern void atomic64_add(long, atomic64_t *);
8951+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
8952 extern void atomic_sub(int, atomic_t *);
8953+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
8954 extern void atomic64_sub(long, atomic64_t *);
8955+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
8956
8957 extern int atomic_add_ret(int, atomic_t *);
8958+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
8959 extern long atomic64_add_ret(long, atomic64_t *);
8960+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
8961 extern int atomic_sub_ret(int, atomic_t *);
8962 extern long atomic64_sub_ret(long, atomic64_t *);
8963
8964@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
8965 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
8966
8967 #define atomic_inc_return(v) atomic_add_ret(1, v)
8968+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8969+{
8970+ return atomic_add_ret_unchecked(1, v);
8971+}
8972 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
8973+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8974+{
8975+ return atomic64_add_ret_unchecked(1, v);
8976+}
8977
8978 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
8979 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
8980
8981 #define atomic_add_return(i, v) atomic_add_ret(i, v)
8982+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8983+{
8984+ return atomic_add_ret_unchecked(i, v);
8985+}
8986 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
8987+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
8988+{
8989+ return atomic64_add_ret_unchecked(i, v);
8990+}
8991
8992 /*
8993 * atomic_inc_and_test - increment and test
8994@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
8995 * other cases.
8996 */
8997 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
8998+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8999+{
9000+ return atomic_inc_return_unchecked(v) == 0;
9001+}
9002 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
9003
9004 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
9005@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
9006 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
9007
9008 #define atomic_inc(v) atomic_add(1, v)
9009+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9010+{
9011+ atomic_add_unchecked(1, v);
9012+}
9013 #define atomic64_inc(v) atomic64_add(1, v)
9014+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9015+{
9016+ atomic64_add_unchecked(1, v);
9017+}
9018
9019 #define atomic_dec(v) atomic_sub(1, v)
9020+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9021+{
9022+ atomic_sub_unchecked(1, v);
9023+}
9024 #define atomic64_dec(v) atomic64_sub(1, v)
9025+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9026+{
9027+ atomic64_sub_unchecked(1, v);
9028+}
9029
9030 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
9031 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
9032
9033 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
9034+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9035+{
9036+ return cmpxchg(&v->counter, old, new);
9037+}
9038 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
9039+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9040+{
9041+ return xchg(&v->counter, new);
9042+}
9043
9044 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9045 {
9046- int c, old;
9047+ int c, old, new;
9048 c = atomic_read(v);
9049 for (;;) {
9050- if (unlikely(c == (u)))
9051+ if (unlikely(c == u))
9052 break;
9053- old = atomic_cmpxchg((v), c, c + (a));
9054+
9055+ asm volatile("addcc %2, %0, %0\n"
9056+
9057+#ifdef CONFIG_PAX_REFCOUNT
9058+ "tvs %%icc, 6\n"
9059+#endif
9060+
9061+ : "=r" (new)
9062+ : "0" (c), "ir" (a)
9063+ : "cc");
9064+
9065+ old = atomic_cmpxchg(v, c, new);
9066 if (likely(old == c))
9067 break;
9068 c = old;
9069@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9070 #define atomic64_cmpxchg(v, o, n) \
9071 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
9072 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
9073+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
9074+{
9075+ return xchg(&v->counter, new);
9076+}
9077
9078 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
9079 {
9080- long c, old;
9081+ long c, old, new;
9082 c = atomic64_read(v);
9083 for (;;) {
9084- if (unlikely(c == (u)))
9085+ if (unlikely(c == u))
9086 break;
9087- old = atomic64_cmpxchg((v), c, c + (a));
9088+
9089+ asm volatile("addcc %2, %0, %0\n"
9090+
9091+#ifdef CONFIG_PAX_REFCOUNT
9092+ "tvs %%xcc, 6\n"
9093+#endif
9094+
9095+ : "=r" (new)
9096+ : "0" (c), "ir" (a)
9097+ : "cc");
9098+
9099+ old = atomic64_cmpxchg(v, c, new);
9100 if (likely(old == c))
9101 break;
9102 c = old;
9103 }
9104- return c != (u);
9105+ return c != u;
9106 }
9107
9108 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
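
Throughout the sparc atomic hunks above, REFCOUNT replaces add with addcc, which updates the condition codes, and follows it with "tvs %icc, 6" (or %xcc for 64-bit values): trap if the overflow flag is set, entering software trap 6, which the traps_64.c hunks later in this section route to pax_report_refcount_overflow(). The *_unchecked variants keep plain wrapping arithmetic for counters that may legitimately wrap. A portable, single-threaded sketch of the checked/unchecked split, with the GCC/Clang overflow builtin standing in for the trap and atomicity elided:

#include <stdio.h>
#include <stdlib.h>

typedef struct { volatile int counter; } atomic_sketch_t;

/* checked flavour: detect signed overflow, as addcc + tvs would */
static void atomic_add_checked(int i, atomic_sketch_t *v)
{
    int sum;

    if (__builtin_add_overflow(v->counter, i, &sum)) {
        fprintf(stderr, "refcount overflow\n");
        abort();   /* stands in for the trap-6 handler */
    }
    v->counter = sum;
}

/* unchecked flavour: wrapping add for counters allowed to overflow
 * (done in unsigned arithmetic so the wrap is well defined in C) */
static void atomic_add_unchecked_sketch(int i, atomic_sketch_t *v)
{
    v->counter = (int)((unsigned int)v->counter + (unsigned int)i);
}
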
9109diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
9110index 5bb6991..5c2132e 100644
9111--- a/arch/sparc/include/asm/cache.h
9112+++ b/arch/sparc/include/asm/cache.h
9113@@ -7,10 +7,12 @@
9114 #ifndef _SPARC_CACHE_H
9115 #define _SPARC_CACHE_H
9116
9117+#include <linux/const.h>
9118+
9119 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
9120
9121 #define L1_CACHE_SHIFT 5
9122-#define L1_CACHE_BYTES 32
9123+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9124
9125 #ifdef CONFIG_SPARC32
9126 #define SMP_CACHE_BYTES_SHIFT 5
9127diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
9128index a24e41f..47677ff 100644
9129--- a/arch/sparc/include/asm/elf_32.h
9130+++ b/arch/sparc/include/asm/elf_32.h
9131@@ -114,6 +114,13 @@ typedef struct {
9132
9133 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
9134
9135+#ifdef CONFIG_PAX_ASLR
9136+#define PAX_ELF_ET_DYN_BASE 0x10000UL
9137+
9138+#define PAX_DELTA_MMAP_LEN 16
9139+#define PAX_DELTA_STACK_LEN 16
9140+#endif
9141+
9142 /* This yields a mask that user programs can use to figure out what
9143 instruction set this cpu supports. This can NOT be done in userspace
9144 on Sparc. */
9145diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
9146index 370ca1e..d4f4a98 100644
9147--- a/arch/sparc/include/asm/elf_64.h
9148+++ b/arch/sparc/include/asm/elf_64.h
9149@@ -189,6 +189,13 @@ typedef struct {
9150 #define ELF_ET_DYN_BASE 0x0000010000000000UL
9151 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
9152
9153+#ifdef CONFIG_PAX_ASLR
9154+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
9155+
9156+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
9157+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
9158+#endif
9159+
9160 extern unsigned long sparc64_elf_hwcap;
9161 #define ELF_HWCAP sparc64_elf_hwcap
9162
9163diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
9164index 9b1c36d..209298b 100644
9165--- a/arch/sparc/include/asm/pgalloc_32.h
9166+++ b/arch/sparc/include/asm/pgalloc_32.h
9167@@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
9168 }
9169
9170 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
9171+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
9172
9173 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
9174 unsigned long address)
9175diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
9176index bcfe063..b333142 100644
9177--- a/arch/sparc/include/asm/pgalloc_64.h
9178+++ b/arch/sparc/include/asm/pgalloc_64.h
9179@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
9180 }
9181
9182 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
9183+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
9184
9185 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
9186 {
9187diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
9188index 502f632..da1917f 100644
9189--- a/arch/sparc/include/asm/pgtable_32.h
9190+++ b/arch/sparc/include/asm/pgtable_32.h
9191@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
9192 #define PAGE_SHARED SRMMU_PAGE_SHARED
9193 #define PAGE_COPY SRMMU_PAGE_COPY
9194 #define PAGE_READONLY SRMMU_PAGE_RDONLY
9195+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
9196+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
9197+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
9198 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
9199
9200 /* Top-level page directory - dummy used by init-mm.
9201@@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
9202
9203 /* xwr */
9204 #define __P000 PAGE_NONE
9205-#define __P001 PAGE_READONLY
9206-#define __P010 PAGE_COPY
9207-#define __P011 PAGE_COPY
9208+#define __P001 PAGE_READONLY_NOEXEC
9209+#define __P010 PAGE_COPY_NOEXEC
9210+#define __P011 PAGE_COPY_NOEXEC
9211 #define __P100 PAGE_READONLY
9212 #define __P101 PAGE_READONLY
9213 #define __P110 PAGE_COPY
9214 #define __P111 PAGE_COPY
9215
9216 #define __S000 PAGE_NONE
9217-#define __S001 PAGE_READONLY
9218-#define __S010 PAGE_SHARED
9219-#define __S011 PAGE_SHARED
9220+#define __S001 PAGE_READONLY_NOEXEC
9221+#define __S010 PAGE_SHARED_NOEXEC
9222+#define __S011 PAGE_SHARED_NOEXEC
9223 #define __S100 PAGE_READONLY
9224 #define __S101 PAGE_READONLY
9225 #define __S110 PAGE_SHARED
9226diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
9227index 79da178..c2eede8 100644
9228--- a/arch/sparc/include/asm/pgtsrmmu.h
9229+++ b/arch/sparc/include/asm/pgtsrmmu.h
9230@@ -115,6 +115,11 @@
9231 SRMMU_EXEC | SRMMU_REF)
9232 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
9233 SRMMU_EXEC | SRMMU_REF)
9234+
9235+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
9236+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
9237+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
9238+
9239 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
9240 SRMMU_DIRTY | SRMMU_REF)
9241
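
The two sparc32 header hunks above re-point every __Pxwr/__Sxwr protection-table entry whose x bit is clear at a *_NOEXEC pgprot built without SRMMU_EXEC, so data-only mappings lose execute permission in the page tables. The table is indexed by the xwr triple derived from mmap's prot bits; a toy decode of the private-mapping half, where the permission values are placeholders rather than SRMMU encodings:

#include <stdio.h>

enum { PROT_R = 1, PROT_W = 2, PROT_X = 4 }; /* placeholders, not SRMMU bits */

/* index bits are r=1, w=2, x=4; the name __Pxwr lists them high bit first */
static const int private_prot[8] = {
    [0] = 0,                        /* __P000: no access                 */
    [1] = PROT_R,                   /* __P001: had exec before the patch */
    [2] = PROT_R | PROT_W,          /* __P010: COW page, exec dropped    */
    [3] = PROT_R | PROT_W,          /* __P011: COW page, exec dropped    */
    [4] = PROT_R | PROT_X,          /* __P100 */
    [5] = PROT_R | PROT_X,          /* __P101 */
    [6] = PROT_R | PROT_W | PROT_X, /* __P110 */
    [7] = PROT_R | PROT_W | PROT_X, /* __P111 */
};

int main(void)
{
    for (int i = 0; i < 8; i++)
        printf("__P%d%d%d -> %c%c%c\n", !!(i & 4), !!(i & 2), !!(i & 1),
               private_prot[i] & PROT_R ? 'r' : '-',
               private_prot[i] & PROT_W ? 'w' : '-',
               private_prot[i] & PROT_X ? 'x' : '-');
    return 0;
}
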
9242diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
9243index 9689176..63c18ea 100644
9244--- a/arch/sparc/include/asm/spinlock_64.h
9245+++ b/arch/sparc/include/asm/spinlock_64.h
9246@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
9247
9248 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
9249
9250-static void inline arch_read_lock(arch_rwlock_t *lock)
9251+static inline void arch_read_lock(arch_rwlock_t *lock)
9252 {
9253 unsigned long tmp1, tmp2;
9254
9255 __asm__ __volatile__ (
9256 "1: ldsw [%2], %0\n"
9257 " brlz,pn %0, 2f\n"
9258-"4: add %0, 1, %1\n"
9259+"4: addcc %0, 1, %1\n"
9260+
9261+#ifdef CONFIG_PAX_REFCOUNT
9262+" tvs %%icc, 6\n"
9263+#endif
9264+
9265 " cas [%2], %0, %1\n"
9266 " cmp %0, %1\n"
9267 " bne,pn %%icc, 1b\n"
9268@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
9269 " .previous"
9270 : "=&r" (tmp1), "=&r" (tmp2)
9271 : "r" (lock)
9272- : "memory");
9273+ : "memory", "cc");
9274 }
9275
9276-static int inline arch_read_trylock(arch_rwlock_t *lock)
9277+static inline int arch_read_trylock(arch_rwlock_t *lock)
9278 {
9279 int tmp1, tmp2;
9280
9281@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
9282 "1: ldsw [%2], %0\n"
9283 " brlz,a,pn %0, 2f\n"
9284 " mov 0, %0\n"
9285-" add %0, 1, %1\n"
9286+" addcc %0, 1, %1\n"
9287+
9288+#ifdef CONFIG_PAX_REFCOUNT
9289+" tvs %%icc, 6\n"
9290+#endif
9291+
9292 " cas [%2], %0, %1\n"
9293 " cmp %0, %1\n"
9294 " bne,pn %%icc, 1b\n"
9295@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
9296 return tmp1;
9297 }
9298
9299-static void inline arch_read_unlock(arch_rwlock_t *lock)
9300+static inline void arch_read_unlock(arch_rwlock_t *lock)
9301 {
9302 unsigned long tmp1, tmp2;
9303
9304 __asm__ __volatile__(
9305 "1: lduw [%2], %0\n"
9306-" sub %0, 1, %1\n"
9307+" subcc %0, 1, %1\n"
9308+
9309+#ifdef CONFIG_PAX_REFCOUNT
9310+" tvs %%icc, 6\n"
9311+#endif
9312+
9313 " cas [%2], %0, %1\n"
9314 " cmp %0, %1\n"
9315 " bne,pn %%xcc, 1b\n"
9316@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
9317 : "memory");
9318 }
9319
9320-static void inline arch_write_lock(arch_rwlock_t *lock)
9321+static inline void arch_write_lock(arch_rwlock_t *lock)
9322 {
9323 unsigned long mask, tmp1, tmp2;
9324
9325@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
9326 : "memory");
9327 }
9328
9329-static void inline arch_write_unlock(arch_rwlock_t *lock)
9330+static inline void arch_write_unlock(arch_rwlock_t *lock)
9331 {
9332 __asm__ __volatile__(
9333 " stw %%g0, [%0]"
9334@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
9335 : "memory");
9336 }
9337
9338-static int inline arch_write_trylock(arch_rwlock_t *lock)
9339+static inline int arch_write_trylock(arch_rwlock_t *lock)
9340 {
9341 unsigned long mask, tmp1, tmp2, result;
9342
9343diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
9344index dd38075..e7cac83 100644
9345--- a/arch/sparc/include/asm/thread_info_32.h
9346+++ b/arch/sparc/include/asm/thread_info_32.h
9347@@ -49,6 +49,8 @@ struct thread_info {
9348 unsigned long w_saved;
9349
9350 struct restart_block restart_block;
9351+
9352+ unsigned long lowest_stack;
9353 };
9354
9355 /*
9356diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
9357index d5e5042..9bfee76 100644
9358--- a/arch/sparc/include/asm/thread_info_64.h
9359+++ b/arch/sparc/include/asm/thread_info_64.h
9360@@ -63,6 +63,8 @@ struct thread_info {
9361 struct pt_regs *kern_una_regs;
9362 unsigned int kern_una_insn;
9363
9364+ unsigned long lowest_stack;
9365+
9366 unsigned long fpregs[0] __attribute__ ((aligned(64)));
9367 };
9368
9369@@ -192,10 +194,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
9370 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
9371 /* flag bit 6 is available */
9372 #define TIF_32BIT 7 /* 32-bit binary */
9373-/* flag bit 8 is available */
9374+#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
9375 #define TIF_SECCOMP 9 /* secure computing */
9376 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
9377 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
9378+
9379 /* NOTE: Thread flags >= 12 should be ones we have no interest
9380 * in using in assembly, else we can't use the mask as
9381 * an immediate value in instructions such as andcc.
9382@@ -214,12 +217,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
9383 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
9384 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
9385 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
9386+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
9387
9388 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
9389 _TIF_DO_NOTIFY_RESUME_MASK | \
9390 _TIF_NEED_RESCHED)
9391 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
9392
9393+#define _TIF_WORK_SYSCALL \
9394+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
9395+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
9396+
9397+
9398 /*
9399 * Thread-synchronous status.
9400 *
9401diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
9402index 0167d26..767bb0c 100644
9403--- a/arch/sparc/include/asm/uaccess.h
9404+++ b/arch/sparc/include/asm/uaccess.h
9405@@ -1,5 +1,6 @@
9406 #ifndef ___ASM_SPARC_UACCESS_H
9407 #define ___ASM_SPARC_UACCESS_H
9408+
9409 #if defined(__sparc__) && defined(__arch64__)
9410 #include <asm/uaccess_64.h>
9411 #else
9412diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
9413index 53a28dd..50c38c3 100644
9414--- a/arch/sparc/include/asm/uaccess_32.h
9415+++ b/arch/sparc/include/asm/uaccess_32.h
9416@@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
9417
9418 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9419 {
9420- if (n && __access_ok((unsigned long) to, n))
9421+ if ((long)n < 0)
9422+ return n;
9423+
9424+ if (n && __access_ok((unsigned long) to, n)) {
9425+ if (!__builtin_constant_p(n))
9426+ check_object_size(from, n, true);
9427 return __copy_user(to, (__force void __user *) from, n);
9428- else
9429+ } else
9430 return n;
9431 }
9432
9433 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
9434 {
9435+ if ((long)n < 0)
9436+ return n;
9437+
9438+ if (!__builtin_constant_p(n))
9439+ check_object_size(from, n, true);
9440+
9441 return __copy_user(to, (__force void __user *) from, n);
9442 }
9443
9444 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9445 {
9446- if (n && __access_ok((unsigned long) from, n))
9447+ if ((long)n < 0)
9448+ return n;
9449+
9450+ if (n && __access_ok((unsigned long) from, n)) {
9451+ if (!__builtin_constant_p(n))
9452+ check_object_size(to, n, false);
9453 return __copy_user((__force void __user *) to, from, n);
9454- else
9455+ } else
9456 return n;
9457 }
9458
9459 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
9460 {
9461+ if ((long)n < 0)
9462+ return n;
9463+
9464 return __copy_user((__force void __user *) to, from, n);
9465 }
9466
9467diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
9468index e562d3c..191f176 100644
9469--- a/arch/sparc/include/asm/uaccess_64.h
9470+++ b/arch/sparc/include/asm/uaccess_64.h
9471@@ -10,6 +10,7 @@
9472 #include <linux/compiler.h>
9473 #include <linux/string.h>
9474 #include <linux/thread_info.h>
9475+#include <linux/kernel.h>
9476 #include <asm/asi.h>
9477 #include <asm/spitfire.h>
9478 #include <asm-generic/uaccess-unaligned.h>
9479@@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
9480 static inline unsigned long __must_check
9481 copy_from_user(void *to, const void __user *from, unsigned long size)
9482 {
9483- unsigned long ret = ___copy_from_user(to, from, size);
9484+ unsigned long ret;
9485
9486+ if ((long)size < 0 || size > INT_MAX)
9487+ return size;
9488+
9489+ if (!__builtin_constant_p(size))
9490+ check_object_size(to, size, false);
9491+
9492+ ret = ___copy_from_user(to, from, size);
9493 if (unlikely(ret))
9494 ret = copy_from_user_fixup(to, from, size);
9495
9496@@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
9497 static inline unsigned long __must_check
9498 copy_to_user(void __user *to, const void *from, unsigned long size)
9499 {
9500- unsigned long ret = ___copy_to_user(to, from, size);
9501+ unsigned long ret;
9502
9503+ if ((long)size < 0 || size > INT_MAX)
9504+ return size;
9505+
9506+ if (!__builtin_constant_p(size))
9507+ check_object_size(from, size, true);
9508+
9509+ ret = ___copy_to_user(to, from, size);
9510 if (unlikely(ret))
9511 ret = copy_to_user_fixup(to, from, size);
9512 return ret;
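
Both sparc uaccess headers above gate check_object_size() on !__builtin_constant_p(n): constant-size copies can be validated at build time, so only runtime-sized copies pay for the USERCOPY object-bounds walk. A sketch of that gating follows; the bounds-check body is a placeholder for the real helper, which is defined elsewhere in this patch and walks slab and stack metadata to verify the range stays inside one object.

#include <stddef.h>
#include <stdlib.h>

/* placeholder for check_object_size(): the real helper verifies that
 * [p, p+n) lies within a single slab or stack object */
static void check_object_size_sketch(const void *p, size_t n, int to_user)
{
    (void)p; (void)to_user;
    if (n > 4096)   /* placeholder bound for the sketch */
        abort();
}

/* constant sizes are provable at build time, so only runtime-sized
 * copies take the object walk: the !__builtin_constant_p() gate */
#define copy_out_sketch(dst, src, n)                    \
do {                                                    \
    if (!__builtin_constant_p(n))                       \
        check_object_size_sketch((src), (n), 1);        \
    __builtin_memcpy((dst), (src), (n));                \
} while (0)
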
9513diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
9514index d15cc17..d0ae796 100644
9515--- a/arch/sparc/kernel/Makefile
9516+++ b/arch/sparc/kernel/Makefile
9517@@ -4,7 +4,7 @@
9518 #
9519
9520 asflags-y := -ansi
9521-ccflags-y := -Werror
9522+#ccflags-y := -Werror
9523
9524 extra-y := head_$(BITS).o
9525
9526diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
9527index fdd819d..5af08c8 100644
9528--- a/arch/sparc/kernel/process_32.c
9529+++ b/arch/sparc/kernel/process_32.c
9530@@ -116,14 +116,14 @@ void show_regs(struct pt_regs *r)
9531
9532 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
9533 r->psr, r->pc, r->npc, r->y, print_tainted());
9534- printk("PC: <%pS>\n", (void *) r->pc);
9535+ printk("PC: <%pA>\n", (void *) r->pc);
9536 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
9537 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
9538 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
9539 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
9540 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
9541 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
9542- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
9543+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
9544
9545 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
9546 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
9547@@ -160,7 +160,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
9548 rw = (struct reg_window32 *) fp;
9549 pc = rw->ins[7];
9550 printk("[%08lx : ", pc);
9551- printk("%pS ] ", (void *) pc);
9552+ printk("%pA ] ", (void *) pc);
9553 fp = rw->ins[6];
9554 } while (++count < 16);
9555 printk("\n");
9556diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
9557index baebab2..9cd13b1 100644
9558--- a/arch/sparc/kernel/process_64.c
9559+++ b/arch/sparc/kernel/process_64.c
9560@@ -158,7 +158,7 @@ static void show_regwindow(struct pt_regs *regs)
9561 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
9562 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
9563 if (regs->tstate & TSTATE_PRIV)
9564- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
9565+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
9566 }
9567
9568 void show_regs(struct pt_regs *regs)
9569@@ -167,7 +167,7 @@ void show_regs(struct pt_regs *regs)
9570
9571 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
9572 regs->tpc, regs->tnpc, regs->y, print_tainted());
9573- printk("TPC: <%pS>\n", (void *) regs->tpc);
9574+ printk("TPC: <%pA>\n", (void *) regs->tpc);
9575 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
9576 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
9577 regs->u_regs[3]);
9578@@ -180,7 +180,7 @@ void show_regs(struct pt_regs *regs)
9579 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
9580 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
9581 regs->u_regs[15]);
9582- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
9583+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
9584 show_regwindow(regs);
9585 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
9586 }
9587@@ -269,7 +269,7 @@ void arch_trigger_all_cpu_backtrace(void)
9588 ((tp && tp->task) ? tp->task->pid : -1));
9589
9590 if (gp->tstate & TSTATE_PRIV) {
9591- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
9592+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
9593 (void *) gp->tpc,
9594 (void *) gp->o7,
9595 (void *) gp->i7,
9596diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
9597index 79cc0d1..ec62734 100644
9598--- a/arch/sparc/kernel/prom_common.c
9599+++ b/arch/sparc/kernel/prom_common.c
9600@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
9601
9602 unsigned int prom_early_allocated __initdata;
9603
9604-static struct of_pdt_ops prom_sparc_ops __initdata = {
9605+static struct of_pdt_ops prom_sparc_ops __initconst = {
9606 .nextprop = prom_common_nextprop,
9607 .getproplen = prom_getproplen,
9608 .getproperty = prom_getproperty,
9609diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
9610index 773c1f2..a8bdd87 100644
9611--- a/arch/sparc/kernel/ptrace_64.c
9612+++ b/arch/sparc/kernel/ptrace_64.c
9613@@ -1059,6 +1059,10 @@ long arch_ptrace(struct task_struct *child, long request,
9614 return ret;
9615 }
9616
9617+#ifdef CONFIG_GRKERNSEC_SETXID
9618+extern void gr_delayed_cred_worker(void);
9619+#endif
9620+
9621 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
9622 {
9623 int ret = 0;
9624@@ -1066,6 +1070,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
9625 /* do the secure computing check first */
9626 secure_computing_strict(regs->u_regs[UREG_G1]);
9627
9628+#ifdef CONFIG_GRKERNSEC_SETXID
9629+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9630+ gr_delayed_cred_worker();
9631+#endif
9632+
9633 if (test_thread_flag(TIF_SYSCALL_TRACE))
9634 ret = tracehook_report_syscall_entry(regs);
9635
9636@@ -1086,6 +1095,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
9637
9638 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
9639 {
9640+#ifdef CONFIG_GRKERNSEC_SETXID
9641+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9642+ gr_delayed_cred_worker();
9643+#endif
9644+
9645 audit_syscall_exit(regs);
9646
9647 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
9648diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
9649index e142545..fd29654 100644
9650--- a/arch/sparc/kernel/smp_64.c
9651+++ b/arch/sparc/kernel/smp_64.c
9652@@ -869,8 +869,8 @@ extern unsigned long xcall_flush_dcache_page_cheetah;
9653 extern unsigned long xcall_flush_dcache_page_spitfire;
9654
9655 #ifdef CONFIG_DEBUG_DCFLUSH
9656-extern atomic_t dcpage_flushes;
9657-extern atomic_t dcpage_flushes_xcall;
9658+extern atomic_unchecked_t dcpage_flushes;
9659+extern atomic_unchecked_t dcpage_flushes_xcall;
9660 #endif
9661
9662 static inline void __local_flush_dcache_page(struct page *page)
9663@@ -894,7 +894,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
9664 return;
9665
9666 #ifdef CONFIG_DEBUG_DCFLUSH
9667- atomic_inc(&dcpage_flushes);
9668+ atomic_inc_unchecked(&dcpage_flushes);
9669 #endif
9670
9671 this_cpu = get_cpu();
9672@@ -918,7 +918,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
9673 xcall_deliver(data0, __pa(pg_addr),
9674 (u64) pg_addr, cpumask_of(cpu));
9675 #ifdef CONFIG_DEBUG_DCFLUSH
9676- atomic_inc(&dcpage_flushes_xcall);
9677+ atomic_inc_unchecked(&dcpage_flushes_xcall);
9678 #endif
9679 }
9680 }
9681@@ -937,7 +937,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
9682 preempt_disable();
9683
9684 #ifdef CONFIG_DEBUG_DCFLUSH
9685- atomic_inc(&dcpage_flushes);
9686+ atomic_inc_unchecked(&dcpage_flushes);
9687 #endif
9688 data0 = 0;
9689 pg_addr = page_address(page);
9690@@ -954,7 +954,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
9691 xcall_deliver(data0, __pa(pg_addr),
9692 (u64) pg_addr, cpu_online_mask);
9693 #ifdef CONFIG_DEBUG_DCFLUSH
9694- atomic_inc(&dcpage_flushes_xcall);
9695+ atomic_inc_unchecked(&dcpage_flushes_xcall);
9696 #endif
9697 }
9698 __local_flush_dcache_page(page);
9699diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
9700index 3a8d184..49498a8 100644
9701--- a/arch/sparc/kernel/sys_sparc_32.c
9702+++ b/arch/sparc/kernel/sys_sparc_32.c
9703@@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9704 if (len > TASK_SIZE - PAGE_SIZE)
9705 return -ENOMEM;
9706 if (!addr)
9707- addr = TASK_UNMAPPED_BASE;
9708+ addr = current->mm->mmap_base;
9709
9710 info.flags = 0;
9711 info.length = len;
9712diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
9713index 51561b8..8256764 100644
9714--- a/arch/sparc/kernel/sys_sparc_64.c
9715+++ b/arch/sparc/kernel/sys_sparc_64.c
9716@@ -90,13 +90,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9717 struct vm_area_struct * vma;
9718 unsigned long task_size = TASK_SIZE;
9719 int do_color_align;
9720+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9721 struct vm_unmapped_area_info info;
9722
9723 if (flags & MAP_FIXED) {
9724 /* We do not accept a shared mapping if it would violate
9725 * cache aliasing constraints.
9726 */
9727- if ((flags & MAP_SHARED) &&
9728+ if ((filp || (flags & MAP_SHARED)) &&
9729 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
9730 return -EINVAL;
9731 return addr;
9732@@ -111,6 +112,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9733 if (filp || (flags & MAP_SHARED))
9734 do_color_align = 1;
9735
9736+#ifdef CONFIG_PAX_RANDMMAP
9737+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9738+#endif
9739+
9740 if (addr) {
9741 if (do_color_align)
9742 addr = COLOR_ALIGN(addr, pgoff);
9743@@ -118,22 +123,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9744 addr = PAGE_ALIGN(addr);
9745
9746 vma = find_vma(mm, addr);
9747- if (task_size - len >= addr &&
9748- (!vma || addr + len <= vma->vm_start))
9749+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9750 return addr;
9751 }
9752
9753 info.flags = 0;
9754 info.length = len;
9755- info.low_limit = TASK_UNMAPPED_BASE;
9756+ info.low_limit = mm->mmap_base;
9757 info.high_limit = min(task_size, VA_EXCLUDE_START);
9758 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
9759 info.align_offset = pgoff << PAGE_SHIFT;
9760+ info.threadstack_offset = offset;
9761 addr = vm_unmapped_area(&info);
9762
9763 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
9764 VM_BUG_ON(addr != -ENOMEM);
9765 info.low_limit = VA_EXCLUDE_END;
9766+
9767+#ifdef CONFIG_PAX_RANDMMAP
9768+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9769+ info.low_limit += mm->delta_mmap;
9770+#endif
9771+
9772 info.high_limit = task_size;
9773 addr = vm_unmapped_area(&info);
9774 }
9775@@ -151,6 +162,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9776 unsigned long task_size = STACK_TOP32;
9777 unsigned long addr = addr0;
9778 int do_color_align;
9779+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9780 struct vm_unmapped_area_info info;
9781
9782 /* This should only ever run for 32-bit processes. */
9783@@ -160,7 +172,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9784 /* We do not accept a shared mapping if it would violate
9785 * cache aliasing constraints.
9786 */
9787- if ((flags & MAP_SHARED) &&
9788+ if ((filp || (flags & MAP_SHARED)) &&
9789 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
9790 return -EINVAL;
9791 return addr;
9792@@ -173,6 +185,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9793 if (filp || (flags & MAP_SHARED))
9794 do_color_align = 1;
9795
9796+#ifdef CONFIG_PAX_RANDMMAP
9797+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9798+#endif
9799+
9800 /* requesting a specific address */
9801 if (addr) {
9802 if (do_color_align)
9803@@ -181,8 +197,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9804 addr = PAGE_ALIGN(addr);
9805
9806 vma = find_vma(mm, addr);
9807- if (task_size - len >= addr &&
9808- (!vma || addr + len <= vma->vm_start))
9809+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9810 return addr;
9811 }
9812
9813@@ -192,6 +207,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9814 info.high_limit = mm->mmap_base;
9815 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
9816 info.align_offset = pgoff << PAGE_SHIFT;
9817+ info.threadstack_offset = offset;
9818 addr = vm_unmapped_area(&info);
9819
9820 /*
9821@@ -204,6 +220,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9822 VM_BUG_ON(addr != -ENOMEM);
9823 info.flags = 0;
9824 info.low_limit = TASK_UNMAPPED_BASE;
9825+
9826+#ifdef CONFIG_PAX_RANDMMAP
9827+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9828+ info.low_limit += mm->delta_mmap;
9829+#endif
9830+
9831 info.high_limit = STACK_TOP32;
9832 addr = vm_unmapped_area(&info);
9833 }
9834@@ -260,10 +282,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
9835 EXPORT_SYMBOL(get_fb_unmapped_area);
9836
9837 /* Essentially the same as PowerPC. */
9838-static unsigned long mmap_rnd(void)
9839+static unsigned long mmap_rnd(struct mm_struct *mm)
9840 {
9841 unsigned long rnd = 0UL;
9842
9843+#ifdef CONFIG_PAX_RANDMMAP
9844+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9845+#endif
9846+
9847 if (current->flags & PF_RANDOMIZE) {
9848 unsigned long val = get_random_int();
9849 if (test_thread_flag(TIF_32BIT))
9850@@ -276,7 +302,7 @@ static unsigned long mmap_rnd(void)
9851
9852 void arch_pick_mmap_layout(struct mm_struct *mm)
9853 {
9854- unsigned long random_factor = mmap_rnd();
9855+ unsigned long random_factor = mmap_rnd(mm);
9856 unsigned long gap;
9857
9858 /*
9859@@ -289,6 +315,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9860 gap == RLIM_INFINITY ||
9861 sysctl_legacy_va_layout) {
9862 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
9863+
9864+#ifdef CONFIG_PAX_RANDMMAP
9865+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9866+ mm->mmap_base += mm->delta_mmap;
9867+#endif
9868+
9869 mm->get_unmapped_area = arch_get_unmapped_area;
9870 } else {
9871 /* We know it's 32-bit */
9872@@ -300,6 +332,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9873 gap = (task_size / 6 * 5);
9874
9875 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
9876+
9877+#ifdef CONFIG_PAX_RANDMMAP
9878+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9879+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9880+#endif
9881+
9882 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9883 }
9884 }
9885diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
9886index d950197..192f9d8 100644
9887--- a/arch/sparc/kernel/syscalls.S
9888+++ b/arch/sparc/kernel/syscalls.S
9889@@ -52,7 +52,7 @@ sys32_rt_sigreturn:
9890 #endif
9891 .align 32
9892 1: ldx [%g6 + TI_FLAGS], %l5
9893- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9894+ andcc %l5, _TIF_WORK_SYSCALL, %g0
9895 be,pt %icc, rtrap
9896 nop
9897 call syscall_trace_leave
9898@@ -184,7 +184,7 @@ linux_sparc_syscall32:
9899
9900 srl %i3, 0, %o3 ! IEU0
9901 srl %i2, 0, %o2 ! IEU0 Group
9902- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9903+ andcc %l0, _TIF_WORK_SYSCALL, %g0
9904 bne,pn %icc, linux_syscall_trace32 ! CTI
9905 mov %i0, %l5 ! IEU1
9906 5: call %l7 ! CTI Group brk forced
9907@@ -207,7 +207,7 @@ linux_sparc_syscall:
9908
9909 mov %i3, %o3 ! IEU1
9910 mov %i4, %o4 ! IEU0 Group
9911- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9912+ andcc %l0, _TIF_WORK_SYSCALL, %g0
9913 bne,pn %icc, linux_syscall_trace ! CTI Group
9914 mov %i0, %l5 ! IEU0
9915 2: call %l7 ! CTI Group brk forced
9916@@ -223,7 +223,7 @@ ret_sys_call:
9917
9918 cmp %o0, -ERESTART_RESTARTBLOCK
9919 bgeu,pn %xcc, 1f
9920- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9921+ andcc %l0, _TIF_WORK_SYSCALL, %g0
9922 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
9923
9924 2:
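
The andcc rewrites above substitute the _TIF_WORK_SYSCALL mask defined in the thread_info_64.h hunk earlier for the spelled-out four-flag operand. The constraint quoted in that header's NOTE, that flags at bit 12 and above must stay out of assembly masks, is sparc's 13-bit sign-extended immediate field: the whole mask must fit below bit 12 to remain encodable in andcc, which TIF_GRSEC_SETXID at bit 8 does. A compile-time restatement; the _Static_assert and the TIF_SYSCALL_TRACE value of 0 are assumptions for the sketch, not part of the patch:

/* bit numbers as in the thread_info_64.h hunk; TIF_SYSCALL_TRACE's
 * value is assumed (it is not shown in that hunk) */
#define TIF_SYSCALL_TRACE      0
#define TIF_GRSEC_SETXID       8
#define TIF_SECCOMP            9
#define TIF_SYSCALL_AUDIT      10
#define TIF_SYSCALL_TRACEPOINT 11

#define _TIF_WORK_SYSCALL                                       \
    ((1 << TIF_SYSCALL_TRACE) | (1 << TIF_SECCOMP) |            \
     (1 << TIF_SYSCALL_AUDIT) | (1 << TIF_SYSCALL_TRACEPOINT) | \
     (1 << TIF_GRSEC_SETXID))

/* andcc takes a 13-bit sign-extended immediate, so the mask is only
 * usable inline if every flag sits below bit 12 */
_Static_assert(_TIF_WORK_SYSCALL <= 0xfff,
               "mask must fit in a sparc simm13 immediate");
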
9925diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
9926index 6629829..036032d 100644
9927--- a/arch/sparc/kernel/traps_32.c
9928+++ b/arch/sparc/kernel/traps_32.c
9929@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
9930 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
9931 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
9932
9933+extern void gr_handle_kernel_exploit(void);
9934+
9935 void die_if_kernel(char *str, struct pt_regs *regs)
9936 {
9937 static int die_counter;
9938@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
9939 count++ < 30 &&
9940 (((unsigned long) rw) >= PAGE_OFFSET) &&
9941 !(((unsigned long) rw) & 0x7)) {
9942- printk("Caller[%08lx]: %pS\n", rw->ins[7],
9943+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
9944 (void *) rw->ins[7]);
9945 rw = (struct reg_window32 *)rw->ins[6];
9946 }
9947 }
9948 printk("Instruction DUMP:");
9949 instruction_dump ((unsigned long *) regs->pc);
9950- if(regs->psr & PSR_PS)
9951+ if(regs->psr & PSR_PS) {
9952+ gr_handle_kernel_exploit();
9953 do_exit(SIGKILL);
9954+ }
9955 do_exit(SIGSEGV);
9956 }
9957
9958diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
9959index b3f833a..f485f80 100644
9960--- a/arch/sparc/kernel/traps_64.c
9961+++ b/arch/sparc/kernel/traps_64.c
9962@@ -76,7 +76,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
9963 i + 1,
9964 p->trapstack[i].tstate, p->trapstack[i].tpc,
9965 p->trapstack[i].tnpc, p->trapstack[i].tt);
9966- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
9967+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
9968 }
9969 }
9970
9971@@ -96,6 +96,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
9972
9973 lvl -= 0x100;
9974 if (regs->tstate & TSTATE_PRIV) {
9975+
9976+#ifdef CONFIG_PAX_REFCOUNT
9977+ if (lvl == 6)
9978+ pax_report_refcount_overflow(regs);
9979+#endif
9980+
9981 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
9982 die_if_kernel(buffer, regs);
9983 }
9984@@ -114,11 +120,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
9985 void bad_trap_tl1(struct pt_regs *regs, long lvl)
9986 {
9987 char buffer[32];
9988-
9989+
9990 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
9991 0, lvl, SIGTRAP) == NOTIFY_STOP)
9992 return;
9993
9994+#ifdef CONFIG_PAX_REFCOUNT
9995+ if (lvl == 6)
9996+ pax_report_refcount_overflow(regs);
9997+#endif
9998+
9999 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
10000
10001 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
10002@@ -1142,7 +1153,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
10003 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
10004 printk("%s" "ERROR(%d): ",
10005 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
10006- printk("TPC<%pS>\n", (void *) regs->tpc);
10007+ printk("TPC<%pA>\n", (void *) regs->tpc);
10008 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
10009 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
10010 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
10011@@ -1749,7 +1760,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
10012 smp_processor_id(),
10013 (type & 0x1) ? 'I' : 'D',
10014 regs->tpc);
10015- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
10016+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
10017 panic("Irrecoverable Cheetah+ parity error.");
10018 }
10019
10020@@ -1757,7 +1768,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
10021 smp_processor_id(),
10022 (type & 0x1) ? 'I' : 'D',
10023 regs->tpc);
10024- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
10025+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
10026 }
10027
10028 struct sun4v_error_entry {
10029@@ -1830,8 +1841,8 @@ struct sun4v_error_entry {
10030 /*0x38*/u64 reserved_5;
10031 };
10032
10033-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
10034-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
10035+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
10036+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
10037
10038 static const char *sun4v_err_type_to_str(u8 type)
10039 {
10040@@ -1923,7 +1934,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
10041 }
10042
10043 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
10044- int cpu, const char *pfx, atomic_t *ocnt)
10045+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
10046 {
10047 u64 *raw_ptr = (u64 *) ent;
10048 u32 attrs;
10049@@ -1981,8 +1992,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
10050
10051 show_regs(regs);
10052
10053- if ((cnt = atomic_read(ocnt)) != 0) {
10054- atomic_set(ocnt, 0);
10055+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
10056+ atomic_set_unchecked(ocnt, 0);
10057 wmb();
10058 printk("%s: Queue overflowed %d times.\n",
10059 pfx, cnt);
10060@@ -2036,7 +2047,7 @@ void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
10061 */
10062 void sun4v_resum_overflow(struct pt_regs *regs)
10063 {
10064- atomic_inc(&sun4v_resum_oflow_cnt);
10065+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
10066 }
10067
10068 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
10069@@ -2089,7 +2100,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
10070 /* XXX Actually even this can make not that much sense. Perhaps
10071 * XXX we should just pull the plug and panic directly from here?
10072 */
10073- atomic_inc(&sun4v_nonresum_oflow_cnt);
10074+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
10075 }
10076
10077 unsigned long sun4v_err_itlb_vaddr;
10078@@ -2104,9 +2115,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
10079
10080 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
10081 regs->tpc, tl);
10082- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
10083+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
10084 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
10085- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
10086+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
10087 (void *) regs->u_regs[UREG_I7]);
10088 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
10089 "pte[%lx] error[%lx]\n",
10090@@ -2128,9 +2139,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
10091
10092 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
10093 regs->tpc, tl);
10094- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
10095+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
10096 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
10097- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
10098+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
10099 (void *) regs->u_regs[UREG_I7]);
10100 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
10101 "pte[%lx] error[%lx]\n",
10102@@ -2336,13 +2347,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10103 fp = (unsigned long)sf->fp + STACK_BIAS;
10104 }
10105
10106- printk(" [%016lx] %pS\n", pc, (void *) pc);
10107+ printk(" [%016lx] %pA\n", pc, (void *) pc);
10108 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
10109 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
10110 int index = tsk->curr_ret_stack;
10111 if (tsk->ret_stack && index >= graph) {
10112 pc = tsk->ret_stack[index - graph].ret;
10113- printk(" [%016lx] %pS\n", pc, (void *) pc);
10114+ printk(" [%016lx] %pA\n", pc, (void *) pc);
10115 graph++;
10116 }
10117 }
10118@@ -2360,6 +2371,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
10119 return (struct reg_window *) (fp + STACK_BIAS);
10120 }
10121
10122+extern void gr_handle_kernel_exploit(void);
10123+
10124 void die_if_kernel(char *str, struct pt_regs *regs)
10125 {
10126 static int die_counter;
10127@@ -2388,7 +2401,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
10128 while (rw &&
10129 count++ < 30 &&
10130 kstack_valid(tp, (unsigned long) rw)) {
10131- printk("Caller[%016lx]: %pS\n", rw->ins[7],
10132+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
10133 (void *) rw->ins[7]);
10134
10135 rw = kernel_stack_up(rw);
10136@@ -2401,8 +2414,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
10137 }
10138 user_instruction_dump ((unsigned int __user *) regs->tpc);
10139 }
10140- if (regs->tstate & TSTATE_PRIV)
10141+ if (regs->tstate & TSTATE_PRIV) {
10142+ gr_handle_kernel_exploit();
10143 do_exit(SIGKILL);
10144+ }
10145 do_exit(SIGSEGV);
10146 }
10147 EXPORT_SYMBOL(die_if_kernel);
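The CONFIG_PAX_REFCOUNT hunks above are the reporting half of the overflow protection whose instrumentation half sits in arch/sparc/lib/atomic_64.S below: the instrumented atomics execute tvs %icc, 6 (tvs %xcc, 6 for the 64-bit ops), i.e. "trap on overflow set" into software trap 6, which arrives in bad_trap()/bad_trap_tl1() as level 6 and is routed to pax_report_refcount_overflow() before the offender is killed. The effect is to turn a silently wrapping reference count into a detected, fatal event. A non-atomic C model of the intended semantics (the real check rides on the hardware overflow flag inside the cas loop, so it has no race):

#include <linux/atomic.h>
#include <linux/kernel.h>

extern void refcount_overflow_report(void);	/* hypothetical stand-in
						 * for the trap-6 path */

/* Model only: read-then-inc is racy; shown purely to convey what the
 * addcc/tvs pair in atomic_64.S achieves atomically. */
static inline void atomic_inc_checked_model(atomic_t *v)
{
	if (atomic_read(v) == INT_MAX)	/* increment would overflow */
		refcount_overflow_report();
	else
		atomic_inc(v);
}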
10148diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
10149index 8201c25e..072a2a7 100644
10150--- a/arch/sparc/kernel/unaligned_64.c
10151+++ b/arch/sparc/kernel/unaligned_64.c
10152@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
10153 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
10154
10155 if (__ratelimit(&ratelimit)) {
10156- printk("Kernel unaligned access at TPC[%lx] %pS\n",
10157+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
10158 regs->tpc, (void *) regs->tpc);
10159 }
10160 }
10161diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
10162index dbe119b..089c7c1 100644
10163--- a/arch/sparc/lib/Makefile
10164+++ b/arch/sparc/lib/Makefile
10165@@ -2,7 +2,7 @@
10166 #
10167
10168 asflags-y := -ansi -DST_DIV0=0x02
10169-ccflags-y := -Werror
10170+#ccflags-y := -Werror
10171
10172 lib-$(CONFIG_SPARC32) += ashrdi3.o
10173 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
10174diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
10175index 85c233d..68500e0 100644
10176--- a/arch/sparc/lib/atomic_64.S
10177+++ b/arch/sparc/lib/atomic_64.S
10178@@ -17,7 +17,12 @@
10179 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
10180 BACKOFF_SETUP(%o2)
10181 1: lduw [%o1], %g1
10182- add %g1, %o0, %g7
10183+ addcc %g1, %o0, %g7
10184+
10185+#ifdef CONFIG_PAX_REFCOUNT
10186+ tvs %icc, 6
10187+#endif
10188+
10189 cas [%o1], %g1, %g7
10190 cmp %g1, %g7
10191 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10192@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
10193 2: BACKOFF_SPIN(%o2, %o3, 1b)
10194 ENDPROC(atomic_add)
10195
10196+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10197+ BACKOFF_SETUP(%o2)
10198+1: lduw [%o1], %g1
10199+ add %g1, %o0, %g7
10200+ cas [%o1], %g1, %g7
10201+ cmp %g1, %g7
10202+ bne,pn %icc, 2f
10203+ nop
10204+ retl
10205+ nop
10206+2: BACKOFF_SPIN(%o2, %o3, 1b)
10207+ENDPROC(atomic_add_unchecked)
10208+
10209 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10210 BACKOFF_SETUP(%o2)
10211 1: lduw [%o1], %g1
10212- sub %g1, %o0, %g7
10213+ subcc %g1, %o0, %g7
10214+
10215+#ifdef CONFIG_PAX_REFCOUNT
10216+ tvs %icc, 6
10217+#endif
10218+
10219 cas [%o1], %g1, %g7
10220 cmp %g1, %g7
10221 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10222@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10223 2: BACKOFF_SPIN(%o2, %o3, 1b)
10224 ENDPROC(atomic_sub)
10225
10226+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
10227+ BACKOFF_SETUP(%o2)
10228+1: lduw [%o1], %g1
10229+ sub %g1, %o0, %g7
10230+ cas [%o1], %g1, %g7
10231+ cmp %g1, %g7
10232+ bne,pn %icc, 2f
10233+ nop
10234+ retl
10235+ nop
10236+2: BACKOFF_SPIN(%o2, %o3, 1b)
10237+ENDPROC(atomic_sub_unchecked)
10238+
10239 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10240 BACKOFF_SETUP(%o2)
10241 1: lduw [%o1], %g1
10242- add %g1, %o0, %g7
10243+ addcc %g1, %o0, %g7
10244+
10245+#ifdef CONFIG_PAX_REFCOUNT
10246+ tvs %icc, 6
10247+#endif
10248+
10249 cas [%o1], %g1, %g7
10250 cmp %g1, %g7
10251 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10252@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10253 2: BACKOFF_SPIN(%o2, %o3, 1b)
10254 ENDPROC(atomic_add_ret)
10255
10256+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10257+ BACKOFF_SETUP(%o2)
10258+1: lduw [%o1], %g1
10259+ addcc %g1, %o0, %g7
10260+ cas [%o1], %g1, %g7
10261+ cmp %g1, %g7
10262+ bne,pn %icc, 2f
10263+ add %g7, %o0, %g7
10264+ sra %g7, 0, %o0
10265+ retl
10266+ nop
10267+2: BACKOFF_SPIN(%o2, %o3, 1b)
10268+ENDPROC(atomic_add_ret_unchecked)
10269+
10270 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
10271 BACKOFF_SETUP(%o2)
10272 1: lduw [%o1], %g1
10273- sub %g1, %o0, %g7
10274+ subcc %g1, %o0, %g7
10275+
10276+#ifdef CONFIG_PAX_REFCOUNT
10277+ tvs %icc, 6
10278+#endif
10279+
10280 cas [%o1], %g1, %g7
10281 cmp %g1, %g7
10282 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10283@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
10284 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
10285 BACKOFF_SETUP(%o2)
10286 1: ldx [%o1], %g1
10287- add %g1, %o0, %g7
10288+ addcc %g1, %o0, %g7
10289+
10290+#ifdef CONFIG_PAX_REFCOUNT
10291+ tvs %xcc, 6
10292+#endif
10293+
10294 casx [%o1], %g1, %g7
10295 cmp %g1, %g7
10296 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10297@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
10298 2: BACKOFF_SPIN(%o2, %o3, 1b)
10299 ENDPROC(atomic64_add)
10300
10301+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10302+ BACKOFF_SETUP(%o2)
10303+1: ldx [%o1], %g1
10304+ addcc %g1, %o0, %g7
10305+ casx [%o1], %g1, %g7
10306+ cmp %g1, %g7
10307+ bne,pn %xcc, 2f
10308+ nop
10309+ retl
10310+ nop
10311+2: BACKOFF_SPIN(%o2, %o3, 1b)
10312+ENDPROC(atomic64_add_unchecked)
10313+
10314 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10315 BACKOFF_SETUP(%o2)
10316 1: ldx [%o1], %g1
10317- sub %g1, %o0, %g7
10318+ subcc %g1, %o0, %g7
10319+
10320+#ifdef CONFIG_PAX_REFCOUNT
10321+ tvs %xcc, 6
10322+#endif
10323+
10324 casx [%o1], %g1, %g7
10325 cmp %g1, %g7
10326 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10327@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10328 2: BACKOFF_SPIN(%o2, %o3, 1b)
10329 ENDPROC(atomic64_sub)
10330
10331+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
10332+ BACKOFF_SETUP(%o2)
10333+1: ldx [%o1], %g1
10334+ subcc %g1, %o0, %g7
10335+ casx [%o1], %g1, %g7
10336+ cmp %g1, %g7
10337+ bne,pn %xcc, 2f
10338+ nop
10339+ retl
10340+ nop
10341+2: BACKOFF_SPIN(%o2, %o3, 1b)
10342+ENDPROC(atomic64_sub_unchecked)
10343+
10344 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10345 BACKOFF_SETUP(%o2)
10346 1: ldx [%o1], %g1
10347- add %g1, %o0, %g7
10348+ addcc %g1, %o0, %g7
10349+
10350+#ifdef CONFIG_PAX_REFCOUNT
10351+ tvs %xcc, 6
10352+#endif
10353+
10354 casx [%o1], %g1, %g7
10355 cmp %g1, %g7
10356 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10357@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10358 2: BACKOFF_SPIN(%o2, %o3, 1b)
10359 ENDPROC(atomic64_add_ret)
10360
10361+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10362+ BACKOFF_SETUP(%o2)
10363+1: ldx [%o1], %g1
10364+ addcc %g1, %o0, %g7
10365+ casx [%o1], %g1, %g7
10366+ cmp %g1, %g7
10367+ bne,pn %xcc, 2f
10368+ add %g7, %o0, %g7
10369+ mov %g7, %o0
10370+ retl
10371+ nop
10372+2: BACKOFF_SPIN(%o2, %o3, 1b)
10373+ENDPROC(atomic64_add_ret_unchecked)
10374+
10375 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
10376 BACKOFF_SETUP(%o2)
10377 1: ldx [%o1], %g1
10378- sub %g1, %o0, %g7
10379+ subcc %g1, %o0, %g7
10380+
10381+#ifdef CONFIG_PAX_REFCOUNT
10382+ tvs %xcc, 6
10383+#endif
10384+
10385 casx [%o1], %g1, %g7
10386 cmp %g1, %g7
10387 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
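Each ordinary atomic above gains its check by switching add/sub to the condition-code-setting addcc/subcc and, under CONFIG_PAX_REFCOUNT, trapping with tvs when the overflow flag is set; the new _unchecked entry points keep the original wrapping arithmetic for counters where wraparound is deliberate, and take an atomic_unchecked_t so the compiler keeps the two families from mixing. Call sites converted elsewhere in this patch follow the pattern below; note that atomic_inc_unchecked() and atomic_read_unchecked() come from the patch's atomic header changes, not from this file:

#include <linux/atomic.h>

/* Wraparound is harmless for a pure statistics counter, so it uses
 * the unchecked type instead of trapping on overflow. */
static atomic_unchecked_t rx_dropped = ATOMIC_INIT(0);

static void note_drop(void)
{
	atomic_inc_unchecked(&rx_dropped);
}

static int drops_so_far(void)
{
	return atomic_read_unchecked(&rx_dropped);
}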
10388diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
10389index 323335b..ed85ea2 100644
10390--- a/arch/sparc/lib/ksyms.c
10391+++ b/arch/sparc/lib/ksyms.c
10392@@ -100,12 +100,18 @@ EXPORT_SYMBOL(__clear_user);
10393
10394 /* Atomic counter implementation. */
10395 EXPORT_SYMBOL(atomic_add);
10396+EXPORT_SYMBOL(atomic_add_unchecked);
10397 EXPORT_SYMBOL(atomic_add_ret);
10398+EXPORT_SYMBOL(atomic_add_ret_unchecked);
10399 EXPORT_SYMBOL(atomic_sub);
10400+EXPORT_SYMBOL(atomic_sub_unchecked);
10401 EXPORT_SYMBOL(atomic_sub_ret);
10402 EXPORT_SYMBOL(atomic64_add);
10403+EXPORT_SYMBOL(atomic64_add_unchecked);
10404 EXPORT_SYMBOL(atomic64_add_ret);
10405+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
10406 EXPORT_SYMBOL(atomic64_sub);
10407+EXPORT_SYMBOL(atomic64_sub_unchecked);
10408 EXPORT_SYMBOL(atomic64_sub_ret);
10409 EXPORT_SYMBOL(atomic64_dec_if_positive);
10410
10411diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
10412index 30c3ecc..736f015 100644
10413--- a/arch/sparc/mm/Makefile
10414+++ b/arch/sparc/mm/Makefile
10415@@ -2,7 +2,7 @@
10416 #
10417
10418 asflags-y := -ansi
10419-ccflags-y := -Werror
10420+#ccflags-y := -Werror
10421
10422 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
10423 obj-y += fault_$(BITS).o
10424diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
10425index 59dbd46..1dd7f5e 100644
10426--- a/arch/sparc/mm/fault_32.c
10427+++ b/arch/sparc/mm/fault_32.c
10428@@ -21,6 +21,9 @@
10429 #include <linux/perf_event.h>
10430 #include <linux/interrupt.h>
10431 #include <linux/kdebug.h>
10432+#include <linux/slab.h>
10433+#include <linux/pagemap.h>
10434+#include <linux/compiler.h>
10435
10436 #include <asm/page.h>
10437 #include <asm/pgtable.h>
10438@@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
10439 return safe_compute_effective_address(regs, insn);
10440 }
10441
10442+#ifdef CONFIG_PAX_PAGEEXEC
10443+#ifdef CONFIG_PAX_DLRESOLVE
10444+static void pax_emuplt_close(struct vm_area_struct *vma)
10445+{
10446+ vma->vm_mm->call_dl_resolve = 0UL;
10447+}
10448+
10449+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
10450+{
10451+ unsigned int *kaddr;
10452+
10453+ vmf->page = alloc_page(GFP_HIGHUSER);
10454+ if (!vmf->page)
10455+ return VM_FAULT_OOM;
10456+
10457+ kaddr = kmap(vmf->page);
10458+ memset(kaddr, 0, PAGE_SIZE);
10459+ kaddr[0] = 0x9DE3BFA8U; /* save */
10460+ flush_dcache_page(vmf->page);
10461+ kunmap(vmf->page);
10462+ return VM_FAULT_MAJOR;
10463+}
10464+
10465+static const struct vm_operations_struct pax_vm_ops = {
10466+ .close = pax_emuplt_close,
10467+ .fault = pax_emuplt_fault
10468+};
10469+
10470+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
10471+{
10472+ int ret;
10473+
10474+ INIT_LIST_HEAD(&vma->anon_vma_chain);
10475+ vma->vm_mm = current->mm;
10476+ vma->vm_start = addr;
10477+ vma->vm_end = addr + PAGE_SIZE;
10478+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
10479+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
10480+ vma->vm_ops = &pax_vm_ops;
10481+
10482+ ret = insert_vm_struct(current->mm, vma);
10483+ if (ret)
10484+ return ret;
10485+
10486+ ++current->mm->total_vm;
10487+ return 0;
10488+}
10489+#endif
10490+
10491+/*
10492+ * PaX: decide what to do with offenders (regs->pc = fault address)
10493+ *
10494+ * returns 1 when task should be killed
10495+ * 2 when patched PLT trampoline was detected
10496+ * 3 when unpatched PLT trampoline was detected
10497+ */
10498+static int pax_handle_fetch_fault(struct pt_regs *regs)
10499+{
10500+
10501+#ifdef CONFIG_PAX_EMUPLT
10502+ int err;
10503+
10504+ do { /* PaX: patched PLT emulation #1 */
10505+ unsigned int sethi1, sethi2, jmpl;
10506+
10507+ err = get_user(sethi1, (unsigned int *)regs->pc);
10508+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
10509+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
10510+
10511+ if (err)
10512+ break;
10513+
10514+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
10515+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
10516+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
10517+ {
10518+ unsigned int addr;
10519+
10520+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
10521+ addr = regs->u_regs[UREG_G1];
10522+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
10523+ regs->pc = addr;
10524+ regs->npc = addr+4;
10525+ return 2;
10526+ }
10527+ } while (0);
10528+
10529+ do { /* PaX: patched PLT emulation #2 */
10530+ unsigned int ba;
10531+
10532+ err = get_user(ba, (unsigned int *)regs->pc);
10533+
10534+ if (err)
10535+ break;
10536+
10537+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
10538+ unsigned int addr;
10539+
10540+ if ((ba & 0xFFC00000U) == 0x30800000U)
10541+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
10542+ else
10543+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
10544+ regs->pc = addr;
10545+ regs->npc = addr+4;
10546+ return 2;
10547+ }
10548+ } while (0);
10549+
10550+ do { /* PaX: patched PLT emulation #3 */
10551+ unsigned int sethi, bajmpl, nop;
10552+
10553+ err = get_user(sethi, (unsigned int *)regs->pc);
10554+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
10555+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
10556+
10557+ if (err)
10558+ break;
10559+
10560+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10561+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
10562+ nop == 0x01000000U)
10563+ {
10564+ unsigned int addr;
10565+
10566+ addr = (sethi & 0x003FFFFFU) << 10;
10567+ regs->u_regs[UREG_G1] = addr;
10568+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
10569+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
10570+ else
10571+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
10572+ regs->pc = addr;
10573+ regs->npc = addr+4;
10574+ return 2;
10575+ }
10576+ } while (0);
10577+
10578+ do { /* PaX: unpatched PLT emulation step 1 */
10579+ unsigned int sethi, ba, nop;
10580+
10581+ err = get_user(sethi, (unsigned int *)regs->pc);
10582+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
10583+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
10584+
10585+ if (err)
10586+ break;
10587+
10588+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10589+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
10590+ nop == 0x01000000U)
10591+ {
10592+ unsigned int addr, save, call;
10593+
10594+ if ((ba & 0xFFC00000U) == 0x30800000U)
10595+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
10596+ else
10597+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
10598+
10599+ err = get_user(save, (unsigned int *)addr);
10600+ err |= get_user(call, (unsigned int *)(addr+4));
10601+ err |= get_user(nop, (unsigned int *)(addr+8));
10602+ if (err)
10603+ break;
10604+
10605+#ifdef CONFIG_PAX_DLRESOLVE
10606+ if (save == 0x9DE3BFA8U &&
10607+ (call & 0xC0000000U) == 0x40000000U &&
10608+ nop == 0x01000000U)
10609+ {
10610+ struct vm_area_struct *vma;
10611+ unsigned long call_dl_resolve;
10612+
10613+ down_read(&current->mm->mmap_sem);
10614+ call_dl_resolve = current->mm->call_dl_resolve;
10615+ up_read(&current->mm->mmap_sem);
10616+ if (likely(call_dl_resolve))
10617+ goto emulate;
10618+
10619+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
10620+
10621+ down_write(&current->mm->mmap_sem);
10622+ if (current->mm->call_dl_resolve) {
10623+ call_dl_resolve = current->mm->call_dl_resolve;
10624+ up_write(&current->mm->mmap_sem);
10625+ if (vma)
10626+ kmem_cache_free(vm_area_cachep, vma);
10627+ goto emulate;
10628+ }
10629+
10630+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
10631+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
10632+ up_write(&current->mm->mmap_sem);
10633+ if (vma)
10634+ kmem_cache_free(vm_area_cachep, vma);
10635+ return 1;
10636+ }
10637+
10638+ if (pax_insert_vma(vma, call_dl_resolve)) {
10639+ up_write(&current->mm->mmap_sem);
10640+ kmem_cache_free(vm_area_cachep, vma);
10641+ return 1;
10642+ }
10643+
10644+ current->mm->call_dl_resolve = call_dl_resolve;
10645+ up_write(&current->mm->mmap_sem);
10646+
10647+emulate:
10648+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10649+ regs->pc = call_dl_resolve;
10650+ regs->npc = addr+4;
10651+ return 3;
10652+ }
10653+#endif
10654+
10655+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
10656+ if ((save & 0xFFC00000U) == 0x05000000U &&
10657+ (call & 0xFFFFE000U) == 0x85C0A000U &&
10658+ nop == 0x01000000U)
10659+ {
10660+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10661+ regs->u_regs[UREG_G2] = addr + 4;
10662+ addr = (save & 0x003FFFFFU) << 10;
10663+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
10664+ regs->pc = addr;
10665+ regs->npc = addr+4;
10666+ return 3;
10667+ }
10668+ }
10669+ } while (0);
10670+
10671+ do { /* PaX: unpatched PLT emulation step 2 */
10672+ unsigned int save, call, nop;
10673+
10674+ err = get_user(save, (unsigned int *)(regs->pc-4));
10675+ err |= get_user(call, (unsigned int *)regs->pc);
10676+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
10677+ if (err)
10678+ break;
10679+
10680+ if (save == 0x9DE3BFA8U &&
10681+ (call & 0xC0000000U) == 0x40000000U &&
10682+ nop == 0x01000000U)
10683+ {
10684+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
10685+
10686+ regs->u_regs[UREG_RETPC] = regs->pc;
10687+ regs->pc = dl_resolve;
10688+ regs->npc = dl_resolve+4;
10689+ return 3;
10690+ }
10691+ } while (0);
10692+#endif
10693+
10694+ return 1;
10695+}
10696+
10697+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
10698+{
10699+ unsigned long i;
10700+
10701+ printk(KERN_ERR "PAX: bytes at PC: ");
10702+ for (i = 0; i < 8; i++) {
10703+ unsigned int c;
10704+ if (get_user(c, (unsigned int *)pc+i))
10705+ printk(KERN_CONT "???????? ");
10706+ else
10707+ printk(KERN_CONT "%08x ", c);
10708+ }
10709+ printk("\n");
10710+}
10711+#endif
10712+
10713 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
10714 int text_fault)
10715 {
10716@@ -229,6 +503,24 @@ good_area:
10717 if (!(vma->vm_flags & VM_WRITE))
10718 goto bad_area;
10719 } else {
10720+
10721+#ifdef CONFIG_PAX_PAGEEXEC
10722+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
10723+ up_read(&mm->mmap_sem);
10724+ switch (pax_handle_fetch_fault(regs)) {
10725+
10726+#ifdef CONFIG_PAX_EMUPLT
10727+ case 2:
10728+ case 3:
10729+ return;
10730+#endif
10731+
10732+ }
10733+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
10734+ do_group_exit(SIGKILL);
10735+ }
10736+#endif
10737+
10738 /* Allow reads even for write-only mappings */
10739 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
10740 goto bad_area;
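The PLT matchers above compare raw instruction words against masked opcode patterns: (insn & 0xFFC00000U) == 0x03000000U recognises sethi %hi(imm22), %g1, and (insn & 0xFFFFE000U) == 0x81C06000U a jmpl %g1 + simm13, %g0. The recurring expression ((insn | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U is a branch-free sign extension of the 13-bit immediate field: set the upper bits, flip the sign bit, add it back, and the upper ones either survive (negative simm13) or carry away to zero (non-negative). Written out:

/* Sign-extend the low 13 bits (simm13) of a SPARC instruction word,
 * exactly as the OR/XOR/ADD idiom in the matchers above does. */
static inline unsigned int simm13(unsigned int insn)
{
	return ((insn | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U;
}

/* The conventional shift form yields the same value (arithmetic
 * right shift assumed, as on all kernel targets): */
static inline int simm13_alt(unsigned int insn)
{
	return (int)(insn << 19) >> 19;
}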
10741diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
10742index 2ebec26..b212598 100644
10743--- a/arch/sparc/mm/fault_64.c
10744+++ b/arch/sparc/mm/fault_64.c
10745@@ -21,6 +21,9 @@
10746 #include <linux/kprobes.h>
10747 #include <linux/kdebug.h>
10748 #include <linux/percpu.h>
10749+#include <linux/slab.h>
10750+#include <linux/pagemap.h>
10751+#include <linux/compiler.h>
10752
10753 #include <asm/page.h>
10754 #include <asm/pgtable.h>
10755@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
10756 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
10757 regs->tpc);
10758 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
10759- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
10760+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
10761 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
10762 dump_stack();
10763 unhandled_fault(regs->tpc, current, regs);
10764@@ -270,6 +273,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
10765 show_regs(regs);
10766 }
10767
10768+#ifdef CONFIG_PAX_PAGEEXEC
10769+#ifdef CONFIG_PAX_DLRESOLVE
10770+static void pax_emuplt_close(struct vm_area_struct *vma)
10771+{
10772+ vma->vm_mm->call_dl_resolve = 0UL;
10773+}
10774+
10775+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
10776+{
10777+ unsigned int *kaddr;
10778+
10779+ vmf->page = alloc_page(GFP_HIGHUSER);
10780+ if (!vmf->page)
10781+ return VM_FAULT_OOM;
10782+
10783+ kaddr = kmap(vmf->page);
10784+ memset(kaddr, 0, PAGE_SIZE);
10785+ kaddr[0] = 0x9DE3BFA8U; /* save */
10786+ flush_dcache_page(vmf->page);
10787+ kunmap(vmf->page);
10788+ return VM_FAULT_MAJOR;
10789+}
10790+
10791+static const struct vm_operations_struct pax_vm_ops = {
10792+ .close = pax_emuplt_close,
10793+ .fault = pax_emuplt_fault
10794+};
10795+
10796+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
10797+{
10798+ int ret;
10799+
10800+ INIT_LIST_HEAD(&vma->anon_vma_chain);
10801+ vma->vm_mm = current->mm;
10802+ vma->vm_start = addr;
10803+ vma->vm_end = addr + PAGE_SIZE;
10804+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
10805+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
10806+ vma->vm_ops = &pax_vm_ops;
10807+
10808+ ret = insert_vm_struct(current->mm, vma);
10809+ if (ret)
10810+ return ret;
10811+
10812+ ++current->mm->total_vm;
10813+ return 0;
10814+}
10815+#endif
10816+
10817+/*
10818+ * PaX: decide what to do with offenders (regs->tpc = fault address)
10819+ *
10820+ * returns 1 when task should be killed
10821+ * 2 when patched PLT trampoline was detected
10822+ * 3 when unpatched PLT trampoline was detected
10823+ */
10824+static int pax_handle_fetch_fault(struct pt_regs *regs)
10825+{
10826+
10827+#ifdef CONFIG_PAX_EMUPLT
10828+ int err;
10829+
10830+ do { /* PaX: patched PLT emulation #1 */
10831+ unsigned int sethi1, sethi2, jmpl;
10832+
10833+ err = get_user(sethi1, (unsigned int *)regs->tpc);
10834+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
10835+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
10836+
10837+ if (err)
10838+ break;
10839+
10840+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
10841+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
10842+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
10843+ {
10844+ unsigned long addr;
10845+
10846+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
10847+ addr = regs->u_regs[UREG_G1];
10848+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
10849+
10850+ if (test_thread_flag(TIF_32BIT))
10851+ addr &= 0xFFFFFFFFUL;
10852+
10853+ regs->tpc = addr;
10854+ regs->tnpc = addr+4;
10855+ return 2;
10856+ }
10857+ } while (0);
10858+
10859+ do { /* PaX: patched PLT emulation #2 */
10860+ unsigned int ba;
10861+
10862+ err = get_user(ba, (unsigned int *)regs->tpc);
10863+
10864+ if (err)
10865+ break;
10866+
10867+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
10868+ unsigned long addr;
10869+
10870+ if ((ba & 0xFFC00000U) == 0x30800000U)
10871+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
10872+ else
10873+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10874+
10875+ if (test_thread_flag(TIF_32BIT))
10876+ addr &= 0xFFFFFFFFUL;
10877+
10878+ regs->tpc = addr;
10879+ regs->tnpc = addr+4;
10880+ return 2;
10881+ }
10882+ } while (0);
10883+
10884+ do { /* PaX: patched PLT emulation #3 */
10885+ unsigned int sethi, bajmpl, nop;
10886+
10887+ err = get_user(sethi, (unsigned int *)regs->tpc);
10888+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
10889+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
10890+
10891+ if (err)
10892+ break;
10893+
10894+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10895+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
10896+ nop == 0x01000000U)
10897+ {
10898+ unsigned long addr;
10899+
10900+ addr = (sethi & 0x003FFFFFU) << 10;
10901+ regs->u_regs[UREG_G1] = addr;
10902+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
10903+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
10904+ else
10905+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10906+
10907+ if (test_thread_flag(TIF_32BIT))
10908+ addr &= 0xFFFFFFFFUL;
10909+
10910+ regs->tpc = addr;
10911+ regs->tnpc = addr+4;
10912+ return 2;
10913+ }
10914+ } while (0);
10915+
10916+ do { /* PaX: patched PLT emulation #4 */
10917+ unsigned int sethi, mov1, call, mov2;
10918+
10919+ err = get_user(sethi, (unsigned int *)regs->tpc);
10920+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
10921+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
10922+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
10923+
10924+ if (err)
10925+ break;
10926+
10927+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10928+ mov1 == 0x8210000FU &&
10929+ (call & 0xC0000000U) == 0x40000000U &&
10930+ mov2 == 0x9E100001U)
10931+ {
10932+ unsigned long addr;
10933+
10934+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
10935+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
10936+
10937+ if (test_thread_flag(TIF_32BIT))
10938+ addr &= 0xFFFFFFFFUL;
10939+
10940+ regs->tpc = addr;
10941+ regs->tnpc = addr+4;
10942+ return 2;
10943+ }
10944+ } while (0);
10945+
10946+ do { /* PaX: patched PLT emulation #5 */
10947+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
10948+
10949+ err = get_user(sethi, (unsigned int *)regs->tpc);
10950+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
10951+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
10952+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
10953+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
10954+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
10955+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
10956+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
10957+
10958+ if (err)
10959+ break;
10960+
10961+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10962+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
10963+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
10964+ (or1 & 0xFFFFE000U) == 0x82106000U &&
10965+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
10966+ sllx == 0x83287020U &&
10967+ jmpl == 0x81C04005U &&
10968+ nop == 0x01000000U)
10969+ {
10970+ unsigned long addr;
10971+
10972+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
10973+ regs->u_regs[UREG_G1] <<= 32;
10974+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
10975+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
10976+ regs->tpc = addr;
10977+ regs->tnpc = addr+4;
10978+ return 2;
10979+ }
10980+ } while (0);
10981+
10982+ do { /* PaX: patched PLT emulation #6 */
10983+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
10984+
10985+ err = get_user(sethi, (unsigned int *)regs->tpc);
10986+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
10987+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
10988+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
10989+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
10990+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
10991+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
10992+
10993+ if (err)
10994+ break;
10995+
10996+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10997+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
10998+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
10999+ sllx == 0x83287020U &&
11000+ (or & 0xFFFFE000U) == 0x8A116000U &&
11001+ jmpl == 0x81C04005U &&
11002+ nop == 0x01000000U)
11003+ {
11004+ unsigned long addr;
11005+
11006+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
11007+ regs->u_regs[UREG_G1] <<= 32;
11008+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
11009+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11010+ regs->tpc = addr;
11011+ regs->tnpc = addr+4;
11012+ return 2;
11013+ }
11014+ } while (0);
11015+
11016+ do { /* PaX: unpatched PLT emulation step 1 */
11017+ unsigned int sethi, ba, nop;
11018+
11019+ err = get_user(sethi, (unsigned int *)regs->tpc);
11020+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11021+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11022+
11023+ if (err)
11024+ break;
11025+
11026+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11027+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11028+ nop == 0x01000000U)
11029+ {
11030+ unsigned long addr;
11031+ unsigned int save, call;
11032+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
11033+
11034+ if ((ba & 0xFFC00000U) == 0x30800000U)
11035+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11036+ else
11037+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11038+
11039+ if (test_thread_flag(TIF_32BIT))
11040+ addr &= 0xFFFFFFFFUL;
11041+
11042+ err = get_user(save, (unsigned int *)addr);
11043+ err |= get_user(call, (unsigned int *)(addr+4));
11044+ err |= get_user(nop, (unsigned int *)(addr+8));
11045+ if (err)
11046+ break;
11047+
11048+#ifdef CONFIG_PAX_DLRESOLVE
11049+ if (save == 0x9DE3BFA8U &&
11050+ (call & 0xC0000000U) == 0x40000000U &&
11051+ nop == 0x01000000U)
11052+ {
11053+ struct vm_area_struct *vma;
11054+ unsigned long call_dl_resolve;
11055+
11056+ down_read(&current->mm->mmap_sem);
11057+ call_dl_resolve = current->mm->call_dl_resolve;
11058+ up_read(&current->mm->mmap_sem);
11059+ if (likely(call_dl_resolve))
11060+ goto emulate;
11061+
11062+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11063+
11064+ down_write(&current->mm->mmap_sem);
11065+ if (current->mm->call_dl_resolve) {
11066+ call_dl_resolve = current->mm->call_dl_resolve;
11067+ up_write(&current->mm->mmap_sem);
11068+ if (vma)
11069+ kmem_cache_free(vm_area_cachep, vma);
11070+ goto emulate;
11071+ }
11072+
11073+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11074+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11075+ up_write(&current->mm->mmap_sem);
11076+ if (vma)
11077+ kmem_cache_free(vm_area_cachep, vma);
11078+ return 1;
11079+ }
11080+
11081+ if (pax_insert_vma(vma, call_dl_resolve)) {
11082+ up_write(&current->mm->mmap_sem);
11083+ kmem_cache_free(vm_area_cachep, vma);
11084+ return 1;
11085+ }
11086+
11087+ current->mm->call_dl_resolve = call_dl_resolve;
11088+ up_write(&current->mm->mmap_sem);
11089+
11090+emulate:
11091+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11092+ regs->tpc = call_dl_resolve;
11093+ regs->tnpc = addr+4;
11094+ return 3;
11095+ }
11096+#endif
11097+
11098+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11099+ if ((save & 0xFFC00000U) == 0x05000000U &&
11100+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11101+ nop == 0x01000000U)
11102+ {
11103+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11104+ regs->u_regs[UREG_G2] = addr + 4;
11105+ addr = (save & 0x003FFFFFU) << 10;
11106+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11107+
11108+ if (test_thread_flag(TIF_32BIT))
11109+ addr &= 0xFFFFFFFFUL;
11110+
11111+ regs->tpc = addr;
11112+ regs->tnpc = addr+4;
11113+ return 3;
11114+ }
11115+
11116+ /* PaX: 64-bit PLT stub */
11117+ err = get_user(sethi1, (unsigned int *)addr);
11118+ err |= get_user(sethi2, (unsigned int *)(addr+4));
11119+ err |= get_user(or1, (unsigned int *)(addr+8));
11120+ err |= get_user(or2, (unsigned int *)(addr+12));
11121+ err |= get_user(sllx, (unsigned int *)(addr+16));
11122+ err |= get_user(add, (unsigned int *)(addr+20));
11123+ err |= get_user(jmpl, (unsigned int *)(addr+24));
11124+ err |= get_user(nop, (unsigned int *)(addr+28));
11125+ if (err)
11126+ break;
11127+
11128+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
11129+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11130+ (or1 & 0xFFFFE000U) == 0x88112000U &&
11131+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
11132+ sllx == 0x89293020U &&
11133+ add == 0x8A010005U &&
11134+ jmpl == 0x89C14000U &&
11135+ nop == 0x01000000U)
11136+ {
11137+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11138+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
11139+ regs->u_regs[UREG_G4] <<= 32;
11140+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
11141+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
11142+ regs->u_regs[UREG_G4] = addr + 24;
11143+ addr = regs->u_regs[UREG_G5];
11144+ regs->tpc = addr;
11145+ regs->tnpc = addr+4;
11146+ return 3;
11147+ }
11148+ }
11149+ } while (0);
11150+
11151+#ifdef CONFIG_PAX_DLRESOLVE
11152+ do { /* PaX: unpatched PLT emulation step 2 */
11153+ unsigned int save, call, nop;
11154+
11155+ err = get_user(save, (unsigned int *)(regs->tpc-4));
11156+ err |= get_user(call, (unsigned int *)regs->tpc);
11157+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
11158+ if (err)
11159+ break;
11160+
11161+ if (save == 0x9DE3BFA8U &&
11162+ (call & 0xC0000000U) == 0x40000000U &&
11163+ nop == 0x01000000U)
11164+ {
11165+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
11166+
11167+ if (test_thread_flag(TIF_32BIT))
11168+ dl_resolve &= 0xFFFFFFFFUL;
11169+
11170+ regs->u_regs[UREG_RETPC] = regs->tpc;
11171+ regs->tpc = dl_resolve;
11172+ regs->tnpc = dl_resolve+4;
11173+ return 3;
11174+ }
11175+ } while (0);
11176+#endif
11177+
11178+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
11179+ unsigned int sethi, ba, nop;
11180+
11181+ err = get_user(sethi, (unsigned int *)regs->tpc);
11182+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11183+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11184+
11185+ if (err)
11186+ break;
11187+
11188+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11189+ (ba & 0xFFF00000U) == 0x30600000U &&
11190+ nop == 0x01000000U)
11191+ {
11192+ unsigned long addr;
11193+
11194+ addr = (sethi & 0x003FFFFFU) << 10;
11195+ regs->u_regs[UREG_G1] = addr;
11196+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11197+
11198+ if (test_thread_flag(TIF_32BIT))
11199+ addr &= 0xFFFFFFFFUL;
11200+
11201+ regs->tpc = addr;
11202+ regs->tnpc = addr+4;
11203+ return 2;
11204+ }
11205+ } while (0);
11206+
11207+#endif
11208+
11209+ return 1;
11210+}
11211+
11212+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11213+{
11214+ unsigned long i;
11215+
11216+ printk(KERN_ERR "PAX: bytes at PC: ");
11217+ for (i = 0; i < 8; i++) {
11218+ unsigned int c;
11219+ if (get_user(c, (unsigned int *)pc+i))
11220+ printk(KERN_CONT "???????? ");
11221+ else
11222+ printk(KERN_CONT "%08x ", c);
11223+ }
11224+ printk("\n");
11225+}
11226+#endif
11227+
11228 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
11229 {
11230 struct mm_struct *mm = current->mm;
11231@@ -342,6 +805,29 @@ retry:
11232 if (!vma)
11233 goto bad_area;
11234
11235+#ifdef CONFIG_PAX_PAGEEXEC
11236+ /* PaX: detect ITLB misses on non-exec pages */
11237+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
11238+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
11239+ {
11240+ if (address != regs->tpc)
11241+ goto good_area;
11242+
11243+ up_read(&mm->mmap_sem);
11244+ switch (pax_handle_fetch_fault(regs)) {
11245+
11246+#ifdef CONFIG_PAX_EMUPLT
11247+ case 2:
11248+ case 3:
11249+ return;
11250+#endif
11251+
11252+ }
11253+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
11254+ do_group_exit(SIGKILL);
11255+ }
11256+#endif
11257+
11258 /* Pure DTLB misses do not tell us whether the fault causing
11259 * load/store/atomic was a write or not, it only says that there
11260 * was no match. So in such a case we (carefully) read the
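The 64-bit matchers above differ from their 32-bit twins in two respects: the displacement arithmetic is done with 64-bit constants (0xFFFFFFFFFFC00000UL and friends), and every computed target is masked with addr &= 0xFFFFFFFFUL when the faulting task has TIF_32BIT set, so a compat process can never be redirected above 4 GB. Branch targets reuse the simm13 sign-extension idiom on the 22-bit (ba) or 19-bit (branch-with-prediction) displacement field, then shift left by two because SPARC branch displacements count instructions, not bytes:

#include <linux/thread_info.h>

/* Target of a SPARC "ba" with a 22-bit word displacement, matching
 * the idiom used by the matchers above. */
static unsigned long ba_target(unsigned long tpc, unsigned int insn)
{
	unsigned long addr;

	addr = tpc + ((((insn | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) +
		       0x00200000UL) << 2);
	if (test_thread_flag(TIF_32BIT))	/* compat task */
		addr &= 0xFFFFFFFFUL;
	return addr;
}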
11261diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
11262index 9639964..806cd0c 100644
11263--- a/arch/sparc/mm/hugetlbpage.c
11264+++ b/arch/sparc/mm/hugetlbpage.c
11265@@ -28,7 +28,8 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
11266 unsigned long addr,
11267 unsigned long len,
11268 unsigned long pgoff,
11269- unsigned long flags)
11270+ unsigned long flags,
11271+ unsigned long offset)
11272 {
11273 unsigned long task_size = TASK_SIZE;
11274 struct vm_unmapped_area_info info;
11275@@ -38,15 +39,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
11276
11277 info.flags = 0;
11278 info.length = len;
11279- info.low_limit = TASK_UNMAPPED_BASE;
11280+ info.low_limit = mm->mmap_base;
11281 info.high_limit = min(task_size, VA_EXCLUDE_START);
11282 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
11283 info.align_offset = 0;
11284+ info.threadstack_offset = offset;
11285 addr = vm_unmapped_area(&info);
11286
11287 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
11288 VM_BUG_ON(addr != -ENOMEM);
11289 info.low_limit = VA_EXCLUDE_END;
11290+
11291+#ifdef CONFIG_PAX_RANDMMAP
11292+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11293+ info.low_limit += mm->delta_mmap;
11294+#endif
11295+
11296 info.high_limit = task_size;
11297 addr = vm_unmapped_area(&info);
11298 }
11299@@ -58,7 +66,8 @@ static unsigned long
11300 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11301 const unsigned long len,
11302 const unsigned long pgoff,
11303- const unsigned long flags)
11304+ const unsigned long flags,
11305+ const unsigned long offset)
11306 {
11307 struct mm_struct *mm = current->mm;
11308 unsigned long addr = addr0;
11309@@ -73,6 +82,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11310 info.high_limit = mm->mmap_base;
11311 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
11312 info.align_offset = 0;
11313+ info.threadstack_offset = offset;
11314 addr = vm_unmapped_area(&info);
11315
11316 /*
11317@@ -85,6 +95,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11318 VM_BUG_ON(addr != -ENOMEM);
11319 info.flags = 0;
11320 info.low_limit = TASK_UNMAPPED_BASE;
11321+
11322+#ifdef CONFIG_PAX_RANDMMAP
11323+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11324+ info.low_limit += mm->delta_mmap;
11325+#endif
11326+
11327 info.high_limit = STACK_TOP32;
11328 addr = vm_unmapped_area(&info);
11329 }
11330@@ -99,6 +115,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
11331 struct mm_struct *mm = current->mm;
11332 struct vm_area_struct *vma;
11333 unsigned long task_size = TASK_SIZE;
11334+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
11335
11336 if (test_thread_flag(TIF_32BIT))
11337 task_size = STACK_TOP32;
11338@@ -114,19 +131,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
11339 return addr;
11340 }
11341
11342+#ifdef CONFIG_PAX_RANDMMAP
11343+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
11344+#endif
11345+
11346 if (addr) {
11347 addr = ALIGN(addr, HPAGE_SIZE);
11348 vma = find_vma(mm, addr);
11349- if (task_size - len >= addr &&
11350- (!vma || addr + len <= vma->vm_start))
11351+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
11352 return addr;
11353 }
11354 if (mm->get_unmapped_area == arch_get_unmapped_area)
11355 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
11356- pgoff, flags);
11357+ pgoff, flags, offset);
11358 else
11359 return hugetlb_get_unmapped_area_topdown(file, addr, len,
11360- pgoff, flags);
11361+ pgoff, flags, offset);
11362 }
11363
11364 pte_t *huge_pte_alloc(struct mm_struct *mm,
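Three related changes run through the hugepage allocator above and recur in every architecture's get_unmapped_area touched by this patch: the bottom-up search starts at mm->mmap_base rather than the fixed TASK_UNMAPPED_BASE (so a PAX_RANDMMAP-randomised base takes effect), the fallback window is shifted by mm->delta_mmap under PAX_RANDMMAP, and the plain "does the candidate fit below the next vma" test becomes check_heap_stack_gap(), which also honours the per-mapping offset from gr_rand_threadstack_offset(). Both helpers are defined in the mm changes later in the patch; stripped down, the gap test amounts to the sketch below:

#include <linux/mm_types.h>
#include <linux/types.h>

/* Greatly simplified sketch: accept the candidate range only if it
 * leaves "offset" spare bytes below the next vma. The real
 * check_heap_stack_gap() added by this patch also handles growing
 * stacks and the configurable heap/stack gap. */
static bool gap_ok(const struct vm_area_struct *vma, unsigned long addr,
		   unsigned long len, unsigned long offset)
{
	if (!vma)
		return true;
	return addr + len + offset <= vma->vm_start;
}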
11365diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
11366index ed82eda..0d80e77 100644
11367--- a/arch/sparc/mm/init_64.c
11368+++ b/arch/sparc/mm/init_64.c
11369@@ -188,9 +188,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
11370 int num_kernel_image_mappings;
11371
11372 #ifdef CONFIG_DEBUG_DCFLUSH
11373-atomic_t dcpage_flushes = ATOMIC_INIT(0);
11374+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
11375 #ifdef CONFIG_SMP
11376-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
11377+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
11378 #endif
11379 #endif
11380
11381@@ -198,7 +198,7 @@ inline void flush_dcache_page_impl(struct page *page)
11382 {
11383 BUG_ON(tlb_type == hypervisor);
11384 #ifdef CONFIG_DEBUG_DCFLUSH
11385- atomic_inc(&dcpage_flushes);
11386+ atomic_inc_unchecked(&dcpage_flushes);
11387 #endif
11388
11389 #ifdef DCACHE_ALIASING_POSSIBLE
11390@@ -466,10 +466,10 @@ void mmu_info(struct seq_file *m)
11391
11392 #ifdef CONFIG_DEBUG_DCFLUSH
11393 seq_printf(m, "DCPageFlushes\t: %d\n",
11394- atomic_read(&dcpage_flushes));
11395+ atomic_read_unchecked(&dcpage_flushes));
11396 #ifdef CONFIG_SMP
11397 seq_printf(m, "DCPageFlushesXC\t: %d\n",
11398- atomic_read(&dcpage_flushes_xcall));
11399+ atomic_read_unchecked(&dcpage_flushes_xcall));
11400 #endif /* CONFIG_SMP */
11401 #endif /* CONFIG_DEBUG_DCFLUSH */
11402 }
11403diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
11404index d45a2c4..3c05a78 100644
11405--- a/arch/tile/Kconfig
11406+++ b/arch/tile/Kconfig
11407@@ -185,6 +185,7 @@ source "kernel/Kconfig.hz"
11408
11409 config KEXEC
11410 bool "kexec system call"
11411+ depends on !GRKERNSEC_KMEM
11412 ---help---
11413 kexec is a system call that implements the ability to shutdown your
11414 current kernel, and to start another kernel. It is like a reboot
11415diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
11416index ad220ee..2f537b3 100644
11417--- a/arch/tile/include/asm/atomic_64.h
11418+++ b/arch/tile/include/asm/atomic_64.h
11419@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
11420
11421 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
11422
11423+#define atomic64_read_unchecked(v) atomic64_read(v)
11424+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
11425+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
11426+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
11427+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
11428+#define atomic64_inc_unchecked(v) atomic64_inc(v)
11429+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
11430+#define atomic64_dec_unchecked(v) atomic64_dec(v)
11431+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
11432+
11433 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
11434 #define smp_mb__before_atomic_dec() smp_mb()
11435 #define smp_mb__after_atomic_dec() smp_mb()
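tile gets no REFCOUNT instrumentation in this patch, so the whole _unchecked API is defined straight back onto the ordinary atomic64 operations: where the checked ops do not actually check, the two families behave identically, and the defines exist only so generic code written against the unchecked API still compiles. The same identity block recurs for every non-instrumented architecture the patch touches. Assuming the typedef of atomic64_unchecked_t to atomic64_t that the patch's generic atomic headers supply on such architectures, a call site reads:

/* On tile this is plain atomic64_add(); on the instrumented
 * architectures the same source would trap on signed overflow unless
 * the _unchecked form were used. */
static atomic64_unchecked_t bytes_out = ATOMIC64_INIT(0);

static void account(long n)
{
	atomic64_add_unchecked(n, &bytes_out);
}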
11436diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
11437index 6160761..00cac88 100644
11438--- a/arch/tile/include/asm/cache.h
11439+++ b/arch/tile/include/asm/cache.h
11440@@ -15,11 +15,12 @@
11441 #ifndef _ASM_TILE_CACHE_H
11442 #define _ASM_TILE_CACHE_H
11443
11444+#include <linux/const.h>
11445 #include <arch/chip.h>
11446
11447 /* bytes per L1 data cache line */
11448 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
11449-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11450+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
11451
11452 /* bytes per L2 cache line */
11453 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
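The cache.h change (repeated for um and unicore32 further down) wraps the constant in _AC(1,UL) from linux/const.h: compiled as C it expands to 1UL, giving L1_CACHE_BYTES the type unsigned long in every expression, while in assembly, where a UL suffix would be a syntax error, it expands to a bare 1. Abridged from the upstream header:

/* include/uapi/linux/const.h, abridged */
#ifdef __ASSEMBLY__
#define _AC(X,Y)	X		/* "1": as(1) knows no UL suffix */
#else
#define __AC(X,Y)	(X##Y)
#define _AC(X,Y)	__AC(X,Y)	/* "1UL" in C */
#endif

#define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)	/* unsigned long */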
11454diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
11455index b6cde32..c0cb736 100644
11456--- a/arch/tile/include/asm/uaccess.h
11457+++ b/arch/tile/include/asm/uaccess.h
11458@@ -414,9 +414,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
11459 const void __user *from,
11460 unsigned long n)
11461 {
11462- int sz = __compiletime_object_size(to);
11463+ size_t sz = __compiletime_object_size(to);
11464
11465- if (likely(sz == -1 || sz >= n))
11466+ if (likely(sz == (size_t)-1 || sz >= n))
11467 n = _copy_from_user(to, from, n);
11468 else
11469 copy_from_user_overflow();
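__compiletime_object_size() reports an unknown object as (size_t)-1; the old code parked that in an int and relied on the usual arithmetic conversions to make sz >= n come out right. Widening sz to size_t keeps the sentinel and both comparisons in one unsigned type, with no conversion games and no truncation of sizes that do not fit in an int. The patched logic, with the reasoning spelled out in comments:

size_t sz = __compiletime_object_size(to);	/* (size_t)-1 if unknown */

/* both operands of both comparisons are size_t now */
if (likely(sz == (size_t)-1 || sz >= n))
	n = _copy_from_user(to, from, n);	/* fits, or size unknown */
else
	copy_from_user_overflow();	/* destination provably too small */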
11470diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
11471index 0cb3bba..7338b2d 100644
11472--- a/arch/tile/mm/hugetlbpage.c
11473+++ b/arch/tile/mm/hugetlbpage.c
11474@@ -212,6 +212,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
11475 info.high_limit = TASK_SIZE;
11476 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
11477 info.align_offset = 0;
11478+ info.threadstack_offset = 0;
11479 return vm_unmapped_area(&info);
11480 }
11481
11482@@ -229,6 +230,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
11483 info.high_limit = current->mm->mmap_base;
11484 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
11485 info.align_offset = 0;
11486+ info.threadstack_offset = 0;
11487 addr = vm_unmapped_area(&info);
11488
11489 /*
11490diff --git a/arch/um/Makefile b/arch/um/Makefile
11491index 133f7de..1d6f2f1 100644
11492--- a/arch/um/Makefile
11493+++ b/arch/um/Makefile
11494@@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
11495 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
11496 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
11497
11498+ifdef CONSTIFY_PLUGIN
11499+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
11500+endif
11501+
11502 #This will adjust *FLAGS accordingly to the platform.
11503 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
11504
11505diff --git a/arch/um/defconfig b/arch/um/defconfig
11506index 2665e6b..3e3822b 100644
11507--- a/arch/um/defconfig
11508+++ b/arch/um/defconfig
11509@@ -51,7 +51,6 @@ CONFIG_X86_CMPXCHG=y
11510 CONFIG_X86_L1_CACHE_SHIFT=5
11511 CONFIG_X86_XADD=y
11512 CONFIG_X86_PPRO_FENCE=y
11513-CONFIG_X86_WP_WORKS_OK=y
11514 CONFIG_X86_INVLPG=y
11515 CONFIG_X86_BSWAP=y
11516 CONFIG_X86_POPAD_OK=y
11517diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
11518index 19e1bdd..3665b77 100644
11519--- a/arch/um/include/asm/cache.h
11520+++ b/arch/um/include/asm/cache.h
11521@@ -1,6 +1,7 @@
11522 #ifndef __UM_CACHE_H
11523 #define __UM_CACHE_H
11524
11525+#include <linux/const.h>
11526
11527 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
11528 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
11529@@ -12,6 +13,6 @@
11530 # define L1_CACHE_SHIFT 5
11531 #endif
11532
11533-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11534+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
11535
11536 #endif
11537diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
11538index 2e0a6b1..a64d0f5 100644
11539--- a/arch/um/include/asm/kmap_types.h
11540+++ b/arch/um/include/asm/kmap_types.h
11541@@ -8,6 +8,6 @@
11542
11543 /* No more #include "asm/arch/kmap_types.h" ! */
11544
11545-#define KM_TYPE_NR 14
11546+#define KM_TYPE_NR 15
11547
11548 #endif
11549diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
11550index 5ff53d9..5850cdf 100644
11551--- a/arch/um/include/asm/page.h
11552+++ b/arch/um/include/asm/page.h
11553@@ -14,6 +14,9 @@
11554 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
11555 #define PAGE_MASK (~(PAGE_SIZE-1))
11556
11557+#define ktla_ktva(addr) (addr)
11558+#define ktva_ktla(addr) (addr)
11559+
11560 #ifndef __ASSEMBLY__
11561
11562 struct page;
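ktla_ktva()/ktva_ktla() translate between the two views this patch gives kernel text on i386 KERNEXEC kernels, where the linear alias and the executable mapping differ by a constant; everywhere else, UML included, they are the identity so common code can apply them unconditionally. A hypothetical call site, with the direction assumed from the x86 hunks later in the patch:

/* Hypothetical sketch: read an opcode byte through the translated
 * view of kernel text. On UML ktla_ktva() is the identity; on
 * i386/KERNEXEC it rebases the address into the other text mapping. */
static unsigned char text_byte(unsigned long addr)
{
	return *(unsigned char *)ktla_ktva(addr);
}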
11563diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
11564index 0032f92..cd151e0 100644
11565--- a/arch/um/include/asm/pgtable-3level.h
11566+++ b/arch/um/include/asm/pgtable-3level.h
11567@@ -58,6 +58,7 @@
11568 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
11569 #define pud_populate(mm, pud, pmd) \
11570 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
11571+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
11572
11573 #ifdef CONFIG_64BIT
11574 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
11575diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
11576index bbcef52..6a2a483 100644
11577--- a/arch/um/kernel/process.c
11578+++ b/arch/um/kernel/process.c
11579@@ -367,22 +367,6 @@ int singlestepping(void * t)
11580 return 2;
11581 }
11582
11583-/*
11584- * Only x86 and x86_64 have an arch_align_stack().
11585- * All other arches have "#define arch_align_stack(x) (x)"
11586- * in their asm/system.h
11587- * As this is included in UML from asm-um/system-generic.h,
11588- * we can use it to behave as the subarch does.
11589- */
11590-#ifndef arch_align_stack
11591-unsigned long arch_align_stack(unsigned long sp)
11592-{
11593- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
11594- sp -= get_random_int() % 8192;
11595- return sp & ~0xf;
11596-}
11597-#endif
11598-
11599 unsigned long get_wchan(struct task_struct *p)
11600 {
11601 unsigned long stack_page, sp, ip;
11602diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
11603index ad8f795..2c7eec6 100644
11604--- a/arch/unicore32/include/asm/cache.h
11605+++ b/arch/unicore32/include/asm/cache.h
11606@@ -12,8 +12,10 @@
11607 #ifndef __UNICORE_CACHE_H__
11608 #define __UNICORE_CACHE_H__
11609
11610-#define L1_CACHE_SHIFT (5)
11611-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11612+#include <linux/const.h>
11613+
11614+#define L1_CACHE_SHIFT 5
11615+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
11616
11617 /*
11618 * Memory returned by kmalloc() may be used for DMA, so we must make
11619diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
11620index f67e839..bfd4748 100644
11621--- a/arch/x86/Kconfig
11622+++ b/arch/x86/Kconfig
11623@@ -247,7 +247,7 @@ config X86_HT
11624
11625 config X86_32_LAZY_GS
11626 def_bool y
11627- depends on X86_32 && !CC_STACKPROTECTOR
11628+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
11629
11630 config ARCH_HWEIGHT_CFLAGS
11631 string
11632@@ -1099,6 +1099,7 @@ config MICROCODE_EARLY
11633
11634 config X86_MSR
11635 tristate "/dev/cpu/*/msr - Model-specific register support"
11636+ depends on !GRKERNSEC_KMEM
11637 ---help---
11638 This device gives privileged processes access to the x86
11639 Model-Specific Registers (MSRs). It is a character device with
11640@@ -1122,7 +1123,7 @@ choice
11641
11642 config NOHIGHMEM
11643 bool "off"
11644- depends on !X86_NUMAQ
11645+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11646 ---help---
11647 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11648 However, the address space of 32-bit x86 processors is only 4
11649@@ -1159,7 +1160,7 @@ config NOHIGHMEM
11650
11651 config HIGHMEM4G
11652 bool "4GB"
11653- depends on !X86_NUMAQ
11654+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11655 ---help---
11656 Select this if you have a 32-bit processor and between 1 and 4
11657 gigabytes of physical RAM.
11658@@ -1212,7 +1213,7 @@ config PAGE_OFFSET
11659 hex
11660 default 0xB0000000 if VMSPLIT_3G_OPT
11661 default 0x80000000 if VMSPLIT_2G
11662- default 0x78000000 if VMSPLIT_2G_OPT
11663+ default 0x70000000 if VMSPLIT_2G_OPT
11664 default 0x40000000 if VMSPLIT_1G
11665 default 0xC0000000
11666 depends on X86_32
11667@@ -1614,6 +1615,7 @@ config SECCOMP
11668
11669 config CC_STACKPROTECTOR
11670 bool "Enable -fstack-protector buffer overflow detection"
11671+ depends on X86_64 || !PAX_MEMORY_UDEREF
11672 ---help---
11673 This option turns on the -fstack-protector GCC feature. This
11674 feature puts, at the beginning of functions, a canary value on
11675@@ -1632,6 +1634,7 @@ source kernel/Kconfig.hz
11676
11677 config KEXEC
11678 bool "kexec system call"
11679+ depends on !GRKERNSEC_KMEM
11680 ---help---
11681 kexec is a system call that implements the ability to shutdown your
11682 current kernel, and to start another kernel. It is like a reboot
11683@@ -1733,6 +1736,8 @@ config X86_NEED_RELOCS
11684 config PHYSICAL_ALIGN
11685 hex "Alignment value to which kernel should be aligned"
11686 default "0x1000000"
11687+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
11688+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
11689 range 0x2000 0x1000000 if X86_32
11690 range 0x200000 0x1000000 if X86_64
11691 ---help---
11692@@ -1812,9 +1817,10 @@ config DEBUG_HOTPLUG_CPU0
11693 If unsure, say N.
11694
11695 config COMPAT_VDSO
11696- def_bool y
11697+ def_bool n
11698 prompt "Compat VDSO support"
11699 depends on X86_32 || IA32_EMULATION
11700+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
11701 ---help---
11702 Map the 32-bit VDSO to the predictable old-style address too.
11703
11704diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
11705index c026cca..14657ae 100644
11706--- a/arch/x86/Kconfig.cpu
11707+++ b/arch/x86/Kconfig.cpu
11708@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
11709
11710 config X86_F00F_BUG
11711 def_bool y
11712- depends on M586MMX || M586TSC || M586 || M486
11713+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
11714
11715 config X86_INVD_BUG
11716 def_bool y
11717@@ -327,7 +327,7 @@ config X86_INVD_BUG
11718
11719 config X86_ALIGNMENT_16
11720 def_bool y
11721- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11722+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11723
11724 config X86_INTEL_USERCOPY
11725 def_bool y
11726@@ -373,7 +373,7 @@ config X86_CMPXCHG64
11727 # generates cmov.
11728 config X86_CMOV
11729 def_bool y
11730- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
11731+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
11732
11733 config X86_MINIMUM_CPU_FAMILY
11734 int
11735diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
11736index 78d91af..8ceb94b 100644
11737--- a/arch/x86/Kconfig.debug
11738+++ b/arch/x86/Kconfig.debug
11739@@ -74,7 +74,7 @@ config X86_PTDUMP
11740 config DEBUG_RODATA
11741 bool "Write protect kernel read-only data structures"
11742 default y
11743- depends on DEBUG_KERNEL
11744+ depends on DEBUG_KERNEL && BROKEN
11745 ---help---
11746 Mark the kernel read-only data as write-protected in the pagetables,
11747 in order to catch accidental (and incorrect) writes to such const
11748@@ -92,7 +92,7 @@ config DEBUG_RODATA_TEST
11749
11750 config DEBUG_SET_MODULE_RONX
11751 bool "Set loadable kernel module data as NX and text as RO"
11752- depends on MODULES
11753+ depends on MODULES && BROKEN
11754 ---help---
11755 This option helps catch unintended modifications to loadable
11756 kernel module's text and read-only data. It also prevents execution
11757diff --git a/arch/x86/Makefile b/arch/x86/Makefile
11758index 57d0215..b4373fb 100644
11759--- a/arch/x86/Makefile
11760+++ b/arch/x86/Makefile
11761@@ -49,14 +49,12 @@ ifeq ($(CONFIG_X86_32),y)
11762 # CPU-specific tuning. Anything which can be shared with UML should go here.
11763 include $(srctree)/arch/x86/Makefile_32.cpu
11764 KBUILD_CFLAGS += $(cflags-y)
11765-
11766- # temporary until string.h is fixed
11767- KBUILD_CFLAGS += -ffreestanding
11768 else
11769 BITS := 64
11770 UTS_MACHINE := x86_64
11771 CHECKFLAGS += -D__x86_64__ -m64
11772
11773+ biarch := $(call cc-option,-m64)
11774 KBUILD_AFLAGS += -m64
11775 KBUILD_CFLAGS += -m64
11776
11777@@ -89,6 +87,9 @@ else
11778 KBUILD_CFLAGS += -maccumulate-outgoing-args
11779 endif
11780
11781+# temporary until string.h is fixed
11782+KBUILD_CFLAGS += -ffreestanding
11783+
11784 ifdef CONFIG_CC_STACKPROTECTOR
11785 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
11786 ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y)
11787@@ -247,3 +248,12 @@ define archhelp
11788 echo ' FDINITRD=file initrd for the booted kernel'
11789 echo ' kvmconfig - Enable additional options for guest kernel support'
11790 endef
11791+
11792+define OLD_LD
11793+
11794+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
11795+*** Please upgrade your binutils to 2.18 or newer
11796+endef
11797+
11798+archprepare:
11799+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
11800diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
11801index 6cf0111..f2e2398 100644
11802--- a/arch/x86/boot/Makefile
11803+++ b/arch/x86/boot/Makefile
11804@@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ \
11805 $(call cc-option, -fno-unit-at-a-time)) \
11806 $(call cc-option, -fno-stack-protector) \
11807 $(call cc-option, -mpreferred-stack-boundary=2)
11808+ifdef CONSTIFY_PLUGIN
11809+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
11810+endif
11811 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11812 GCOV_PROFILE := n
11813
11814diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
11815index 878e4b9..20537ab 100644
11816--- a/arch/x86/boot/bitops.h
11817+++ b/arch/x86/boot/bitops.h
11818@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
11819 u8 v;
11820 const u32 *p = (const u32 *)addr;
11821
11822- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
11823+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
11824 return v;
11825 }
11826
11827@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
11828
11829 static inline void set_bit(int nr, void *addr)
11830 {
11831- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
11832+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
11833 }
11834
11835 #endif /* BOOT_BITOPS_H */
11836diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
11837index ef72bae..353a184 100644
11838--- a/arch/x86/boot/boot.h
11839+++ b/arch/x86/boot/boot.h
11840@@ -85,7 +85,7 @@ static inline void io_delay(void)
11841 static inline u16 ds(void)
11842 {
11843 u16 seg;
11844- asm("movw %%ds,%0" : "=rm" (seg));
11845+ asm volatile("movw %%ds,%0" : "=rm" (seg));
11846 return seg;
11847 }
11848
11849@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
11850 static inline int memcmp(const void *s1, const void *s2, size_t len)
11851 {
11852 u8 diff;
11853- asm("repe; cmpsb; setnz %0"
11854+ asm volatile("repe; cmpsb; setnz %0"
11855 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
11856 return diff;
11857 }
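
The bitops.h and boot.h hunks above (and the cpucheck.c ones further down) all make the same change: bare asm statements become asm volatile. Without the volatile qualifier, GCC is free to delete an asm whose outputs look unused, or to hoist and reorder it across other code; volatile pins the statement where it is written. A minimal sketch in plain C, assuming an x86 host (rdtsc is just a convenient example of an asm with side effects):

#include <stdio.h>

/* A minimal sketch (not from the patch): at -O2, a non-volatile asm
 * whose outputs the compiler considers dead may be dropped or moved;
 * `volatile` forces it to be emitted exactly where it appears. */
static unsigned int read_tsc_lo(void)
{
        unsigned int lo, hi;
        asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
        return lo;
}

int main(void)
{
        printf("tsc lo: %u\n", read_tsc_lo());
        return 0;
}
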
11858diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
11859index c8a6792..2402765 100644
11860--- a/arch/x86/boot/compressed/Makefile
11861+++ b/arch/x86/boot/compressed/Makefile
11862@@ -16,6 +16,9 @@ KBUILD_CFLAGS += $(cflags-y)
11863 KBUILD_CFLAGS += -mno-mmx -mno-sse
11864 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
11865 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
11866+ifdef CONSTIFY_PLUGIN
11867+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
11868+endif
11869
11870 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11871 GCOV_PROFILE := n
11872diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
11873index b7388a4..03844ec 100644
11874--- a/arch/x86/boot/compressed/eboot.c
11875+++ b/arch/x86/boot/compressed/eboot.c
11876@@ -150,7 +150,6 @@ again:
11877 *addr = max_addr;
11878 }
11879
11880-free_pool:
11881 efi_call_phys1(sys_table->boottime->free_pool, map);
11882
11883 fail:
11884@@ -214,7 +213,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
11885 if (i == map_size / desc_size)
11886 status = EFI_NOT_FOUND;
11887
11888-free_pool:
11889 efi_call_phys1(sys_table->boottime->free_pool, map);
11890 fail:
11891 return status;
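
The two eboot.c hunks only delete labels that nothing jumps to; execution already falls through into the free_pool call, so behaviour is unchanged and the unused-label warning goes away. The same situation in miniature:

#include <stdio.h>

/* A minimal sketch (not from the patch): a label with no matching
 * goto is dead syntax; gcc -Wall reports it via -Wunused-label, and
 * deleting it, as the hunks above do, changes nothing because control
 * falls through either way. */
int main(void)
{
free_pool:      /* warning: label 'free_pool' defined but not used */
        puts("reached with or without the label");
        return 0;
}
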
11892diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
11893index a53440e..c3dbf1e 100644
11894--- a/arch/x86/boot/compressed/efi_stub_32.S
11895+++ b/arch/x86/boot/compressed/efi_stub_32.S
11896@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
11897 * parameter 2, ..., param n. To make things easy, we save the return
11898 * address of efi_call_phys in a global variable.
11899 */
11900- popl %ecx
11901- movl %ecx, saved_return_addr(%edx)
11902- /* get the function pointer into ECX*/
11903- popl %ecx
11904- movl %ecx, efi_rt_function_ptr(%edx)
11905+ popl saved_return_addr(%edx)
11906+ popl efi_rt_function_ptr(%edx)
11907
11908 /*
11909 * 3. Call the physical function.
11910 */
11911- call *%ecx
11912+ call *efi_rt_function_ptr(%edx)
11913
11914 /*
11915 * 4. Balance the stack. And because EAX contain the return value,
11916@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
11917 1: popl %edx
11918 subl $1b, %edx
11919
11920- movl efi_rt_function_ptr(%edx), %ecx
11921- pushl %ecx
11922+ pushl efi_rt_function_ptr(%edx)
11923
11924 /*
11925 * 10. Push the saved return address onto the stack and return.
11926 */
11927- movl saved_return_addr(%edx), %ecx
11928- pushl %ecx
11929- ret
11930+ jmpl *saved_return_addr(%edx)
11931 ENDPROC(efi_call_phys)
11932 .previous
11933
11934diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
11935index 5d6f689..9d06730 100644
11936--- a/arch/x86/boot/compressed/head_32.S
11937+++ b/arch/x86/boot/compressed/head_32.S
11938@@ -118,7 +118,7 @@ preferred_addr:
11939 notl %eax
11940 andl %eax, %ebx
11941 #else
11942- movl $LOAD_PHYSICAL_ADDR, %ebx
11943+ movl $____LOAD_PHYSICAL_ADDR, %ebx
11944 #endif
11945
11946 /* Target address to relocate to for decompression */
11947diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
11948index c337422..2c5be72 100644
11949--- a/arch/x86/boot/compressed/head_64.S
11950+++ b/arch/x86/boot/compressed/head_64.S
11951@@ -95,7 +95,7 @@ ENTRY(startup_32)
11952 notl %eax
11953 andl %eax, %ebx
11954 #else
11955- movl $LOAD_PHYSICAL_ADDR, %ebx
11956+ movl $____LOAD_PHYSICAL_ADDR, %ebx
11957 #endif
11958
11959 /* Target address to relocate to for decompression */
11960@@ -270,7 +270,7 @@ preferred_addr:
11961 notq %rax
11962 andq %rax, %rbp
11963 #else
11964- movq $LOAD_PHYSICAL_ADDR, %rbp
11965+ movq $____LOAD_PHYSICAL_ADDR, %rbp
11966 #endif
11967
11968 /* Target address to relocate to for decompression */
11969@@ -362,8 +362,8 @@ gdt:
11970 .long gdt
11971 .word 0
11972 .quad 0x0000000000000000 /* NULL descriptor */
11973- .quad 0x00af9a000000ffff /* __KERNEL_CS */
11974- .quad 0x00cf92000000ffff /* __KERNEL_DS */
11975+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
11976+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
11977 .quad 0x0080890000000000 /* TS descriptor */
11978 .quad 0x0000000000000000 /* TS continued */
11979 gdt_end:
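
In the gdt hunk above the only change is the low type bit of each descriptor: 0x9a becomes 0x9b and 0x92 becomes 0x93, i.e. the "accessed" bit is set in advance. A plausible reading, not stated in the patch itself, is that this spares the CPU from writing the bit back into a GDT that KERNEXEC maps read-only. Decoding the access byte:

#include <stdio.h>

/* A minimal sketch decoding the descriptor access byte changed above.
 * Layout: P (bit 7), DPL (bits 6-5), S (bit 4), type (bits 3-0);
 * for code/data segments, type bit 0 is the CPU-set "accessed" bit. */
static void decode(unsigned char a)
{
        printf("%#04x: P=%d DPL=%d S=%d exec=%d accessed=%d\n",
               a, a >> 7, (a >> 5) & 3, (a >> 4) & 1,
               (a >> 3) & 1, a & 1);
}

int main(void)
{
        decode(0x9a);   /* old __KERNEL_CS: accessed=0 */
        decode(0x9b);   /* new __KERNEL_CS: accessed=1 */
        decode(0x92);   /* old __KERNEL_DS: accessed=0 */
        decode(0x93);   /* new __KERNEL_DS: accessed=1 */
        return 0;
}
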
11980diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
11981index 434f077..b6b4b38 100644
11982--- a/arch/x86/boot/compressed/misc.c
11983+++ b/arch/x86/boot/compressed/misc.c
11984@@ -283,7 +283,7 @@ static void handle_relocations(void *output, unsigned long output_len)
11985 * Calculate the delta between where vmlinux was linked to load
11986 * and where it was actually loaded.
11987 */
11988- delta = min_addr - LOAD_PHYSICAL_ADDR;
11989+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
11990 if (!delta) {
11991 debug_putstr("No relocation needed... ");
11992 return;
11993@@ -380,7 +380,7 @@ static void parse_elf(void *output)
11994 case PT_LOAD:
11995 #ifdef CONFIG_RELOCATABLE
11996 dest = output;
11997- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
11998+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
11999 #else
12000 dest = (void *)(phdr->p_paddr);
12001 #endif
12002@@ -432,7 +432,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
12003 error("Destination address too large");
12004 #endif
12005 #ifndef CONFIG_RELOCATABLE
12006- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
12007+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
12008 error("Wrong destination address");
12009 #endif
12010
12011diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
12012index 4d3ff03..e4972ff 100644
12013--- a/arch/x86/boot/cpucheck.c
12014+++ b/arch/x86/boot/cpucheck.c
12015@@ -74,7 +74,7 @@ static int has_fpu(void)
12016 u16 fcw = -1, fsw = -1;
12017 u32 cr0;
12018
12019- asm("movl %%cr0,%0" : "=r" (cr0));
12020+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
12021 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
12022 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
12023 asm volatile("movl %0,%%cr0" : : "r" (cr0));
12024@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
12025 {
12026 u32 f0, f1;
12027
12028- asm("pushfl ; "
12029+ asm volatile("pushfl ; "
12030 "pushfl ; "
12031 "popl %0 ; "
12032 "movl %0,%1 ; "
12033@@ -115,7 +115,7 @@ static void get_flags(void)
12034 set_bit(X86_FEATURE_FPU, cpu.flags);
12035
12036 if (has_eflag(X86_EFLAGS_ID)) {
12037- asm("cpuid"
12038+ asm volatile("cpuid"
12039 : "=a" (max_intel_level),
12040 "=b" (cpu_vendor[0]),
12041 "=d" (cpu_vendor[1]),
12042@@ -124,7 +124,7 @@ static void get_flags(void)
12043
12044 if (max_intel_level >= 0x00000001 &&
12045 max_intel_level <= 0x0000ffff) {
12046- asm("cpuid"
12047+ asm volatile("cpuid"
12048 : "=a" (tfms),
12049 "=c" (cpu.flags[4]),
12050 "=d" (cpu.flags[0])
12051@@ -136,7 +136,7 @@ static void get_flags(void)
12052 cpu.model += ((tfms >> 16) & 0xf) << 4;
12053 }
12054
12055- asm("cpuid"
12056+ asm volatile("cpuid"
12057 : "=a" (max_amd_level)
12058 : "a" (0x80000000)
12059 : "ebx", "ecx", "edx");
12060@@ -144,7 +144,7 @@ static void get_flags(void)
12061 if (max_amd_level >= 0x80000001 &&
12062 max_amd_level <= 0x8000ffff) {
12063 u32 eax = 0x80000001;
12064- asm("cpuid"
12065+ asm volatile("cpuid"
12066 : "+a" (eax),
12067 "=c" (cpu.flags[6]),
12068 "=d" (cpu.flags[1])
12069@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12070 u32 ecx = MSR_K7_HWCR;
12071 u32 eax, edx;
12072
12073- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12074+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12075 eax &= ~(1 << 15);
12076- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12077+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12078
12079 get_flags(); /* Make sure it really did something */
12080 err = check_flags();
12081@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12082 u32 ecx = MSR_VIA_FCR;
12083 u32 eax, edx;
12084
12085- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12086+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12087 eax |= (1<<1)|(1<<7);
12088- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12089+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12090
12091 set_bit(X86_FEATURE_CX8, cpu.flags);
12092 err = check_flags();
12093@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12094 u32 eax, edx;
12095 u32 level = 1;
12096
12097- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12098- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12099- asm("cpuid"
12100+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12101+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12102+ asm volatile("cpuid"
12103 : "+a" (level), "=d" (cpu.flags[0])
12104 : : "ecx", "ebx");
12105- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12106+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12107
12108 err = check_flags();
12109 }
12110diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
12111index 9ec06a1..2c25e79 100644
12112--- a/arch/x86/boot/header.S
12113+++ b/arch/x86/boot/header.S
12114@@ -409,10 +409,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
12115 # single linked list of
12116 # struct setup_data
12117
12118-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
12119+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
12120
12121 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
12122+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
12123+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
12124+#else
12125 #define VO_INIT_SIZE (VO__end - VO__text)
12126+#endif
12127 #if ZO_INIT_SIZE > VO_INIT_SIZE
12128 #define INIT_SIZE ZO_INIT_SIZE
12129 #else
12130diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
12131index db75d07..8e6d0af 100644
12132--- a/arch/x86/boot/memory.c
12133+++ b/arch/x86/boot/memory.c
12134@@ -19,7 +19,7 @@
12135
12136 static int detect_memory_e820(void)
12137 {
12138- int count = 0;
12139+ unsigned int count = 0;
12140 struct biosregs ireg, oreg;
12141 struct e820entry *desc = boot_params.e820_map;
12142 static struct e820entry buf; /* static so it is zeroed */
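
detect_memory_e820() above (and get_entry() in video.c below) switch a counter from int to unsigned int. These counters are only incremented and compared, and signed overflow is undefined behaviour in C while unsigned arithmetic wraps with defined semantics — a pattern this patch applies in many places. In miniature:

#include <limits.h>
#include <stdio.h>

/* A minimal sketch (not from the patch): incrementing a signed int at
 * INT_MAX is undefined behaviour; the unsigned counterpart wraps to 0
 * with well-defined semantics, which is why counters that only grow
 * are safer declared unsigned. */
int main(void)
{
        unsigned int count = UINT_MAX;
        count++;                        /* defined: wraps to 0 */
        printf("wrapped to %u\n", count);
        /* int c = INT_MAX; c++;           undefined behaviour */
        return 0;
}
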
12143diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
12144index 11e8c6e..fdbb1ed 100644
12145--- a/arch/x86/boot/video-vesa.c
12146+++ b/arch/x86/boot/video-vesa.c
12147@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
12148
12149 boot_params.screen_info.vesapm_seg = oreg.es;
12150 boot_params.screen_info.vesapm_off = oreg.di;
12151+ boot_params.screen_info.vesapm_size = oreg.cx;
12152 }
12153
12154 /*
12155diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
12156index 43eda28..5ab5fdb 100644
12157--- a/arch/x86/boot/video.c
12158+++ b/arch/x86/boot/video.c
12159@@ -96,7 +96,7 @@ static void store_mode_params(void)
12160 static unsigned int get_entry(void)
12161 {
12162 char entry_buf[4];
12163- int i, len = 0;
12164+ unsigned int i, len = 0;
12165 int key;
12166 unsigned int v;
12167
12168diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
12169index 9105655..41779c1 100644
12170--- a/arch/x86/crypto/aes-x86_64-asm_64.S
12171+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
12172@@ -8,6 +8,8 @@
12173 * including this sentence is retained in full.
12174 */
12175
12176+#include <asm/alternative-asm.h>
12177+
12178 .extern crypto_ft_tab
12179 .extern crypto_it_tab
12180 .extern crypto_fl_tab
12181@@ -70,6 +72,8 @@
12182 je B192; \
12183 leaq 32(r9),r9;
12184
12185+#define ret pax_force_retaddr; ret
12186+
12187 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
12188 movq r1,r2; \
12189 movq r3,r4; \
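
The `#define ret pax_force_retaddr; ret` line above works because .S files are fed through the C preprocessor before assembly, so a single macro instruments every return in the file instead of patching each site by hand (the crypto files below take the site-by-site route instead). The same mechanism demonstrated in C, with made-up names:

#include <stdio.h>

/* A minimal sketch of the preprocessor trick used above, with
 * hypothetical names: one macro definition rewrites every use site. */
#define trace_exit()    puts("instrumented return")
#define RET             trace_exit(); return

static int answer(void)
{
        RET 42;         /* expands to: trace_exit(); return 42; */
}

int main(void)
{
        printf("%d\n", answer());
        return 0;
}
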
12190diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
12191index 477e9d7..c92c7d8 100644
12192--- a/arch/x86/crypto/aesni-intel_asm.S
12193+++ b/arch/x86/crypto/aesni-intel_asm.S
12194@@ -31,6 +31,7 @@
12195
12196 #include <linux/linkage.h>
12197 #include <asm/inst.h>
12198+#include <asm/alternative-asm.h>
12199
12200 #ifdef __x86_64__
12201 .data
12202@@ -205,7 +206,7 @@ enc: .octa 0x2
12203 * num_initial_blocks = b mod 4
12204 * encrypt the initial num_initial_blocks blocks and apply ghash on
12205 * the ciphertext
12206-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
12207+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
12208 * are clobbered
12209 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
12210 */
12211@@ -214,8 +215,8 @@ enc: .octa 0x2
12212 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
12213 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
12214 mov arg7, %r10 # %r10 = AAD
12215- mov arg8, %r12 # %r12 = aadLen
12216- mov %r12, %r11
12217+ mov arg8, %r15 # %r15 = aadLen
12218+ mov %r15, %r11
12219 pxor %xmm\i, %xmm\i
12220 _get_AAD_loop\num_initial_blocks\operation:
12221 movd (%r10), \TMP1
12222@@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operation:
12223 psrldq $4, %xmm\i
12224 pxor \TMP1, %xmm\i
12225 add $4, %r10
12226- sub $4, %r12
12227+ sub $4, %r15
12228 jne _get_AAD_loop\num_initial_blocks\operation
12229 cmp $16, %r11
12230 je _get_AAD_loop2_done\num_initial_blocks\operation
12231- mov $16, %r12
12232+ mov $16, %r15
12233 _get_AAD_loop2\num_initial_blocks\operation:
12234 psrldq $4, %xmm\i
12235- sub $4, %r12
12236- cmp %r11, %r12
12237+ sub $4, %r15
12238+ cmp %r11, %r15
12239 jne _get_AAD_loop2\num_initial_blocks\operation
12240 _get_AAD_loop2_done\num_initial_blocks\operation:
12241 movdqa SHUF_MASK(%rip), %xmm14
12242@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation:
12243 * num_initial_blocks = b mod 4
12244 * encrypt the initial num_initial_blocks blocks and apply ghash on
12245 * the ciphertext
12246-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
12247+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
12248 * are clobbered
12249 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
12250 */
12251@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
12252 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
12253 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
12254 mov arg7, %r10 # %r10 = AAD
12255- mov arg8, %r12 # %r12 = aadLen
12256- mov %r12, %r11
12257+ mov arg8, %r15 # %r15 = aadLen
12258+ mov %r15, %r11
12259 pxor %xmm\i, %xmm\i
12260 _get_AAD_loop\num_initial_blocks\operation:
12261 movd (%r10), \TMP1
12262@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
12263 psrldq $4, %xmm\i
12264 pxor \TMP1, %xmm\i
12265 add $4, %r10
12266- sub $4, %r12
12267+ sub $4, %r15
12268 jne _get_AAD_loop\num_initial_blocks\operation
12269 cmp $16, %r11
12270 je _get_AAD_loop2_done\num_initial_blocks\operation
12271- mov $16, %r12
12272+ mov $16, %r15
12273 _get_AAD_loop2\num_initial_blocks\operation:
12274 psrldq $4, %xmm\i
12275- sub $4, %r12
12276- cmp %r11, %r12
12277+ sub $4, %r15
12278+ cmp %r11, %r15
12279 jne _get_AAD_loop2\num_initial_blocks\operation
12280 _get_AAD_loop2_done\num_initial_blocks\operation:
12281 movdqa SHUF_MASK(%rip), %xmm14
12282@@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
12283 *
12284 *****************************************************************************/
12285 ENTRY(aesni_gcm_dec)
12286- push %r12
12287+ push %r15
12288 push %r13
12289 push %r14
12290 mov %rsp, %r14
12291@@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec)
12292 */
12293 sub $VARIABLE_OFFSET, %rsp
12294 and $~63, %rsp # align rsp to 64 bytes
12295- mov %arg6, %r12
12296- movdqu (%r12), %xmm13 # %xmm13 = HashKey
12297+ mov %arg6, %r15
12298+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
12299 movdqa SHUF_MASK(%rip), %xmm2
12300 PSHUFB_XMM %xmm2, %xmm13
12301
12302@@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec)
12303 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
12304 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
12305 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
12306- mov %r13, %r12
12307- and $(3<<4), %r12
12308+ mov %r13, %r15
12309+ and $(3<<4), %r15
12310 jz _initial_num_blocks_is_0_decrypt
12311- cmp $(2<<4), %r12
12312+ cmp $(2<<4), %r15
12313 jb _initial_num_blocks_is_1_decrypt
12314 je _initial_num_blocks_is_2_decrypt
12315 _initial_num_blocks_is_3_decrypt:
12316@@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt:
12317 sub $16, %r11
12318 add %r13, %r11
12319 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
12320- lea SHIFT_MASK+16(%rip), %r12
12321- sub %r13, %r12
12322+ lea SHIFT_MASK+16(%rip), %r15
12323+ sub %r13, %r15
12324 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
12325 # (%r13 is the number of bytes in plaintext mod 16)
12326- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
12327+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
12328 PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes
12329
12330 movdqa %xmm1, %xmm2
12331 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
12332- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
12333+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
12334 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
12335 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
12336 pand %xmm1, %xmm2
12337@@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt:
12338 sub $1, %r13
12339 jne _less_than_8_bytes_left_decrypt
12340 _multiple_of_16_bytes_decrypt:
12341- mov arg8, %r12 # %r13 = aadLen (number of bytes)
12342- shl $3, %r12 # convert into number of bits
12343- movd %r12d, %xmm15 # len(A) in %xmm15
12344+ mov arg8, %r15 # %r13 = aadLen (number of bytes)
12345+ shl $3, %r15 # convert into number of bits
12346+ movd %r15d, %xmm15 # len(A) in %xmm15
12347 shl $3, %arg4 # len(C) in bits (*128)
12348 MOVQ_R64_XMM %arg4, %xmm1
12349 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
12350@@ -1440,7 +1441,8 @@ _return_T_done_decrypt:
12351 mov %r14, %rsp
12352 pop %r14
12353 pop %r13
12354- pop %r12
12355+ pop %r15
12356+ pax_force_retaddr
12357 ret
12358 ENDPROC(aesni_gcm_dec)
12359
12360@@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec)
12361 * poly = x^128 + x^127 + x^126 + x^121 + 1
12362 ***************************************************************************/
12363 ENTRY(aesni_gcm_enc)
12364- push %r12
12365+ push %r15
12366 push %r13
12367 push %r14
12368 mov %rsp, %r14
12369@@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc)
12370 #
12371 sub $VARIABLE_OFFSET, %rsp
12372 and $~63, %rsp
12373- mov %arg6, %r12
12374- movdqu (%r12), %xmm13
12375+ mov %arg6, %r15
12376+ movdqu (%r15), %xmm13
12377 movdqa SHUF_MASK(%rip), %xmm2
12378 PSHUFB_XMM %xmm2, %xmm13
12379
12380@@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc)
12381 movdqa %xmm13, HashKey(%rsp)
12382 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
12383 and $-16, %r13
12384- mov %r13, %r12
12385+ mov %r13, %r15
12386
12387 # Encrypt first few blocks
12388
12389- and $(3<<4), %r12
12390+ and $(3<<4), %r15
12391 jz _initial_num_blocks_is_0_encrypt
12392- cmp $(2<<4), %r12
12393+ cmp $(2<<4), %r15
12394 jb _initial_num_blocks_is_1_encrypt
12395 je _initial_num_blocks_is_2_encrypt
12396 _initial_num_blocks_is_3_encrypt:
12397@@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt:
12398 sub $16, %r11
12399 add %r13, %r11
12400 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
12401- lea SHIFT_MASK+16(%rip), %r12
12402- sub %r13, %r12
12403+ lea SHIFT_MASK+16(%rip), %r15
12404+ sub %r13, %r15
12405 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
12406 # (%r13 is the number of bytes in plaintext mod 16)
12407- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
12408+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
12409 PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
12410 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
12411- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
12412+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
12413 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
12414 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
12415 movdqa SHUF_MASK(%rip), %xmm10
12416@@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt:
12417 sub $1, %r13
12418 jne _less_than_8_bytes_left_encrypt
12419 _multiple_of_16_bytes_encrypt:
12420- mov arg8, %r12 # %r12 = addLen (number of bytes)
12421- shl $3, %r12
12422- movd %r12d, %xmm15 # len(A) in %xmm15
12423+ mov arg8, %r15 # %r15 = addLen (number of bytes)
12424+ shl $3, %r15
12425+ movd %r15d, %xmm15 # len(A) in %xmm15
12426 shl $3, %arg4 # len(C) in bits (*128)
12427 MOVQ_R64_XMM %arg4, %xmm1
12428 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
12429@@ -1704,7 +1706,8 @@ _return_T_done_encrypt:
12430 mov %r14, %rsp
12431 pop %r14
12432 pop %r13
12433- pop %r12
12434+ pop %r15
12435+ pax_force_retaddr
12436 ret
12437 ENDPROC(aesni_gcm_enc)
12438
12439@@ -1722,6 +1725,7 @@ _key_expansion_256a:
12440 pxor %xmm1, %xmm0
12441 movaps %xmm0, (TKEYP)
12442 add $0x10, TKEYP
12443+ pax_force_retaddr
12444 ret
12445 ENDPROC(_key_expansion_128)
12446 ENDPROC(_key_expansion_256a)
12447@@ -1748,6 +1752,7 @@ _key_expansion_192a:
12448 shufps $0b01001110, %xmm2, %xmm1
12449 movaps %xmm1, 0x10(TKEYP)
12450 add $0x20, TKEYP
12451+ pax_force_retaddr
12452 ret
12453 ENDPROC(_key_expansion_192a)
12454
12455@@ -1768,6 +1773,7 @@ _key_expansion_192b:
12456
12457 movaps %xmm0, (TKEYP)
12458 add $0x10, TKEYP
12459+ pax_force_retaddr
12460 ret
12461 ENDPROC(_key_expansion_192b)
12462
12463@@ -1781,6 +1787,7 @@ _key_expansion_256b:
12464 pxor %xmm1, %xmm2
12465 movaps %xmm2, (TKEYP)
12466 add $0x10, TKEYP
12467+ pax_force_retaddr
12468 ret
12469 ENDPROC(_key_expansion_256b)
12470
12471@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
12472 #ifndef __x86_64__
12473 popl KEYP
12474 #endif
12475+ pax_force_retaddr
12476 ret
12477 ENDPROC(aesni_set_key)
12478
12479@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
12480 popl KLEN
12481 popl KEYP
12482 #endif
12483+ pax_force_retaddr
12484 ret
12485 ENDPROC(aesni_enc)
12486
12487@@ -1974,6 +1983,7 @@ _aesni_enc1:
12488 AESENC KEY STATE
12489 movaps 0x70(TKEYP), KEY
12490 AESENCLAST KEY STATE
12491+ pax_force_retaddr
12492 ret
12493 ENDPROC(_aesni_enc1)
12494
12495@@ -2083,6 +2093,7 @@ _aesni_enc4:
12496 AESENCLAST KEY STATE2
12497 AESENCLAST KEY STATE3
12498 AESENCLAST KEY STATE4
12499+ pax_force_retaddr
12500 ret
12501 ENDPROC(_aesni_enc4)
12502
12503@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
12504 popl KLEN
12505 popl KEYP
12506 #endif
12507+ pax_force_retaddr
12508 ret
12509 ENDPROC(aesni_dec)
12510
12511@@ -2164,6 +2176,7 @@ _aesni_dec1:
12512 AESDEC KEY STATE
12513 movaps 0x70(TKEYP), KEY
12514 AESDECLAST KEY STATE
12515+ pax_force_retaddr
12516 ret
12517 ENDPROC(_aesni_dec1)
12518
12519@@ -2273,6 +2286,7 @@ _aesni_dec4:
12520 AESDECLAST KEY STATE2
12521 AESDECLAST KEY STATE3
12522 AESDECLAST KEY STATE4
12523+ pax_force_retaddr
12524 ret
12525 ENDPROC(_aesni_dec4)
12526
12527@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
12528 popl KEYP
12529 popl LEN
12530 #endif
12531+ pax_force_retaddr
12532 ret
12533 ENDPROC(aesni_ecb_enc)
12534
12535@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
12536 popl KEYP
12537 popl LEN
12538 #endif
12539+ pax_force_retaddr
12540 ret
12541 ENDPROC(aesni_ecb_dec)
12542
12543@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
12544 popl LEN
12545 popl IVP
12546 #endif
12547+ pax_force_retaddr
12548 ret
12549 ENDPROC(aesni_cbc_enc)
12550
12551@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
12552 popl LEN
12553 popl IVP
12554 #endif
12555+ pax_force_retaddr
12556 ret
12557 ENDPROC(aesni_cbc_dec)
12558
12559@@ -2550,6 +2568,7 @@ _aesni_inc_init:
12560 mov $1, TCTR_LOW
12561 MOVQ_R64_XMM TCTR_LOW INC
12562 MOVQ_R64_XMM CTR TCTR_LOW
12563+ pax_force_retaddr
12564 ret
12565 ENDPROC(_aesni_inc_init)
12566
12567@@ -2579,6 +2598,7 @@ _aesni_inc:
12568 .Linc_low:
12569 movaps CTR, IV
12570 PSHUFB_XMM BSWAP_MASK IV
12571+ pax_force_retaddr
12572 ret
12573 ENDPROC(_aesni_inc)
12574
12575@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
12576 .Lctr_enc_ret:
12577 movups IV, (IVP)
12578 .Lctr_enc_just_ret:
12579+ pax_force_retaddr
12580 ret
12581 ENDPROC(aesni_ctr_enc)
12582
12583@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
12584 pxor INC, STATE4
12585 movdqu STATE4, 0x70(OUTP)
12586
12587+ pax_force_retaddr
12588 ret
12589 ENDPROC(aesni_xts_crypt8)
12590
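
Across the crypto .S files that follow, the recurring edits are the same two seen above: a pax_force_retaddr (from alternative-asm.h) ahead of every ret, and a migration off %r12 onto %r14 or %r15. The rename is presumably to keep %r12 free for the PaX/KERNEXEC instrumentation on amd64 — an assumption on my part, not spelled out in the patch. GCC exposes the same reservation idea to C code as a global register variable:

#include <stdio.h>

/* A minimal sketch (assumption: the plugin reserves %r12, hence the
 * renames in these files).  A GCC global register variable takes
 * %r12 away from the register allocator for this translation unit. */
register unsigned long pinned_r12 asm("r12");

int main(void)
{
        pinned_r12 = 0xffffffff80000000UL;      /* e.g. a kernel-text mask */
        printf("r12 holds %#lx\n", pinned_r12);
        return 0;
}
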
12591diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
12592index 246c670..466e2d6 100644
12593--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
12594+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
12595@@ -21,6 +21,7 @@
12596 */
12597
12598 #include <linux/linkage.h>
12599+#include <asm/alternative-asm.h>
12600
12601 .file "blowfish-x86_64-asm.S"
12602 .text
12603@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
12604 jnz .L__enc_xor;
12605
12606 write_block();
12607+ pax_force_retaddr
12608 ret;
12609 .L__enc_xor:
12610 xor_block();
12611+ pax_force_retaddr
12612 ret;
12613 ENDPROC(__blowfish_enc_blk)
12614
12615@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
12616
12617 movq %r11, %rbp;
12618
12619+ pax_force_retaddr
12620 ret;
12621 ENDPROC(blowfish_dec_blk)
12622
12623@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
12624
12625 popq %rbx;
12626 popq %rbp;
12627+ pax_force_retaddr
12628 ret;
12629
12630 .L__enc_xor4:
12631@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
12632
12633 popq %rbx;
12634 popq %rbp;
12635+ pax_force_retaddr
12636 ret;
12637 ENDPROC(__blowfish_enc_blk_4way)
12638
12639@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
12640 popq %rbx;
12641 popq %rbp;
12642
12643+ pax_force_retaddr
12644 ret;
12645 ENDPROC(blowfish_dec_blk_4way)
12646diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
12647index ce71f92..1dce7ec 100644
12648--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
12649+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
12650@@ -16,6 +16,7 @@
12651 */
12652
12653 #include <linux/linkage.h>
12654+#include <asm/alternative-asm.h>
12655
12656 #define CAMELLIA_TABLE_BYTE_LEN 272
12657
12658@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
12659 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
12660 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
12661 %rcx, (%r9));
12662+ pax_force_retaddr
12663 ret;
12664 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
12665
12666@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
12667 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
12668 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
12669 %rax, (%r9));
12670+ pax_force_retaddr
12671 ret;
12672 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
12673
12674@@ -780,6 +783,7 @@ __camellia_enc_blk16:
12675 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
12676 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
12677
12678+ pax_force_retaddr
12679 ret;
12680
12681 .align 8
12682@@ -865,6 +869,7 @@ __camellia_dec_blk16:
12683 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
12684 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
12685
12686+ pax_force_retaddr
12687 ret;
12688
12689 .align 8
12690@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
12691 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12692 %xmm8, %rsi);
12693
12694+ pax_force_retaddr
12695 ret;
12696 ENDPROC(camellia_ecb_enc_16way)
12697
12698@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
12699 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12700 %xmm8, %rsi);
12701
12702+ pax_force_retaddr
12703 ret;
12704 ENDPROC(camellia_ecb_dec_16way)
12705
12706@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
12707 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12708 %xmm8, %rsi);
12709
12710+ pax_force_retaddr
12711 ret;
12712 ENDPROC(camellia_cbc_dec_16way)
12713
12714@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
12715 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12716 %xmm8, %rsi);
12717
12718+ pax_force_retaddr
12719 ret;
12720 ENDPROC(camellia_ctr_16way)
12721
12722@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
12723 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12724 %xmm8, %rsi);
12725
12726+ pax_force_retaddr
12727 ret;
12728 ENDPROC(camellia_xts_crypt_16way)
12729
12730diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
12731index 0e0b886..5a3123c 100644
12732--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
12733+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
12734@@ -11,6 +11,7 @@
12735 */
12736
12737 #include <linux/linkage.h>
12738+#include <asm/alternative-asm.h>
12739
12740 #define CAMELLIA_TABLE_BYTE_LEN 272
12741
12742@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
12743 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
12744 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
12745 %rcx, (%r9));
12746+ pax_force_retaddr
12747 ret;
12748 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
12749
12750@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
12751 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
12752 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
12753 %rax, (%r9));
12754+ pax_force_retaddr
12755 ret;
12756 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
12757
12758@@ -820,6 +823,7 @@ __camellia_enc_blk32:
12759 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
12760 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
12761
12762+ pax_force_retaddr
12763 ret;
12764
12765 .align 8
12766@@ -905,6 +909,7 @@ __camellia_dec_blk32:
12767 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
12768 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
12769
12770+ pax_force_retaddr
12771 ret;
12772
12773 .align 8
12774@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
12775
12776 vzeroupper;
12777
12778+ pax_force_retaddr
12779 ret;
12780 ENDPROC(camellia_ecb_enc_32way)
12781
12782@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
12783
12784 vzeroupper;
12785
12786+ pax_force_retaddr
12787 ret;
12788 ENDPROC(camellia_ecb_dec_32way)
12789
12790@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
12791
12792 vzeroupper;
12793
12794+ pax_force_retaddr
12795 ret;
12796 ENDPROC(camellia_cbc_dec_32way)
12797
12798@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
12799
12800 vzeroupper;
12801
12802+ pax_force_retaddr
12803 ret;
12804 ENDPROC(camellia_ctr_32way)
12805
12806@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
12807
12808 vzeroupper;
12809
12810+ pax_force_retaddr
12811 ret;
12812 ENDPROC(camellia_xts_crypt_32way)
12813
12814diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
12815index 310319c..db3d7b5 100644
12816--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
12817+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
12818@@ -21,6 +21,7 @@
12819 */
12820
12821 #include <linux/linkage.h>
12822+#include <asm/alternative-asm.h>
12823
12824 .file "camellia-x86_64-asm_64.S"
12825 .text
12826@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
12827 enc_outunpack(mov, RT1);
12828
12829 movq RRBP, %rbp;
12830+ pax_force_retaddr
12831 ret;
12832
12833 .L__enc_xor:
12834 enc_outunpack(xor, RT1);
12835
12836 movq RRBP, %rbp;
12837+ pax_force_retaddr
12838 ret;
12839 ENDPROC(__camellia_enc_blk)
12840
12841@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
12842 dec_outunpack();
12843
12844 movq RRBP, %rbp;
12845+ pax_force_retaddr
12846 ret;
12847 ENDPROC(camellia_dec_blk)
12848
12849@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
12850
12851 movq RRBP, %rbp;
12852 popq %rbx;
12853+ pax_force_retaddr
12854 ret;
12855
12856 .L__enc2_xor:
12857@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
12858
12859 movq RRBP, %rbp;
12860 popq %rbx;
12861+ pax_force_retaddr
12862 ret;
12863 ENDPROC(__camellia_enc_blk_2way)
12864
12865@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
12866
12867 movq RRBP, %rbp;
12868 movq RXOR, %rbx;
12869+ pax_force_retaddr
12870 ret;
12871 ENDPROC(camellia_dec_blk_2way)
12872diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
12873index c35fd5d..2d8c7db 100644
12874--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
12875+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
12876@@ -24,6 +24,7 @@
12877 */
12878
12879 #include <linux/linkage.h>
12880+#include <asm/alternative-asm.h>
12881
12882 .file "cast5-avx-x86_64-asm_64.S"
12883
12884@@ -281,6 +282,7 @@ __cast5_enc_blk16:
12885 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
12886 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
12887
12888+ pax_force_retaddr
12889 ret;
12890 ENDPROC(__cast5_enc_blk16)
12891
12892@@ -352,6 +354,7 @@ __cast5_dec_blk16:
12893 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
12894 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
12895
12896+ pax_force_retaddr
12897 ret;
12898
12899 .L__skip_dec:
12900@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
12901 vmovdqu RR4, (6*4*4)(%r11);
12902 vmovdqu RL4, (7*4*4)(%r11);
12903
12904+ pax_force_retaddr
12905 ret;
12906 ENDPROC(cast5_ecb_enc_16way)
12907
12908@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
12909 vmovdqu RR4, (6*4*4)(%r11);
12910 vmovdqu RL4, (7*4*4)(%r11);
12911
12912+ pax_force_retaddr
12913 ret;
12914 ENDPROC(cast5_ecb_dec_16way)
12915
12916@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
12917 * %rdx: src
12918 */
12919
12920- pushq %r12;
12921+ pushq %r14;
12922
12923 movq %rsi, %r11;
12924- movq %rdx, %r12;
12925+ movq %rdx, %r14;
12926
12927 vmovdqu (0*16)(%rdx), RL1;
12928 vmovdqu (1*16)(%rdx), RR1;
12929@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
12930 call __cast5_dec_blk16;
12931
12932 /* xor with src */
12933- vmovq (%r12), RX;
12934+ vmovq (%r14), RX;
12935 vpshufd $0x4f, RX, RX;
12936 vpxor RX, RR1, RR1;
12937- vpxor 0*16+8(%r12), RL1, RL1;
12938- vpxor 1*16+8(%r12), RR2, RR2;
12939- vpxor 2*16+8(%r12), RL2, RL2;
12940- vpxor 3*16+8(%r12), RR3, RR3;
12941- vpxor 4*16+8(%r12), RL3, RL3;
12942- vpxor 5*16+8(%r12), RR4, RR4;
12943- vpxor 6*16+8(%r12), RL4, RL4;
12944+ vpxor 0*16+8(%r14), RL1, RL1;
12945+ vpxor 1*16+8(%r14), RR2, RR2;
12946+ vpxor 2*16+8(%r14), RL2, RL2;
12947+ vpxor 3*16+8(%r14), RR3, RR3;
12948+ vpxor 4*16+8(%r14), RL3, RL3;
12949+ vpxor 5*16+8(%r14), RR4, RR4;
12950+ vpxor 6*16+8(%r14), RL4, RL4;
12951
12952 vmovdqu RR1, (0*16)(%r11);
12953 vmovdqu RL1, (1*16)(%r11);
12954@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
12955 vmovdqu RR4, (6*16)(%r11);
12956 vmovdqu RL4, (7*16)(%r11);
12957
12958- popq %r12;
12959+ popq %r14;
12960
12961+ pax_force_retaddr
12962 ret;
12963 ENDPROC(cast5_cbc_dec_16way)
12964
12965@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
12966 * %rcx: iv (big endian, 64bit)
12967 */
12968
12969- pushq %r12;
12970+ pushq %r14;
12971
12972 movq %rsi, %r11;
12973- movq %rdx, %r12;
12974+ movq %rdx, %r14;
12975
12976 vpcmpeqd RTMP, RTMP, RTMP;
12977 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
12978@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
12979 call __cast5_enc_blk16;
12980
12981 /* dst = src ^ iv */
12982- vpxor (0*16)(%r12), RR1, RR1;
12983- vpxor (1*16)(%r12), RL1, RL1;
12984- vpxor (2*16)(%r12), RR2, RR2;
12985- vpxor (3*16)(%r12), RL2, RL2;
12986- vpxor (4*16)(%r12), RR3, RR3;
12987- vpxor (5*16)(%r12), RL3, RL3;
12988- vpxor (6*16)(%r12), RR4, RR4;
12989- vpxor (7*16)(%r12), RL4, RL4;
12990+ vpxor (0*16)(%r14), RR1, RR1;
12991+ vpxor (1*16)(%r14), RL1, RL1;
12992+ vpxor (2*16)(%r14), RR2, RR2;
12993+ vpxor (3*16)(%r14), RL2, RL2;
12994+ vpxor (4*16)(%r14), RR3, RR3;
12995+ vpxor (5*16)(%r14), RL3, RL3;
12996+ vpxor (6*16)(%r14), RR4, RR4;
12997+ vpxor (7*16)(%r14), RL4, RL4;
12998 vmovdqu RR1, (0*16)(%r11);
12999 vmovdqu RL1, (1*16)(%r11);
13000 vmovdqu RR2, (2*16)(%r11);
13001@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
13002 vmovdqu RR4, (6*16)(%r11);
13003 vmovdqu RL4, (7*16)(%r11);
13004
13005- popq %r12;
13006+ popq %r14;
13007
13008+ pax_force_retaddr
13009 ret;
13010 ENDPROC(cast5_ctr_16way)
13011diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13012index e3531f8..e123f35 100644
13013--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13014+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13015@@ -24,6 +24,7 @@
13016 */
13017
13018 #include <linux/linkage.h>
13019+#include <asm/alternative-asm.h>
13020 #include "glue_helper-asm-avx.S"
13021
13022 .file "cast6-avx-x86_64-asm_64.S"
13023@@ -295,6 +296,7 @@ __cast6_enc_blk8:
13024 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13025 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13026
13027+ pax_force_retaddr
13028 ret;
13029 ENDPROC(__cast6_enc_blk8)
13030
13031@@ -340,6 +342,7 @@ __cast6_dec_blk8:
13032 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13033 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13034
13035+ pax_force_retaddr
13036 ret;
13037 ENDPROC(__cast6_dec_blk8)
13038
13039@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
13040
13041 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13042
13043+ pax_force_retaddr
13044 ret;
13045 ENDPROC(cast6_ecb_enc_8way)
13046
13047@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
13048
13049 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13050
13051+ pax_force_retaddr
13052 ret;
13053 ENDPROC(cast6_ecb_dec_8way)
13054
13055@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
13056 * %rdx: src
13057 */
13058
13059- pushq %r12;
13060+ pushq %r14;
13061
13062 movq %rsi, %r11;
13063- movq %rdx, %r12;
13064+ movq %rdx, %r14;
13065
13066 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13067
13068 call __cast6_dec_blk8;
13069
13070- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13071+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13072
13073- popq %r12;
13074+ popq %r14;
13075
13076+ pax_force_retaddr
13077 ret;
13078 ENDPROC(cast6_cbc_dec_8way)
13079
13080@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
13081 * %rcx: iv (little endian, 128bit)
13082 */
13083
13084- pushq %r12;
13085+ pushq %r14;
13086
13087 movq %rsi, %r11;
13088- movq %rdx, %r12;
13089+ movq %rdx, %r14;
13090
13091 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
13092 RD2, RX, RKR, RKM);
13093
13094 call __cast6_enc_blk8;
13095
13096- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13097+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13098
13099- popq %r12;
13100+ popq %r14;
13101
13102+ pax_force_retaddr
13103 ret;
13104 ENDPROC(cast6_ctr_8way)
13105
13106@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
13107 /* dst <= regs xor IVs(in dst) */
13108 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13109
13110+ pax_force_retaddr
13111 ret;
13112 ENDPROC(cast6_xts_enc_8way)
13113
13114@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
13115 /* dst <= regs xor IVs(in dst) */
13116 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13117
13118+ pax_force_retaddr
13119 ret;
13120 ENDPROC(cast6_xts_dec_8way)
13121diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13122index dbc4339..de6e120 100644
13123--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13124+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13125@@ -45,6 +45,7 @@
13126
13127 #include <asm/inst.h>
13128 #include <linux/linkage.h>
13129+#include <asm/alternative-asm.h>
13130
13131 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
13132
13133@@ -312,6 +313,7 @@ do_return:
13134 popq %rsi
13135 popq %rdi
13136 popq %rbx
13137+ pax_force_retaddr
13138 ret
13139
13140 ################################################################
13141diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
13142index 586f41a..d02851e 100644
13143--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
13144+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
13145@@ -18,6 +18,7 @@
13146
13147 #include <linux/linkage.h>
13148 #include <asm/inst.h>
13149+#include <asm/alternative-asm.h>
13150
13151 .data
13152
13153@@ -93,6 +94,7 @@ __clmul_gf128mul_ble:
13154 psrlq $1, T2
13155 pxor T2, T1
13156 pxor T1, DATA
13157+ pax_force_retaddr
13158 ret
13159 ENDPROC(__clmul_gf128mul_ble)
13160
13161@@ -105,6 +107,7 @@ ENTRY(clmul_ghash_mul)
13162 call __clmul_gf128mul_ble
13163 PSHUFB_XMM BSWAP DATA
13164 movups DATA, (%rdi)
13165+ pax_force_retaddr
13166 ret
13167 ENDPROC(clmul_ghash_mul)
13168
13169@@ -132,6 +135,7 @@ ENTRY(clmul_ghash_update)
13170 PSHUFB_XMM BSWAP DATA
13171 movups DATA, (%rdi)
13172 .Lupdate_just_ret:
13173+ pax_force_retaddr
13174 ret
13175 ENDPROC(clmul_ghash_update)
13176
13177@@ -157,5 +161,6 @@ ENTRY(clmul_ghash_setkey)
13178 pand .Lpoly, %xmm1
13179 pxor %xmm1, %xmm0
13180 movups %xmm0, (%rdi)
13181+ pax_force_retaddr
13182 ret
13183 ENDPROC(clmul_ghash_setkey)
13184diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
13185index 9279e0b..c4b3d2c 100644
13186--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
13187+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
13188@@ -1,4 +1,5 @@
13189 #include <linux/linkage.h>
13190+#include <asm/alternative-asm.h>
13191
13192 # enter salsa20_encrypt_bytes
13193 ENTRY(salsa20_encrypt_bytes)
13194@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
13195 add %r11,%rsp
13196 mov %rdi,%rax
13197 mov %rsi,%rdx
13198+ pax_force_retaddr
13199 ret
13200 # bytesatleast65:
13201 ._bytesatleast65:
13202@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
13203 add %r11,%rsp
13204 mov %rdi,%rax
13205 mov %rsi,%rdx
13206+ pax_force_retaddr
13207 ret
13208 ENDPROC(salsa20_keysetup)
13209
13210@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
13211 add %r11,%rsp
13212 mov %rdi,%rax
13213 mov %rsi,%rdx
13214+ pax_force_retaddr
13215 ret
13216 ENDPROC(salsa20_ivsetup)
13217diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
13218index 2f202f4..d9164d6 100644
13219--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
13220+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
13221@@ -24,6 +24,7 @@
13222 */
13223
13224 #include <linux/linkage.h>
13225+#include <asm/alternative-asm.h>
13226 #include "glue_helper-asm-avx.S"
13227
13228 .file "serpent-avx-x86_64-asm_64.S"
13229@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
13230 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
13231 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
13232
13233+ pax_force_retaddr
13234 ret;
13235 ENDPROC(__serpent_enc_blk8_avx)
13236
13237@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
13238 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
13239 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
13240
13241+ pax_force_retaddr
13242 ret;
13243 ENDPROC(__serpent_dec_blk8_avx)
13244
13245@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
13246
13247 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13248
13249+ pax_force_retaddr
13250 ret;
13251 ENDPROC(serpent_ecb_enc_8way_avx)
13252
13253@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
13254
13255 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
13256
13257+ pax_force_retaddr
13258 ret;
13259 ENDPROC(serpent_ecb_dec_8way_avx)
13260
13261@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
13262
13263 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
13264
13265+ pax_force_retaddr
13266 ret;
13267 ENDPROC(serpent_cbc_dec_8way_avx)
13268
13269@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
13270
13271 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13272
13273+ pax_force_retaddr
13274 ret;
13275 ENDPROC(serpent_ctr_8way_avx)
13276
13277@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
13278 /* dst <= regs xor IVs(in dst) */
13279 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13280
13281+ pax_force_retaddr
13282 ret;
13283 ENDPROC(serpent_xts_enc_8way_avx)
13284
13285@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
13286 /* dst <= regs xor IVs(in dst) */
13287 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
13288
13289+ pax_force_retaddr
13290 ret;
13291 ENDPROC(serpent_xts_dec_8way_avx)
13292diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
13293index b222085..abd483c 100644
13294--- a/arch/x86/crypto/serpent-avx2-asm_64.S
13295+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
13296@@ -15,6 +15,7 @@
13297 */
13298
13299 #include <linux/linkage.h>
13300+#include <asm/alternative-asm.h>
13301 #include "glue_helper-asm-avx2.S"
13302
13303 .file "serpent-avx2-asm_64.S"
13304@@ -610,6 +611,7 @@ __serpent_enc_blk16:
13305 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
13306 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
13307
13308+ pax_force_retaddr
13309 ret;
13310 ENDPROC(__serpent_enc_blk16)
13311
13312@@ -664,6 +666,7 @@ __serpent_dec_blk16:
13313 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
13314 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
13315
13316+ pax_force_retaddr
13317 ret;
13318 ENDPROC(__serpent_dec_blk16)
13319
13320@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
13321
13322 vzeroupper;
13323
13324+ pax_force_retaddr
13325 ret;
13326 ENDPROC(serpent_ecb_enc_16way)
13327
13328@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
13329
13330 vzeroupper;
13331
13332+ pax_force_retaddr
13333 ret;
13334 ENDPROC(serpent_ecb_dec_16way)
13335
13336@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
13337
13338 vzeroupper;
13339
13340+ pax_force_retaddr
13341 ret;
13342 ENDPROC(serpent_cbc_dec_16way)
13343
13344@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
13345
13346 vzeroupper;
13347
13348+ pax_force_retaddr
13349 ret;
13350 ENDPROC(serpent_ctr_16way)
13351
13352@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
13353
13354 vzeroupper;
13355
13356+ pax_force_retaddr
13357 ret;
13358 ENDPROC(serpent_xts_enc_16way)
13359
13360@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
13361
13362 vzeroupper;
13363
13364+ pax_force_retaddr
13365 ret;
13366 ENDPROC(serpent_xts_dec_16way)
13367diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
13368index acc066c..1559cc4 100644
13369--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
13370+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
13371@@ -25,6 +25,7 @@
13372 */
13373
13374 #include <linux/linkage.h>
13375+#include <asm/alternative-asm.h>
13376
13377 .file "serpent-sse2-x86_64-asm_64.S"
13378 .text
13379@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
13380 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
13381 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
13382
13383+ pax_force_retaddr
13384 ret;
13385
13386 .L__enc_xor8:
13387 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
13388 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
13389
13390+ pax_force_retaddr
13391 ret;
13392 ENDPROC(__serpent_enc_blk_8way)
13393
13394@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
13395 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
13396 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
13397
13398+ pax_force_retaddr
13399 ret;
13400 ENDPROC(serpent_dec_blk_8way)
13401diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
13402index a410950..9dfe7ad 100644
13403--- a/arch/x86/crypto/sha1_ssse3_asm.S
13404+++ b/arch/x86/crypto/sha1_ssse3_asm.S
13405@@ -29,6 +29,7 @@
13406 */
13407
13408 #include <linux/linkage.h>
13409+#include <asm/alternative-asm.h>
13410
13411 #define CTX %rdi // arg1
13412 #define BUF %rsi // arg2
13413@@ -75,9 +76,9 @@
13414
13415 push %rbx
13416 push %rbp
13417- push %r12
13418+ push %r14
13419
13420- mov %rsp, %r12
13421+ mov %rsp, %r14
13422 sub $64, %rsp # allocate workspace
13423 and $~15, %rsp # align stack
13424
13425@@ -99,11 +100,12 @@
13426 xor %rax, %rax
13427 rep stosq
13428
13429- mov %r12, %rsp # deallocate workspace
13430+ mov %r14, %rsp # deallocate workspace
13431
13432- pop %r12
13433+ pop %r14
13434 pop %rbp
13435 pop %rbx
13436+ pax_force_retaddr
13437 ret
13438
13439 ENDPROC(\name)
13440diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
13441index 642f156..51a513c 100644
13442--- a/arch/x86/crypto/sha256-avx-asm.S
13443+++ b/arch/x86/crypto/sha256-avx-asm.S
13444@@ -49,6 +49,7 @@
13445
13446 #ifdef CONFIG_AS_AVX
13447 #include <linux/linkage.h>
13448+#include <asm/alternative-asm.h>
13449
13450 ## assume buffers not aligned
13451 #define VMOVDQ vmovdqu
13452@@ -460,6 +461,7 @@ done_hash:
13453 popq %r13
13454 popq %rbp
13455 popq %rbx
13456+ pax_force_retaddr
13457 ret
13458 ENDPROC(sha256_transform_avx)
13459
13460diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
13461index 9e86944..3795e6a 100644
13462--- a/arch/x86/crypto/sha256-avx2-asm.S
13463+++ b/arch/x86/crypto/sha256-avx2-asm.S
13464@@ -50,6 +50,7 @@
13465
13466 #ifdef CONFIG_AS_AVX2
13467 #include <linux/linkage.h>
13468+#include <asm/alternative-asm.h>
13469
13470 ## assume buffers not aligned
13471 #define VMOVDQ vmovdqu
13472@@ -720,6 +721,7 @@ done_hash:
13473 popq %r12
13474 popq %rbp
13475 popq %rbx
13476+ pax_force_retaddr
13477 ret
13478 ENDPROC(sha256_transform_rorx)
13479
13480diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
13481index f833b74..8c62a9e 100644
13482--- a/arch/x86/crypto/sha256-ssse3-asm.S
13483+++ b/arch/x86/crypto/sha256-ssse3-asm.S
13484@@ -47,6 +47,7 @@
13485 ########################################################################
13486
13487 #include <linux/linkage.h>
13488+#include <asm/alternative-asm.h>
13489
13490 ## assume buffers not aligned
13491 #define MOVDQ movdqu
13492@@ -471,6 +472,7 @@ done_hash:
13493 popq %rbp
13494 popq %rbx
13495
13496+ pax_force_retaddr
13497 ret
13498 ENDPROC(sha256_transform_ssse3)
13499
13500diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
13501index 974dde9..a823ff9 100644
13502--- a/arch/x86/crypto/sha512-avx-asm.S
13503+++ b/arch/x86/crypto/sha512-avx-asm.S
13504@@ -49,6 +49,7 @@
13505
13506 #ifdef CONFIG_AS_AVX
13507 #include <linux/linkage.h>
13508+#include <asm/alternative-asm.h>
13509
13510 .text
13511
13512@@ -364,6 +365,7 @@ updateblock:
13513 mov frame_RSPSAVE(%rsp), %rsp
13514
13515 nowork:
13516+ pax_force_retaddr
13517 ret
13518 ENDPROC(sha512_transform_avx)
13519
13520diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
13521index 568b961..ed20c37 100644
13522--- a/arch/x86/crypto/sha512-avx2-asm.S
13523+++ b/arch/x86/crypto/sha512-avx2-asm.S
13524@@ -51,6 +51,7 @@
13525
13526 #ifdef CONFIG_AS_AVX2
13527 #include <linux/linkage.h>
13528+#include <asm/alternative-asm.h>
13529
13530 .text
13531
13532@@ -678,6 +679,7 @@ done_hash:
13533
13534 # Restore Stack Pointer
13535 mov frame_RSPSAVE(%rsp), %rsp
13536+ pax_force_retaddr
13537 ret
13538 ENDPROC(sha512_transform_rorx)
13539
13540diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
13541index fb56855..6edd768 100644
13542--- a/arch/x86/crypto/sha512-ssse3-asm.S
13543+++ b/arch/x86/crypto/sha512-ssse3-asm.S
13544@@ -48,6 +48,7 @@
13545 ########################################################################
13546
13547 #include <linux/linkage.h>
13548+#include <asm/alternative-asm.h>
13549
13550 .text
13551
13552@@ -363,6 +364,7 @@ updateblock:
13553 mov frame_RSPSAVE(%rsp), %rsp
13554
13555 nowork:
13556+ pax_force_retaddr
13557 ret
13558 ENDPROC(sha512_transform_ssse3)
13559
13560diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
13561index 0505813..b067311 100644
13562--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
13563+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
13564@@ -24,6 +24,7 @@
13565 */
13566
13567 #include <linux/linkage.h>
13568+#include <asm/alternative-asm.h>
13569 #include "glue_helper-asm-avx.S"
13570
13571 .file "twofish-avx-x86_64-asm_64.S"
13572@@ -284,6 +285,7 @@ __twofish_enc_blk8:
13573 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
13574 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
13575
13576+ pax_force_retaddr
13577 ret;
13578 ENDPROC(__twofish_enc_blk8)
13579
13580@@ -324,6 +326,7 @@ __twofish_dec_blk8:
13581 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
13582 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
13583
13584+ pax_force_retaddr
13585 ret;
13586 ENDPROC(__twofish_dec_blk8)
13587
13588@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
13589
13590 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
13591
13592+ pax_force_retaddr
13593 ret;
13594 ENDPROC(twofish_ecb_enc_8way)
13595
13596@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
13597
13598 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13599
13600+ pax_force_retaddr
13601 ret;
13602 ENDPROC(twofish_ecb_dec_8way)
13603
13604@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
13605 * %rdx: src
13606 */
13607
13608- pushq %r12;
13609+ pushq %r14;
13610
13611 movq %rsi, %r11;
13612- movq %rdx, %r12;
13613+ movq %rdx, %r14;
13614
13615 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
13616
13617 call __twofish_dec_blk8;
13618
13619- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13620+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13621
13622- popq %r12;
13623+ popq %r14;
13624
13625+ pax_force_retaddr
13626 ret;
13627 ENDPROC(twofish_cbc_dec_8way)
13628
13629@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
13630 * %rcx: iv (little endian, 128bit)
13631 */
13632
13633- pushq %r12;
13634+ pushq %r14;
13635
13636 movq %rsi, %r11;
13637- movq %rdx, %r12;
13638+ movq %rdx, %r14;
13639
13640 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
13641 RD2, RX0, RX1, RY0);
13642
13643 call __twofish_enc_blk8;
13644
13645- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
13646+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
13647
13648- popq %r12;
13649+ popq %r14;
13650
13651+ pax_force_retaddr
13652 ret;
13653 ENDPROC(twofish_ctr_8way)
13654
13655@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
13656 /* dst <= regs xor IVs(in dst) */
13657 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
13658
13659+ pax_force_retaddr
13660 ret;
13661 ENDPROC(twofish_xts_enc_8way)
13662
13663@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
13664 /* dst <= regs xor IVs(in dst) */
13665 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13666
13667+ pax_force_retaddr
13668 ret;
13669 ENDPROC(twofish_xts_dec_8way)
13670diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
13671index 1c3b7ce..02f578d 100644
13672--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
13673+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
13674@@ -21,6 +21,7 @@
13675 */
13676
13677 #include <linux/linkage.h>
13678+#include <asm/alternative-asm.h>
13679
13680 .file "twofish-x86_64-asm-3way.S"
13681 .text
13682@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
13683 popq %r13;
13684 popq %r14;
13685 popq %r15;
13686+ pax_force_retaddr
13687 ret;
13688
13689 .L__enc_xor3:
13690@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
13691 popq %r13;
13692 popq %r14;
13693 popq %r15;
13694+ pax_force_retaddr
13695 ret;
13696 ENDPROC(__twofish_enc_blk_3way)
13697
13698@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
13699 popq %r13;
13700 popq %r14;
13701 popq %r15;
13702+ pax_force_retaddr
13703 ret;
13704 ENDPROC(twofish_dec_blk_3way)
13705diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
13706index a039d21..524b8b2 100644
13707--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
13708+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
13709@@ -22,6 +22,7 @@
13710
13711 #include <linux/linkage.h>
13712 #include <asm/asm-offsets.h>
13713+#include <asm/alternative-asm.h>
13714
13715 #define a_offset 0
13716 #define b_offset 4
13717@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
13718
13719 popq R1
13720 movq $1,%rax
13721+ pax_force_retaddr
13722 ret
13723 ENDPROC(twofish_enc_blk)
13724
13725@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
13726
13727 popq R1
13728 movq $1,%rax
13729+ pax_force_retaddr
13730 ret
13731 ENDPROC(twofish_dec_blk)
13732diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
13733index bae3aba..c1788c1 100644
13734--- a/arch/x86/ia32/ia32_aout.c
13735+++ b/arch/x86/ia32/ia32_aout.c
13736@@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
13737 unsigned long dump_start, dump_size;
13738 struct user32 dump;
13739
13740+ memset(&dump, 0, sizeof(dump));
13741+
13742 fs = get_fs();
13743 set_fs(KERNEL_DS);
13744 has_dumped = 1;
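The added memset() closes a kernel stack infoleak: aout_core_dump() initializes `dump` field by field, so padding and any members left untouched would otherwise carry stale stack bytes into the core file. The same defensive pattern in isolation, as a sketch with hypothetical names:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct foo_dump { int version; int pid; char pad[6]; }; /* hypothetical */

static int emit_dump(struct foo_dump __user *ubuf, int pid)
{
        struct foo_dump d;

        memset(&d, 0, sizeof(d));       /* clear padding + unset fields */
        d.version = 1;                  /* partial, field-by-field init */
        d.pid = pid;
        return copy_to_user(ubuf, &d, sizeof(d)) ? -EFAULT : 0;
}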
13745diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
13746index 665a730..8e7a67a 100644
13747--- a/arch/x86/ia32/ia32_signal.c
13748+++ b/arch/x86/ia32/ia32_signal.c
13749@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
13750 sp -= frame_size;
13751 /* Align the stack pointer according to the i386 ABI,
13752 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
13753- sp = ((sp + 4) & -16ul) - 4;
13754+ sp = ((sp - 12) & -16ul) - 4;
13755 return (void __user *) sp;
13756 }
13757
13758@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
13759 * These are actually not used anymore, but left because some
13760 * gdb versions depend on them as a marker.
13761 */
13762- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
13763+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
13764 } put_user_catch(err);
13765
13766 if (err)
13767@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
13768 0xb8,
13769 __NR_ia32_rt_sigreturn,
13770 0x80cd,
13771- 0,
13772+ 0
13773 };
13774
13775 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
13776@@ -461,16 +461,18 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
13777
13778 if (ksig->ka.sa.sa_flags & SA_RESTORER)
13779 restorer = ksig->ka.sa.sa_restorer;
13780+ else if (current->mm->context.vdso)
13781+ /* Return stub is in 32bit vsyscall page */
13782+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
13783 else
13784- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
13785- rt_sigreturn);
13786+ restorer = &frame->retcode;
13787 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
13788
13789 /*
13790 * Not actually used anymore, but left because some gdb
13791 * versions need it.
13792 */
13793- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
13794+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
13795 } put_user_catch(err);
13796
13797 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
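The get_sigframe() change is pure arithmetic: since sp - 12 = (sp + 4) - 16, the new expression equals the old one minus exactly 16, so the i386 ABI invariant ((sp' + 4) & 15) == 0 is preserved while the frame lands one 16-byte slot lower (the old form could return sp itself when sp was already suitably aligned). A quick userspace check of both claims:

#include <assert.h>

int main(void)
{
        unsigned long sp;

        for (sp = 0x1000; sp < 0x1040; sp++) {
                unsigned long sp_old = ((sp + 4) & -16ul) - 4;
                unsigned long sp_new = ((sp - 12) & -16ul) - 4;

                assert(((sp_old + 4) & 15) == 0);  /* ABI invariant, old form */
                assert(((sp_new + 4) & 15) == 0);  /* ABI invariant, new form */
                assert(sp_new == sp_old - 16);     /* exactly one slot lower */
        }
        return 0;
}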
13798diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
13799index 4299eb0..c0687a7 100644
13800--- a/arch/x86/ia32/ia32entry.S
13801+++ b/arch/x86/ia32/ia32entry.S
13802@@ -15,8 +15,10 @@
13803 #include <asm/irqflags.h>
13804 #include <asm/asm.h>
13805 #include <asm/smap.h>
13806+#include <asm/pgtable.h>
13807 #include <linux/linkage.h>
13808 #include <linux/err.h>
13809+#include <asm/alternative-asm.h>
13810
13811 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13812 #include <linux/elf-em.h>
13813@@ -62,12 +64,12 @@
13814 */
13815 .macro LOAD_ARGS32 offset, _r9=0
13816 .if \_r9
13817- movl \offset+16(%rsp),%r9d
13818+ movl \offset+R9(%rsp),%r9d
13819 .endif
13820- movl \offset+40(%rsp),%ecx
13821- movl \offset+48(%rsp),%edx
13822- movl \offset+56(%rsp),%esi
13823- movl \offset+64(%rsp),%edi
13824+ movl \offset+RCX(%rsp),%ecx
13825+ movl \offset+RDX(%rsp),%edx
13826+ movl \offset+RSI(%rsp),%esi
13827+ movl \offset+RDI(%rsp),%edi
13828 movl %eax,%eax /* zero extension */
13829 .endm
13830
13831@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
13832 ENDPROC(native_irq_enable_sysexit)
13833 #endif
13834
13835+ .macro pax_enter_kernel_user
13836+ pax_set_fptr_mask
13837+#ifdef CONFIG_PAX_MEMORY_UDEREF
13838+ call pax_enter_kernel_user
13839+#endif
13840+ .endm
13841+
13842+ .macro pax_exit_kernel_user
13843+#ifdef CONFIG_PAX_MEMORY_UDEREF
13844+ call pax_exit_kernel_user
13845+#endif
13846+#ifdef CONFIG_PAX_RANDKSTACK
13847+ pushq %rax
13848+ pushq %r11
13849+ call pax_randomize_kstack
13850+ popq %r11
13851+ popq %rax
13852+#endif
13853+ .endm
13854+
13855+ .macro pax_erase_kstack
13856+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13857+ call pax_erase_kstack
13858+#endif
13859+ .endm
13860+
13861 /*
13862 * 32bit SYSENTER instruction entry.
13863 *
13864@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
13865 CFI_REGISTER rsp,rbp
13866 SWAPGS_UNSAFE_STACK
13867 movq PER_CPU_VAR(kernel_stack), %rsp
13868- addq $(KERNEL_STACK_OFFSET),%rsp
13869- /*
13870- * No need to follow this irqs on/off section: the syscall
13871- * disabled irqs, here we enable it straight after entry:
13872- */
13873- ENABLE_INTERRUPTS(CLBR_NONE)
13874 movl %ebp,%ebp /* zero extension */
13875 pushq_cfi $__USER32_DS
13876 /*CFI_REL_OFFSET ss,0*/
13877@@ -135,24 +157,49 @@ ENTRY(ia32_sysenter_target)
13878 CFI_REL_OFFSET rsp,0
13879 pushfq_cfi
13880 /*CFI_REL_OFFSET rflags,0*/
13881- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
13882- CFI_REGISTER rip,r10
13883+ orl $X86_EFLAGS_IF,(%rsp)
13884+ GET_THREAD_INFO(%r11)
13885+ movl TI_sysenter_return(%r11), %r11d
13886+ CFI_REGISTER rip,r11
13887 pushq_cfi $__USER32_CS
13888 /*CFI_REL_OFFSET cs,0*/
13889 movl %eax, %eax
13890- pushq_cfi %r10
13891+ pushq_cfi %r11
13892 CFI_REL_OFFSET rip,0
13893 pushq_cfi %rax
13894 cld
13895 SAVE_ARGS 0,1,0
13896+ pax_enter_kernel_user
13897+
13898+#ifdef CONFIG_PAX_RANDKSTACK
13899+ pax_erase_kstack
13900+#endif
13901+
13902+ /*
13903+ * No need to follow this irqs on/off section: the syscall
13904+ * disabled irqs, here we enable it straight after entry:
13905+ */
13906+ ENABLE_INTERRUPTS(CLBR_NONE)
13907 /* no need to do an access_ok check here because rbp has been
13908 32bit zero extended */
13909+
13910+#ifdef CONFIG_PAX_MEMORY_UDEREF
13911+ addq pax_user_shadow_base,%rbp
13912+ ASM_PAX_OPEN_USERLAND
13913+#endif
13914+
13915 ASM_STAC
13916 1: movl (%rbp),%ebp
13917 _ASM_EXTABLE(1b,ia32_badarg)
13918 ASM_CLAC
13919- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13920- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13921+
13922+#ifdef CONFIG_PAX_MEMORY_UDEREF
13923+ ASM_PAX_CLOSE_USERLAND
13924+#endif
13925+
13926+ GET_THREAD_INFO(%r11)
13927+ orl $TS_COMPAT,TI_status(%r11)
13928+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
13929 CFI_REMEMBER_STATE
13930 jnz sysenter_tracesys
13931 cmpq $(IA32_NR_syscalls-1),%rax
13932@@ -162,15 +209,18 @@ sysenter_do_call:
13933 sysenter_dispatch:
13934 call *ia32_sys_call_table(,%rax,8)
13935 movq %rax,RAX-ARGOFFSET(%rsp)
13936+ GET_THREAD_INFO(%r11)
13937 DISABLE_INTERRUPTS(CLBR_NONE)
13938 TRACE_IRQS_OFF
13939- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13940+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
13941 jnz sysexit_audit
13942 sysexit_from_sys_call:
13943- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13944+ pax_exit_kernel_user
13945+ pax_erase_kstack
13946+ andl $~TS_COMPAT,TI_status(%r11)
13947 /* clear IF, that popfq doesn't enable interrupts early */
13948- andl $~0x200,EFLAGS-R11(%rsp)
13949- movl RIP-R11(%rsp),%edx /* User %eip */
13950+ andl $~X86_EFLAGS_IF,EFLAGS(%rsp)
13951+ movl RIP(%rsp),%edx /* User %eip */
13952 CFI_REGISTER rip,rdx
13953 RESTORE_ARGS 0,24,0,0,0,0
13954 xorq %r8,%r8
13955@@ -193,6 +243,9 @@ sysexit_from_sys_call:
13956 movl %eax,%esi /* 2nd arg: syscall number */
13957 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
13958 call __audit_syscall_entry
13959+
13960+ pax_erase_kstack
13961+
13962 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
13963 cmpq $(IA32_NR_syscalls-1),%rax
13964 ja ia32_badsys
13965@@ -204,7 +257,7 @@ sysexit_from_sys_call:
13966 .endm
13967
13968 .macro auditsys_exit exit
13969- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13970+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
13971 jnz ia32_ret_from_sys_call
13972 TRACE_IRQS_ON
13973 ENABLE_INTERRUPTS(CLBR_NONE)
13974@@ -215,11 +268,12 @@ sysexit_from_sys_call:
13975 1: setbe %al /* 1 if error, 0 if not */
13976 movzbl %al,%edi /* zero-extend that into %edi */
13977 call __audit_syscall_exit
13978+ GET_THREAD_INFO(%r11)
13979 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
13980 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
13981 DISABLE_INTERRUPTS(CLBR_NONE)
13982 TRACE_IRQS_OFF
13983- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13984+ testl %edi,TI_flags(%r11)
13985 jz \exit
13986 CLEAR_RREGS -ARGOFFSET
13987 jmp int_with_check
13988@@ -237,7 +291,7 @@ sysexit_audit:
13989
13990 sysenter_tracesys:
13991 #ifdef CONFIG_AUDITSYSCALL
13992- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13993+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
13994 jz sysenter_auditsys
13995 #endif
13996 SAVE_REST
13997@@ -249,6 +303,9 @@ sysenter_tracesys:
13998 RESTORE_REST
13999 cmpq $(IA32_NR_syscalls-1),%rax
14000 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
14001+
14002+ pax_erase_kstack
14003+
14004 jmp sysenter_do_call
14005 CFI_ENDPROC
14006 ENDPROC(ia32_sysenter_target)
14007@@ -276,19 +333,25 @@ ENDPROC(ia32_sysenter_target)
14008 ENTRY(ia32_cstar_target)
14009 CFI_STARTPROC32 simple
14010 CFI_SIGNAL_FRAME
14011- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14012+ CFI_DEF_CFA rsp,0
14013 CFI_REGISTER rip,rcx
14014 /*CFI_REGISTER rflags,r11*/
14015 SWAPGS_UNSAFE_STACK
14016 movl %esp,%r8d
14017 CFI_REGISTER rsp,r8
14018 movq PER_CPU_VAR(kernel_stack),%rsp
14019+ SAVE_ARGS 8*6,0,0
14020+ pax_enter_kernel_user
14021+
14022+#ifdef CONFIG_PAX_RANDKSTACK
14023+ pax_erase_kstack
14024+#endif
14025+
14026 /*
14027 * No need to follow this irqs on/off section: the syscall
14028 * disabled irqs and here we enable it straight after entry:
14029 */
14030 ENABLE_INTERRUPTS(CLBR_NONE)
14031- SAVE_ARGS 8,0,0
14032 movl %eax,%eax /* zero extension */
14033 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14034 movq %rcx,RIP-ARGOFFSET(%rsp)
14035@@ -304,12 +367,25 @@ ENTRY(ia32_cstar_target)
14036 /* no need to do an access_ok check here because r8 has been
14037 32bit zero extended */
14038 /* hardware stack frame is complete now */
14039+
14040+#ifdef CONFIG_PAX_MEMORY_UDEREF
14041+ ASM_PAX_OPEN_USERLAND
14042+ movq pax_user_shadow_base,%r8
14043+ addq RSP-ARGOFFSET(%rsp),%r8
14044+#endif
14045+
14046 ASM_STAC
14047 1: movl (%r8),%r9d
14048 _ASM_EXTABLE(1b,ia32_badarg)
14049 ASM_CLAC
14050- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14051- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14052+
14053+#ifdef CONFIG_PAX_MEMORY_UDEREF
14054+ ASM_PAX_CLOSE_USERLAND
14055+#endif
14056+
14057+ GET_THREAD_INFO(%r11)
14058+ orl $TS_COMPAT,TI_status(%r11)
14059+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14060 CFI_REMEMBER_STATE
14061 jnz cstar_tracesys
14062 cmpq $IA32_NR_syscalls-1,%rax
14063@@ -319,13 +395,16 @@ cstar_do_call:
14064 cstar_dispatch:
14065 call *ia32_sys_call_table(,%rax,8)
14066 movq %rax,RAX-ARGOFFSET(%rsp)
14067+ GET_THREAD_INFO(%r11)
14068 DISABLE_INTERRUPTS(CLBR_NONE)
14069 TRACE_IRQS_OFF
14070- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14071+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14072 jnz sysretl_audit
14073 sysretl_from_sys_call:
14074- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14075- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
14076+ pax_exit_kernel_user
14077+ pax_erase_kstack
14078+ andl $~TS_COMPAT,TI_status(%r11)
14079+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
14080 movl RIP-ARGOFFSET(%rsp),%ecx
14081 CFI_REGISTER rip,rcx
14082 movl EFLAGS-ARGOFFSET(%rsp),%r11d
14083@@ -352,7 +431,7 @@ sysretl_audit:
14084
14085 cstar_tracesys:
14086 #ifdef CONFIG_AUDITSYSCALL
14087- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14088+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14089 jz cstar_auditsys
14090 #endif
14091 xchgl %r9d,%ebp
14092@@ -366,11 +445,19 @@ cstar_tracesys:
14093 xchgl %ebp,%r9d
14094 cmpq $(IA32_NR_syscalls-1),%rax
14095 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
14096+
14097+ pax_erase_kstack
14098+
14099 jmp cstar_do_call
14100 END(ia32_cstar_target)
14101
14102 ia32_badarg:
14103 ASM_CLAC
14104+
14105+#ifdef CONFIG_PAX_MEMORY_UDEREF
14106+ ASM_PAX_CLOSE_USERLAND
14107+#endif
14108+
14109 movq $-EFAULT,%rax
14110 jmp ia32_sysret
14111 CFI_ENDPROC
14112@@ -407,19 +494,26 @@ ENTRY(ia32_syscall)
14113 CFI_REL_OFFSET rip,RIP-RIP
14114 PARAVIRT_ADJUST_EXCEPTION_FRAME
14115 SWAPGS
14116- /*
14117- * No need to follow this irqs on/off section: the syscall
14118- * disabled irqs and here we enable it straight after entry:
14119- */
14120- ENABLE_INTERRUPTS(CLBR_NONE)
14121 movl %eax,%eax
14122 pushq_cfi %rax
14123 cld
14124 /* note the registers are not zero extended to the sf.
14125 this could be a problem. */
14126 SAVE_ARGS 0,1,0
14127- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14128- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14129+ pax_enter_kernel_user
14130+
14131+#ifdef CONFIG_PAX_RANDKSTACK
14132+ pax_erase_kstack
14133+#endif
14134+
14135+ /*
14136+ * No need to follow this irqs on/off section: the syscall
14137+ * disabled irqs and here we enable it straight after entry:
14138+ */
14139+ ENABLE_INTERRUPTS(CLBR_NONE)
14140+ GET_THREAD_INFO(%r11)
14141+ orl $TS_COMPAT,TI_status(%r11)
14142+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14143 jnz ia32_tracesys
14144 cmpq $(IA32_NR_syscalls-1),%rax
14145 ja ia32_badsys
14146@@ -442,6 +536,9 @@ ia32_tracesys:
14147 RESTORE_REST
14148 cmpq $(IA32_NR_syscalls-1),%rax
14149 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
14150+
14151+ pax_erase_kstack
14152+
14153 jmp ia32_do_call
14154 END(ia32_syscall)
14155
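Most of the ia32entry.S churn follows two rules: thread_info access moves from %rsp-relative arithmetic (THREAD_INFO(%rsp,...)) to an explicit GET_THREAD_INFO(%r11), and every userland load on the entry path is bracketed by ASM_PAX_OPEN_USERLAND/ASM_PAX_CLOSE_USERLAND with the pointer rebased through pax_user_shadow_base. Assuming pax_user_shadow_base is the offset at which UDEREF shadow-maps userland for kernel use, the patched `movl (%rbp),%ebp` amounts to the following sketch (kernel types assumed; the real code stays in asm, with faults routed to ia32_badarg):

static inline u32 uderef_load_u32(const void __user *uptr)
{
        unsigned long shadowed = (unsigned long)uptr + pax_user_shadow_base;

        /* ASM_PAX_OPEN_USERLAND ... */
        return *(const u32 *)shadowed;  /* a fault here lands in ia32_badarg */
        /* ... ASM_PAX_CLOSE_USERLAND */
}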
14156diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
14157index 8e0ceec..af13504 100644
14158--- a/arch/x86/ia32/sys_ia32.c
14159+++ b/arch/x86/ia32/sys_ia32.c
14160@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
14161 */
14162 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
14163 {
14164- typeof(ubuf->st_uid) uid = 0;
14165- typeof(ubuf->st_gid) gid = 0;
14166+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
14167+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
14168 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
14169 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
14170 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
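The cp_stat64() change derives the member types from the struct definition alone, so no expression mentioning the __user pointer ubuf is formed; it is the same null-pointer trick offsetof() uses. As a generic helper (hypothetical name):

#define member_type(type, member) typeof(((type *)0)->member)

member_type(struct stat64, st_uid) uid = 0;     /* same declaration as above */
member_type(struct stat64, st_gid) gid = 0;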
14171diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
14172index 372231c..51b537d 100644
14173--- a/arch/x86/include/asm/alternative-asm.h
14174+++ b/arch/x86/include/asm/alternative-asm.h
14175@@ -18,6 +18,45 @@
14176 .endm
14177 #endif
14178
14179+#ifdef KERNEXEC_PLUGIN
14180+ .macro pax_force_retaddr_bts rip=0
14181+ btsq $63,\rip(%rsp)
14182+ .endm
14183+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
14184+ .macro pax_force_retaddr rip=0, reload=0
14185+ btsq $63,\rip(%rsp)
14186+ .endm
14187+ .macro pax_force_fptr ptr
14188+ btsq $63,\ptr
14189+ .endm
14190+ .macro pax_set_fptr_mask
14191+ .endm
14192+#endif
14193+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
14194+ .macro pax_force_retaddr rip=0, reload=0
14195+ .if \reload
14196+ pax_set_fptr_mask
14197+ .endif
14198+ orq %r12,\rip(%rsp)
14199+ .endm
14200+ .macro pax_force_fptr ptr
14201+ orq %r12,\ptr
14202+ .endm
14203+ .macro pax_set_fptr_mask
14204+ movabs $0x8000000000000000,%r12
14205+ .endm
14206+#endif
14207+#else
14208+ .macro pax_force_retaddr rip=0, reload=0
14209+ .endm
14210+ .macro pax_force_fptr ptr
14211+ .endm
14212+ .macro pax_force_retaddr_bts rip=0
14213+ .endm
14214+ .macro pax_set_fptr_mask
14215+ .endm
14216+#endif
14217+
14218 .macro altinstruction_entry orig alt feature orig_len alt_len
14219 .long \orig - .
14220 .long \alt - .
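This hunk defines the macros used throughout the crypto assembly earlier in the patch. KERNEXEC offers two interchangeable return-address protections: the BTS method sets bit 63 in place in the stack slot, while the OR method ors in a mask kept resident in %r12 (loaded once by pax_set_fptr_mask via movabs), which is why %r12 had to be retired as a scratch register in the crypto code; without the plugin every macro expands to nothing. Both methods compute the same value, as this sketch shows:

#define PAX_FPTR_MASK 0x8000000000000000UL      /* the movabs constant above */

static unsigned long bts_method(unsigned long ra)
{
        return ra | (1UL << 63);        /* btsq $63, slot */
}

static unsigned long or_method(unsigned long ra)
{
        return ra | PAX_FPTR_MASK;      /* orq %r12, slot */
}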
14221diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
14222index 0a3f9c9..c9d081d 100644
14223--- a/arch/x86/include/asm/alternative.h
14224+++ b/arch/x86/include/asm/alternative.h
14225@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
14226 ".pushsection .discard,\"aw\",@progbits\n" \
14227 DISCARD_ENTRY(1) \
14228 ".popsection\n" \
14229- ".pushsection .altinstr_replacement, \"ax\"\n" \
14230+ ".pushsection .altinstr_replacement, \"a\"\n" \
14231 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
14232 ".popsection"
14233
14234@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
14235 DISCARD_ENTRY(1) \
14236 DISCARD_ENTRY(2) \
14237 ".popsection\n" \
14238- ".pushsection .altinstr_replacement, \"ax\"\n" \
14239+ ".pushsection .altinstr_replacement, \"a\"\n" \
14240 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
14241 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
14242 ".popsection"
14243diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
14244index 1d2091a..f5074c1 100644
14245--- a/arch/x86/include/asm/apic.h
14246+++ b/arch/x86/include/asm/apic.h
14247@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
14248
14249 #ifdef CONFIG_X86_LOCAL_APIC
14250
14251-extern unsigned int apic_verbosity;
14252+extern int apic_verbosity;
14253 extern int local_apic_timer_c2_ok;
14254
14255 extern int disable_apic;
14256diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
14257index 20370c6..a2eb9b0 100644
14258--- a/arch/x86/include/asm/apm.h
14259+++ b/arch/x86/include/asm/apm.h
14260@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
14261 __asm__ __volatile__(APM_DO_ZERO_SEGS
14262 "pushl %%edi\n\t"
14263 "pushl %%ebp\n\t"
14264- "lcall *%%cs:apm_bios_entry\n\t"
14265+ "lcall *%%ss:apm_bios_entry\n\t"
14266 "setc %%al\n\t"
14267 "popl %%ebp\n\t"
14268 "popl %%edi\n\t"
14269@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
14270 __asm__ __volatile__(APM_DO_ZERO_SEGS
14271 "pushl %%edi\n\t"
14272 "pushl %%ebp\n\t"
14273- "lcall *%%cs:apm_bios_entry\n\t"
14274+ "lcall *%%ss:apm_bios_entry\n\t"
14275 "setc %%bl\n\t"
14276 "popl %%ebp\n\t"
14277 "popl %%edi\n\t"
14278diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
14279index 722aa3b..c392d85 100644
14280--- a/arch/x86/include/asm/atomic.h
14281+++ b/arch/x86/include/asm/atomic.h
14282@@ -22,7 +22,18 @@
14283 */
14284 static inline int atomic_read(const atomic_t *v)
14285 {
14286- return (*(volatile int *)&(v)->counter);
14287+ return (*(volatile const int *)&(v)->counter);
14288+}
14289+
14290+/**
14291+ * atomic_read_unchecked - read atomic variable
14292+ * @v: pointer of type atomic_unchecked_t
14293+ *
14294+ * Atomically reads the value of @v.
14295+ */
14296+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
14297+{
14298+ return (*(volatile const int *)&(v)->counter);
14299 }
14300
14301 /**
14302@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
14303 }
14304
14305 /**
14306+ * atomic_set_unchecked - set atomic variable
14307+ * @v: pointer of type atomic_unchecked_t
14308+ * @i: required value
14309+ *
14310+ * Atomically sets the value of @v to @i.
14311+ */
14312+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
14313+{
14314+ v->counter = i;
14315+}
14316+
14317+/**
14318 * atomic_add - add integer to atomic variable
14319 * @i: integer value to add
14320 * @v: pointer of type atomic_t
14321@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
14322 */
14323 static inline void atomic_add(int i, atomic_t *v)
14324 {
14325- asm volatile(LOCK_PREFIX "addl %1,%0"
14326+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
14327+
14328+#ifdef CONFIG_PAX_REFCOUNT
14329+ "jno 0f\n"
14330+ LOCK_PREFIX "subl %1,%0\n"
14331+ "int $4\n0:\n"
14332+ _ASM_EXTABLE(0b, 0b)
14333+#endif
14334+
14335+ : "+m" (v->counter)
14336+ : "ir" (i));
14337+}
14338+
14339+/**
14340+ * atomic_add_unchecked - add integer to atomic variable
14341+ * @i: integer value to add
14342+ * @v: pointer of type atomic_unchecked_t
14343+ *
14344+ * Atomically adds @i to @v.
14345+ */
14346+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
14347+{
14348+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
14349 : "+m" (v->counter)
14350 : "ir" (i));
14351 }
14352@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
14353 */
14354 static inline void atomic_sub(int i, atomic_t *v)
14355 {
14356- asm volatile(LOCK_PREFIX "subl %1,%0"
14357+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
14358+
14359+#ifdef CONFIG_PAX_REFCOUNT
14360+ "jno 0f\n"
14361+ LOCK_PREFIX "addl %1,%0\n"
14362+ "int $4\n0:\n"
14363+ _ASM_EXTABLE(0b, 0b)
14364+#endif
14365+
14366+ : "+m" (v->counter)
14367+ : "ir" (i));
14368+}
14369+
14370+/**
14371+ * atomic_sub_unchecked - subtract integer from atomic variable
14372+ * @i: integer value to subtract
14373+ * @v: pointer of type atomic_unchecked_t
14374+ *
14375+ * Atomically subtracts @i from @v.
14376+ */
14377+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
14378+{
14379+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
14380 : "+m" (v->counter)
14381 : "ir" (i));
14382 }
14383@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
14384 {
14385 unsigned char c;
14386
14387- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
14388+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
14389+
14390+#ifdef CONFIG_PAX_REFCOUNT
14391+ "jno 0f\n"
14392+ LOCK_PREFIX "addl %2,%0\n"
14393+ "int $4\n0:\n"
14394+ _ASM_EXTABLE(0b, 0b)
14395+#endif
14396+
14397+ "sete %1\n"
14398 : "+m" (v->counter), "=qm" (c)
14399 : "ir" (i) : "memory");
14400 return c;
14401@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
14402 */
14403 static inline void atomic_inc(atomic_t *v)
14404 {
14405- asm volatile(LOCK_PREFIX "incl %0"
14406+ asm volatile(LOCK_PREFIX "incl %0\n"
14407+
14408+#ifdef CONFIG_PAX_REFCOUNT
14409+ "jno 0f\n"
14410+ LOCK_PREFIX "decl %0\n"
14411+ "int $4\n0:\n"
14412+ _ASM_EXTABLE(0b, 0b)
14413+#endif
14414+
14415+ : "+m" (v->counter));
14416+}
14417+
14418+/**
14419+ * atomic_inc_unchecked - increment atomic variable
14420+ * @v: pointer of type atomic_unchecked_t
14421+ *
14422+ * Atomically increments @v by 1.
14423+ */
14424+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
14425+{
14426+ asm volatile(LOCK_PREFIX "incl %0\n"
14427 : "+m" (v->counter));
14428 }
14429
14430@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
14431 */
14432 static inline void atomic_dec(atomic_t *v)
14433 {
14434- asm volatile(LOCK_PREFIX "decl %0"
14435+ asm volatile(LOCK_PREFIX "decl %0\n"
14436+
14437+#ifdef CONFIG_PAX_REFCOUNT
14438+ "jno 0f\n"
14439+ LOCK_PREFIX "incl %0\n"
14440+ "int $4\n0:\n"
14441+ _ASM_EXTABLE(0b, 0b)
14442+#endif
14443+
14444+ : "+m" (v->counter));
14445+}
14446+
14447+/**
14448+ * atomic_dec_unchecked - decrement atomic variable
14449+ * @v: pointer of type atomic_unchecked_t
14450+ *
14451+ * Atomically decrements @v by 1.
14452+ */
14453+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
14454+{
14455+ asm volatile(LOCK_PREFIX "decl %0\n"
14456 : "+m" (v->counter));
14457 }
14458
14459@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
14460 {
14461 unsigned char c;
14462
14463- asm volatile(LOCK_PREFIX "decl %0; sete %1"
14464+ asm volatile(LOCK_PREFIX "decl %0\n"
14465+
14466+#ifdef CONFIG_PAX_REFCOUNT
14467+ "jno 0f\n"
14468+ LOCK_PREFIX "incl %0\n"
14469+ "int $4\n0:\n"
14470+ _ASM_EXTABLE(0b, 0b)
14471+#endif
14472+
14473+ "sete %1\n"
14474 : "+m" (v->counter), "=qm" (c)
14475 : : "memory");
14476 return c != 0;
14477@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
14478 {
14479 unsigned char c;
14480
14481- asm volatile(LOCK_PREFIX "incl %0; sete %1"
14482+ asm volatile(LOCK_PREFIX "incl %0\n"
14483+
14484+#ifdef CONFIG_PAX_REFCOUNT
14485+ "jno 0f\n"
14486+ LOCK_PREFIX "decl %0\n"
14487+ "int $4\n0:\n"
14488+ _ASM_EXTABLE(0b, 0b)
14489+#endif
14490+
14491+ "sete %1\n"
14492+ : "+m" (v->counter), "=qm" (c)
14493+ : : "memory");
14494+ return c != 0;
14495+}
14496+
14497+/**
14498+ * atomic_inc_and_test_unchecked - increment and test
14499+ * @v: pointer of type atomic_unchecked_t
14500+ *
14501+ * Atomically increments @v by 1
14502+ * and returns true if the result is zero, or false for all
14503+ * other cases.
14504+ */
14505+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
14506+{
14507+ unsigned char c;
14508+
14509+ asm volatile(LOCK_PREFIX "incl %0\n"
14510+ "sete %1\n"
14511 : "+m" (v->counter), "=qm" (c)
14512 : : "memory");
14513 return c != 0;
14514@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
14515 {
14516 unsigned char c;
14517
14518- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
14519+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
14520+
14521+#ifdef CONFIG_PAX_REFCOUNT
14522+ "jno 0f\n"
14523+ LOCK_PREFIX "subl %2,%0\n"
14524+ "int $4\n0:\n"
14525+ _ASM_EXTABLE(0b, 0b)
14526+#endif
14527+
14528+ "sets %1\n"
14529 : "+m" (v->counter), "=qm" (c)
14530 : "ir" (i) : "memory");
14531 return c;
14532@@ -172,6 +334,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
14533 */
14534 static inline int atomic_add_return(int i, atomic_t *v)
14535 {
14536+ return i + xadd_check_overflow(&v->counter, i);
14537+}
14538+
14539+/**
14540+ * atomic_add_return_unchecked - add integer and return
14541+ * @i: integer value to add
14542+ * @v: pointer of type atomic_unchecked_t
14543+ *
14544+ * Atomically adds @i to @v and returns @i + @v
14545+ */
14546+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
14547+{
14548 return i + xadd(&v->counter, i);
14549 }
14550
14551@@ -188,9 +362,18 @@ static inline int atomic_sub_return(int i, atomic_t *v)
14552 }
14553
14554 #define atomic_inc_return(v) (atomic_add_return(1, v))
14555+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
14556+{
14557+ return atomic_add_return_unchecked(1, v);
14558+}
14559 #define atomic_dec_return(v) (atomic_sub_return(1, v))
14560
14561-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
14562+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
14563+{
14564+ return cmpxchg(&v->counter, old, new);
14565+}
14566+
14567+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
14568 {
14569 return cmpxchg(&v->counter, old, new);
14570 }
14571@@ -200,6 +383,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
14572 return xchg(&v->counter, new);
14573 }
14574
14575+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
14576+{
14577+ return xchg(&v->counter, new);
14578+}
14579+
14580 /**
14581 * __atomic_add_unless - add unless the number is already a given value
14582 * @v: pointer of type atomic_t
14583@@ -209,14 +397,27 @@ static inline int atomic_xchg(atomic_t *v, int new)
14584 * Atomically adds @a to @v, so long as @v was not already @u.
14585 * Returns the old value of @v.
14586 */
14587-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
14588+static inline int __intentional_overflow(-1) __atomic_add_unless(atomic_t *v, int a, int u)
14589 {
14590- int c, old;
14591+ int c, old, new;
14592 c = atomic_read(v);
14593 for (;;) {
14594- if (unlikely(c == (u)))
14595+ if (unlikely(c == u))
14596 break;
14597- old = atomic_cmpxchg((v), c, c + (a));
14598+
14599+ asm volatile("addl %2,%0\n"
14600+
14601+#ifdef CONFIG_PAX_REFCOUNT
14602+ "jno 0f\n"
14603+ "subl %2,%0\n"
14604+ "int $4\n0:\n"
14605+ _ASM_EXTABLE(0b, 0b)
14606+#endif
14607+
14608+ : "=r" (new)
14609+ : "0" (c), "ir" (a));
14610+
14611+ old = atomic_cmpxchg(v, c, new);
14612 if (likely(old == c))
14613 break;
14614 c = old;
14615@@ -225,6 +426,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
14616 }
14617
14618 /**
14619+ * atomic_inc_not_zero_hint - increment if not null
14620+ * @v: pointer of type atomic_t
14621+ * @hint: probable value of the atomic before the increment
14622+ *
14623+ * This version of atomic_inc_not_zero() gives a hint of probable
14624+ * value of the atomic. This helps processor to not read the memory
14625+ * before doing the atomic read/modify/write cycle, lowering
14626+ * number of bus transactions on some arches.
14627+ *
14628+ * Returns: 0 if increment was not done, 1 otherwise.
14629+ */
14630+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
14631+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
14632+{
14633+ int val, c = hint, new;
14634+
14635+ /* sanity test, should be removed by compiler if hint is a constant */
14636+ if (!hint)
14637+ return __atomic_add_unless(v, 1, 0);
14638+
14639+ do {
14640+ asm volatile("incl %0\n"
14641+
14642+#ifdef CONFIG_PAX_REFCOUNT
14643+ "jno 0f\n"
14644+ "decl %0\n"
14645+ "int $4\n0:\n"
14646+ _ASM_EXTABLE(0b, 0b)
14647+#endif
14648+
14649+ : "=r" (new)
14650+ : "0" (c));
14651+
14652+ val = atomic_cmpxchg(v, c, new);
14653+ if (val == c)
14654+ return 1;
14655+ c = val;
14656+ } while (c);
14657+
14658+ return 0;
14659+}
14660+
14661+/**
14662 * atomic_inc_short - increment of a short integer
14663 * @v: pointer to type int
14664 *
14665@@ -253,14 +497,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
14666 #endif
14667
14668 /* These are x86-specific, used by some header files */
14669-#define atomic_clear_mask(mask, addr) \
14670- asm volatile(LOCK_PREFIX "andl %0,%1" \
14671- : : "r" (~(mask)), "m" (*(addr)) : "memory")
14672+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
14673+{
14674+ asm volatile(LOCK_PREFIX "andl %1,%0"
14675+ : "+m" (v->counter)
14676+ : "r" (~(mask))
14677+ : "memory");
14678+}
14679
14680-#define atomic_set_mask(mask, addr) \
14681- asm volatile(LOCK_PREFIX "orl %0,%1" \
14682- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
14683- : "memory")
14684+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
14685+{
14686+ asm volatile(LOCK_PREFIX "andl %1,%0"
14687+ : "+m" (v->counter)
14688+ : "r" (~(mask))
14689+ : "memory");
14690+}
14691+
14692+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
14693+{
14694+ asm volatile(LOCK_PREFIX "orl %1,%0"
14695+ : "+m" (v->counter)
14696+ : "r" (mask)
14697+ : "memory");
14698+}
14699+
14700+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
14701+{
14702+ asm volatile(LOCK_PREFIX "orl %1,%0"
14703+ : "+m" (v->counter)
14704+ : "r" (mask)
14705+ : "memory");
14706+}
14707
14708 /* Atomic operations are already serializing on x86 */
14709 #define smp_mb__before_atomic_dec() barrier()
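Every checked operation in this header follows one PAX_REFCOUNT template: perform the locked arithmetic, jno past the fixup when no signed overflow occurred, otherwise undo the operation and execute int $4 so the overflow handler can treat the event as a refcount attack. Distilled into a single helper (a sketch reusing the header's own LOCK_PREFIX/_ASM_EXTABLE macros; the real code inlines this per operation):

static inline void refcount_style_inc(atomic_t *v)
{
        asm volatile(LOCK_PREFIX "incl %0\n"

#ifdef CONFIG_PAX_REFCOUNT
                     "jno 0f\n"                 /* no overflow: done */
                     LOCK_PREFIX "decl %0\n"    /* undo the increment */
                     "int $4\n0:\n"             /* raise #OF for the handler */
                     _ASM_EXTABLE(0b, 0b)       /* resume after the trap */
#endif

                     : "+m" (v->counter));
}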
14710diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
14711index b154de7..bf18a5a 100644
14712--- a/arch/x86/include/asm/atomic64_32.h
14713+++ b/arch/x86/include/asm/atomic64_32.h
14714@@ -12,6 +12,14 @@ typedef struct {
14715 u64 __aligned(8) counter;
14716 } atomic64_t;
14717
14718+#ifdef CONFIG_PAX_REFCOUNT
14719+typedef struct {
14720+ u64 __aligned(8) counter;
14721+} atomic64_unchecked_t;
14722+#else
14723+typedef atomic64_t atomic64_unchecked_t;
14724+#endif
14725+
14726 #define ATOMIC64_INIT(val) { (val) }
14727
14728 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
14729@@ -37,21 +45,31 @@ typedef struct {
14730 ATOMIC64_DECL_ONE(sym##_386)
14731
14732 ATOMIC64_DECL_ONE(add_386);
14733+ATOMIC64_DECL_ONE(add_unchecked_386);
14734 ATOMIC64_DECL_ONE(sub_386);
14735+ATOMIC64_DECL_ONE(sub_unchecked_386);
14736 ATOMIC64_DECL_ONE(inc_386);
14737+ATOMIC64_DECL_ONE(inc_unchecked_386);
14738 ATOMIC64_DECL_ONE(dec_386);
14739+ATOMIC64_DECL_ONE(dec_unchecked_386);
14740 #endif
14741
14742 #define alternative_atomic64(f, out, in...) \
14743 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
14744
14745 ATOMIC64_DECL(read);
14746+ATOMIC64_DECL(read_unchecked);
14747 ATOMIC64_DECL(set);
14748+ATOMIC64_DECL(set_unchecked);
14749 ATOMIC64_DECL(xchg);
14750 ATOMIC64_DECL(add_return);
14751+ATOMIC64_DECL(add_return_unchecked);
14752 ATOMIC64_DECL(sub_return);
14753+ATOMIC64_DECL(sub_return_unchecked);
14754 ATOMIC64_DECL(inc_return);
14755+ATOMIC64_DECL(inc_return_unchecked);
14756 ATOMIC64_DECL(dec_return);
14757+ATOMIC64_DECL(dec_return_unchecked);
14758 ATOMIC64_DECL(dec_if_positive);
14759 ATOMIC64_DECL(inc_not_zero);
14760 ATOMIC64_DECL(add_unless);
14761@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
14762 }
14763
14764 /**
14765+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
14766+ * @p: pointer to type atomic64_unchecked_t
14767+ * @o: expected value
14768+ * @n: new value
14769+ *
14770+ * Atomically sets @v to @n if it was equal to @o and returns
14771+ * the old value.
14772+ */
14773+
14774+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
14775+{
14776+ return cmpxchg64(&v->counter, o, n);
14777+}
14778+
14779+/**
14780 * atomic64_xchg - xchg atomic64 variable
14781 * @v: pointer to type atomic64_t
14782 * @n: value to assign
14783@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
14784 }
14785
14786 /**
14787+ * atomic64_set_unchecked - set atomic64 variable
14788+ * @v: pointer to type atomic64_unchecked_t
14789+ * @n: value to assign
14790+ *
14791+ * Atomically sets the value of @v to @n.
14792+ */
14793+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
14794+{
14795+ unsigned high = (unsigned)(i >> 32);
14796+ unsigned low = (unsigned)i;
14797+ alternative_atomic64(set, /* no output */,
14798+ "S" (v), "b" (low), "c" (high)
14799+ : "eax", "edx", "memory");
14800+}
14801+
14802+/**
14803 * atomic64_read - read atomic64 variable
14804 * @v: pointer to type atomic64_t
14805 *
14806@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
14807 }
14808
14809 /**
14810+ * atomic64_read_unchecked - read atomic64 variable
14811+ * @v: pointer to type atomic64_unchecked_t
14812+ *
14813+ * Atomically reads the value of @v and returns it.
14814+ */
14815+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
14816+{
14817+ long long r;
14818+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
14819+ return r;
14820+ }
14821+
14822+/**
14823 * atomic64_add_return - add and return
14824 * @i: integer value to add
14825 * @v: pointer to type atomic64_t
14826@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
14827 return i;
14828 }
14829
14830+/**
14831+ * atomic64_add_return_unchecked - add and return
14832+ * @i: integer value to add
14833+ * @v: pointer to type atomic64_unchecked_t
14834+ *
14835+ * Atomically adds @i to @v and returns @i + *@v
14836+ */
14837+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
14838+{
14839+ alternative_atomic64(add_return_unchecked,
14840+ ASM_OUTPUT2("+A" (i), "+c" (v)),
14841+ ASM_NO_INPUT_CLOBBER("memory"));
14842+ return i;
14843+}
14844+
14845 /*
14846 * Other variants with different arithmetic operators:
14847 */
14848@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
14849 return a;
14850 }
14851
14852+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
14853+{
14854+ long long a;
14855+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
14856+ "S" (v) : "memory", "ecx");
14857+ return a;
14858+}
14859+
14860 static inline long long atomic64_dec_return(atomic64_t *v)
14861 {
14862 long long a;
14863@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
14864 }
14865
14866 /**
14867+ * atomic64_add_unchecked - add integer to atomic64 variable
14868+ * @i: integer value to add
14869+ * @v: pointer to type atomic64_unchecked_t
14870+ *
14871+ * Atomically adds @i to @v.
14872+ */
14873+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
14874+{
14875+ __alternative_atomic64(add_unchecked, add_return_unchecked,
14876+ ASM_OUTPUT2("+A" (i), "+c" (v)),
14877+ ASM_NO_INPUT_CLOBBER("memory"));
14878+ return i;
14879+}
14880+
14881+/**
14882 * atomic64_sub - subtract the atomic64 variable
14883 * @i: integer value to subtract
14884 * @v: pointer to type atomic64_t
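atomic64_unchecked_t deliberately opts out of the overflow trap: with CONFIG_PAX_REFCOUNT it is a distinct type, so checked and unchecked counters cannot be mixed by accident, and without it the whole family collapses back to atomic64_t. The intended use is a counter whose wraparound is harmless, e.g. (hypothetical example):

static atomic64_unchecked_t rx_bytes = ATOMIC64_INIT(0);

static void account_rx(unsigned long len)
{
        atomic64_add_unchecked(len, &rx_bytes); /* stats: wraparound is benign */
}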
14885diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
14886index 0e1cbfc..a891fc7 100644
14887--- a/arch/x86/include/asm/atomic64_64.h
14888+++ b/arch/x86/include/asm/atomic64_64.h
14889@@ -18,7 +18,19 @@
14890 */
14891 static inline long atomic64_read(const atomic64_t *v)
14892 {
14893- return (*(volatile long *)&(v)->counter);
14894+ return (*(volatile const long *)&(v)->counter);
14895+}
14896+
14897+/**
14898+ * atomic64_read_unchecked - read atomic64 variable
14899+ * @v: pointer of type atomic64_unchecked_t
14900+ *
14901+ * Atomically reads the value of @v.
14902+ * Doesn't imply a read memory barrier.
14903+ */
14904+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
14905+{
14906+ return (*(volatile const long *)&(v)->counter);
14907 }
14908
14909 /**
14910@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
14911 }
14912
14913 /**
14914+ * atomic64_set_unchecked - set atomic64 variable
14915+ * @v: pointer to type atomic64_unchecked_t
14916+ * @i: required value
14917+ *
14918+ * Atomically sets the value of @v to @i.
14919+ */
14920+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
14921+{
14922+ v->counter = i;
14923+}
14924+
14925+/**
14926 * atomic64_add - add integer to atomic64 variable
14927 * @i: integer value to add
14928 * @v: pointer to type atomic64_t
14929@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
14930 */
14931 static inline void atomic64_add(long i, atomic64_t *v)
14932 {
14933+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
14934+
14935+#ifdef CONFIG_PAX_REFCOUNT
14936+ "jno 0f\n"
14937+ LOCK_PREFIX "subq %1,%0\n"
14938+ "int $4\n0:\n"
14939+ _ASM_EXTABLE(0b, 0b)
14940+#endif
14941+
14942+ : "=m" (v->counter)
14943+ : "er" (i), "m" (v->counter));
14944+}
14945+
14946+/**
14947+ * atomic64_add_unchecked - add integer to atomic64 variable
14948+ * @i: integer value to add
14949+ * @v: pointer to type atomic64_unchecked_t
14950+ *
14951+ * Atomically adds @i to @v.
14952+ */
14953+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
14954+{
14955 asm volatile(LOCK_PREFIX "addq %1,%0"
14956 : "=m" (v->counter)
14957 : "er" (i), "m" (v->counter));
14958@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
14959 */
14960 static inline void atomic64_sub(long i, atomic64_t *v)
14961 {
14962- asm volatile(LOCK_PREFIX "subq %1,%0"
14963+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
14964+
14965+#ifdef CONFIG_PAX_REFCOUNT
14966+ "jno 0f\n"
14967+ LOCK_PREFIX "addq %1,%0\n"
14968+ "int $4\n0:\n"
14969+ _ASM_EXTABLE(0b, 0b)
14970+#endif
14971+
14972+ : "=m" (v->counter)
14973+ : "er" (i), "m" (v->counter));
14974+}
14975+
14976+/**
14977+ * atomic64_sub_unchecked - subtract the atomic64 variable
14978+ * @i: integer value to subtract
14979+ * @v: pointer to type atomic64_unchecked_t
14980+ *
14981+ * Atomically subtracts @i from @v.
14982+ */
14983+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
14984+{
14985+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
14986 : "=m" (v->counter)
14987 : "er" (i), "m" (v->counter));
14988 }
14989@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
14990 {
14991 unsigned char c;
14992
14993- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
14994+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
14995+
14996+#ifdef CONFIG_PAX_REFCOUNT
14997+ "jno 0f\n"
14998+ LOCK_PREFIX "addq %2,%0\n"
14999+ "int $4\n0:\n"
15000+ _ASM_EXTABLE(0b, 0b)
15001+#endif
15002+
15003+ "sete %1\n"
15004 : "=m" (v->counter), "=qm" (c)
15005 : "er" (i), "m" (v->counter) : "memory");
15006 return c;
15007@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15008 */
15009 static inline void atomic64_inc(atomic64_t *v)
15010 {
15011+ asm volatile(LOCK_PREFIX "incq %0\n"
15012+
15013+#ifdef CONFIG_PAX_REFCOUNT
15014+ "jno 0f\n"
15015+ LOCK_PREFIX "decq %0\n"
15016+ "int $4\n0:\n"
15017+ _ASM_EXTABLE(0b, 0b)
15018+#endif
15019+
15020+ : "=m" (v->counter)
15021+ : "m" (v->counter));
15022+}
15023+
15024+/**
15025+ * atomic64_inc_unchecked - increment atomic64 variable
15026+ * @v: pointer to type atomic64_unchecked_t
15027+ *
15028+ * Atomically increments @v by 1.
15029+ */
15030+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
15031+{
15032 asm volatile(LOCK_PREFIX "incq %0"
15033 : "=m" (v->counter)
15034 : "m" (v->counter));
15035@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
15036 */
15037 static inline void atomic64_dec(atomic64_t *v)
15038 {
15039- asm volatile(LOCK_PREFIX "decq %0"
15040+ asm volatile(LOCK_PREFIX "decq %0\n"
15041+
15042+#ifdef CONFIG_PAX_REFCOUNT
15043+ "jno 0f\n"
15044+ LOCK_PREFIX "incq %0\n"
15045+ "int $4\n0:\n"
15046+ _ASM_EXTABLE(0b, 0b)
15047+#endif
15048+
15049+ : "=m" (v->counter)
15050+ : "m" (v->counter));
15051+}
15052+
15053+/**
15054+ * atomic64_dec_unchecked - decrement atomic64 variable
15055+ * @v: pointer to type atomic64_t
15056+ *
15057+ * Atomically decrements @v by 1.
15058+ */
15059+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
15060+{
15061+ asm volatile(LOCK_PREFIX "decq %0\n"
15062 : "=m" (v->counter)
15063 : "m" (v->counter));
15064 }
15065@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
15066 {
15067 unsigned char c;
15068
15069- asm volatile(LOCK_PREFIX "decq %0; sete %1"
15070+ asm volatile(LOCK_PREFIX "decq %0\n"
15071+
15072+#ifdef CONFIG_PAX_REFCOUNT
15073+ "jno 0f\n"
15074+ LOCK_PREFIX "incq %0\n"
15075+ "int $4\n0:\n"
15076+ _ASM_EXTABLE(0b, 0b)
15077+#endif
15078+
15079+ "sete %1\n"
15080 : "=m" (v->counter), "=qm" (c)
15081 : "m" (v->counter) : "memory");
15082 return c != 0;
15083@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
15084 {
15085 unsigned char c;
15086
15087- asm volatile(LOCK_PREFIX "incq %0; sete %1"
15088+ asm volatile(LOCK_PREFIX "incq %0\n"
15089+
15090+#ifdef CONFIG_PAX_REFCOUNT
15091+ "jno 0f\n"
15092+ LOCK_PREFIX "decq %0\n"
15093+ "int $4\n0:\n"
15094+ _ASM_EXTABLE(0b, 0b)
15095+#endif
15096+
15097+ "sete %1\n"
15098 : "=m" (v->counter), "=qm" (c)
15099 : "m" (v->counter) : "memory");
15100 return c != 0;
15101@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
15102 {
15103 unsigned char c;
15104
15105- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
15106+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
15107+
15108+#ifdef CONFIG_PAX_REFCOUNT
15109+ "jno 0f\n"
15110+ LOCK_PREFIX "subq %2,%0\n"
15111+ "int $4\n0:\n"
15112+ _ASM_EXTABLE(0b, 0b)
15113+#endif
15114+
15115+ "sets %1\n"
15116 : "=m" (v->counter), "=qm" (c)
15117 : "er" (i), "m" (v->counter) : "memory");
15118 return c;
15119@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
15120 */
15121 static inline long atomic64_add_return(long i, atomic64_t *v)
15122 {
15123+ return i + xadd_check_overflow(&v->counter, i);
15124+}
15125+
15126+/**
15127+ * atomic64_add_return_unchecked - add and return
15128+ * @i: integer value to add
15129+ * @v: pointer to type atomic64_unchecked_t
15130+ *
15131+ * Atomically adds @i to @v and returns @i + @v
15132+ */
15133+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
15134+{
15135 return i + xadd(&v->counter, i);
15136 }
15137
15138@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
15139 }
15140
15141 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
15142+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15143+{
15144+ return atomic64_add_return_unchecked(1, v);
15145+}
15146 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
15147
15148 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15149@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15150 return cmpxchg(&v->counter, old, new);
15151 }
15152
15153+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
15154+{
15155+ return cmpxchg(&v->counter, old, new);
15156+}
15157+
15158 static inline long atomic64_xchg(atomic64_t *v, long new)
15159 {
15160 return xchg(&v->counter, new);
15161@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
15162 */
15163 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
15164 {
15165- long c, old;
15166+ long c, old, new;
15167 c = atomic64_read(v);
15168 for (;;) {
15169- if (unlikely(c == (u)))
15170+ if (unlikely(c == u))
15171 break;
15172- old = atomic64_cmpxchg((v), c, c + (a));
15173+
15174+ asm volatile("add %2,%0\n"
15175+
15176+#ifdef CONFIG_PAX_REFCOUNT
15177+ "jno 0f\n"
15178+ "sub %2,%0\n"
15179+ "int $4\n0:\n"
15180+ _ASM_EXTABLE(0b, 0b)
15181+#endif
15182+
15183+ : "=r" (new)
15184+ : "0" (c), "ir" (a));
15185+
15186+ old = atomic64_cmpxchg(v, c, new);
15187 if (likely(old == c))
15188 break;
15189 c = old;
15190 }
15191- return c != (u);
15192+ return c != u;
15193 }
15194
15195 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
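atomic64_add_unless() keeps its cmpxchg loop, but the candidate value is now produced by an asm add so that PAX_REFCOUNT can trap the c + a overflow itself; the (c == u) and (c != u) parenthesis changes are cosmetic. The loop's shape in plain C, for reference (the real "new = c + a" is the asm add with the jno/int $4 check spliced in):

static inline int add_unless_sketch(atomic64_t *v, long a, long u)
{
        long c = atomic64_read(v);

        while (c != u) {
                long old = atomic64_cmpxchg(v, c, c + a);

                if (old == c)
                        break;
                c = old;
        }
        return c != u;          /* true iff the add was performed */
}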
15196diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
15197index 41639ce..ebce552 100644
15198--- a/arch/x86/include/asm/bitops.h
15199+++ b/arch/x86/include/asm/bitops.h
15200@@ -48,7 +48,7 @@
15201 * a mask operation on a byte.
15202 */
15203 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
15204-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
15205+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
15206 #define CONST_MASK(nr) (1 << ((nr) & 7))
15207
15208 /**
15209@@ -361,7 +361,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
15210 *
15211 * Undefined if no bit exists, so code should check against 0 first.
15212 */
15213-static inline unsigned long __ffs(unsigned long word)
15214+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
15215 {
15216 asm("rep; bsf %1,%0"
15217 : "=r" (word)
15218@@ -375,7 +375,7 @@ static inline unsigned long __ffs(unsigned long word)
15219 *
15220 * Undefined if no zero exists, so code should check against ~0UL first.
15221 */
15222-static inline unsigned long ffz(unsigned long word)
15223+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
15224 {
15225 asm("rep; bsf %1,%0"
15226 : "=r" (word)
15227@@ -389,7 +389,7 @@ static inline unsigned long ffz(unsigned long word)
15228 *
15229 * Undefined if no set bit exists, so code should check against 0 first.
15230 */
15231-static inline unsigned long __fls(unsigned long word)
15232+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
15233 {
15234 asm("bsr %1,%0"
15235 : "=r" (word)
15236@@ -452,7 +452,7 @@ static inline int ffs(int x)
15237 * set bit if value is nonzero. The last (most significant) bit is
15238 * at position 32.
15239 */
15240-static inline int fls(int x)
15241+static inline int __intentional_overflow(-1) fls(int x)
15242 {
15243 int r;
15244
15245@@ -494,7 +494,7 @@ static inline int fls(int x)
15246 * at position 64.
15247 */
15248 #ifdef CONFIG_X86_64
15249-static __always_inline int fls64(__u64 x)
15250+static __always_inline long fls64(__u64 x)
15251 {
15252 int bitpos = -1;
15253 /*
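The __intentional_overflow(-1) markers are annotations for the size_overflow gcc plugin: bit-scan helpers legitimately use the full value range (and fls64 widens its return to long in the same spirit), so the plugin is told not to instrument them. When the plugin is absent the marker must compile away; the usual wiring is along these lines (a sketch of the convention, not quoted from this patch):

#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...) \
        __attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)
#endif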
15254diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
15255index 4fa687a..60f2d39 100644
15256--- a/arch/x86/include/asm/boot.h
15257+++ b/arch/x86/include/asm/boot.h
15258@@ -6,10 +6,15 @@
15259 #include <uapi/asm/boot.h>
15260
15261 /* Physical address where kernel should be loaded. */
15262-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
15263+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
15264 + (CONFIG_PHYSICAL_ALIGN - 1)) \
15265 & ~(CONFIG_PHYSICAL_ALIGN - 1))
15266
15267+#ifndef __ASSEMBLY__
15268+extern unsigned char __LOAD_PHYSICAL_ADDR[];
15269+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
15270+#endif
15271+
15272 /* Minimum kernel alignment, as a power of two */
15273 #ifdef CONFIG_X86_64
15274 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
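Renaming the computed constant to ____LOAD_PHYSICAL_ADDR and exposing LOAD_PHYSICAL_ADDR to C as the address of an extern array turns the kernel load address into a link-time symbol instead of a preprocessor literal (assembly keeps using the raw constant). C users are unchanged apart from the cast:

extern unsigned char __LOAD_PHYSICAL_ADDR[];

static unsigned long load_addr_sketch(void)
{
        return (unsigned long)__LOAD_PHYSICAL_ADDR;  /* == LOAD_PHYSICAL_ADDR */
}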
15275diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
15276index 48f99f1..d78ebf9 100644
15277--- a/arch/x86/include/asm/cache.h
15278+++ b/arch/x86/include/asm/cache.h
15279@@ -5,12 +5,13 @@
15280
15281 /* L1 cache line size */
15282 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
15283-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
15284+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
15285
15286 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
15287+#define __read_only __attribute__((__section__(".data..read_only")))
15288
15289 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
15290-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
15291+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
15292
15293 #ifdef CONFIG_X86_VSMP
15294 #ifdef CONFIG_SMP
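Two independent hardenings in cache.h: the cache-size constants become unsigned long via _AC(1,UL), keeping size arithmetic out of signed territory, and __read_only introduces a .data..read_only section for objects that are set up once and then mapped read-only, so a stray kernel write primitive cannot flip them. Usage is just an attribute on the definition (hypothetical example):

static int hardening_enabled __read_only = 1;   /* not writable at runtime */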
15295diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
15296index 9863ee3..4a1f8e1 100644
15297--- a/arch/x86/include/asm/cacheflush.h
15298+++ b/arch/x86/include/asm/cacheflush.h
15299@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
15300 unsigned long pg_flags = pg->flags & _PGMT_MASK;
15301
15302 if (pg_flags == _PGMT_DEFAULT)
15303- return -1;
15304+ return ~0UL;
15305 else if (pg_flags == _PGMT_WC)
15306 return _PAGE_CACHE_WC;
15307 else if (pg_flags == _PGMT_UC_MINUS)
15308diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
15309index 0fa6750..cb7b2c3 100644
15310--- a/arch/x86/include/asm/calling.h
15311+++ b/arch/x86/include/asm/calling.h
15312@@ -80,103 +80,113 @@ For 32-bit we have the following conventions - kernel is built with
15313 #define RSP 152
15314 #define SS 160
15315
15316-#define ARGOFFSET R11
15317-#define SWFRAME ORIG_RAX
15318+#define ARGOFFSET R15
15319
15320 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1
15321- subq $9*8+\addskip, %rsp
15322- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
15323- movq_cfi rdi, 8*8
15324- movq_cfi rsi, 7*8
15325- movq_cfi rdx, 6*8
15326+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
15327+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
15328+ movq_cfi rdi, RDI
15329+ movq_cfi rsi, RSI
15330+ movq_cfi rdx, RDX
15331
15332 .if \save_rcx
15333- movq_cfi rcx, 5*8
15334+ movq_cfi rcx, RCX
15335 .endif
15336
15337- movq_cfi rax, 4*8
15338+ movq_cfi rax, RAX
15339
15340 .if \save_r891011
15341- movq_cfi r8, 3*8
15342- movq_cfi r9, 2*8
15343- movq_cfi r10, 1*8
15344- movq_cfi r11, 0*8
15345+ movq_cfi r8, R8
15346+ movq_cfi r9, R9
15347+ movq_cfi r10, R10
15348+ movq_cfi r11, R11
15349 .endif
15350
15351+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15352+ movq_cfi r12, R12
15353+#endif
15354+
15355 .endm
15356
15357-#define ARG_SKIP (9*8)
15358+#define ARG_SKIP ORIG_RAX
15359
15360 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
15361 rstor_r8910=1, rstor_rdx=1
15362+
15363+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15364+ movq_cfi_restore R12, r12
15365+#endif
15366+
15367 .if \rstor_r11
15368- movq_cfi_restore 0*8, r11
15369+ movq_cfi_restore R11, r11
15370 .endif
15371
15372 .if \rstor_r8910
15373- movq_cfi_restore 1*8, r10
15374- movq_cfi_restore 2*8, r9
15375- movq_cfi_restore 3*8, r8
15376+ movq_cfi_restore R10, r10
15377+ movq_cfi_restore R9, r9
15378+ movq_cfi_restore R8, r8
15379 .endif
15380
15381 .if \rstor_rax
15382- movq_cfi_restore 4*8, rax
15383+ movq_cfi_restore RAX, rax
15384 .endif
15385
15386 .if \rstor_rcx
15387- movq_cfi_restore 5*8, rcx
15388+ movq_cfi_restore RCX, rcx
15389 .endif
15390
15391 .if \rstor_rdx
15392- movq_cfi_restore 6*8, rdx
15393+ movq_cfi_restore RDX, rdx
15394 .endif
15395
15396- movq_cfi_restore 7*8, rsi
15397- movq_cfi_restore 8*8, rdi
15398+ movq_cfi_restore RSI, rsi
15399+ movq_cfi_restore RDI, rdi
15400
15401- .if ARG_SKIP+\addskip > 0
15402- addq $ARG_SKIP+\addskip, %rsp
15403- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
15404+ .if ORIG_RAX+\addskip > 0
15405+ addq $ORIG_RAX+\addskip, %rsp
15406+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
15407 .endif
15408 .endm
15409
15410- .macro LOAD_ARGS offset, skiprax=0
15411- movq \offset(%rsp), %r11
15412- movq \offset+8(%rsp), %r10
15413- movq \offset+16(%rsp), %r9
15414- movq \offset+24(%rsp), %r8
15415- movq \offset+40(%rsp), %rcx
15416- movq \offset+48(%rsp), %rdx
15417- movq \offset+56(%rsp), %rsi
15418- movq \offset+64(%rsp), %rdi
15419+ .macro LOAD_ARGS skiprax=0
15420+ movq R11(%rsp), %r11
15421+ movq R10(%rsp), %r10
15422+ movq R9(%rsp), %r9
15423+ movq R8(%rsp), %r8
15424+ movq RCX(%rsp), %rcx
15425+ movq RDX(%rsp), %rdx
15426+ movq RSI(%rsp), %rsi
15427+ movq RDI(%rsp), %rdi
15428 .if \skiprax
15429 .else
15430- movq \offset+72(%rsp), %rax
15431+ movq RAX(%rsp), %rax
15432 .endif
15433 .endm
15434
15435-#define REST_SKIP (6*8)
15436-
15437 .macro SAVE_REST
15438- subq $REST_SKIP, %rsp
15439- CFI_ADJUST_CFA_OFFSET REST_SKIP
15440- movq_cfi rbx, 5*8
15441- movq_cfi rbp, 4*8
15442- movq_cfi r12, 3*8
15443- movq_cfi r13, 2*8
15444- movq_cfi r14, 1*8
15445- movq_cfi r15, 0*8
15446+ movq_cfi rbx, RBX
15447+ movq_cfi rbp, RBP
15448+
15449+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15450+ movq_cfi r12, R12
15451+#endif
15452+
15453+ movq_cfi r13, R13
15454+ movq_cfi r14, R14
15455+ movq_cfi r15, R15
15456 .endm
15457
15458 .macro RESTORE_REST
15459- movq_cfi_restore 0*8, r15
15460- movq_cfi_restore 1*8, r14
15461- movq_cfi_restore 2*8, r13
15462- movq_cfi_restore 3*8, r12
15463- movq_cfi_restore 4*8, rbp
15464- movq_cfi_restore 5*8, rbx
15465- addq $REST_SKIP, %rsp
15466- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
15467+ movq_cfi_restore R15, r15
15468+ movq_cfi_restore R14, r14
15469+ movq_cfi_restore R13, r13
15470+
15471+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15472+ movq_cfi_restore R12, r12
15473+#endif
15474+
15475+ movq_cfi_restore RBP, rbp
15476+ movq_cfi_restore RBX, rbx
15477 .endm
15478
15479 .macro SAVE_ALL
15480diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
15481index f50de69..2b0a458 100644
15482--- a/arch/x86/include/asm/checksum_32.h
15483+++ b/arch/x86/include/asm/checksum_32.h
15484@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
15485 int len, __wsum sum,
15486 int *src_err_ptr, int *dst_err_ptr);
15487
15488+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
15489+ int len, __wsum sum,
15490+ int *src_err_ptr, int *dst_err_ptr);
15491+
15492+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
15493+ int len, __wsum sum,
15494+ int *src_err_ptr, int *dst_err_ptr);
15495+
15496 /*
15497 * Note: when you get a NULL pointer exception here this means someone
15498 * passed in an incorrect kernel address to one of these functions.
15499@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
15500
15501 might_sleep();
15502 stac();
15503- ret = csum_partial_copy_generic((__force void *)src, dst,
15504+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
15505 len, sum, err_ptr, NULL);
15506 clac();
15507
15508@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
15509 might_sleep();
15510 if (access_ok(VERIFY_WRITE, dst, len)) {
15511 stac();
15512- ret = csum_partial_copy_generic(src, (__force void *)dst,
15513+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
15514 len, sum, NULL, err_ptr);
15515 clac();
15516 return ret;
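
The generic copy-and-checksum routine is split so that, under UDEREF on i386, exactly one side of the copy goes through the user-segment accessor. Call-pattern sketch using the declarations from the hunk above (variable names illustrative):

    /* from-user: only the source is a user pointer, so only the reads
     * get the segment override; to-user is the mirror image. Plain
     * kernel-to-kernel callers keep csum_partial_copy_generic(). */
    ret = csum_partial_copy_generic_from_user((__force void *)usrc,
                                              kdst, len, sum, &err, NULL);
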
15517diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
15518index d47786a..ce1b05d 100644
15519--- a/arch/x86/include/asm/cmpxchg.h
15520+++ b/arch/x86/include/asm/cmpxchg.h
15521@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
15522 __compiletime_error("Bad argument size for cmpxchg");
15523 extern void __xadd_wrong_size(void)
15524 __compiletime_error("Bad argument size for xadd");
15525+extern void __xadd_check_overflow_wrong_size(void)
15526+ __compiletime_error("Bad argument size for xadd_check_overflow");
15527 extern void __add_wrong_size(void)
15528 __compiletime_error("Bad argument size for add");
15529+extern void __add_check_overflow_wrong_size(void)
15530+ __compiletime_error("Bad argument size for add_check_overflow");
15531
15532 /*
15533 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
15534@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
15535 __ret; \
15536 })
15537
15538+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
15539+ ({ \
15540+ __typeof__ (*(ptr)) __ret = (arg); \
15541+ switch (sizeof(*(ptr))) { \
15542+ case __X86_CASE_L: \
15543+ asm volatile (lock #op "l %0, %1\n" \
15544+ "jno 0f\n" \
15545+ "mov %0,%1\n" \
15546+ "int $4\n0:\n" \
15547+ _ASM_EXTABLE(0b, 0b) \
15548+ : "+r" (__ret), "+m" (*(ptr)) \
15549+ : : "memory", "cc"); \
15550+ break; \
15551+ case __X86_CASE_Q: \
15552+ asm volatile (lock #op "q %q0, %1\n" \
15553+ "jno 0f\n" \
15554+ "mov %0,%1\n" \
15555+ "int $4\n0:\n" \
15556+ _ASM_EXTABLE(0b, 0b) \
15557+ : "+r" (__ret), "+m" (*(ptr)) \
15558+ : : "memory", "cc"); \
15559+ break; \
15560+ default: \
15561+ __ ## op ## _check_overflow_wrong_size(); \
15562+ } \
15563+ __ret; \
15564+ })
15565+
15566 /*
15567 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
15568 * Since this is generally used to protect other memory information, we
15569@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
15570 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
15571 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
15572
15573+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
15574+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
15575+
15576 #define __add(ptr, inc, lock) \
15577 ({ \
15578 __typeof__ (*(ptr)) __ret = (inc); \
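
xadd_check_overflow is the PAX_REFCOUNT building block: the locked XADD executes, and if the signed result overflowed (OF set), the pre-add value is written back and "int $4" raises the overflow trap so the kernel can report and contain the wrap. A standalone sketch of the same pattern, assuming x86-64 GCC inline asm; the in-kernel macro additionally registers the trap site via _ASM_EXTABLE, which is omitted here:

    /* Atomically add v to *p; on signed overflow, undo the add and
     * raise the overflow trap (vector 4). */
    static inline int add_return_checked(int *p, int v)
    {
        int old = v;
        asm volatile("lock; xaddl %0, %1\n\t"   /* old = *p; *p += v */
                     "jno 0f\n\t"               /* no overflow: done */
                     "movl %0, %1\n\t"          /* restore pre-add value */
                     "int $4\n"
                     "0:\n"
                     : "+r" (old), "+m" (*p)
                     : : "memory", "cc");
        return old + v;
    }
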
15579diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
15580index 59c6c40..5e0b22c 100644
15581--- a/arch/x86/include/asm/compat.h
15582+++ b/arch/x86/include/asm/compat.h
15583@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
15584 typedef u32 compat_uint_t;
15585 typedef u32 compat_ulong_t;
15586 typedef u64 __attribute__((aligned(4))) compat_u64;
15587-typedef u32 compat_uptr_t;
15588+typedef u32 __user compat_uptr_t;
15589
15590 struct compat_timespec {
15591 compat_time_t tv_sec;
15592diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
15593index 89270b4..f0abf8e 100644
15594--- a/arch/x86/include/asm/cpufeature.h
15595+++ b/arch/x86/include/asm/cpufeature.h
15596@@ -203,7 +203,7 @@
15597 #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
15598 #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
15599 #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */
15600-
15601+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
15602
15603 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
15604 #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
15605@@ -211,7 +211,7 @@
15606 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
15607 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
15608 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
15609-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
15610+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
15611 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
15612 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
15613 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
15614@@ -353,6 +353,7 @@ extern const char * const x86_power_flags[32];
15615 #undef cpu_has_centaur_mcr
15616 #define cpu_has_centaur_mcr 0
15617
15618+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
15619 #endif /* CONFIG_X86_64 */
15620
15621 #if __GNUC__ >= 4
15622@@ -405,7 +406,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
15623
15624 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
15625 t_warn:
15626- warn_pre_alternatives();
15627+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
15628+ warn_pre_alternatives();
15629 return false;
15630 #endif
15631
15632@@ -425,7 +427,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
15633 ".section .discard,\"aw\",@progbits\n"
15634 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
15635 ".previous\n"
15636- ".section .altinstr_replacement,\"ax\"\n"
15637+ ".section .altinstr_replacement,\"a\"\n"
15638 "3: movb $1,%0\n"
15639 "4:\n"
15640 ".previous\n"
15641@@ -462,7 +464,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
15642 " .byte 2b - 1b\n" /* src len */
15643 " .byte 4f - 3f\n" /* repl len */
15644 ".previous\n"
15645- ".section .altinstr_replacement,\"ax\"\n"
15646+ ".section .altinstr_replacement,\"a\"\n"
15647 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
15648 "4:\n"
15649 ".previous\n"
15650@@ -495,7 +497,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
15651 ".section .discard,\"aw\",@progbits\n"
15652 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
15653 ".previous\n"
15654- ".section .altinstr_replacement,\"ax\"\n"
15655+ ".section .altinstr_replacement,\"a\"\n"
15656 "3: movb $0,%0\n"
15657 "4:\n"
15658 ".previous\n"
15659@@ -509,7 +511,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
15660 ".section .discard,\"aw\",@progbits\n"
15661 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
15662 ".previous\n"
15663- ".section .altinstr_replacement,\"ax\"\n"
15664+ ".section .altinstr_replacement,\"a\"\n"
15665 "5: movb $1,%0\n"
15666 "6:\n"
15667 ".previous\n"
15668diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
15669index b90e5df..b462c91 100644
15670--- a/arch/x86/include/asm/desc.h
15671+++ b/arch/x86/include/asm/desc.h
15672@@ -4,6 +4,7 @@
15673 #include <asm/desc_defs.h>
15674 #include <asm/ldt.h>
15675 #include <asm/mmu.h>
15676+#include <asm/pgtable.h>
15677
15678 #include <linux/smp.h>
15679 #include <linux/percpu.h>
15680@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
15681
15682 desc->type = (info->read_exec_only ^ 1) << 1;
15683 desc->type |= info->contents << 2;
15684+ desc->type |= info->seg_not_present ^ 1;
15685
15686 desc->s = 1;
15687 desc->dpl = 0x3;
15688@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
15689 }
15690
15691 extern struct desc_ptr idt_descr;
15692-extern gate_desc idt_table[];
15693-extern struct desc_ptr debug_idt_descr;
15694-extern gate_desc debug_idt_table[];
15695-
15696-struct gdt_page {
15697- struct desc_struct gdt[GDT_ENTRIES];
15698-} __attribute__((aligned(PAGE_SIZE)));
15699-
15700-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
15701+extern gate_desc idt_table[IDT_ENTRIES];
15702+extern const struct desc_ptr debug_idt_descr;
15703+extern gate_desc debug_idt_table[IDT_ENTRIES];
15704
15705+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
15706 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
15707 {
15708- return per_cpu(gdt_page, cpu).gdt;
15709+ return cpu_gdt_table[cpu];
15710 }
15711
15712 #ifdef CONFIG_X86_64
15713@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
15714 unsigned long base, unsigned dpl, unsigned flags,
15715 unsigned short seg)
15716 {
15717- gate->a = (seg << 16) | (base & 0xffff);
15718- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
15719+ gate->gate.offset_low = base;
15720+ gate->gate.seg = seg;
15721+ gate->gate.reserved = 0;
15722+ gate->gate.type = type;
15723+ gate->gate.s = 0;
15724+ gate->gate.dpl = dpl;
15725+ gate->gate.p = 1;
15726+ gate->gate.offset_high = base >> 16;
15727 }
15728
15729 #endif
15730@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
15731
15732 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
15733 {
15734+ pax_open_kernel();
15735 memcpy(&idt[entry], gate, sizeof(*gate));
15736+ pax_close_kernel();
15737 }
15738
15739 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
15740 {
15741+ pax_open_kernel();
15742 memcpy(&ldt[entry], desc, 8);
15743+ pax_close_kernel();
15744 }
15745
15746 static inline void
15747@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
15748 default: size = sizeof(*gdt); break;
15749 }
15750
15751+ pax_open_kernel();
15752 memcpy(&gdt[entry], desc, size);
15753+ pax_close_kernel();
15754 }
15755
15756 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
15757@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
15758
15759 static inline void native_load_tr_desc(void)
15760 {
15761+ pax_open_kernel();
15762 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
15763+ pax_close_kernel();
15764 }
15765
15766 static inline void native_load_gdt(const struct desc_ptr *dtr)
15767@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
15768 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
15769 unsigned int i;
15770
15771+ pax_open_kernel();
15772 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
15773 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
15774+ pax_close_kernel();
15775 }
15776
15777 #define _LDT_empty(info) \
15778@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
15779 preempt_enable();
15780 }
15781
15782-static inline unsigned long get_desc_base(const struct desc_struct *desc)
15783+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
15784 {
15785 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
15786 }
15787@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
15788 }
15789
15790 #ifdef CONFIG_X86_64
15791-static inline void set_nmi_gate(int gate, void *addr)
15792+static inline void set_nmi_gate(int gate, const void *addr)
15793 {
15794 gate_desc s;
15795
15796@@ -321,8 +334,8 @@ static inline void set_nmi_gate(int gate, void *addr)
15797 #endif
15798
15799 #ifdef CONFIG_TRACING
15800-extern struct desc_ptr trace_idt_descr;
15801-extern gate_desc trace_idt_table[];
15802+extern const struct desc_ptr trace_idt_descr;
15803+extern gate_desc trace_idt_table[IDT_ENTRIES];
15804 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
15805 {
15806 write_idt_entry(trace_idt_table, entry, gate);
15807@@ -333,7 +346,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
15808 }
15809 #endif
15810
15811-static inline void _set_gate(int gate, unsigned type, void *addr,
15812+static inline void _set_gate(int gate, unsigned type, const void *addr,
15813 unsigned dpl, unsigned ist, unsigned seg)
15814 {
15815 gate_desc s;
15816@@ -353,7 +366,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
15817 * Pentium F0 0F bugfix can have resulted in the mapped
15818 * IDT being write-protected.
15819 */
15820-static inline void set_intr_gate(unsigned int n, void *addr)
15821+static inline void set_intr_gate(unsigned int n, const void *addr)
15822 {
15823 BUG_ON((unsigned)n > 0xFF);
15824 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
15825@@ -410,19 +423,19 @@ static inline void __alloc_intr_gate(unsigned int n, void *addr)
15826 /*
15827 * This routine sets up an interrupt gate at directory privilege level 3.
15828 */
15829-static inline void set_system_intr_gate(unsigned int n, void *addr)
15830+static inline void set_system_intr_gate(unsigned int n, const void *addr)
15831 {
15832 BUG_ON((unsigned)n > 0xFF);
15833 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
15834 }
15835
15836-static inline void set_system_trap_gate(unsigned int n, void *addr)
15837+static inline void set_system_trap_gate(unsigned int n, const void *addr)
15838 {
15839 BUG_ON((unsigned)n > 0xFF);
15840 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
15841 }
15842
15843-static inline void set_trap_gate(unsigned int n, void *addr)
15844+static inline void set_trap_gate(unsigned int n, const void *addr)
15845 {
15846 BUG_ON((unsigned)n > 0xFF);
15847 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
15848@@ -431,16 +444,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
15849 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
15850 {
15851 BUG_ON((unsigned)n > 0xFF);
15852- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
15853+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
15854 }
15855
15856-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
15857+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
15858 {
15859 BUG_ON((unsigned)n > 0xFF);
15860 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
15861 }
15862
15863-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
15864+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
15865 {
15866 BUG_ON((unsigned)n > 0xFF);
15867 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
15868@@ -512,4 +525,17 @@ static inline void load_current_idt(void)
15869 else
15870 load_idt((const struct desc_ptr *)&idt_descr);
15871 }
15872+
15873+#ifdef CONFIG_X86_32
15874+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
15875+{
15876+ struct desc_struct d;
15877+
15878+ if (likely(limit))
15879+ limit = (limit - 1UL) >> PAGE_SHIFT;
15880+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
15881+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
15882+}
15883+#endif
15884+
15885 #endif /* _ASM_X86_DESC_H */
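
The recurring edit in this file brackets every IDT/GDT/LDT store with pax_open_kernel()/pax_close_kernel(): under KERNEXEC these tables live in read-only memory, and the pair briefly clears CR0.WP around the legitimate write (the native implementation appears in the pgtable.h hunk later in this patch). Usage sketch, illustrative only:

    /* every privileged descriptor writer follows this shape: */
    static void write_protected_entry(struct desc_struct *gdt, int idx,
                                      const struct desc_struct *d)
    {
        pax_open_kernel();   /* CR0.WP off: supervisor may write RO pages */
        gdt[idx] = *d;
        pax_close_kernel();  /* CR0.WP back on */
    }
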
15886diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
15887index 278441f..b95a174 100644
15888--- a/arch/x86/include/asm/desc_defs.h
15889+++ b/arch/x86/include/asm/desc_defs.h
15890@@ -31,6 +31,12 @@ struct desc_struct {
15891 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
15892 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
15893 };
15894+ struct {
15895+ u16 offset_low;
15896+ u16 seg;
15897+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
15898+ unsigned offset_high: 16;
15899+ } gate;
15900 };
15901 } __attribute__((packed));
15902
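The new gate view turns pack_gate() from manual word-splicing into named-field assignment. A worked example for a 32-bit interrupt gate at handler address 0x12345678, selector 0x60, type 0xE (GATE_INTERRUPT), dpl 0:

    /* field view                       old two-word packing           */
    /* offset_low  = 0x5678            a = (0x60 << 16) | 0x5678       */
    /* seg         = 0x0060              = 0x00605678                  */
    /* type=0xE, s=0, dpl=0, p=1       b = 0x12340000 | (0x8E << 8)    */
    /* offset_high = 0x1234              = 0x12348E00                  */
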
15903diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
15904index ced283a..ffe04cc 100644
15905--- a/arch/x86/include/asm/div64.h
15906+++ b/arch/x86/include/asm/div64.h
15907@@ -39,7 +39,7 @@
15908 __mod; \
15909 })
15910
15911-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
15912+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
15913 {
15914 union {
15915 u64 v64;
15916diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
15917index 9c999c1..3860cb8 100644
15918--- a/arch/x86/include/asm/elf.h
15919+++ b/arch/x86/include/asm/elf.h
15920@@ -243,7 +243,25 @@ extern int force_personality32;
15921 the loader. We need to make sure that it is out of the way of the program
15922 that it will "exec", and that there is sufficient room for the brk. */
15923
15924+#ifdef CONFIG_PAX_SEGMEXEC
15925+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
15926+#else
15927 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
15928+#endif
15929+
15930+#ifdef CONFIG_PAX_ASLR
15931+#ifdef CONFIG_X86_32
15932+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
15933+
15934+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
15935+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
15936+#else
15937+#define PAX_ELF_ET_DYN_BASE 0x400000UL
15938+
15939+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
15940+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
15941+#endif
15942+#endif
15943
15944 /* This yields a mask that user programs can use to figure out what
15945 instruction set this CPU supports. This could be done in user space,
15946@@ -296,16 +314,12 @@ do { \
15947
15948 #define ARCH_DLINFO \
15949 do { \
15950- if (vdso_enabled) \
15951- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
15952- (unsigned long)current->mm->context.vdso); \
15953+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
15954 } while (0)
15955
15956 #define ARCH_DLINFO_X32 \
15957 do { \
15958- if (vdso_enabled) \
15959- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
15960- (unsigned long)current->mm->context.vdso); \
15961+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
15962 } while (0)
15963
15964 #define AT_SYSINFO 32
15965@@ -320,7 +334,7 @@ else \
15966
15967 #endif /* !CONFIG_X86_32 */
15968
15969-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
15970+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
15971
15972 #define VDSO_ENTRY \
15973 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
15974@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
15975 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
15976 #define compat_arch_setup_additional_pages syscall32_setup_pages
15977
15978-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
15979-#define arch_randomize_brk arch_randomize_brk
15980-
15981 /*
15982 * True on X86_32 or when emulating IA32 on X86_64
15983 */
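
The PAX_DELTA_* macros express ASLR entropy in bits of page-granular shift. Plugging in the usual constants (PAGE_SHIFT of 12; TASK_SIZE_MAX_SHIFT of 47 on x86-64, both assumed from the surrounding tree):

    /* randomization applied to the mmap base and stack:
     *   x86-64 native : 47 - 12 - 3 = 32 bits
     *   x86-64 ADDR32 : 16 bits
     *   i386          : 16 bits, 15 under SEGMEXEC (whose user address
     *                   space is halved, so one bit less to play with) */
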
15984diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
15985index 77a99ac..39ff7f5 100644
15986--- a/arch/x86/include/asm/emergency-restart.h
15987+++ b/arch/x86/include/asm/emergency-restart.h
15988@@ -1,6 +1,6 @@
15989 #ifndef _ASM_X86_EMERGENCY_RESTART_H
15990 #define _ASM_X86_EMERGENCY_RESTART_H
15991
15992-extern void machine_emergency_restart(void);
15993+extern void machine_emergency_restart(void) __noreturn;
15994
15995 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
15996diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
15997index 4d0bda7..221da4d 100644
15998--- a/arch/x86/include/asm/fpu-internal.h
15999+++ b/arch/x86/include/asm/fpu-internal.h
16000@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16001 #define user_insn(insn, output, input...) \
16002 ({ \
16003 int err; \
16004+ pax_open_userland(); \
16005 asm volatile(ASM_STAC "\n" \
16006- "1:" #insn "\n\t" \
16007+ "1:" \
16008+ __copyuser_seg \
16009+ #insn "\n\t" \
16010 "2: " ASM_CLAC "\n" \
16011 ".section .fixup,\"ax\"\n" \
16012 "3: movl $-1,%[err]\n" \
16013@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16014 _ASM_EXTABLE(1b, 3b) \
16015 : [err] "=r" (err), output \
16016 : "0"(0), input); \
16017+ pax_close_userland(); \
16018 err; \
16019 })
16020
16021@@ -298,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
16022 "emms\n\t" /* clear stack tags */
16023 "fildl %P[addr]", /* set F?P to defined value */
16024 X86_FEATURE_FXSAVE_LEAK,
16025- [addr] "m" (tsk->thread.fpu.has_fpu));
16026+ [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
16027
16028 return fpu_restore_checking(&tsk->thread.fpu);
16029 }
16030diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
16031index be27ba1..04a8801 100644
16032--- a/arch/x86/include/asm/futex.h
16033+++ b/arch/x86/include/asm/futex.h
16034@@ -12,6 +12,7 @@
16035 #include <asm/smap.h>
16036
16037 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
16038+ typecheck(u32 __user *, uaddr); \
16039 asm volatile("\t" ASM_STAC "\n" \
16040 "1:\t" insn "\n" \
16041 "2:\t" ASM_CLAC "\n" \
16042@@ -20,15 +21,16 @@
16043 "\tjmp\t2b\n" \
16044 "\t.previous\n" \
16045 _ASM_EXTABLE(1b, 3b) \
16046- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
16047+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
16048 : "i" (-EFAULT), "0" (oparg), "1" (0))
16049
16050 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
16051+ typecheck(u32 __user *, uaddr); \
16052 asm volatile("\t" ASM_STAC "\n" \
16053 "1:\tmovl %2, %0\n" \
16054 "\tmovl\t%0, %3\n" \
16055 "\t" insn "\n" \
16056- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
16057+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
16058 "\tjnz\t1b\n" \
16059 "3:\t" ASM_CLAC "\n" \
16060 "\t.section .fixup,\"ax\"\n" \
16061@@ -38,7 +40,7 @@
16062 _ASM_EXTABLE(1b, 4b) \
16063 _ASM_EXTABLE(2b, 4b) \
16064 : "=&a" (oldval), "=&r" (ret), \
16065- "+m" (*uaddr), "=&r" (tem) \
16066+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
16067 : "r" (oparg), "i" (-EFAULT), "1" (0))
16068
16069 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16070@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16071
16072 pagefault_disable();
16073
16074+ pax_open_userland();
16075 switch (op) {
16076 case FUTEX_OP_SET:
16077- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
16078+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
16079 break;
16080 case FUTEX_OP_ADD:
16081- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
16082+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
16083 uaddr, oparg);
16084 break;
16085 case FUTEX_OP_OR:
16086@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16087 default:
16088 ret = -ENOSYS;
16089 }
16090+ pax_close_userland();
16091
16092 pagefault_enable();
16093
16094@@ -115,18 +119,20 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
16095 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
16096 return -EFAULT;
16097
16098+ pax_open_userland();
16099 asm volatile("\t" ASM_STAC "\n"
16100- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
16101+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
16102 "2:\t" ASM_CLAC "\n"
16103 "\t.section .fixup, \"ax\"\n"
16104 "3:\tmov %3, %0\n"
16105 "\tjmp 2b\n"
16106 "\t.previous\n"
16107 _ASM_EXTABLE(1b, 3b)
16108- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
16109+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
16110 : "i" (-EFAULT), "r" (newval), "1" (oldval)
16111 : "memory"
16112 );
16113+ pax_close_userland();
16114
16115 *uval = oldval;
16116 return ret;
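
Both futex paths now wrap the user access in pax_open_userland()/pax_close_userland() and prefix the memory operand with __copyuser_seg plus a ____m() cast. On i386 with UDEREF the prefix becomes a %gs segment override, so the instruction can only reach addresses inside the user segment; a kernel pointer smuggled in as uaddr faults instead of being dereferenced. Conceptual expansion, illustrative rather than verbatim:

    #ifdef CONFIG_PAX_MEMORY_UDEREF
    # define __copyuser_seg "gs;"   /* user accesses go through %gs */
    #else
    # define __copyuser_seg         /* flat model: no prefix needed */
    #endif
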
16117diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
16118index 92b3bae..3866449 100644
16119--- a/arch/x86/include/asm/hw_irq.h
16120+++ b/arch/x86/include/asm/hw_irq.h
16121@@ -165,8 +165,8 @@ extern void setup_ioapic_dest(void);
16122 extern void enable_IO_APIC(void);
16123
16124 /* Statistics */
16125-extern atomic_t irq_err_count;
16126-extern atomic_t irq_mis_count;
16127+extern atomic_unchecked_t irq_err_count;
16128+extern atomic_unchecked_t irq_mis_count;
16129
16130 /* EISA */
16131 extern void eisa_set_level_irq(unsigned int irq);
16132diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
16133index a203659..9889f1c 100644
16134--- a/arch/x86/include/asm/i8259.h
16135+++ b/arch/x86/include/asm/i8259.h
16136@@ -62,7 +62,7 @@ struct legacy_pic {
16137 void (*init)(int auto_eoi);
16138 int (*irq_pending)(unsigned int irq);
16139 void (*make_irq)(unsigned int irq);
16140-};
16141+} __do_const;
16142
16143 extern struct legacy_pic *legacy_pic;
16144 extern struct legacy_pic null_legacy_pic;
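
__do_const marks ops-style structures (tables of function pointers) for the PaX constify GCC plugin, which forces every instance into read-only data so an exploited write primitive cannot swap in a rogue handler. The effect is roughly what declaring each instance const by hand would give; the my_pic_* handlers below are hypothetical, shown only for shape:

    static const struct legacy_pic my_pic = {
        .init        = my_pic_init,         /* pointers fixed at build time */
        .irq_pending = my_pic_irq_pending,
        .make_irq    = my_pic_make_irq,
    };
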
16145diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
16146index 34f69cb..6d95446 100644
16147--- a/arch/x86/include/asm/io.h
16148+++ b/arch/x86/include/asm/io.h
16149@@ -51,12 +51,12 @@ static inline void name(type val, volatile void __iomem *addr) \
16150 "m" (*(volatile type __force *)addr) barrier); }
16151
16152 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
16153-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
16154-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
16155+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
16156+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
16157
16158 build_mmio_read(__readb, "b", unsigned char, "=q", )
16159-build_mmio_read(__readw, "w", unsigned short, "=r", )
16160-build_mmio_read(__readl, "l", unsigned int, "=r", )
16161+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
16162+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
16163
16164 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
16165 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
16166@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
16167 return ioremap_nocache(offset, size);
16168 }
16169
16170-extern void iounmap(volatile void __iomem *addr);
16171+extern void iounmap(const volatile void __iomem *addr);
16172
16173 extern void set_iounmap_nonlazy(void);
16174
16175@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
16176
16177 #include <linux/vmalloc.h>
16178
16179+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
16180+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
16181+{
16182+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
16183+}
16184+
16185+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
16186+{
16187+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
16188+}
16189+
16190 /*
16191 * Convert a virtual cached pointer to an uncached pointer
16192 */
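
The new valid_phys_addr_range()/valid_mmap_phys_addr_range() back the /dev/mem sanity checks: an access is allowed only if it ends below the CPU's reported physical address width. Worked numbers, assuming x86_phys_bits of 36 and 4 KiB pages:

    /* highest valid pfn: 1 << (36 - 12) = 0x1000000
     * addr = 0xF00000000, count = 0x2000:
     *   (0xF00000000 + 0x2000 + 0xFFF) >> 12 = 0xF00002
     *   0xF00002 < 0x1000000                 -> range accepted
     * addr = 0x1000000000 (== 2^36), count = 1:
     *   end pfn 0x1000001 >= 0x1000000       -> rejected */
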
16193diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
16194index bba3cf8..06bc8da 100644
16195--- a/arch/x86/include/asm/irqflags.h
16196+++ b/arch/x86/include/asm/irqflags.h
16197@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
16198 sti; \
16199 sysexit
16200
16201+#define GET_CR0_INTO_RDI mov %cr0, %rdi
16202+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
16203+#define GET_CR3_INTO_RDI mov %cr3, %rdi
16204+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
16205+
16206 #else
16207 #define INTERRUPT_RETURN iret
16208 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
16209diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
16210index 9454c16..e4100e3 100644
16211--- a/arch/x86/include/asm/kprobes.h
16212+++ b/arch/x86/include/asm/kprobes.h
16213@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
16214 #define RELATIVEJUMP_SIZE 5
16215 #define RELATIVECALL_OPCODE 0xe8
16216 #define RELATIVE_ADDR_SIZE 4
16217-#define MAX_STACK_SIZE 64
16218-#define MIN_STACK_SIZE(ADDR) \
16219- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
16220- THREAD_SIZE - (unsigned long)(ADDR))) \
16221- ? (MAX_STACK_SIZE) \
16222- : (((unsigned long)current_thread_info()) + \
16223- THREAD_SIZE - (unsigned long)(ADDR)))
16224+#define MAX_STACK_SIZE 64UL
16225+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
16226
16227 #define flush_insn_slot(p) do { } while (0)
16228
16229diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
16230index 2d89e39..baee879 100644
16231--- a/arch/x86/include/asm/local.h
16232+++ b/arch/x86/include/asm/local.h
16233@@ -10,33 +10,97 @@ typedef struct {
16234 atomic_long_t a;
16235 } local_t;
16236
16237+typedef struct {
16238+ atomic_long_unchecked_t a;
16239+} local_unchecked_t;
16240+
16241 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
16242
16243 #define local_read(l) atomic_long_read(&(l)->a)
16244+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
16245 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
16246+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
16247
16248 static inline void local_inc(local_t *l)
16249 {
16250- asm volatile(_ASM_INC "%0"
16251+ asm volatile(_ASM_INC "%0\n"
16252+
16253+#ifdef CONFIG_PAX_REFCOUNT
16254+ "jno 0f\n"
16255+ _ASM_DEC "%0\n"
16256+ "int $4\n0:\n"
16257+ _ASM_EXTABLE(0b, 0b)
16258+#endif
16259+
16260+ : "+m" (l->a.counter));
16261+}
16262+
16263+static inline void local_inc_unchecked(local_unchecked_t *l)
16264+{
16265+ asm volatile(_ASM_INC "%0\n"
16266 : "+m" (l->a.counter));
16267 }
16268
16269 static inline void local_dec(local_t *l)
16270 {
16271- asm volatile(_ASM_DEC "%0"
16272+ asm volatile(_ASM_DEC "%0\n"
16273+
16274+#ifdef CONFIG_PAX_REFCOUNT
16275+ "jno 0f\n"
16276+ _ASM_INC "%0\n"
16277+ "int $4\n0:\n"
16278+ _ASM_EXTABLE(0b, 0b)
16279+#endif
16280+
16281+ : "+m" (l->a.counter));
16282+}
16283+
16284+static inline void local_dec_unchecked(local_unchecked_t *l)
16285+{
16286+ asm volatile(_ASM_DEC "%0\n"
16287 : "+m" (l->a.counter));
16288 }
16289
16290 static inline void local_add(long i, local_t *l)
16291 {
16292- asm volatile(_ASM_ADD "%1,%0"
16293+ asm volatile(_ASM_ADD "%1,%0\n"
16294+
16295+#ifdef CONFIG_PAX_REFCOUNT
16296+ "jno 0f\n"
16297+ _ASM_SUB "%1,%0\n"
16298+ "int $4\n0:\n"
16299+ _ASM_EXTABLE(0b, 0b)
16300+#endif
16301+
16302+ : "+m" (l->a.counter)
16303+ : "ir" (i));
16304+}
16305+
16306+static inline void local_add_unchecked(long i, local_unchecked_t *l)
16307+{
16308+ asm volatile(_ASM_ADD "%1,%0\n"
16309 : "+m" (l->a.counter)
16310 : "ir" (i));
16311 }
16312
16313 static inline void local_sub(long i, local_t *l)
16314 {
16315- asm volatile(_ASM_SUB "%1,%0"
16316+ asm volatile(_ASM_SUB "%1,%0\n"
16317+
16318+#ifdef CONFIG_PAX_REFCOUNT
16319+ "jno 0f\n"
16320+ _ASM_ADD "%1,%0\n"
16321+ "int $4\n0:\n"
16322+ _ASM_EXTABLE(0b, 0b)
16323+#endif
16324+
16325+ : "+m" (l->a.counter)
16326+ : "ir" (i));
16327+}
16328+
16329+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
16330+{
16331+ asm volatile(_ASM_SUB "%1,%0\n"
16332 : "+m" (l->a.counter)
16333 : "ir" (i));
16334 }
16335@@ -54,7 +118,16 @@ static inline int local_sub_and_test(long i, local_t *l)
16336 {
16337 unsigned char c;
16338
16339- asm volatile(_ASM_SUB "%2,%0; sete %1"
16340+ asm volatile(_ASM_SUB "%2,%0\n"
16341+
16342+#ifdef CONFIG_PAX_REFCOUNT
16343+ "jno 0f\n"
16344+ _ASM_ADD "%2,%0\n"
16345+ "int $4\n0:\n"
16346+ _ASM_EXTABLE(0b, 0b)
16347+#endif
16348+
16349+ "sete %1\n"
16350 : "+m" (l->a.counter), "=qm" (c)
16351 : "ir" (i) : "memory");
16352 return c;
16353@@ -72,7 +145,16 @@ static inline int local_dec_and_test(local_t *l)
16354 {
16355 unsigned char c;
16356
16357- asm volatile(_ASM_DEC "%0; sete %1"
16358+ asm volatile(_ASM_DEC "%0\n"
16359+
16360+#ifdef CONFIG_PAX_REFCOUNT
16361+ "jno 0f\n"
16362+ _ASM_INC "%0\n"
16363+ "int $4\n0:\n"
16364+ _ASM_EXTABLE(0b, 0b)
16365+#endif
16366+
16367+ "sete %1\n"
16368 : "+m" (l->a.counter), "=qm" (c)
16369 : : "memory");
16370 return c != 0;
16371@@ -90,7 +172,16 @@ static inline int local_inc_and_test(local_t *l)
16372 {
16373 unsigned char c;
16374
16375- asm volatile(_ASM_INC "%0; sete %1"
16376+ asm volatile(_ASM_INC "%0\n"
16377+
16378+#ifdef CONFIG_PAX_REFCOUNT
16379+ "jno 0f\n"
16380+ _ASM_DEC "%0\n"
16381+ "int $4\n0:\n"
16382+ _ASM_EXTABLE(0b, 0b)
16383+#endif
16384+
16385+ "sete %1\n"
16386 : "+m" (l->a.counter), "=qm" (c)
16387 : : "memory");
16388 return c != 0;
16389@@ -109,7 +200,16 @@ static inline int local_add_negative(long i, local_t *l)
16390 {
16391 unsigned char c;
16392
16393- asm volatile(_ASM_ADD "%2,%0; sets %1"
16394+ asm volatile(_ASM_ADD "%2,%0\n"
16395+
16396+#ifdef CONFIG_PAX_REFCOUNT
16397+ "jno 0f\n"
16398+ _ASM_SUB "%2,%0\n"
16399+ "int $4\n0:\n"
16400+ _ASM_EXTABLE(0b, 0b)
16401+#endif
16402+
16403+ "sets %1\n"
16404 : "+m" (l->a.counter), "=qm" (c)
16405 : "ir" (i) : "memory");
16406 return c;
16407@@ -125,6 +225,30 @@ static inline int local_add_negative(long i, local_t *l)
16408 static inline long local_add_return(long i, local_t *l)
16409 {
16410 long __i = i;
16411+ asm volatile(_ASM_XADD "%0, %1\n"
16412+
16413+#ifdef CONFIG_PAX_REFCOUNT
16414+ "jno 0f\n"
16415+ _ASM_MOV "%0,%1\n"
16416+ "int $4\n0:\n"
16417+ _ASM_EXTABLE(0b, 0b)
16418+#endif
16419+
16420+ : "+r" (i), "+m" (l->a.counter)
16421+ : : "memory");
16422+ return i + __i;
16423+}
16424+
16425+/**
16426+ * local_add_return_unchecked - add and return
16427+ * @i: integer value to add
16428+ * @l: pointer to type local_unchecked_t
16429+ *
16430+ * Atomically adds @i to @l and returns @i + @l
16431+ */
16432+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
16433+{
16434+ long __i = i;
16435 asm volatile(_ASM_XADD "%0, %1;"
16436 : "+r" (i), "+m" (l->a.counter)
16437 : : "memory");
16438@@ -141,6 +265,8 @@ static inline long local_sub_return(long i, local_t *l)
16439
16440 #define local_cmpxchg(l, o, n) \
16441 (cmpxchg_local(&((l)->a.counter), (o), (n)))
16442+#define local_cmpxchg_unchecked(l, o, n) \
16443+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
16444 /* Always has a lock prefix */
16445 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
16446
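The pattern throughout this file pairs every checked operation with an *_unchecked twin: reference counts keep the jno/int $4 overflow trap, while counters whose wraparound is benign (statistics, sequence numbers) opt out via local_unchecked_t and avoid both the extra instructions and false positives. Usage sketch with the types defined above (variable names illustrative):

    static local_t           obj_refs;    /* checked: overflow traps     */
    static local_unchecked_t tx_bytes;    /* unchecked: wrap is harmless */

    local_inc(&obj_refs);                 /* guarded by PAX_REFCOUNT     */
    local_add_unchecked(4096, &tx_bytes); /* plain locked add            */
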
16447diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
16448new file mode 100644
16449index 0000000..2bfd3ba
16450--- /dev/null
16451+++ b/arch/x86/include/asm/mman.h
16452@@ -0,0 +1,15 @@
16453+#ifndef _X86_MMAN_H
16454+#define _X86_MMAN_H
16455+
16456+#include <uapi/asm/mman.h>
16457+
16458+#ifdef __KERNEL__
16459+#ifndef __ASSEMBLY__
16460+#ifdef CONFIG_X86_32
16461+#define arch_mmap_check i386_mmap_check
16462+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
16463+#endif
16464+#endif
16465+#endif
16466+
16467+#endif /* X86_MMAN_H */
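
The new header wires the generic arch_mmap_check() hook to an i386-specific validator; PaX uses it to veto mappings that would violate the SEGMEXEC address-space split before mmap() commits to them. Shape of the hook contract as the common code calls it, with an illustrative body that is not the patch's actual check:

    /* called early in the mmap path; a nonzero return aborts the map */
    int i386_mmap_check(unsigned long addr, unsigned long len,
                        unsigned long flags)
    {
        if (len > TASK_SIZE || addr > TASK_SIZE - len)
            return -EINVAL;   /* outside the permitted user range */
        return 0;
    }
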
16468diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
16469index 5f55e69..e20bfb1 100644
16470--- a/arch/x86/include/asm/mmu.h
16471+++ b/arch/x86/include/asm/mmu.h
16472@@ -9,7 +9,7 @@
16473 * we put the segment information here.
16474 */
16475 typedef struct {
16476- void *ldt;
16477+ struct desc_struct *ldt;
16478 int size;
16479
16480 #ifdef CONFIG_X86_64
16481@@ -18,7 +18,19 @@ typedef struct {
16482 #endif
16483
16484 struct mutex lock;
16485- void *vdso;
16486+ unsigned long vdso;
16487+
16488+#ifdef CONFIG_X86_32
16489+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
16490+ unsigned long user_cs_base;
16491+ unsigned long user_cs_limit;
16492+
16493+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
16494+ cpumask_t cpu_user_cs_mask;
16495+#endif
16496+
16497+#endif
16498+#endif
16499 } mm_context_t;
16500
16501 #ifdef CONFIG_SMP
16502diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
16503index be12c53..4d24039 100644
16504--- a/arch/x86/include/asm/mmu_context.h
16505+++ b/arch/x86/include/asm/mmu_context.h
16506@@ -24,6 +24,20 @@ void destroy_context(struct mm_struct *mm);
16507
16508 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
16509 {
16510+
16511+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16512+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
16513+ unsigned int i;
16514+ pgd_t *pgd;
16515+
16516+ pax_open_kernel();
16517+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
16518+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
16519+ set_pgd_batched(pgd+i, native_make_pgd(0));
16520+ pax_close_kernel();
16521+ }
16522+#endif
16523+
16524 #ifdef CONFIG_SMP
16525 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
16526 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
16527@@ -34,16 +48,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
16528 struct task_struct *tsk)
16529 {
16530 unsigned cpu = smp_processor_id();
16531+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
16532+ int tlbstate = TLBSTATE_OK;
16533+#endif
16534
16535 if (likely(prev != next)) {
16536 #ifdef CONFIG_SMP
16537+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
16538+ tlbstate = this_cpu_read(cpu_tlbstate.state);
16539+#endif
16540 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
16541 this_cpu_write(cpu_tlbstate.active_mm, next);
16542 #endif
16543 cpumask_set_cpu(cpu, mm_cpumask(next));
16544
16545 /* Re-load page tables */
16546+#ifdef CONFIG_PAX_PER_CPU_PGD
16547+ pax_open_kernel();
16548+
16549+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16550+ if (static_cpu_has(X86_FEATURE_PCID))
16551+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
16552+ else
16553+#endif
16554+
16555+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
16556+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
16557+ pax_close_kernel();
16558+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
16559+
16560+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16561+ if (static_cpu_has(X86_FEATURE_PCID)) {
16562+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
16563+ u64 descriptor[2];
16564+ descriptor[0] = PCID_USER;
16565+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
16566+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
16567+ descriptor[0] = PCID_KERNEL;
16568+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
16569+ }
16570+ } else {
16571+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
16572+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
16573+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
16574+ else
16575+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
16576+ }
16577+ } else
16578+#endif
16579+
16580+ load_cr3(get_cpu_pgd(cpu, kernel));
16581+#else
16582 load_cr3(next->pgd);
16583+#endif
16584
16585 /* Stop flush ipis for the previous mm */
16586 cpumask_clear_cpu(cpu, mm_cpumask(prev));
16587@@ -51,9 +108,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
16588 /* Load the LDT, if the LDT is different: */
16589 if (unlikely(prev->context.ldt != next->context.ldt))
16590 load_LDT_nolock(&next->context);
16591+
16592+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
16593+ if (!(__supported_pte_mask & _PAGE_NX)) {
16594+ smp_mb__before_clear_bit();
16595+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
16596+ smp_mb__after_clear_bit();
16597+ cpu_set(cpu, next->context.cpu_user_cs_mask);
16598+ }
16599+#endif
16600+
16601+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
16602+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
16603+ prev->context.user_cs_limit != next->context.user_cs_limit))
16604+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
16605+#ifdef CONFIG_SMP
16606+ else if (unlikely(tlbstate != TLBSTATE_OK))
16607+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
16608+#endif
16609+#endif
16610+
16611 }
16612+ else {
16613+
16614+#ifdef CONFIG_PAX_PER_CPU_PGD
16615+ pax_open_kernel();
16616+
16617+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16618+ if (static_cpu_has(X86_FEATURE_PCID))
16619+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
16620+ else
16621+#endif
16622+
16623+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
16624+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
16625+ pax_close_kernel();
16626+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
16627+
16628+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16629+ if (static_cpu_has(X86_FEATURE_PCID)) {
16630+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
16631+ u64 descriptor[2];
16632+ descriptor[0] = PCID_USER;
16633+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
16634+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
16635+ descriptor[0] = PCID_KERNEL;
16636+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
16637+ }
16638+ } else {
16639+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
16640+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
16641+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
16642+ else
16643+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
16644+ }
16645+ } else
16646+#endif
16647+
16648+ load_cr3(get_cpu_pgd(cpu, kernel));
16649+#endif
16650+
16651 #ifdef CONFIG_SMP
16652- else {
16653 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
16654 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
16655
16656@@ -70,11 +185,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
16657 * tlb flush IPI delivery. We must reload CR3
16658 * to make sure to use no freed page tables.
16659 */
16660+
16661+#ifndef CONFIG_PAX_PER_CPU_PGD
16662 load_cr3(next->pgd);
16663+#endif
16664+
16665 load_LDT_nolock(&next->context);
16666+
16667+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
16668+ if (!(__supported_pte_mask & _PAGE_NX))
16669+ cpu_set(cpu, next->context.cpu_user_cs_mask);
16670+#endif
16671+
16672+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
16673+#ifdef CONFIG_PAX_PAGEEXEC
16674+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
16675+#endif
16676+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
16677+#endif
16678+
16679 }
16680+#endif
16681 }
16682-#endif
16683 }
16684
16685 #define activate_mm(prev, next) \
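
Under PAX_PER_CPU_PGD each CPU owns its kernel and user PGDs; switch_mm() clones the incoming task's user mappings into them and then flushes either with a targeted INVPCID (one PCID at a time) or by CR3 writes, using PCID_NOFLUSH to preserve kernel TLB entries when STRONGUDEREF makes that safe. Reduced sketch of the single-context flush used twice above; the PCID constants and __ASM_INVPCID come from elsewhere in this patch, not upstream 3.12:

    /* drop every TLB entry tagged with one PCID */
    static inline void invpcid_flush_single(unsigned long pcid)
    {
        u64 descriptor[2] = { pcid, 0 };  /* {pcid, linear addr (unused)} */
        asm volatile(__ASM_INVPCID
                     : : "d" (&descriptor), "a" (INVPCID_SINGLE_CONTEXT)
                     : "memory");
    }
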
16686diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
16687index e3b7819..b257c64 100644
16688--- a/arch/x86/include/asm/module.h
16689+++ b/arch/x86/include/asm/module.h
16690@@ -5,6 +5,7 @@
16691
16692 #ifdef CONFIG_X86_64
16693 /* X86_64 does not define MODULE_PROC_FAMILY */
16694+#define MODULE_PROC_FAMILY ""
16695 #elif defined CONFIG_M486
16696 #define MODULE_PROC_FAMILY "486 "
16697 #elif defined CONFIG_M586
16698@@ -57,8 +58,20 @@
16699 #error unknown processor family
16700 #endif
16701
16702-#ifdef CONFIG_X86_32
16703-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
16704+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
16705+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
16706+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
16707+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
16708+#else
16709+#define MODULE_PAX_KERNEXEC ""
16710 #endif
16711
16712+#ifdef CONFIG_PAX_MEMORY_UDEREF
16713+#define MODULE_PAX_UDEREF "UDEREF "
16714+#else
16715+#define MODULE_PAX_UDEREF ""
16716+#endif
16717+
16718+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
16719+
16720 #endif /* _ASM_X86_MODULE_H */
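
Folding the PaX options into MODULE_ARCH_VERMAGIC makes module compatibility explicit: a module built without the matching KERNEXEC method or UDEREF setting fails the vermagic comparison at load time instead of running against mismatched assumptions (the OR method, for instance, reserves %r12, as the calling.h hunk earlier shows, so a foreign module would clobber it). Concrete expansion on x86-64 with KERNEXEC_PLUGIN_METHOD_OR and UDEREF enabled:

    /* MODULE_ARCH_VERMAGIC == "" "KERNEXEC_OR " "UDEREF "
     * -> vermagic tail: "... KERNEXEC_OR UDEREF "           */
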
16721diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
16722index 86f9301..b365cda 100644
16723--- a/arch/x86/include/asm/nmi.h
16724+++ b/arch/x86/include/asm/nmi.h
16725@@ -40,11 +40,11 @@ struct nmiaction {
16726 nmi_handler_t handler;
16727 unsigned long flags;
16728 const char *name;
16729-};
16730+} __do_const;
16731
16732 #define register_nmi_handler(t, fn, fg, n, init...) \
16733 ({ \
16734- static struct nmiaction init fn##_na = { \
16735+ static const struct nmiaction init fn##_na = { \
16736 .handler = (fn), \
16737 .name = (n), \
16738 .flags = (fg), \
16739@@ -52,7 +52,7 @@ struct nmiaction {
16740 __register_nmi_handler((t), &fn##_na); \
16741 })
16742
16743-int __register_nmi_handler(unsigned int, struct nmiaction *);
16744+int __register_nmi_handler(unsigned int, const struct nmiaction *);
16745
16746 void unregister_nmi_handler(unsigned int, const char *);
16747
16748diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
16749index c878924..21f4889 100644
16750--- a/arch/x86/include/asm/page.h
16751+++ b/arch/x86/include/asm/page.h
16752@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
16753 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
16754
16755 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
16756+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
16757
16758 #define __boot_va(x) __va(x)
16759 #define __boot_pa(x) __pa(x)
16760diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
16761index 0f1ddee..e2fc3d1 100644
16762--- a/arch/x86/include/asm/page_64.h
16763+++ b/arch/x86/include/asm/page_64.h
16764@@ -7,9 +7,9 @@
16765
16766 /* duplicated to the one in bootmem.h */
16767 extern unsigned long max_pfn;
16768-extern unsigned long phys_base;
16769+extern const unsigned long phys_base;
16770
16771-static inline unsigned long __phys_addr_nodebug(unsigned long x)
16772+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
16773 {
16774 unsigned long y = x - __START_KERNEL_map;
16775
16776diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
16777index 401f350..dee5d13 100644
16778--- a/arch/x86/include/asm/paravirt.h
16779+++ b/arch/x86/include/asm/paravirt.h
16780@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
16781 return (pmd_t) { ret };
16782 }
16783
16784-static inline pmdval_t pmd_val(pmd_t pmd)
16785+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
16786 {
16787 pmdval_t ret;
16788
16789@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
16790 val);
16791 }
16792
16793+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
16794+{
16795+ pgdval_t val = native_pgd_val(pgd);
16796+
16797+ if (sizeof(pgdval_t) > sizeof(long))
16798+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
16799+ val, (u64)val >> 32);
16800+ else
16801+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
16802+ val);
16803+}
16804+
16805 static inline void pgd_clear(pgd_t *pgdp)
16806 {
16807 set_pgd(pgdp, __pgd(0));
16808@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
16809 pv_mmu_ops.set_fixmap(idx, phys, flags);
16810 }
16811
16812+#ifdef CONFIG_PAX_KERNEXEC
16813+static inline unsigned long pax_open_kernel(void)
16814+{
16815+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
16816+}
16817+
16818+static inline unsigned long pax_close_kernel(void)
16819+{
16820+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
16821+}
16822+#else
16823+static inline unsigned long pax_open_kernel(void) { return 0; }
16824+static inline unsigned long pax_close_kernel(void) { return 0; }
16825+#endif
16826+
16827 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
16828
16829 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
16830@@ -906,7 +933,7 @@ extern void default_banner(void);
16831
16832 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
16833 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
16834-#define PARA_INDIRECT(addr) *%cs:addr
16835+#define PARA_INDIRECT(addr) *%ss:addr
16836 #endif
16837
16838 #define INTERRUPT_RETURN \
16839@@ -981,6 +1008,21 @@ extern void default_banner(void);
16840 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
16841 CLBR_NONE, \
16842 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
16843+
16844+#define GET_CR0_INTO_RDI \
16845+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
16846+ mov %rax,%rdi
16847+
16848+#define SET_RDI_INTO_CR0 \
16849+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
16850+
16851+#define GET_CR3_INTO_RDI \
16852+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
16853+ mov %rax,%rdi
16854+
16855+#define SET_RDI_INTO_CR3 \
16856+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
16857+
16858 #endif /* CONFIG_X86_32 */
16859
16860 #endif /* __ASSEMBLY__ */
16861diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
16862index aab8f67..2531748 100644
16863--- a/arch/x86/include/asm/paravirt_types.h
16864+++ b/arch/x86/include/asm/paravirt_types.h
16865@@ -84,7 +84,7 @@ struct pv_init_ops {
16866 */
16867 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
16868 unsigned long addr, unsigned len);
16869-};
16870+} __no_const;
16871
16872
16873 struct pv_lazy_ops {
16874@@ -98,7 +98,7 @@ struct pv_time_ops {
16875 unsigned long long (*sched_clock)(void);
16876 unsigned long long (*steal_clock)(int cpu);
16877 unsigned long (*get_tsc_khz)(void);
16878-};
16879+} __no_const;
16880
16881 struct pv_cpu_ops {
16882 /* hooks for various privileged instructions */
16883@@ -192,7 +192,7 @@ struct pv_cpu_ops {
16884
16885 void (*start_context_switch)(struct task_struct *prev);
16886 void (*end_context_switch)(struct task_struct *next);
16887-};
16888+} __no_const;
16889
16890 struct pv_irq_ops {
16891 /*
16892@@ -223,7 +223,7 @@ struct pv_apic_ops {
16893 unsigned long start_eip,
16894 unsigned long start_esp);
16895 #endif
16896-};
16897+} __no_const;
16898
16899 struct pv_mmu_ops {
16900 unsigned long (*read_cr2)(void);
16901@@ -313,6 +313,7 @@ struct pv_mmu_ops {
16902 struct paravirt_callee_save make_pud;
16903
16904 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
16905+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
16906 #endif /* PAGETABLE_LEVELS == 4 */
16907 #endif /* PAGETABLE_LEVELS >= 3 */
16908
16909@@ -324,6 +325,12 @@ struct pv_mmu_ops {
16910 an mfn. We can tell which is which from the index. */
16911 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
16912 phys_addr_t phys, pgprot_t flags);
16913+
16914+#ifdef CONFIG_PAX_KERNEXEC
16915+ unsigned long (*pax_open_kernel)(void);
16916+ unsigned long (*pax_close_kernel)(void);
16917+#endif
16918+
16919 };
16920
16921 struct arch_spinlock;
16922diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
16923index b4389a4..7024269 100644
16924--- a/arch/x86/include/asm/pgalloc.h
16925+++ b/arch/x86/include/asm/pgalloc.h
16926@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
16927 pmd_t *pmd, pte_t *pte)
16928 {
16929 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
16930+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
16931+}
16932+
16933+static inline void pmd_populate_user(struct mm_struct *mm,
16934+ pmd_t *pmd, pte_t *pte)
16935+{
16936+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
16937 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
16938 }
16939
16940@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
16941
16942 #ifdef CONFIG_X86_PAE
16943 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
16944+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
16945+{
16946+ pud_populate(mm, pudp, pmd);
16947+}
16948 #else /* !CONFIG_X86_PAE */
16949 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
16950 {
16951 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
16952 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
16953 }
16954+
16955+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
16956+{
16957+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
16958+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
16959+}
16960 #endif /* CONFIG_X86_PAE */
16961
16962 #if PAGETABLE_LEVELS > 3
16963@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
16964 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
16965 }
16966
16967+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
16968+{
16969+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
16970+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
16971+}
16972+
16973 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
16974 {
16975 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
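The kernel/user populate split above hinges on a single flag: in mainline, _PAGE_TABLE and _KERNPG_TABLE differ only by _PAGE_USER, so installing a PTE page with _KERNPG_TABLE guarantees the hierarchy beneath it is never reachable from user mode. An equivalent formulation of the two attribute sets, shown for reference:

/* From pgtable_types.h (mainline values): */
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_TABLE	(_KERNPG_TABLE | _PAGE_USER)	/* user-mode walks allowed */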
16976diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
16977index 3bf2dd0..23d2a9f 100644
16978--- a/arch/x86/include/asm/pgtable-2level.h
16979+++ b/arch/x86/include/asm/pgtable-2level.h
16980@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
16981
16982 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
16983 {
16984+ pax_open_kernel();
16985 *pmdp = pmd;
16986+ pax_close_kernel();
16987 }
16988
16989 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
16990diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
16991index 81bb91b..9392125 100644
16992--- a/arch/x86/include/asm/pgtable-3level.h
16993+++ b/arch/x86/include/asm/pgtable-3level.h
16994@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
16995
16996 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
16997 {
16998+ pax_open_kernel();
16999 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
17000+ pax_close_kernel();
17001 }
17002
17003 static inline void native_set_pud(pud_t *pudp, pud_t pud)
17004 {
17005+ pax_open_kernel();
17006 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
17007+ pax_close_kernel();
17008 }
17009
17010 /*
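Both hunks above follow one pattern: under KERNEXEC the page tables themselves live in read-only memory, so every native setter brackets its single store with pax_open_kernel()/pax_close_kernel() (implemented as a CR0.WP toggle in pgtable.h below). A sketch of the shape every wrapped setter takes, not a new API:

static inline void set_entry_sketch(pmd_t *pmdp, pmd_t pmd)
{
	pax_open_kernel();	/* preemption off, CR0.WP cleared */
	*pmdp = pmd;		/* the one store that needs the window */
	pax_close_kernel();	/* CR0.WP restored, preemption back on */
}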
17011diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
17012index 3d19994..732a48c 100644
17013--- a/arch/x86/include/asm/pgtable.h
17014+++ b/arch/x86/include/asm/pgtable.h
17015@@ -45,6 +45,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17016
17017 #ifndef __PAGETABLE_PUD_FOLDED
17018 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
17019+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
17020 #define pgd_clear(pgd) native_pgd_clear(pgd)
17021 #endif
17022
17023@@ -82,12 +83,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17024
17025 #define arch_end_context_switch(prev) do {} while(0)
17026
17027+#define pax_open_kernel() native_pax_open_kernel()
17028+#define pax_close_kernel() native_pax_close_kernel()
17029 #endif /* CONFIG_PARAVIRT */
17030
17031+#define __HAVE_ARCH_PAX_OPEN_KERNEL
17032+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
17033+
17034+#ifdef CONFIG_PAX_KERNEXEC
17035+static inline unsigned long native_pax_open_kernel(void)
17036+{
17037+ unsigned long cr0;
17038+
17039+ preempt_disable();
17040+ barrier();
17041+ cr0 = read_cr0() ^ X86_CR0_WP;
17042+ BUG_ON(cr0 & X86_CR0_WP);
17043+ write_cr0(cr0);
17044+ return cr0 ^ X86_CR0_WP;
17045+}
17046+
17047+static inline unsigned long native_pax_close_kernel(void)
17048+{
17049+ unsigned long cr0;
17050+
17051+ cr0 = read_cr0() ^ X86_CR0_WP;
17052+ BUG_ON(!(cr0 & X86_CR0_WP));
17053+ write_cr0(cr0);
17054+ barrier();
17055+ preempt_enable_no_resched();
17056+ return cr0 ^ X86_CR0_WP;
17057+}
17058+#else
17059+static inline unsigned long native_pax_open_kernel(void) { return 0; }
17060+static inline unsigned long native_pax_close_kernel(void) { return 0; }
17061+#endif
17062+
17063 /*
17064 * The following only work if pte_present() is true.
17065 * Undefined behaviour if not..
17066 */
17067+static inline int pte_user(pte_t pte)
17068+{
17069+ return pte_val(pte) & _PAGE_USER;
17070+}
17071+
17072 static inline int pte_dirty(pte_t pte)
17073 {
17074 return pte_flags(pte) & _PAGE_DIRTY;
17075@@ -148,6 +188,11 @@ static inline unsigned long pud_pfn(pud_t pud)
17076 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
17077 }
17078
17079+static inline unsigned long pgd_pfn(pgd_t pgd)
17080+{
17081+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
17082+}
17083+
17084 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
17085
17086 static inline int pmd_large(pmd_t pte)
17087@@ -201,9 +246,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
17088 return pte_clear_flags(pte, _PAGE_RW);
17089 }
17090
17091+static inline pte_t pte_mkread(pte_t pte)
17092+{
17093+ return __pte(pte_val(pte) | _PAGE_USER);
17094+}
17095+
17096 static inline pte_t pte_mkexec(pte_t pte)
17097 {
17098- return pte_clear_flags(pte, _PAGE_NX);
17099+#ifdef CONFIG_X86_PAE
17100+ if (__supported_pte_mask & _PAGE_NX)
17101+ return pte_clear_flags(pte, _PAGE_NX);
17102+ else
17103+#endif
17104+ return pte_set_flags(pte, _PAGE_USER);
17105+}
17106+
17107+static inline pte_t pte_exprotect(pte_t pte)
17108+{
17109+#ifdef CONFIG_X86_PAE
17110+ if (__supported_pte_mask & _PAGE_NX)
17111+ return pte_set_flags(pte, _PAGE_NX);
17112+ else
17113+#endif
17114+ return pte_clear_flags(pte, _PAGE_USER);
17115 }
17116
17117 static inline pte_t pte_mkdirty(pte_t pte)
17118@@ -430,6 +495,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
17119 #endif
17120
17121 #ifndef __ASSEMBLY__
17122+
17123+#ifdef CONFIG_PAX_PER_CPU_PGD
17124+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
17125+enum cpu_pgd_type {kernel = 0, user = 1};
17126+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
17127+{
17128+ return cpu_pgd[cpu][type];
17129+}
17130+#endif
17131+
17132 #include <linux/mm_types.h>
17133 #include <linux/mmdebug.h>
17134 #include <linux/log2.h>
17135@@ -563,7 +638,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
17136 * Currently stuck as a macro due to indirect forward reference to
17137 * linux/mmzone.h's __section_mem_map_addr() definition:
17138 */
17139-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
17140+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
17141
17142 /* Find an entry in the second-level page table.. */
17143 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
17144@@ -603,7 +678,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
17145 * Currently stuck as a macro due to indirect forward reference to
17146 * linux/mmzone.h's __section_mem_map_addr() definition:
17147 */
17148-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
17149+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
17150
17151 /* to find an entry in a page-table-directory. */
17152 static inline unsigned long pud_index(unsigned long address)
17153@@ -618,7 +693,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
17154
17155 static inline int pgd_bad(pgd_t pgd)
17156 {
17157- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
17158+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
17159 }
17160
17161 static inline int pgd_none(pgd_t pgd)
17162@@ -641,7 +716,12 @@ static inline int pgd_none(pgd_t pgd)
17163 * pgd_offset() returns a (pgd_t *)
17164 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
17165 */
17166-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
17167+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
17168+
17169+#ifdef CONFIG_PAX_PER_CPU_PGD
17170+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
17171+#endif
17172+
17173 /*
17174 * a shortcut which implies the use of the kernel's pgd, instead
17175 * of a process's
17176@@ -652,6 +732,23 @@ static inline int pgd_none(pgd_t pgd)
17177 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
17178 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
17179
17180+#ifdef CONFIG_X86_32
17181+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
17182+#else
17183+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
17184+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
17185+
17186+#ifdef CONFIG_PAX_MEMORY_UDEREF
17187+#ifdef __ASSEMBLY__
17188+#define pax_user_shadow_base pax_user_shadow_base(%rip)
17189+#else
17190+extern unsigned long pax_user_shadow_base;
17191+extern pgdval_t clone_pgd_mask;
17192+#endif
17193+#endif
17194+
17195+#endif
17196+
17197 #ifndef __ASSEMBLY__
17198
17199 extern int direct_gbpages;
17200@@ -818,11 +915,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
17201 * dst and src can be on the same page, but the range must not overlap,
17202 * and must not cross a page boundary.
17203 */
17204-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
17205+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
17206 {
17207- memcpy(dst, src, count * sizeof(pgd_t));
17208+ pax_open_kernel();
17209+ while (count--)
17210+ *dst++ = *src++;
17211+ pax_close_kernel();
17212 }
17213
17214+#ifdef CONFIG_PAX_PER_CPU_PGD
17215+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
17216+#endif
17217+
17218+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17219+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
17220+#else
17221+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
17222+#endif
17223+
17224 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
17225 static inline int page_level_shift(enum pg_level level)
17226 {
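Under CONFIG_PAX_PER_CPU_PGD each CPU carries two private top-level tables, cpu_pgd[cpu][kernel] and cpu_pgd[cpu][user], and pgd_offset_cpu() walks them exactly the way pgd_offset() walks a process PGD. A hypothetical walk of the current CPU's kernel PGD:

/* Hypothetical usage; 'addr' is any kernel virtual address: */
unsigned int cpu = smp_processor_id();
pgd_t *pgd = pgd_offset_cpu(cpu, kernel, addr);

if (!pgd_none(*pgd)) {
	pud_t *pud = pud_offset(pgd, addr);	/* descend as usual */
	/* ... */
}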
17227diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
17228index 9ee3221..b979c6b 100644
17229--- a/arch/x86/include/asm/pgtable_32.h
17230+++ b/arch/x86/include/asm/pgtable_32.h
17231@@ -25,9 +25,6 @@
17232 struct mm_struct;
17233 struct vm_area_struct;
17234
17235-extern pgd_t swapper_pg_dir[1024];
17236-extern pgd_t initial_page_table[1024];
17237-
17238 static inline void pgtable_cache_init(void) { }
17239 static inline void check_pgt_cache(void) { }
17240 void paging_init(void);
17241@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
17242 # include <asm/pgtable-2level.h>
17243 #endif
17244
17245+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
17246+extern pgd_t initial_page_table[PTRS_PER_PGD];
17247+#ifdef CONFIG_X86_PAE
17248+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
17249+#endif
17250+
17251 #if defined(CONFIG_HIGHPTE)
17252 #define pte_offset_map(dir, address) \
17253 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
17254@@ -62,12 +65,17 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
17255 /* Clear a kernel PTE and flush it from the TLB */
17256 #define kpte_clear_flush(ptep, vaddr) \
17257 do { \
17258+ pax_open_kernel(); \
17259 pte_clear(&init_mm, (vaddr), (ptep)); \
17260+ pax_close_kernel(); \
17261 __flush_tlb_one((vaddr)); \
17262 } while (0)
17263
17264 #endif /* !__ASSEMBLY__ */
17265
17266+#define HAVE_ARCH_UNMAPPED_AREA
17267+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
17268+
17269 /*
17270 * kern_addr_valid() is (1) for FLATMEM and (0) for
17271 * SPARSEMEM and DISCONTIGMEM
17272diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
17273index ed5903b..c7fe163 100644
17274--- a/arch/x86/include/asm/pgtable_32_types.h
17275+++ b/arch/x86/include/asm/pgtable_32_types.h
17276@@ -8,7 +8,7 @@
17277 */
17278 #ifdef CONFIG_X86_PAE
17279 # include <asm/pgtable-3level_types.h>
17280-# define PMD_SIZE (1UL << PMD_SHIFT)
17281+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
17282 # define PMD_MASK (~(PMD_SIZE - 1))
17283 #else
17284 # include <asm/pgtable-2level_types.h>
17285@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
17286 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
17287 #endif
17288
17289+#ifdef CONFIG_PAX_KERNEXEC
17290+#ifndef __ASSEMBLY__
17291+extern unsigned char MODULES_EXEC_VADDR[];
17292+extern unsigned char MODULES_EXEC_END[];
17293+#endif
17294+#include <asm/boot.h>
17295+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
17296+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
17297+#else
17298+#define ktla_ktva(addr) (addr)
17299+#define ktva_ktla(addr) (addr)
17300+#endif
17301+
17302 #define MODULES_VADDR VMALLOC_START
17303 #define MODULES_END VMALLOC_END
17304 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
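ktla_ktva()/ktva_ktla() convert between the two 32-bit KERNEXEC views of kernel text -- the linear alias and the address the code actually executes at -- which differ by the fixed offset LOAD_PHYSICAL_ADDR + PAGE_OFFSET. Illustrative arithmetic only, assuming the common i386 defaults:

/* With PAGE_OFFSET = 0xc0000000 and LOAD_PHYSICAL_ADDR = 0x1000000,
 * the delta is 0xc1000000 and the macros are exact inverses: */
unsigned long a = 0x00100000UL;		/* hypothetical text address */
unsigned long v = ktla_ktva(a);		/* a + 0xc1000000 */
BUG_ON(ktva_ktla(v) != a);		/* round-trips by construction */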
17305diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
17306index e22c1db..23a625a 100644
17307--- a/arch/x86/include/asm/pgtable_64.h
17308+++ b/arch/x86/include/asm/pgtable_64.h
17309@@ -16,10 +16,14 @@
17310
17311 extern pud_t level3_kernel_pgt[512];
17312 extern pud_t level3_ident_pgt[512];
17313+extern pud_t level3_vmalloc_start_pgt[512];
17314+extern pud_t level3_vmalloc_end_pgt[512];
17315+extern pud_t level3_vmemmap_pgt[512];
17316+extern pud_t level2_vmemmap_pgt[512];
17317 extern pmd_t level2_kernel_pgt[512];
17318 extern pmd_t level2_fixmap_pgt[512];
17319-extern pmd_t level2_ident_pgt[512];
17320-extern pgd_t init_level4_pgt[];
17321+extern pmd_t level2_ident_pgt[512*2];
17322+extern pgd_t init_level4_pgt[512];
17323
17324 #define swapper_pg_dir init_level4_pgt
17325
17326@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17327
17328 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17329 {
17330+ pax_open_kernel();
17331 *pmdp = pmd;
17332+ pax_close_kernel();
17333 }
17334
17335 static inline void native_pmd_clear(pmd_t *pmd)
17336@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
17337
17338 static inline void native_set_pud(pud_t *pudp, pud_t pud)
17339 {
17340+ pax_open_kernel();
17341 *pudp = pud;
17342+ pax_close_kernel();
17343 }
17344
17345 static inline void native_pud_clear(pud_t *pud)
17346@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
17347
17348 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
17349 {
17350+ pax_open_kernel();
17351+ *pgdp = pgd;
17352+ pax_close_kernel();
17353+}
17354+
17355+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17356+{
17357 *pgdp = pgd;
17358 }
17359
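native_set_pgd_batched() above deliberately skips the open/close pair: callers that rewrite many PGD entries (compare clone_pgd_range() in pgtable.h) hold one write-protection window across the whole loop instead of toggling CR0.WP once per entry. Sketch of such a batch caller:

/* Hypothetical batch update -- one WP window for N entries: */
static void copy_pgds_sketch(pgd_t *dst, const pgd_t *src, int count)
{
	pax_open_kernel();
	while (count--)
		set_pgd_batched(dst++, *src++);
	pax_close_kernel();
}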
17360diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
17361index 2d88344..4679fc3 100644
17362--- a/arch/x86/include/asm/pgtable_64_types.h
17363+++ b/arch/x86/include/asm/pgtable_64_types.h
17364@@ -61,6 +61,11 @@ typedef struct { pteval_t pte; } pte_t;
17365 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
17366 #define MODULES_END _AC(0xffffffffff000000, UL)
17367 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
17368+#define MODULES_EXEC_VADDR MODULES_VADDR
17369+#define MODULES_EXEC_END MODULES_END
17370+
17371+#define ktla_ktva(addr) (addr)
17372+#define ktva_ktla(addr) (addr)
17373
17374 #define EARLY_DYNAMIC_PAGE_TABLES 64
17375
17376diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
17377index 0ecac25..306c276 100644
17378--- a/arch/x86/include/asm/pgtable_types.h
17379+++ b/arch/x86/include/asm/pgtable_types.h
17380@@ -16,13 +16,12 @@
17381 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
17382 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
17383 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
17384-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
17385+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
17386 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
17387 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
17388 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
17389-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
17390-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
17391-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
17392+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
17393+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
17394 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
17395
17396 /* If _PAGE_BIT_PRESENT is clear, we use these: */
17397@@ -40,7 +39,6 @@
17398 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
17399 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
17400 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
17401-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
17402 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
17403 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
17404 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
17405@@ -87,8 +85,10 @@
17406
17407 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
17408 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
17409-#else
17410+#elif defined(CONFIG_KMEMCHECK)
17411 #define _PAGE_NX (_AT(pteval_t, 0))
17412+#else
17413+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
17414 #endif
17415
17416 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
17417@@ -146,6 +146,9 @@
17418 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
17419 _PAGE_ACCESSED)
17420
17421+#define PAGE_READONLY_NOEXEC PAGE_READONLY
17422+#define PAGE_SHARED_NOEXEC PAGE_SHARED
17423+
17424 #define __PAGE_KERNEL_EXEC \
17425 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
17426 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
17427@@ -156,7 +159,7 @@
17428 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
17429 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
17430 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
17431-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
17432+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
17433 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
17434 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
17435 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
17436@@ -218,8 +221,8 @@
17437 * bits are combined, this will allow the user to access the high address mapped
17438 * VDSO in the presence of CONFIG_COMPAT_VDSO
17439 */
17440-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
17441-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
17442+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
17443+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
17444 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
17445 #endif
17446
17447@@ -257,7 +260,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
17448 {
17449 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
17450 }
17451+#endif
17452
17453+#if PAGETABLE_LEVELS == 3
17454+#include <asm-generic/pgtable-nopud.h>
17455+#endif
17456+
17457+#if PAGETABLE_LEVELS == 2
17458+#include <asm-generic/pgtable-nopmd.h>
17459+#endif
17460+
17461+#ifndef __ASSEMBLY__
17462 #if PAGETABLE_LEVELS > 3
17463 typedef struct { pudval_t pud; } pud_t;
17464
17465@@ -271,8 +284,6 @@ static inline pudval_t native_pud_val(pud_t pud)
17466 return pud.pud;
17467 }
17468 #else
17469-#include <asm-generic/pgtable-nopud.h>
17470-
17471 static inline pudval_t native_pud_val(pud_t pud)
17472 {
17473 return native_pgd_val(pud.pgd);
17474@@ -292,8 +303,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
17475 return pmd.pmd;
17476 }
17477 #else
17478-#include <asm-generic/pgtable-nopmd.h>
17479-
17480 static inline pmdval_t native_pmd_val(pmd_t pmd)
17481 {
17482 return native_pgd_val(pmd.pud.pgd);
17483@@ -333,7 +342,6 @@ typedef struct page *pgtable_t;
17484
17485 extern pteval_t __supported_pte_mask;
17486 extern void set_nx(void);
17487-extern int nx_enabled;
17488
17489 #define pgprot_writecombine pgprot_writecombine
17490 extern pgprot_t pgprot_writecombine(pgprot_t prot);
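The reworked _PAGE_NX chain above gives non-PAE 32-bit kernels a software NX bit: with no hardware bit 63 available, the patch reuses bit 11 (_PAGE_BIT_HIDDEN, otherwise claimed by kmemcheck, hence the KMEMCHECK carve-out) as pure bookkeeping; actual enforcement on such CPUs comes from segmentation (SEGMEXEC), not the MMU. The resulting decision table, plus a hedged sketch of a query:

/* X86_64 or PAE:	_PAGE_NX = 1ULL << 63	(hardware-enforced)
 * !PAE + KMEMCHECK:	_PAGE_NX = 0		(bit 11 already taken)
 * !PAE otherwise:	_PAGE_NX = 1 << 11	(software marker only) */
static inline int pte_exec_sketch(pte_t pte)
{
	return !(pte_val(pte) & _PAGE_NX);	/* advisory when NX is soft */
}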
17491diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
17492index 987c75e..2723054 100644
17493--- a/arch/x86/include/asm/processor.h
17494+++ b/arch/x86/include/asm/processor.h
17495@@ -199,9 +199,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
17496 : "memory");
17497 }
17498
17499+/* invpcid (%rdx),%rax */
17500+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
17501+
17502+#define INVPCID_SINGLE_ADDRESS 0UL
17503+#define INVPCID_SINGLE_CONTEXT 1UL
17504+#define INVPCID_ALL_GLOBAL 2UL
17505+#define INVPCID_ALL_MONGLOBAL 3UL
17506+
17507+#define PCID_KERNEL 0UL
17508+#define PCID_USER 1UL
17509+#define PCID_NOFLUSH (1UL << 63)
17510+
17511 static inline void load_cr3(pgd_t *pgdir)
17512 {
17513- write_cr3(__pa(pgdir));
17514+ write_cr3(__pa(pgdir) | PCID_KERNEL);
17515 }
17516
17517 #ifdef CONFIG_X86_32
17518@@ -283,7 +295,7 @@ struct tss_struct {
17519
17520 } ____cacheline_aligned;
17521
17522-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
17523+extern struct tss_struct init_tss[NR_CPUS];
17524
17525 /*
17526 * Save the original ist values for checking stack pointers during debugging
17527@@ -453,6 +465,7 @@ struct thread_struct {
17528 unsigned short ds;
17529 unsigned short fsindex;
17530 unsigned short gsindex;
17531+ unsigned short ss;
17532 #endif
17533 #ifdef CONFIG_X86_32
17534 unsigned long ip;
17535@@ -553,29 +566,8 @@ static inline void load_sp0(struct tss_struct *tss,
17536 extern unsigned long mmu_cr4_features;
17537 extern u32 *trampoline_cr4_features;
17538
17539-static inline void set_in_cr4(unsigned long mask)
17540-{
17541- unsigned long cr4;
17542-
17543- mmu_cr4_features |= mask;
17544- if (trampoline_cr4_features)
17545- *trampoline_cr4_features = mmu_cr4_features;
17546- cr4 = read_cr4();
17547- cr4 |= mask;
17548- write_cr4(cr4);
17549-}
17550-
17551-static inline void clear_in_cr4(unsigned long mask)
17552-{
17553- unsigned long cr4;
17554-
17555- mmu_cr4_features &= ~mask;
17556- if (trampoline_cr4_features)
17557- *trampoline_cr4_features = mmu_cr4_features;
17558- cr4 = read_cr4();
17559- cr4 &= ~mask;
17560- write_cr4(cr4);
17561-}
17562+extern void set_in_cr4(unsigned long mask);
17563+extern void clear_in_cr4(unsigned long mask);
17564
17565 typedef struct {
17566 unsigned long seg;
17567@@ -824,11 +816,18 @@ static inline void spin_lock_prefetch(const void *x)
17568 */
17569 #define TASK_SIZE PAGE_OFFSET
17570 #define TASK_SIZE_MAX TASK_SIZE
17571+
17572+#ifdef CONFIG_PAX_SEGMEXEC
17573+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
17574+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
17575+#else
17576 #define STACK_TOP TASK_SIZE
17577-#define STACK_TOP_MAX STACK_TOP
17578+#endif
17579+
17580+#define STACK_TOP_MAX TASK_SIZE
17581
17582 #define INIT_THREAD { \
17583- .sp0 = sizeof(init_stack) + (long)&init_stack, \
17584+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
17585 .vm86_info = NULL, \
17586 .sysenter_cs = __KERNEL_CS, \
17587 .io_bitmap_ptr = NULL, \
17588@@ -842,7 +841,7 @@ static inline void spin_lock_prefetch(const void *x)
17589 */
17590 #define INIT_TSS { \
17591 .x86_tss = { \
17592- .sp0 = sizeof(init_stack) + (long)&init_stack, \
17593+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
17594 .ss0 = __KERNEL_DS, \
17595 .ss1 = __KERNEL_CS, \
17596 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
17597@@ -853,11 +852,7 @@ static inline void spin_lock_prefetch(const void *x)
17598 extern unsigned long thread_saved_pc(struct task_struct *tsk);
17599
17600 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
17601-#define KSTK_TOP(info) \
17602-({ \
17603- unsigned long *__ptr = (unsigned long *)(info); \
17604- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
17605-})
17606+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
17607
17608 /*
17609 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
17610@@ -872,7 +867,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
17611 #define task_pt_regs(task) \
17612 ({ \
17613 struct pt_regs *__regs__; \
17614- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
17615+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
17616 __regs__ - 1; \
17617 })
17618
17619@@ -882,13 +877,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
17620 /*
17621 * User space process size. 47bits minus one guard page.
17622 */
17623-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
17624+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
17625
17626 /* This decides where the kernel will search for a free chunk of vm
17627 * space during mmap's.
17628 */
17629 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
17630- 0xc0000000 : 0xFFFFe000)
17631+ 0xc0000000 : 0xFFFFf000)
17632
17633 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
17634 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
17635@@ -899,11 +894,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
17636 #define STACK_TOP_MAX TASK_SIZE_MAX
17637
17638 #define INIT_THREAD { \
17639- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
17640+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
17641 }
17642
17643 #define INIT_TSS { \
17644- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
17645+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
17646 }
17647
17648 /*
17649@@ -931,6 +926,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
17650 */
17651 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
17652
17653+#ifdef CONFIG_PAX_SEGMEXEC
17654+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
17655+#endif
17656+
17657 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
17658
17659 /* Get/set a process' ability to use the timestamp counter instruction */
17660@@ -957,7 +956,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
17661 return 0;
17662 }
17663
17664-extern unsigned long arch_align_stack(unsigned long sp);
17665+#define arch_align_stack(x) ((x) & ~0xfUL)
17666 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
17667
17668 void default_idle(void);
17669@@ -967,6 +966,6 @@ bool xen_set_default_idle(void);
17670 #define xen_set_default_idle 0
17671 #endif
17672
17673-void stop_this_cpu(void *dummy);
17674+void stop_this_cpu(void *dummy) __noreturn;
17675 void df_debug(struct pt_regs *regs, long error_code);
17676 #endif /* _ASM_X86_PROCESSOR_H */
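With CR4.PCIDE set, bits 0-11 of CR3 carry a process-context ID and bit 63 -- PCID_NOFLUSH above -- tells the CPU to keep that PCID's cached translations across the CR3 write; load_cr3() accordingly tags the kernel page tables with PCID_KERNEL. A hedged sketch of the user-side counterpart that UDEREF builds on:

/* Hypothetical helper: switch to a user PGD under PCID 1 without
 * discarding its cached translations. */
static inline void load_user_cr3_sketch(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir) | PCID_USER | PCID_NOFLUSH);
}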
17677diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
17678index 942a086..6c26446 100644
17679--- a/arch/x86/include/asm/ptrace.h
17680+++ b/arch/x86/include/asm/ptrace.h
17681@@ -85,28 +85,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
17682 }
17683
17684 /*
17685- * user_mode_vm(regs) determines whether a register set came from user mode.
17686+ * user_mode(regs) determines whether a register set came from user mode.
17687 * This is true if V8086 mode was enabled OR if the register set was from
17688 * protected mode with RPL-3 CS value. This tricky test checks that with
17689 * one comparison. Many places in the kernel can bypass this full check
17690- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
17691+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
17692+ * be used.
17693 */
17694-static inline int user_mode(struct pt_regs *regs)
17695+static inline int user_mode_novm(struct pt_regs *regs)
17696 {
17697 #ifdef CONFIG_X86_32
17698 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
17699 #else
17700- return !!(regs->cs & 3);
17701+ return !!(regs->cs & SEGMENT_RPL_MASK);
17702 #endif
17703 }
17704
17705-static inline int user_mode_vm(struct pt_regs *regs)
17706+static inline int user_mode(struct pt_regs *regs)
17707 {
17708 #ifdef CONFIG_X86_32
17709 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
17710 USER_RPL;
17711 #else
17712- return user_mode(regs);
17713+ return user_mode_novm(regs);
17714 #endif
17715 }
17716
17717@@ -122,15 +123,16 @@ static inline int v8086_mode(struct pt_regs *regs)
17718 #ifdef CONFIG_X86_64
17719 static inline bool user_64bit_mode(struct pt_regs *regs)
17720 {
17721+ unsigned long cs = regs->cs & 0xffff;
17722 #ifndef CONFIG_PARAVIRT
17723 /*
17724 * On non-paravirt systems, this is the only long mode CPL 3
17725 * selector. We do not allow long mode selectors in the LDT.
17726 */
17727- return regs->cs == __USER_CS;
17728+ return cs == __USER_CS;
17729 #else
17730 /* Headers are too twisted for this to go in paravirt.h. */
17731- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
17732+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
17733 #endif
17734 }
17735
17736@@ -181,9 +183,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
17737 * Traps from the kernel do not save sp and ss.
17738 * Use the helper function to retrieve sp.
17739 */
17740- if (offset == offsetof(struct pt_regs, sp) &&
17741- regs->cs == __KERNEL_CS)
17742- return kernel_stack_pointer(regs);
17743+ if (offset == offsetof(struct pt_regs, sp)) {
17744+ unsigned long cs = regs->cs & 0xffff;
17745+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
17746+ return kernel_stack_pointer(regs);
17747+ }
17748 #endif
17749 return *(unsigned long *)((unsigned long)regs + offset);
17750 }
17751diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
17752index 9c6b890..5305f53 100644
17753--- a/arch/x86/include/asm/realmode.h
17754+++ b/arch/x86/include/asm/realmode.h
17755@@ -22,16 +22,14 @@ struct real_mode_header {
17756 #endif
17757 /* APM/BIOS reboot */
17758 u32 machine_real_restart_asm;
17759-#ifdef CONFIG_X86_64
17760 u32 machine_real_restart_seg;
17761-#endif
17762 };
17763
17764 /* This must match data at trampoline_32/64.S */
17765 struct trampoline_header {
17766 #ifdef CONFIG_X86_32
17767 u32 start;
17768- u16 gdt_pad;
17769+ u16 boot_cs;
17770 u16 gdt_limit;
17771 u32 gdt_base;
17772 #else
17773diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
17774index a82c4f1..ac45053 100644
17775--- a/arch/x86/include/asm/reboot.h
17776+++ b/arch/x86/include/asm/reboot.h
17777@@ -6,13 +6,13 @@
17778 struct pt_regs;
17779
17780 struct machine_ops {
17781- void (*restart)(char *cmd);
17782- void (*halt)(void);
17783- void (*power_off)(void);
17784+ void (* __noreturn restart)(char *cmd);
17785+ void (* __noreturn halt)(void);
17786+ void (* __noreturn power_off)(void);
17787 void (*shutdown)(void);
17788 void (*crash_shutdown)(struct pt_regs *);
17789- void (*emergency_restart)(void);
17790-};
17791+ void (* __noreturn emergency_restart)(void);
17792+} __no_const;
17793
17794 extern struct machine_ops machine_ops;
17795
17796diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
17797index cad82c9..2e5c5c1 100644
17798--- a/arch/x86/include/asm/rwsem.h
17799+++ b/arch/x86/include/asm/rwsem.h
17800@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
17801 {
17802 asm volatile("# beginning down_read\n\t"
17803 LOCK_PREFIX _ASM_INC "(%1)\n\t"
17804+
17805+#ifdef CONFIG_PAX_REFCOUNT
17806+ "jno 0f\n"
17807+ LOCK_PREFIX _ASM_DEC "(%1)\n"
17808+ "int $4\n0:\n"
17809+ _ASM_EXTABLE(0b, 0b)
17810+#endif
17811+
17812 /* adds 0x00000001 */
17813 " jns 1f\n"
17814 " call call_rwsem_down_read_failed\n"
17815@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
17816 "1:\n\t"
17817 " mov %1,%2\n\t"
17818 " add %3,%2\n\t"
17819+
17820+#ifdef CONFIG_PAX_REFCOUNT
17821+ "jno 0f\n"
17822+ "sub %3,%2\n"
17823+ "int $4\n0:\n"
17824+ _ASM_EXTABLE(0b, 0b)
17825+#endif
17826+
17827 " jle 2f\n\t"
17828 LOCK_PREFIX " cmpxchg %2,%0\n\t"
17829 " jnz 1b\n\t"
17830@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
17831 long tmp;
17832 asm volatile("# beginning down_write\n\t"
17833 LOCK_PREFIX " xadd %1,(%2)\n\t"
17834+
17835+#ifdef CONFIG_PAX_REFCOUNT
17836+ "jno 0f\n"
17837+ "mov %1,(%2)\n"
17838+ "int $4\n0:\n"
17839+ _ASM_EXTABLE(0b, 0b)
17840+#endif
17841+
17842 /* adds 0xffff0001, returns the old value */
17843 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
17844 /* was the active mask 0 before? */
17845@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
17846 long tmp;
17847 asm volatile("# beginning __up_read\n\t"
17848 LOCK_PREFIX " xadd %1,(%2)\n\t"
17849+
17850+#ifdef CONFIG_PAX_REFCOUNT
17851+ "jno 0f\n"
17852+ "mov %1,(%2)\n"
17853+ "int $4\n0:\n"
17854+ _ASM_EXTABLE(0b, 0b)
17855+#endif
17856+
17857 /* subtracts 1, returns the old value */
17858 " jns 1f\n\t"
17859 " call call_rwsem_wake\n" /* expects old value in %edx */
17860@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
17861 long tmp;
17862 asm volatile("# beginning __up_write\n\t"
17863 LOCK_PREFIX " xadd %1,(%2)\n\t"
17864+
17865+#ifdef CONFIG_PAX_REFCOUNT
17866+ "jno 0f\n"
17867+ "mov %1,(%2)\n"
17868+ "int $4\n0:\n"
17869+ _ASM_EXTABLE(0b, 0b)
17870+#endif
17871+
17872 /* subtracts 0xffff0001, returns the old value */
17873 " jns 1f\n\t"
17874 " call call_rwsem_wake\n" /* expects old value in %edx */
17875@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
17876 {
17877 asm volatile("# beginning __downgrade_write\n\t"
17878 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
17879+
17880+#ifdef CONFIG_PAX_REFCOUNT
17881+ "jno 0f\n"
17882+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
17883+ "int $4\n0:\n"
17884+ _ASM_EXTABLE(0b, 0b)
17885+#endif
17886+
17887 /*
17888 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
17889 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
17890@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
17891 */
17892 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
17893 {
17894- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
17895+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
17896+
17897+#ifdef CONFIG_PAX_REFCOUNT
17898+ "jno 0f\n"
17899+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
17900+ "int $4\n0:\n"
17901+ _ASM_EXTABLE(0b, 0b)
17902+#endif
17903+
17904 : "+m" (sem->count)
17905 : "er" (delta));
17906 }
17907@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
17908 */
17909 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
17910 {
17911- return delta + xadd(&sem->count, delta);
17912+ return delta + xadd_check_overflow(&sem->count, delta);
17913 }
17914
17915 #endif /* __KERNEL__ */
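Every PAX_REFCOUNT hunk above is the same three-step guard: perform the atomic update, jno past the recovery code when the overflow flag is clear, otherwise undo the update and raise int $4 (the overflow exception), with _ASM_EXTABLE letting the fault handler resume at the same spot. A C-level sketch of the semantics only -- the real code must stay in asm so the undo is tied to the flags the update just set:

static inline void guarded_add_sketch(long delta, long *count)
{
	long old = *count;

	*count = old + delta;
	if ((old ^ *count) < 0 && (old ^ delta) >= 0) {	/* signed overflow */
		*count = old;	/* mirror the compensating mov/sub */
		BUG();		/* stand-in for int $4 + report */
	}
}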
17916diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
17917index c48a950..bc40804 100644
17918--- a/arch/x86/include/asm/segment.h
17919+++ b/arch/x86/include/asm/segment.h
17920@@ -64,10 +64,15 @@
17921 * 26 - ESPFIX small SS
17922 * 27 - per-cpu [ offset to per-cpu data area ]
17923 * 28 - stack_canary-20 [ for stack protector ]
17924- * 29 - unused
17925- * 30 - unused
17926+ * 29 - PCI BIOS CS
17927+ * 30 - PCI BIOS DS
17928 * 31 - TSS for double fault handler
17929 */
17930+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
17931+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
17932+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
17933+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
17934+
17935 #define GDT_ENTRY_TLS_MIN 6
17936 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
17937
17938@@ -79,6 +84,8 @@
17939
17940 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
17941
17942+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
17943+
17944 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
17945
17946 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
17947@@ -104,6 +111,12 @@
17948 #define __KERNEL_STACK_CANARY 0
17949 #endif
17950
17951+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
17952+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
17953+
17954+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
17955+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
17956+
17957 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
17958
17959 /*
17960@@ -141,7 +154,7 @@
17961 */
17962
17963 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
17964-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
17965+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
17966
17967
17968 #else
17969@@ -165,6 +178,8 @@
17970 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
17971 #define __USER32_DS __USER_DS
17972
17973+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
17974+
17975 #define GDT_ENTRY_TSS 8 /* needs two entries */
17976 #define GDT_ENTRY_LDT 10 /* needs two entries */
17977 #define GDT_ENTRY_TLS_MIN 12
17978@@ -173,6 +188,8 @@
17979 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
17980 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
17981
17982+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
17983+
17984 /* TLS indexes for 64bit - hardcoded in arch_prctl */
17985 #define FS_TLS 0
17986 #define GS_TLS 1
17987@@ -180,12 +197,14 @@
17988 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
17989 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
17990
17991-#define GDT_ENTRIES 16
17992+#define GDT_ENTRIES 17
17993
17994 #endif
17995
17996 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
17997+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
17998 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
17999+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
18000 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
18001 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
18002 #ifndef CONFIG_PARAVIRT
18003@@ -265,7 +284,7 @@ static inline unsigned long get_limit(unsigned long segment)
18004 {
18005 unsigned long __limit;
18006 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
18007- return __limit + 1;
18008+ return __limit;
18009 }
18010
18011 #endif /* !__ASSEMBLY__ */
18012diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
18013index 8d3120f..352b440 100644
18014--- a/arch/x86/include/asm/smap.h
18015+++ b/arch/x86/include/asm/smap.h
18016@@ -25,11 +25,40 @@
18017
18018 #include <asm/alternative-asm.h>
18019
18020+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18021+#define ASM_PAX_OPEN_USERLAND \
18022+ 661: jmp 663f; \
18023+ .pushsection .altinstr_replacement, "a" ; \
18024+ 662: pushq %rax; nop; \
18025+ .popsection ; \
18026+ .pushsection .altinstructions, "a" ; \
18027+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
18028+ .popsection ; \
18029+ call __pax_open_userland; \
18030+ popq %rax; \
18031+ 663:
18032+
18033+#define ASM_PAX_CLOSE_USERLAND \
18034+ 661: jmp 663f; \
18035+ .pushsection .altinstr_replacement, "a" ; \
18036+ 662: pushq %rax; nop; \
18037+ .popsection; \
18038+ .pushsection .altinstructions, "a" ; \
18039+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
18040+ .popsection; \
18041+ call __pax_close_userland; \
18042+ popq %rax; \
18043+ 663:
18044+#else
18045+#define ASM_PAX_OPEN_USERLAND
18046+#define ASM_PAX_CLOSE_USERLAND
18047+#endif
18048+
18049 #ifdef CONFIG_X86_SMAP
18050
18051 #define ASM_CLAC \
18052 661: ASM_NOP3 ; \
18053- .pushsection .altinstr_replacement, "ax" ; \
18054+ .pushsection .altinstr_replacement, "a" ; \
18055 662: __ASM_CLAC ; \
18056 .popsection ; \
18057 .pushsection .altinstructions, "a" ; \
18058@@ -38,7 +67,7 @@
18059
18060 #define ASM_STAC \
18061 661: ASM_NOP3 ; \
18062- .pushsection .altinstr_replacement, "ax" ; \
18063+ .pushsection .altinstr_replacement, "a" ; \
18064 662: __ASM_STAC ; \
18065 .popsection ; \
18066 .pushsection .altinstructions, "a" ; \
18067@@ -56,6 +85,37 @@
18068
18069 #include <asm/alternative.h>
18070
18071+#define __HAVE_ARCH_PAX_OPEN_USERLAND
18072+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
18073+
18074+extern void __pax_open_userland(void);
18075+static __always_inline unsigned long pax_open_userland(void)
18076+{
18077+
18078+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18079+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
18080+ :
18081+ : [open] "i" (__pax_open_userland)
18082+ : "memory", "rax");
18083+#endif
18084+
18085+ return 0;
18086+}
18087+
18088+extern void __pax_close_userland(void);
18089+static __always_inline unsigned long pax_close_userland(void)
18090+{
18091+
18092+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18093+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
18094+ :
18095+ : [close] "i" (__pax_close_userland)
18096+ : "memory", "rax");
18097+#endif
18098+
18099+ return 0;
18100+}
18101+
18102 #ifdef CONFIG_X86_SMAP
18103
18104 static __always_inline void clac(void)
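pax_open_userland()/pax_close_userland() compile to a 5-byte NOP and are only patched into real calls at boot, via the alternatives machinery keyed on the PaX-defined X86_FEATURE_STRONGUDEREF bit, so kernels without strong UDEREF pay nothing. The intended caller shape, sketched by analogy with the SMAP stac()/clac() bracketing below:

/* Hypothetical caller pattern around a legitimate userland access: */
pax_open_userland();
/* ... deliberately dereference a userland address ... */
pax_close_userland();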
18105diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
18106index 4137890..03fa172 100644
18107--- a/arch/x86/include/asm/smp.h
18108+++ b/arch/x86/include/asm/smp.h
18109@@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
18110 /* cpus sharing the last level cache: */
18111 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
18112 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
18113-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
18114+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
18115
18116 static inline struct cpumask *cpu_sibling_mask(int cpu)
18117 {
18118@@ -79,7 +79,7 @@ struct smp_ops {
18119
18120 void (*send_call_func_ipi)(const struct cpumask *mask);
18121 void (*send_call_func_single_ipi)(int cpu);
18122-};
18123+} __no_const;
18124
18125 /* Globals due to paravirt */
18126 extern void set_cpu_sibling_map(int cpu);
18127@@ -191,14 +191,8 @@ extern unsigned disabled_cpus;
18128 extern int safe_smp_processor_id(void);
18129
18130 #elif defined(CONFIG_X86_64_SMP)
18131-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
18132-
18133-#define stack_smp_processor_id() \
18134-({ \
18135- struct thread_info *ti; \
18136- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
18137- ti->cpu; \
18138-})
18139+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
18140+#define stack_smp_processor_id() raw_smp_processor_id()
18141 #define safe_smp_processor_id() smp_processor_id()
18142
18143 #endif
18144diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
18145index bf156de..1a782ab 100644
18146--- a/arch/x86/include/asm/spinlock.h
18147+++ b/arch/x86/include/asm/spinlock.h
18148@@ -223,6 +223,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
18149 static inline void arch_read_lock(arch_rwlock_t *rw)
18150 {
18151 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
18152+
18153+#ifdef CONFIG_PAX_REFCOUNT
18154+ "jno 0f\n"
18155+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
18156+ "int $4\n0:\n"
18157+ _ASM_EXTABLE(0b, 0b)
18158+#endif
18159+
18160 "jns 1f\n"
18161 "call __read_lock_failed\n\t"
18162 "1:\n"
18163@@ -232,6 +240,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
18164 static inline void arch_write_lock(arch_rwlock_t *rw)
18165 {
18166 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
18167+
18168+#ifdef CONFIG_PAX_REFCOUNT
18169+ "jno 0f\n"
18170+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
18171+ "int $4\n0:\n"
18172+ _ASM_EXTABLE(0b, 0b)
18173+#endif
18174+
18175 "jz 1f\n"
18176 "call __write_lock_failed\n\t"
18177 "1:\n"
18178@@ -261,13 +277,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
18179
18180 static inline void arch_read_unlock(arch_rwlock_t *rw)
18181 {
18182- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
18183+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
18184+
18185+#ifdef CONFIG_PAX_REFCOUNT
18186+ "jno 0f\n"
18187+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
18188+ "int $4\n0:\n"
18189+ _ASM_EXTABLE(0b, 0b)
18190+#endif
18191+
18192 :"+m" (rw->lock) : : "memory");
18193 }
18194
18195 static inline void arch_write_unlock(arch_rwlock_t *rw)
18196 {
18197- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
18198+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
18199+
18200+#ifdef CONFIG_PAX_REFCOUNT
18201+ "jno 0f\n"
18202+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
18203+ "int $4\n0:\n"
18204+ _ASM_EXTABLE(0b, 0b)
18205+#endif
18206+
18207 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
18208 }
18209
18210diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
18211index 6a99859..03cb807 100644
18212--- a/arch/x86/include/asm/stackprotector.h
18213+++ b/arch/x86/include/asm/stackprotector.h
18214@@ -47,7 +47,7 @@
18215 * head_32 for boot CPU and setup_per_cpu_areas() for others.
18216 */
18217 #define GDT_STACK_CANARY_INIT \
18218- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
18219+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
18220
18221 /*
18222 * Initialize the stackprotector canary value.
18223@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
18224
18225 static inline void load_stack_canary_segment(void)
18226 {
18227-#ifdef CONFIG_X86_32
18228+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18229 asm volatile ("mov %0, %%gs" : : "r" (0));
18230 #endif
18231 }
18232diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
18233index 70bbe39..4ae2bd4 100644
18234--- a/arch/x86/include/asm/stacktrace.h
18235+++ b/arch/x86/include/asm/stacktrace.h
18236@@ -11,28 +11,20 @@
18237
18238 extern int kstack_depth_to_print;
18239
18240-struct thread_info;
18241+struct task_struct;
18242 struct stacktrace_ops;
18243
18244-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
18245- unsigned long *stack,
18246- unsigned long bp,
18247- const struct stacktrace_ops *ops,
18248- void *data,
18249- unsigned long *end,
18250- int *graph);
18251+typedef unsigned long walk_stack_t(struct task_struct *task,
18252+ void *stack_start,
18253+ unsigned long *stack,
18254+ unsigned long bp,
18255+ const struct stacktrace_ops *ops,
18256+ void *data,
18257+ unsigned long *end,
18258+ int *graph);
18259
18260-extern unsigned long
18261-print_context_stack(struct thread_info *tinfo,
18262- unsigned long *stack, unsigned long bp,
18263- const struct stacktrace_ops *ops, void *data,
18264- unsigned long *end, int *graph);
18265-
18266-extern unsigned long
18267-print_context_stack_bp(struct thread_info *tinfo,
18268- unsigned long *stack, unsigned long bp,
18269- const struct stacktrace_ops *ops, void *data,
18270- unsigned long *end, int *graph);
18271+extern walk_stack_t print_context_stack;
18272+extern walk_stack_t print_context_stack_bp;
18273
18274 /* Generic stack tracer with callbacks */
18275
18276@@ -40,7 +32,7 @@ struct stacktrace_ops {
18277 void (*address)(void *data, unsigned long address, int reliable);
18278 /* On negative return stop dumping */
18279 int (*stack)(void *data, char *name);
18280- walk_stack_t walk_stack;
18281+ walk_stack_t *walk_stack;
18282 };
18283
18284 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
18285diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
18286index d7f3b3b..3cc39f1 100644
18287--- a/arch/x86/include/asm/switch_to.h
18288+++ b/arch/x86/include/asm/switch_to.h
18289@@ -108,7 +108,7 @@ do { \
18290 "call __switch_to\n\t" \
18291 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
18292 __switch_canary \
18293- "movq %P[thread_info](%%rsi),%%r8\n\t" \
18294+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
18295 "movq %%rax,%%rdi\n\t" \
18296 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
18297 "jnz ret_from_fork\n\t" \
18298@@ -119,7 +119,7 @@ do { \
18299 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
18300 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
18301 [_tif_fork] "i" (_TIF_FORK), \
18302- [thread_info] "i" (offsetof(struct task_struct, stack)), \
18303+ [thread_info] "m" (current_tinfo), \
18304 [current_task] "m" (current_task) \
18305 __switch_canary_iparam \
18306 : "memory", "cc" __EXTRA_CLOBBER)
18307diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
18308index 2781119..618b59b 100644
18309--- a/arch/x86/include/asm/thread_info.h
18310+++ b/arch/x86/include/asm/thread_info.h
18311@@ -10,6 +10,7 @@
18312 #include <linux/compiler.h>
18313 #include <asm/page.h>
18314 #include <asm/types.h>
18315+#include <asm/percpu.h>
18316
18317 /*
18318 * low level task data that entry.S needs immediate access to
18319@@ -23,7 +24,6 @@ struct exec_domain;
18320 #include <linux/atomic.h>
18321
18322 struct thread_info {
18323- struct task_struct *task; /* main task structure */
18324 struct exec_domain *exec_domain; /* execution domain */
18325 __u32 flags; /* low level flags */
18326 __u32 status; /* thread synchronous flags */
18327@@ -33,19 +33,13 @@ struct thread_info {
18328 mm_segment_t addr_limit;
18329 struct restart_block restart_block;
18330 void __user *sysenter_return;
18331-#ifdef CONFIG_X86_32
18332- unsigned long previous_esp; /* ESP of the previous stack in
18333- case of nested (IRQ) stacks
18334- */
18335- __u8 supervisor_stack[0];
18336-#endif
18337+ unsigned long lowest_stack;
18338 unsigned int sig_on_uaccess_error:1;
18339 unsigned int uaccess_err:1; /* uaccess failed */
18340 };
18341
18342-#define INIT_THREAD_INFO(tsk) \
18343+#define INIT_THREAD_INFO \
18344 { \
18345- .task = &tsk, \
18346 .exec_domain = &default_exec_domain, \
18347 .flags = 0, \
18348 .cpu = 0, \
18349@@ -56,7 +50,7 @@ struct thread_info {
18350 }, \
18351 }
18352
18353-#define init_thread_info (init_thread_union.thread_info)
18354+#define init_thread_info (init_thread_union.stack)
18355 #define init_stack (init_thread_union.stack)
18356
18357 #else /* !__ASSEMBLY__ */
18358@@ -96,6 +90,7 @@ struct thread_info {
18359 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
18360 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
18361 #define TIF_X32 30 /* 32-bit native x86-64 binary */
18362+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
18363
18364 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
18365 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
18366@@ -119,17 +114,18 @@ struct thread_info {
18367 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
18368 #define _TIF_ADDR32 (1 << TIF_ADDR32)
18369 #define _TIF_X32 (1 << TIF_X32)
18370+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
18371
18372 /* work to do in syscall_trace_enter() */
18373 #define _TIF_WORK_SYSCALL_ENTRY \
18374 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
18375 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
18376- _TIF_NOHZ)
18377+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
18378
18379 /* work to do in syscall_trace_leave() */
18380 #define _TIF_WORK_SYSCALL_EXIT \
18381 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
18382- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
18383+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
18384
18385 /* work to do on interrupt/exception return */
18386 #define _TIF_WORK_MASK \
18387@@ -140,7 +136,7 @@ struct thread_info {
18388 /* work to do on any return to user space */
18389 #define _TIF_ALLWORK_MASK \
18390 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
18391- _TIF_NOHZ)
18392+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
18393
18394 /* Only used for 64 bit */
18395 #define _TIF_DO_NOTIFY_MASK \
18396@@ -156,45 +152,40 @@ struct thread_info {
18397
18398 #define PREEMPT_ACTIVE 0x10000000
18399
18400-#ifdef CONFIG_X86_32
18401-
18402-#define STACK_WARN (THREAD_SIZE/8)
18403-/*
18404- * macros/functions for gaining access to the thread information structure
18405- *
18406- * preempt_count needs to be 1 initially, until the scheduler is functional.
18407- */
18408-#ifndef __ASSEMBLY__
18409-
18410-
18411-/* how to get the current stack pointer from C */
18412-register unsigned long current_stack_pointer asm("esp") __used;
18413-
18414-/* how to get the thread information struct from C */
18415-static inline struct thread_info *current_thread_info(void)
18416-{
18417- return (struct thread_info *)
18418- (current_stack_pointer & ~(THREAD_SIZE - 1));
18419-}
18420-
18421-#else /* !__ASSEMBLY__ */
18422-
18423+#ifdef __ASSEMBLY__
18424 /* how to get the thread information struct from ASM */
18425 #define GET_THREAD_INFO(reg) \
18426- movl $-THREAD_SIZE, reg; \
18427- andl %esp, reg
18428+ mov PER_CPU_VAR(current_tinfo), reg
18429
18430 /* use this one if reg already contains %esp */
18431-#define GET_THREAD_INFO_WITH_ESP(reg) \
18432- andl $-THREAD_SIZE, reg
18433+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
18434+#else
18435+/* how to get the thread information struct from C */
18436+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
18437+
18438+static __always_inline struct thread_info *current_thread_info(void)
18439+{
18440+ return this_cpu_read_stable(current_tinfo);
18441+}
18442+#endif
18443+
18444+#ifdef CONFIG_X86_32
18445+
18446+#define STACK_WARN (THREAD_SIZE/8)
18447+/*
18448+ * macros/functions for gaining access to the thread information structure
18449+ *
18450+ * preempt_count needs to be 1 initially, until the scheduler is functional.
18451+ */
18452+#ifndef __ASSEMBLY__
18453+
18454+/* how to get the current stack pointer from C */
18455+register unsigned long current_stack_pointer asm("esp") __used;
18456
18457 #endif
18458
18459 #else /* X86_32 */
18460
18461-#include <asm/percpu.h>
18462-#define KERNEL_STACK_OFFSET (5*8)
18463-
18464 /*
18465 * macros/functions for gaining access to the thread information structure
18466 * preempt_count needs to be 1 initially, until the scheduler is functional.
18467@@ -202,27 +193,8 @@ static inline struct thread_info *current_thread_info(void)
18468 #ifndef __ASSEMBLY__
18469 DECLARE_PER_CPU(unsigned long, kernel_stack);
18470
18471-static inline struct thread_info *current_thread_info(void)
18472-{
18473- struct thread_info *ti;
18474- ti = (void *)(this_cpu_read_stable(kernel_stack) +
18475- KERNEL_STACK_OFFSET - THREAD_SIZE);
18476- return ti;
18477-}
18478-
18479-#else /* !__ASSEMBLY__ */
18480-
18481-/* how to get the thread information struct from ASM */
18482-#define GET_THREAD_INFO(reg) \
18483- movq PER_CPU_VAR(kernel_stack),reg ; \
18484- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
18485-
18486-/*
18487- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
18488- * a certain register (to be used in assembler memory operands).
18489- */
18490-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
18491-
18492+/* how to get the current stack pointer from C */
18493+register unsigned long current_stack_pointer asm("rsp") __used;
18494 #endif
18495
18496 #endif /* !X86_32 */
18497@@ -281,5 +253,12 @@ static inline bool is_ia32_task(void)
18498 extern void arch_task_cache_init(void);
18499 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
18500 extern void arch_release_task_struct(struct task_struct *tsk);
18501+
18502+#define __HAVE_THREAD_FUNCTIONS
18503+#define task_thread_info(task) (&(task)->tinfo)
18504+#define task_stack_page(task) ((task)->stack)
18505+#define setup_thread_stack(p, org) do {} while (0)
18506+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
18507+
18508 #endif
18509 #endif /* _ASM_X86_THREAD_INFO_H */
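The net effect of the thread_info.h rework: struct thread_info no longer sits at the base of the kernel stack (where a stack overflow silently corrupts it) but inside task_struct (task->tinfo), reached through the per-CPU current_tinfo pointer updated at context switch. The old and new lookups side by side, as a sketch:

/* Before: mask the stack pointer --
 *	ti = (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
 * After: one stable per-CPU read, identical on 32- and 64-bit: */
static __always_inline struct thread_info *cti_sketch(void)
{
	return this_cpu_read_stable(current_tinfo);
}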
18510diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
18511index e6d90ba..0897f44 100644
18512--- a/arch/x86/include/asm/tlbflush.h
18513+++ b/arch/x86/include/asm/tlbflush.h
18514@@ -17,18 +17,44 @@
18515
18516 static inline void __native_flush_tlb(void)
18517 {
18518+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
18519+ u64 descriptor[2];
18520+
18521+ descriptor[0] = PCID_KERNEL;
18522+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_MONGLOBAL) : "memory");
18523+ return;
18524+ }
18525+
18526+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18527+ if (static_cpu_has(X86_FEATURE_PCID)) {
18528+ unsigned int cpu = raw_get_cpu();
18529+
18530+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
18531+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
18532+ raw_put_cpu_no_resched();
18533+ return;
18534+ }
18535+#endif
18536+
18537 native_write_cr3(native_read_cr3());
18538 }
18539
18540 static inline void __native_flush_tlb_global_irq_disabled(void)
18541 {
18542- unsigned long cr4;
18543+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
18544+ u64 descriptor[2];
18545
18546- cr4 = native_read_cr4();
18547- /* clear PGE */
18548- native_write_cr4(cr4 & ~X86_CR4_PGE);
18549- /* write old PGE again and flush TLBs */
18550- native_write_cr4(cr4);
18551+ descriptor[0] = PCID_KERNEL;
18552+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
18553+ } else {
18554+ unsigned long cr4;
18555+
18556+ cr4 = native_read_cr4();
18557+ /* clear PGE */
18558+ native_write_cr4(cr4 & ~X86_CR4_PGE);
18559+ /* write old PGE again and flush TLBs */
18560+ native_write_cr4(cr4);
18561+ }
18562 }
18563
18564 static inline void __native_flush_tlb_global(void)
18565@@ -49,6 +75,42 @@ static inline void __native_flush_tlb_global(void)
18566
18567 static inline void __native_flush_tlb_single(unsigned long addr)
18568 {
18569+
18570+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
18571+ u64 descriptor[2];
18572+
18573+ descriptor[0] = PCID_KERNEL;
18574+ descriptor[1] = addr;
18575+
18576+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18577+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
18578+ if (addr < TASK_SIZE_MAX)
18579+ descriptor[1] += pax_user_shadow_base;
18580+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
18581+ }
18582+
18583+ descriptor[0] = PCID_USER;
18584+ descriptor[1] = addr;
18585+#endif
18586+
18587+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
18588+ return;
18589+ }
18590+
18591+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18592+ if (static_cpu_has(X86_FEATURE_PCID)) {
18593+ unsigned int cpu = raw_get_cpu();
18594+
18595+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
18596+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
18597+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
18598+ raw_put_cpu_no_resched();
18599+
18600+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
18601+ addr += pax_user_shadow_base;
18602+ }
18603+#endif
18604+
18605 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
18606 }
18607
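
Each INVPCID path above hands the CPU a 16-byte descriptor in memory — PCID in the first quadword, linear address in the second — with the invalidation type in a register; the patch's INVPCID_ALL_MONGLOBAL names type 3 (flush everything except global entries). A sketch of the calling shape, assuming an assembler new enough to know the invpcid mnemonic (PCID_KERNEL, PCID_USER and __ASM_INVPCID are defined elsewhere in this patch):

#include <stdint.h>

/* Invalidation types per the Intel SDM. */
#define INVPCID_SINGLE_ADDRESS 0UL /* one address in one PCID */
#define INVPCID_SINGLE_CONTEXT 1UL /* all non-global entries of one PCID */
#define INVPCID_ALL_GLOBAL     2UL /* everything, including globals */
#define INVPCID_ALL_NONGLOBAL  3UL /* everything except globals */

static inline void invpcid_sketch(unsigned long type,
                                  unsigned long pcid, unsigned long addr)
{
        /* 128-bit memory operand: PCID in bits 0-11, linear address
         * in the second quadword (consulted only for type 0). */
        struct { uint64_t d[2]; } desc = { { pcid, addr } };

        asm volatile("invpcid %0, %1" : : "m"(desc), "r"(type) : "memory");
}
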
18608diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
18609index 5838fa9..f7ae572 100644
18610--- a/arch/x86/include/asm/uaccess.h
18611+++ b/arch/x86/include/asm/uaccess.h
18612@@ -7,6 +7,7 @@
18613 #include <linux/compiler.h>
18614 #include <linux/thread_info.h>
18615 #include <linux/string.h>
18616+#include <linux/sched.h>
18617 #include <asm/asm.h>
18618 #include <asm/page.h>
18619 #include <asm/smap.h>
18620@@ -29,7 +30,12 @@
18621
18622 #define get_ds() (KERNEL_DS)
18623 #define get_fs() (current_thread_info()->addr_limit)
18624+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18625+void __set_fs(mm_segment_t x);
18626+void set_fs(mm_segment_t x);
18627+#else
18628 #define set_fs(x) (current_thread_info()->addr_limit = (x))
18629+#endif
18630
18631 #define segment_eq(a, b) ((a).seg == (b).seg)
18632
18633@@ -77,8 +83,33 @@
18634 * checks that the pointer is in the user space range - after calling
18635 * this function, memory access functions may still return -EFAULT.
18636 */
18637-#define access_ok(type, addr, size) \
18638- (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
18639+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
18640+#define access_ok(type, addr, size) \
18641+({ \
18642+ long __size = size; \
18643+ unsigned long __addr = (unsigned long)addr; \
18644+ unsigned long __addr_ao = __addr & PAGE_MASK; \
18645+ unsigned long __end_ao = __addr + __size - 1; \
18646+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
18647+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
18648+ while(__addr_ao <= __end_ao) { \
18649+ char __c_ao; \
18650+ __addr_ao += PAGE_SIZE; \
18651+ if (__size > PAGE_SIZE) \
18652+ cond_resched(); \
18653+ if (__get_user(__c_ao, (char __user *)__addr)) \
18654+ break; \
18655+ if (type != VERIFY_WRITE) { \
18656+ __addr = __addr_ao; \
18657+ continue; \
18658+ } \
18659+ if (__put_user(__c_ao, (char __user *)__addr)) \
18660+ break; \
18661+ __addr = __addr_ao; \
18662+ } \
18663+ } \
18664+ __ret_ao; \
18665+})
18666
18667 /*
18668 * The exception table consists of pairs of addresses relative to the
18669@@ -168,10 +199,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
18670 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
18671 __chk_user_ptr(ptr); \
18672 might_fault(); \
18673+ pax_open_userland(); \
18674 asm volatile("call __get_user_%P3" \
18675 : "=a" (__ret_gu), "=r" (__val_gu) \
18676 : "0" (ptr), "i" (sizeof(*(ptr)))); \
18677 (x) = (__typeof__(*(ptr))) __val_gu; \
18678+ pax_close_userland(); \
18679 __ret_gu; \
18680 })
18681
18682@@ -179,13 +212,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
18683 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
18684 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
18685
18686-
18687+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18688+#define __copyuser_seg "gs;"
18689+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
18690+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
18691+#else
18692+#define __copyuser_seg
18693+#define __COPYUSER_SET_ES
18694+#define __COPYUSER_RESTORE_ES
18695+#endif
18696
18697 #ifdef CONFIG_X86_32
18698 #define __put_user_asm_u64(x, addr, err, errret) \
18699 asm volatile(ASM_STAC "\n" \
18700- "1: movl %%eax,0(%2)\n" \
18701- "2: movl %%edx,4(%2)\n" \
18702+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
18703+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
18704 "3: " ASM_CLAC "\n" \
18705 ".section .fixup,\"ax\"\n" \
18706 "4: movl %3,%0\n" \
18707@@ -198,8 +239,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
18708
18709 #define __put_user_asm_ex_u64(x, addr) \
18710 asm volatile(ASM_STAC "\n" \
18711- "1: movl %%eax,0(%1)\n" \
18712- "2: movl %%edx,4(%1)\n" \
18713+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
18714+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
18715 "3: " ASM_CLAC "\n" \
18716 _ASM_EXTABLE_EX(1b, 2b) \
18717 _ASM_EXTABLE_EX(2b, 3b) \
18718@@ -249,7 +290,8 @@ extern void __put_user_8(void);
18719 __typeof__(*(ptr)) __pu_val; \
18720 __chk_user_ptr(ptr); \
18721 might_fault(); \
18722- __pu_val = x; \
18723+ __pu_val = (x); \
18724+ pax_open_userland(); \
18725 switch (sizeof(*(ptr))) { \
18726 case 1: \
18727 __put_user_x(1, __pu_val, ptr, __ret_pu); \
18728@@ -267,6 +309,7 @@ extern void __put_user_8(void);
18729 __put_user_x(X, __pu_val, ptr, __ret_pu); \
18730 break; \
18731 } \
18732+ pax_close_userland(); \
18733 __ret_pu; \
18734 })
18735
18736@@ -347,8 +390,10 @@ do { \
18737 } while (0)
18738
18739 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
18740+do { \
18741+ pax_open_userland(); \
18742 asm volatile(ASM_STAC "\n" \
18743- "1: mov"itype" %2,%"rtype"1\n" \
18744+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
18745 "2: " ASM_CLAC "\n" \
18746 ".section .fixup,\"ax\"\n" \
18747 "3: mov %3,%0\n" \
18748@@ -356,8 +401,10 @@ do { \
18749 " jmp 2b\n" \
18750 ".previous\n" \
18751 _ASM_EXTABLE(1b, 3b) \
18752- : "=r" (err), ltype(x) \
18753- : "m" (__m(addr)), "i" (errret), "0" (err))
18754+ : "=r" (err), ltype (x) \
18755+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
18756+ pax_close_userland(); \
18757+} while (0)
18758
18759 #define __get_user_size_ex(x, ptr, size) \
18760 do { \
18761@@ -381,7 +428,7 @@ do { \
18762 } while (0)
18763
18764 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
18765- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
18766+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
18767 "2:\n" \
18768 _ASM_EXTABLE_EX(1b, 2b) \
18769 : ltype(x) : "m" (__m(addr)))
18770@@ -398,13 +445,24 @@ do { \
18771 int __gu_err; \
18772 unsigned long __gu_val; \
18773 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
18774- (x) = (__force __typeof__(*(ptr)))__gu_val; \
18775+ (x) = (__typeof__(*(ptr)))__gu_val; \
18776 __gu_err; \
18777 })
18778
18779 /* FIXME: this hack is definitely wrong -AK */
18780 struct __large_struct { unsigned long buf[100]; };
18781-#define __m(x) (*(struct __large_struct __user *)(x))
18782+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18783+#define ____m(x) \
18784+({ \
18785+ unsigned long ____x = (unsigned long)(x); \
18786+ if (____x < pax_user_shadow_base) \
18787+ ____x += pax_user_shadow_base; \
18788+ (typeof(x))____x; \
18789+})
18790+#else
18791+#define ____m(x) (x)
18792+#endif
18793+#define __m(x) (*(struct __large_struct __user *)____m(x))
18794
18795 /*
18796 * Tell gcc we read from memory instead of writing: this is because
18797@@ -412,8 +470,10 @@ struct __large_struct { unsigned long buf[100]; };
18798 * aliasing issues.
18799 */
18800 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
18801+do { \
18802+ pax_open_userland(); \
18803 asm volatile(ASM_STAC "\n" \
18804- "1: mov"itype" %"rtype"1,%2\n" \
18805+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
18806 "2: " ASM_CLAC "\n" \
18807 ".section .fixup,\"ax\"\n" \
18808 "3: mov %3,%0\n" \
18809@@ -421,10 +481,12 @@ struct __large_struct { unsigned long buf[100]; };
18810 ".previous\n" \
18811 _ASM_EXTABLE(1b, 3b) \
18812 : "=r"(err) \
18813- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
18814+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
18815+ pax_close_userland(); \
18816+} while (0)
18817
18818 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
18819- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
18820+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
18821 "2:\n" \
18822 _ASM_EXTABLE_EX(1b, 2b) \
18823 : : ltype(x), "m" (__m(addr)))
18824@@ -434,11 +496,13 @@ struct __large_struct { unsigned long buf[100]; };
18825 */
18826 #define uaccess_try do { \
18827 current_thread_info()->uaccess_err = 0; \
18828+ pax_open_userland(); \
18829 stac(); \
18830 barrier();
18831
18832 #define uaccess_catch(err) \
18833 clac(); \
18834+ pax_close_userland(); \
18835 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
18836 } while (0)
18837
18838@@ -463,8 +527,12 @@ struct __large_struct { unsigned long buf[100]; };
18839 * On error, the variable @x is set to zero.
18840 */
18841
18842+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18843+#define __get_user(x, ptr) get_user((x), (ptr))
18844+#else
18845 #define __get_user(x, ptr) \
18846 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
18847+#endif
18848
18849 /**
18850 * __put_user: - Write a simple value into user space, with less checking.
18851@@ -486,8 +554,12 @@ struct __large_struct { unsigned long buf[100]; };
18852 * Returns zero on success, or -EFAULT on error.
18853 */
18854
18855+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18856+#define __put_user(x, ptr) put_user((x), (ptr))
18857+#else
18858 #define __put_user(x, ptr) \
18859 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
18860+#endif
18861
18862 #define __get_user_unaligned __get_user
18863 #define __put_user_unaligned __put_user
18864@@ -505,7 +577,7 @@ struct __large_struct { unsigned long buf[100]; };
18865 #define get_user_ex(x, ptr) do { \
18866 unsigned long __gue_val; \
18867 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
18868- (x) = (__force __typeof__(*(ptr)))__gue_val; \
18869+ (x) = (__typeof__(*(ptr)))__gue_val; \
18870 } while (0)
18871
18872 #define put_user_try uaccess_try
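
The widened access_ok() above does more than a range check: when a request spans a page boundary, it walks the span one page at a time, reading a byte from each page — and writing it back for VERIFY_WRITE — so any fault fires before the real copy starts. A rough user-space analogue of that pre-fault walk (4 KiB pages and size > 0 assumed):

#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE_SKETCH 4096UL
#define PAGE_MASK_SKETCH (~(PAGE_SIZE_SKETCH - 1))

/* Touch one byte per page of [addr, addr+size); for writable ranges
 * write the byte back so copy-on-write pages are broken up front. */
static void prefault_range(char *addr, size_t size, int writable)
{
        uintptr_t start = (uintptr_t)addr;
        uintptr_t page  = start & PAGE_MASK_SKETCH;
        uintptr_t end   = start + size - 1;

        for (; page <= end; page += PAGE_SIZE_SKETCH) {
                volatile char *p =
                        (volatile char *)(page < start ? start : page);
                char c = *p;
                if (writable)
                        *p = c;
        }
}
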
18873diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
18874index 7f760a9..b596b9a 100644
18875--- a/arch/x86/include/asm/uaccess_32.h
18876+++ b/arch/x86/include/asm/uaccess_32.h
18877@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
18878 static __always_inline unsigned long __must_check
18879 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
18880 {
18881+ if ((long)n < 0)
18882+ return n;
18883+
18884+ check_object_size(from, n, true);
18885+
18886 if (__builtin_constant_p(n)) {
18887 unsigned long ret;
18888
18889@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
18890 __copy_to_user(void __user *to, const void *from, unsigned long n)
18891 {
18892 might_fault();
18893+
18894 return __copy_to_user_inatomic(to, from, n);
18895 }
18896
18897 static __always_inline unsigned long
18898 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
18899 {
18900+ if ((long)n < 0)
18901+ return n;
18902+
18903 /* Avoid zeroing the tail if the copy fails..
18904 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
18905 * but as the zeroing behaviour is only significant when n is not
18906@@ -137,6 +146,12 @@ static __always_inline unsigned long
18907 __copy_from_user(void *to, const void __user *from, unsigned long n)
18908 {
18909 might_fault();
18910+
18911+ if ((long)n < 0)
18912+ return n;
18913+
18914+ check_object_size(to, n, false);
18915+
18916 if (__builtin_constant_p(n)) {
18917 unsigned long ret;
18918
18919@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
18920 const void __user *from, unsigned long n)
18921 {
18922 might_fault();
18923+
18924+ if ((long)n < 0)
18925+ return n;
18926+
18927 if (__builtin_constant_p(n)) {
18928 unsigned long ret;
18929
18930@@ -181,15 +200,19 @@ static __always_inline unsigned long
18931 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
18932 unsigned long n)
18933 {
18934- return __copy_from_user_ll_nocache_nozero(to, from, n);
18935+ if ((long)n < 0)
18936+ return n;
18937+
18938+ return __copy_from_user_ll_nocache_nozero(to, from, n);
18939 }
18940
18941-unsigned long __must_check copy_to_user(void __user *to,
18942- const void *from, unsigned long n);
18943-unsigned long __must_check _copy_from_user(void *to,
18944- const void __user *from,
18945- unsigned long n);
18946-
18947+extern void copy_to_user_overflow(void)
18948+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
18949+ __compiletime_error("copy_to_user() buffer size is not provably correct")
18950+#else
18951+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
18952+#endif
18953+;
18954
18955 extern void copy_from_user_overflow(void)
18956 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
18957@@ -199,17 +222,60 @@ extern void copy_from_user_overflow(void)
18958 #endif
18959 ;
18960
18961-static inline unsigned long __must_check copy_from_user(void *to,
18962- const void __user *from,
18963- unsigned long n)
18964+/**
18965+ * copy_to_user: - Copy a block of data into user space.
18966+ * @to: Destination address, in user space.
18967+ * @from: Source address, in kernel space.
18968+ * @n: Number of bytes to copy.
18969+ *
18970+ * Context: User context only. This function may sleep.
18971+ *
18972+ * Copy data from kernel space to user space.
18973+ *
18974+ * Returns number of bytes that could not be copied.
18975+ * On success, this will be zero.
18976+ */
18977+static inline unsigned long __must_check
18978+copy_to_user(void __user *to, const void *from, unsigned long n)
18979 {
18980- int sz = __compiletime_object_size(to);
18981+ size_t sz = __compiletime_object_size(from);
18982
18983- if (likely(sz == -1 || sz >= n))
18984- n = _copy_from_user(to, from, n);
18985- else
18986+ if (unlikely(sz != (size_t)-1 && sz < n))
18987+ copy_to_user_overflow();
18988+ else if (access_ok(VERIFY_WRITE, to, n))
18989+ n = __copy_to_user(to, from, n);
18990+ return n;
18991+}
18992+
18993+/**
18994+ * copy_from_user: - Copy a block of data from user space.
18995+ * @to: Destination address, in kernel space.
18996+ * @from: Source address, in user space.
18997+ * @n: Number of bytes to copy.
18998+ *
18999+ * Context: User context only. This function may sleep.
19000+ *
19001+ * Copy data from user space to kernel space.
19002+ *
19003+ * Returns number of bytes that could not be copied.
19004+ * On success, this will be zero.
19005+ *
19006+ * If some data could not be copied, this function will pad the copied
19007+ * data to the requested size using zero bytes.
19008+ */
19009+static inline unsigned long __must_check
19010+copy_from_user(void *to, const void __user *from, unsigned long n)
19011+{
19012+ size_t sz = __compiletime_object_size(to);
19013+
19014+ check_object_size(to, n, false);
19015+
19016+ if (unlikely(sz != (size_t)-1 && sz < n))
19017 copy_from_user_overflow();
19018-
19019+ else if (access_ok(VERIFY_READ, from, n))
19020+ n = __copy_from_user(to, from, n);
19021+ else if ((long)n > 0)
19022+ memset(to, 0, n);
19023 return n;
19024 }
19025
19026diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
19027index 4f7923d..201b58d 100644
19028--- a/arch/x86/include/asm/uaccess_64.h
19029+++ b/arch/x86/include/asm/uaccess_64.h
19030@@ -10,6 +10,9 @@
19031 #include <asm/alternative.h>
19032 #include <asm/cpufeature.h>
19033 #include <asm/page.h>
19034+#include <asm/pgtable.h>
19035+
19036+#define set_fs(x) (current_thread_info()->addr_limit = (x))
19037
19038 /*
19039 * Copy To/From Userspace
19040@@ -17,14 +20,14 @@
19041
19042 /* Handles exceptions in both to and from, but doesn't do access_ok */
19043 __must_check unsigned long
19044-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
19045+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
19046 __must_check unsigned long
19047-copy_user_generic_string(void *to, const void *from, unsigned len);
19048+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
19049 __must_check unsigned long
19050-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
19051+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
19052
19053-static __always_inline __must_check unsigned long
19054-copy_user_generic(void *to, const void *from, unsigned len)
19055+static __always_inline __must_check unsigned long
19056+copy_user_generic(void *to, const void *from, unsigned long len)
19057 {
19058 unsigned ret;
19059
19060@@ -45,138 +48,200 @@ copy_user_generic(void *to, const void *from, unsigned len)
19061 return ret;
19062 }
19063
19064+static __always_inline __must_check unsigned long
19065+__copy_to_user(void __user *to, const void *from, unsigned long len);
19066+static __always_inline __must_check unsigned long
19067+__copy_from_user(void *to, const void __user *from, unsigned long len);
19068 __must_check unsigned long
19069-_copy_to_user(void __user *to, const void *from, unsigned len);
19070-__must_check unsigned long
19071-_copy_from_user(void *to, const void __user *from, unsigned len);
19072-__must_check unsigned long
19073-copy_in_user(void __user *to, const void __user *from, unsigned len);
19074+copy_in_user(void __user *to, const void __user *from, unsigned long len);
19075+
19076+extern void copy_to_user_overflow(void)
19077+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
19078+ __compiletime_error("copy_to_user() buffer size is not provably correct")
19079+#else
19080+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
19081+#endif
19082+;
19083+
19084+extern void copy_from_user_overflow(void)
19085+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
19086+ __compiletime_error("copy_from_user() buffer size is not provably correct")
19087+#else
19088+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
19089+#endif
19090+;
19091
19092 static inline unsigned long __must_check copy_from_user(void *to,
19093 const void __user *from,
19094 unsigned long n)
19095 {
19096- int sz = __compiletime_object_size(to);
19097-
19098 might_fault();
19099- if (likely(sz == -1 || sz >= n))
19100- n = _copy_from_user(to, from, n);
19101-#ifdef CONFIG_DEBUG_VM
19102- else
19103- WARN(1, "Buffer overflow detected!\n");
19104-#endif
19105+
19106+ check_object_size(to, n, false);
19107+
19108+ if (access_ok(VERIFY_READ, from, n))
19109+ n = __copy_from_user(to, from, n);
19110+ else if (n < INT_MAX)
19111+ memset(to, 0, n);
19112 return n;
19113 }
19114
19115 static __always_inline __must_check
19116-int copy_to_user(void __user *dst, const void *src, unsigned size)
19117+int copy_to_user(void __user *dst, const void *src, unsigned long size)
19118 {
19119 might_fault();
19120
19121- return _copy_to_user(dst, src, size);
19122+ if (access_ok(VERIFY_WRITE, dst, size))
19123+ size = __copy_to_user(dst, src, size);
19124+ return size;
19125 }
19126
19127 static __always_inline __must_check
19128-int __copy_from_user(void *dst, const void __user *src, unsigned size)
19129+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
19130 {
19131- int ret = 0;
19132+ size_t sz = __compiletime_object_size(dst);
19133+ unsigned ret = 0;
19134
19135 might_fault();
19136+
19137+ if (size > INT_MAX)
19138+ return size;
19139+
19140+ check_object_size(dst, size, false);
19141+
19142+#ifdef CONFIG_PAX_MEMORY_UDEREF
19143+ if (!__access_ok(VERIFY_READ, src, size))
19144+ return size;
19145+#endif
19146+
19147+ if (unlikely(sz != (size_t)-1 && sz < size)) {
19148+ copy_from_user_overflow();
19149+ return size;
19150+ }
19151+
19152 if (!__builtin_constant_p(size))
19153- return copy_user_generic(dst, (__force void *)src, size);
19154+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
19155 switch (size) {
19156- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
19157+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
19158 ret, "b", "b", "=q", 1);
19159 return ret;
19160- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
19161+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
19162 ret, "w", "w", "=r", 2);
19163 return ret;
19164- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
19165+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
19166 ret, "l", "k", "=r", 4);
19167 return ret;
19168- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
19169+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
19170 ret, "q", "", "=r", 8);
19171 return ret;
19172 case 10:
19173- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
19174+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
19175 ret, "q", "", "=r", 10);
19176 if (unlikely(ret))
19177 return ret;
19178 __get_user_asm(*(u16 *)(8 + (char *)dst),
19179- (u16 __user *)(8 + (char __user *)src),
19180+ (const u16 __user *)(8 + (const char __user *)src),
19181 ret, "w", "w", "=r", 2);
19182 return ret;
19183 case 16:
19184- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
19185+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
19186 ret, "q", "", "=r", 16);
19187 if (unlikely(ret))
19188 return ret;
19189 __get_user_asm(*(u64 *)(8 + (char *)dst),
19190- (u64 __user *)(8 + (char __user *)src),
19191+ (const u64 __user *)(8 + (const char __user *)src),
19192 ret, "q", "", "=r", 8);
19193 return ret;
19194 default:
19195- return copy_user_generic(dst, (__force void *)src, size);
19196+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
19197 }
19198 }
19199
19200 static __always_inline __must_check
19201-int __copy_to_user(void __user *dst, const void *src, unsigned size)
19202+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
19203 {
19204- int ret = 0;
19205+ size_t sz = __compiletime_object_size(src);
19206+ unsigned ret = 0;
19207
19208 might_fault();
19209+
19210+ if (size > INT_MAX)
19211+ return size;
19212+
19213+ check_object_size(src, size, true);
19214+
19215+#ifdef CONFIG_PAX_MEMORY_UDEREF
19216+ if (!__access_ok(VERIFY_WRITE, dst, size))
19217+ return size;
19218+#endif
19219+
19220+ if (unlikely(sz != (size_t)-1 && sz < size)) {
19221+ copy_to_user_overflow();
19222+ return size;
19223+ }
19224+
19225 if (!__builtin_constant_p(size))
19226- return copy_user_generic((__force void *)dst, src, size);
19227+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
19228 switch (size) {
19229- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
19230+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
19231 ret, "b", "b", "iq", 1);
19232 return ret;
19233- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
19234+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
19235 ret, "w", "w", "ir", 2);
19236 return ret;
19237- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
19238+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
19239 ret, "l", "k", "ir", 4);
19240 return ret;
19241- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
19242+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
19243 ret, "q", "", "er", 8);
19244 return ret;
19245 case 10:
19246- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
19247+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
19248 ret, "q", "", "er", 10);
19249 if (unlikely(ret))
19250 return ret;
19251 asm("":::"memory");
19252- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
19253+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
19254 ret, "w", "w", "ir", 2);
19255 return ret;
19256 case 16:
19257- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
19258+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
19259 ret, "q", "", "er", 16);
19260 if (unlikely(ret))
19261 return ret;
19262 asm("":::"memory");
19263- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
19264+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
19265 ret, "q", "", "er", 8);
19266 return ret;
19267 default:
19268- return copy_user_generic((__force void *)dst, src, size);
19269+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
19270 }
19271 }
19272
19273 static __always_inline __must_check
19274-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19275+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
19276 {
19277- int ret = 0;
19278+ unsigned ret = 0;
19279
19280 might_fault();
19281+
19282+ if (size > INT_MAX)
19283+ return size;
19284+
19285+#ifdef CONFIG_PAX_MEMORY_UDEREF
19286+ if (!__access_ok(VERIFY_READ, src, size))
19287+ return size;
19288+ if (!__access_ok(VERIFY_WRITE, dst, size))
19289+ return size;
19290+#endif
19291+
19292 if (!__builtin_constant_p(size))
19293- return copy_user_generic((__force void *)dst,
19294- (__force void *)src, size);
19295+ return copy_user_generic((__force_kernel void *)____m(dst),
19296+ (__force_kernel const void *)____m(src), size);
19297 switch (size) {
19298 case 1: {
19299 u8 tmp;
19300- __get_user_asm(tmp, (u8 __user *)src,
19301+ __get_user_asm(tmp, (const u8 __user *)src,
19302 ret, "b", "b", "=q", 1);
19303 if (likely(!ret))
19304 __put_user_asm(tmp, (u8 __user *)dst,
19305@@ -185,7 +250,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19306 }
19307 case 2: {
19308 u16 tmp;
19309- __get_user_asm(tmp, (u16 __user *)src,
19310+ __get_user_asm(tmp, (const u16 __user *)src,
19311 ret, "w", "w", "=r", 2);
19312 if (likely(!ret))
19313 __put_user_asm(tmp, (u16 __user *)dst,
19314@@ -195,7 +260,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19315
19316 case 4: {
19317 u32 tmp;
19318- __get_user_asm(tmp, (u32 __user *)src,
19319+ __get_user_asm(tmp, (const u32 __user *)src,
19320 ret, "l", "k", "=r", 4);
19321 if (likely(!ret))
19322 __put_user_asm(tmp, (u32 __user *)dst,
19323@@ -204,7 +269,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19324 }
19325 case 8: {
19326 u64 tmp;
19327- __get_user_asm(tmp, (u64 __user *)src,
19328+ __get_user_asm(tmp, (const u64 __user *)src,
19329 ret, "q", "", "=r", 8);
19330 if (likely(!ret))
19331 __put_user_asm(tmp, (u64 __user *)dst,
19332@@ -212,41 +277,74 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19333 return ret;
19334 }
19335 default:
19336- return copy_user_generic((__force void *)dst,
19337- (__force void *)src, size);
19338+ return copy_user_generic((__force_kernel void *)____m(dst),
19339+ (__force_kernel const void *)____m(src), size);
19340 }
19341 }
19342
19343-static __must_check __always_inline int
19344-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
19345+static __must_check __always_inline unsigned long
19346+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
19347 {
19348- return copy_user_generic(dst, (__force const void *)src, size);
19349+ if (size > INT_MAX)
19350+ return size;
19351+
19352+#ifdef CONFIG_PAX_MEMORY_UDEREF
19353+ if (!__access_ok(VERIFY_READ, src, size))
19354+ return size;
19355+#endif
19356+
19357+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
19358 }
19359
19360-static __must_check __always_inline int
19361-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
19362+static __must_check __always_inline unsigned long
19363+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
19364 {
19365- return copy_user_generic((__force void *)dst, src, size);
19366+ if (size > INT_MAX)
19367+ return size;
19368+
19369+#ifdef CONFIG_PAX_MEMORY_UDEREF
19370+ if (!__access_ok(VERIFY_WRITE, dst, size))
19371+ return size;
19372+#endif
19373+
19374+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
19375 }
19376
19377-extern long __copy_user_nocache(void *dst, const void __user *src,
19378- unsigned size, int zerorest);
19379+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
19380+ unsigned long size, int zerorest);
19381
19382-static inline int
19383-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
19384+static inline unsigned long
19385+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
19386 {
19387 might_fault();
19388+
19389+ if (size > INT_MAX)
19390+ return size;
19391+
19392+#ifdef CONFIG_PAX_MEMORY_UDEREF
19393+ if (!__access_ok(VERIFY_READ, src, size))
19394+ return size;
19395+#endif
19396+
19397 return __copy_user_nocache(dst, src, size, 1);
19398 }
19399
19400-static inline int
19401+static inline unsigned long
19402 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
19403- unsigned size)
19404+ unsigned long size)
19405 {
19406+ if (size > INT_MAX)
19407+ return size;
19408+
19409+#ifdef CONFIG_PAX_MEMORY_UDEREF
19410+ if (!__access_ok(VERIFY_READ, src, size))
19411+ return size;
19412+#endif
19413+
19414 return __copy_user_nocache(dst, src, size, 0);
19415 }
19416
19417 unsigned long
19418-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
19419+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
19420
19421 #endif /* _ASM_X86_UACCESS_64_H */
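
One pattern repeats through every helper in this header: bail out when size > INT_MAX, so a negative length that was converted to unsigned long reports "nothing copied" instead of launching a multi-exabyte copy. Distilled into a user-space stand-in:

#include <limits.h>
#include <string.h>

/* Returns the number of bytes NOT copied, like the kernel helpers. */
static unsigned long checked_copy(void *dst, const void *src, unsigned long n)
{
        /* e.g. (unsigned long)-12 is far above INT_MAX: refuse it whole */
        if (n > INT_MAX)
                return n;

        memcpy(dst, src, n);    /* stand-in for the real user copy */
        return 0;
}
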
19422diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
19423index 5b238981..77fdd78 100644
19424--- a/arch/x86/include/asm/word-at-a-time.h
19425+++ b/arch/x86/include/asm/word-at-a-time.h
19426@@ -11,7 +11,7 @@
19427 * and shift, for example.
19428 */
19429 struct word_at_a_time {
19430- const unsigned long one_bits, high_bits;
19431+ unsigned long one_bits, high_bits;
19432 };
19433
19434 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
19435diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
19436index 828a156..650e625 100644
19437--- a/arch/x86/include/asm/x86_init.h
19438+++ b/arch/x86/include/asm/x86_init.h
19439@@ -129,7 +129,7 @@ struct x86_init_ops {
19440 struct x86_init_timers timers;
19441 struct x86_init_iommu iommu;
19442 struct x86_init_pci pci;
19443-};
19444+} __no_const;
19445
19446 /**
19447 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
19448@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
19449 void (*setup_percpu_clockev)(void);
19450 void (*early_percpu_clock_init)(void);
19451 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
19452-};
19453+} __no_const;
19454
19455 struct timespec;
19456
19457@@ -168,7 +168,7 @@ struct x86_platform_ops {
19458 void (*save_sched_clock_state)(void);
19459 void (*restore_sched_clock_state)(void);
19460 void (*apic_post_init)(void);
19461-};
19462+} __no_const;
19463
19464 struct pci_dev;
19465 struct msi_msg;
19466@@ -182,7 +182,7 @@ struct x86_msi_ops {
19467 void (*teardown_msi_irqs)(struct pci_dev *dev);
19468 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
19469 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
19470-};
19471+} __no_const;
19472
19473 struct IO_APIC_route_entry;
19474 struct io_apic_irq_attr;
19475@@ -203,7 +203,7 @@ struct x86_io_apic_ops {
19476 unsigned int destination, int vector,
19477 struct io_apic_irq_attr *attr);
19478 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
19479-};
19480+} __no_const;
19481
19482 extern struct x86_init_ops x86_init;
19483 extern struct x86_cpuinit_ops x86_cpuinit;
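
__no_const reads backwards at first: it is consumed by PaX's constify GCC plugin, which by default forces structures consisting purely of function pointers into read-only memory; these ops tables are legitimately reassigned at runtime, so they opt out. For a table that never changes after initialization, plain const already buys the same placement — a sketch with illustrative names:

/* A function-pointer table fixed for the program's lifetime: declaring
 * the instance const places it in .rodata, so a stray (or hostile)
 * kernel write cannot redirect its pointers. */
struct timer_ops_sketch {
        void (*setup)(void);
        unsigned long (*read)(void);
};

static void default_setup(void) { }
static unsigned long default_read(void) { return 0; }

static const struct timer_ops_sketch timer_ops_sketch = {
        .setup = default_setup,
        .read  = default_read,
};
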
19484diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
19485index b913915..4f5a581 100644
19486--- a/arch/x86/include/asm/xen/page.h
19487+++ b/arch/x86/include/asm/xen/page.h
19488@@ -56,7 +56,7 @@ extern int m2p_remove_override(struct page *page,
19489 extern struct page *m2p_find_override(unsigned long mfn);
19490 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
19491
19492-static inline unsigned long pfn_to_mfn(unsigned long pfn)
19493+static inline unsigned long __intentional_overflow(-1) pfn_to_mfn(unsigned long pfn)
19494 {
19495 unsigned long mfn;
19496
19497diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
19498index 0415cda..3b22adc 100644
19499--- a/arch/x86/include/asm/xsave.h
19500+++ b/arch/x86/include/asm/xsave.h
19501@@ -70,8 +70,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
19502 if (unlikely(err))
19503 return -EFAULT;
19504
19505+ pax_open_userland();
19506 __asm__ __volatile__(ASM_STAC "\n"
19507- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
19508+ "1:"
19509+ __copyuser_seg
19510+ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
19511 "2: " ASM_CLAC "\n"
19512 ".section .fixup,\"ax\"\n"
19513 "3: movl $-1,%[err]\n"
19514@@ -81,18 +84,22 @@ static inline int xsave_user(struct xsave_struct __user *buf)
19515 : [err] "=r" (err)
19516 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
19517 : "memory");
19518+ pax_close_userland();
19519 return err;
19520 }
19521
19522 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
19523 {
19524 int err;
19525- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
19526+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
19527 u32 lmask = mask;
19528 u32 hmask = mask >> 32;
19529
19530+ pax_open_userland();
19531 __asm__ __volatile__(ASM_STAC "\n"
19532- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
19533+ "1:"
19534+ __copyuser_seg
19535+ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
19536 "2: " ASM_CLAC "\n"
19537 ".section .fixup,\"ax\"\n"
19538 "3: movl $-1,%[err]\n"
19539@@ -102,6 +109,7 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
19540 : [err] "=r" (err)
19541 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
19542 : "memory"); /* memory required? */
19543+ pax_close_userland();
19544 return err;
19545 }
19546
19547diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
19548index bbae024..e1528f9 100644
19549--- a/arch/x86/include/uapi/asm/e820.h
19550+++ b/arch/x86/include/uapi/asm/e820.h
19551@@ -63,7 +63,7 @@ struct e820map {
19552 #define ISA_START_ADDRESS 0xa0000
19553 #define ISA_END_ADDRESS 0x100000
19554
19555-#define BIOS_BEGIN 0x000a0000
19556+#define BIOS_BEGIN 0x000c0000
19557 #define BIOS_END 0x00100000
19558
19559 #define BIOS_ROM_BASE 0xffe00000
19560diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
19561index 7b0a55a..ad115bf 100644
19562--- a/arch/x86/include/uapi/asm/ptrace-abi.h
19563+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
19564@@ -49,7 +49,6 @@
19565 #define EFLAGS 144
19566 #define RSP 152
19567 #define SS 160
19568-#define ARGOFFSET R11
19569 #endif /* __ASSEMBLY__ */
19570
19571 /* top of stack page */
19572diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
19573index a5408b9..5133813 100644
19574--- a/arch/x86/kernel/Makefile
19575+++ b/arch/x86/kernel/Makefile
19576@@ -24,7 +24,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
19577 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
19578 obj-$(CONFIG_IRQ_WORK) += irq_work.o
19579 obj-y += probe_roms.o
19580-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
19581+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
19582 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
19583 obj-y += syscall_$(BITS).o
19584 obj-$(CONFIG_X86_64) += vsyscall_64.o
19585diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
19586index 40c7660..f709f4b 100644
19587--- a/arch/x86/kernel/acpi/boot.c
19588+++ b/arch/x86/kernel/acpi/boot.c
19589@@ -1365,7 +1365,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
19590 * If your system is blacklisted here, but you find that acpi=force
19591 * works for you, please contact linux-acpi@vger.kernel.org
19592 */
19593-static struct dmi_system_id __initdata acpi_dmi_table[] = {
19594+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
19595 /*
19596 * Boxes that need ACPI disabled
19597 */
19598@@ -1440,7 +1440,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
19599 };
19600
19601 /* second table for DMI checks that should run after early-quirks */
19602-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
19603+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
19604 /*
19605 * HP laptops which use a DSDT reporting as HP/SB400/10000,
19606 * which includes some code which overrides all temperature
19607diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
19608index 3312010..a65ca7b 100644
19609--- a/arch/x86/kernel/acpi/sleep.c
19610+++ b/arch/x86/kernel/acpi/sleep.c
19611@@ -88,8 +88,12 @@ int x86_acpi_suspend_lowlevel(void)
19612 #else /* CONFIG_64BIT */
19613 #ifdef CONFIG_SMP
19614 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
19615+
19616+ pax_open_kernel();
19617 early_gdt_descr.address =
19618 (unsigned long)get_cpu_gdt_table(smp_processor_id());
19619+ pax_close_kernel();
19620+
19621 initial_gs = per_cpu_offset(smp_processor_id());
19622 #endif
19623 initial_code = (unsigned long)wakeup_long64;
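
pax_open_kernel()/pax_close_kernel() bracket the GDT-descriptor write above because under KERNEXEC the GDT sits in read-only memory; the pair briefly lifts write protection around the one store and restores it immediately. On 32-bit the mechanism pivots on the CR0.WP bit — a simplified, kernel-context-only sketch (the real implementation also copes with preemption and paravirt):

#define X86_CR0_WP (1UL << 16)

static inline unsigned long read_cr0_sketch(void)
{
        unsigned long cr0;
        asm volatile("mov %%cr0, %0" : "=r"(cr0));
        return cr0;
}

static inline void write_cr0_sketch(unsigned long cr0)
{
        asm volatile("mov %0, %%cr0" : : "r"(cr0) : "memory");
}

static inline void open_kernel_sketch(void)
{
        /* clear WP: ring-0 writes to read-only pages now succeed */
        write_cr0_sketch(read_cr0_sketch() & ~X86_CR0_WP);
}

static inline void close_kernel_sketch(void)
{
        /* restore WP as soon as the protected write is done */
        write_cr0_sketch(read_cr0_sketch() | X86_CR0_WP);
}
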
19624diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
19625index d1daa66..59fecba 100644
19626--- a/arch/x86/kernel/acpi/wakeup_32.S
19627+++ b/arch/x86/kernel/acpi/wakeup_32.S
19628@@ -29,13 +29,11 @@ wakeup_pmode_return:
19629 # and restore the stack ... but you need gdt for this to work
19630 movl saved_context_esp, %esp
19631
19632- movl %cs:saved_magic, %eax
19633- cmpl $0x12345678, %eax
19634+ cmpl $0x12345678, saved_magic
19635 jne bogus_magic
19636
19637 # jump to place where we left off
19638- movl saved_eip, %eax
19639- jmp *%eax
19640+ jmp *(saved_eip)
19641
19642 bogus_magic:
19643 jmp bogus_magic
19644diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
19645index 15e8563..323cbe1 100644
19646--- a/arch/x86/kernel/alternative.c
19647+++ b/arch/x86/kernel/alternative.c
19648@@ -269,6 +269,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
19649 */
19650 for (a = start; a < end; a++) {
19651 instr = (u8 *)&a->instr_offset + a->instr_offset;
19652+
19653+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19654+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
19655+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
19656+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
19657+#endif
19658+
19659 replacement = (u8 *)&a->repl_offset + a->repl_offset;
19660 BUG_ON(a->replacementlen > a->instrlen);
19661 BUG_ON(a->instrlen > sizeof(insnbuf));
19662@@ -300,10 +307,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
19663 for (poff = start; poff < end; poff++) {
19664 u8 *ptr = (u8 *)poff + *poff;
19665
19666+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19667+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
19668+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
19669+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
19670+#endif
19671+
19672 if (!*poff || ptr < text || ptr >= text_end)
19673 continue;
19674 /* turn DS segment override prefix into lock prefix */
19675- if (*ptr == 0x3e)
19676+ if (*ktla_ktva(ptr) == 0x3e)
19677 text_poke(ptr, ((unsigned char []){0xf0}), 1);
19678 }
19679 mutex_unlock(&text_mutex);
19680@@ -318,10 +331,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
19681 for (poff = start; poff < end; poff++) {
19682 u8 *ptr = (u8 *)poff + *poff;
19683
19684+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19685+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
19686+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
19687+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
19688+#endif
19689+
19690 if (!*poff || ptr < text || ptr >= text_end)
19691 continue;
19692 /* turn lock prefix into DS segment override prefix */
19693- if (*ptr == 0xf0)
19694+ if (*ktla_ktva(ptr) == 0xf0)
19695 text_poke(ptr, ((unsigned char []){0x3E}), 1);
19696 }
19697 mutex_unlock(&text_mutex);
19698@@ -469,7 +488,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
19699
19700 BUG_ON(p->len > MAX_PATCH_LEN);
19701 /* prep the buffer with the original instructions */
19702- memcpy(insnbuf, p->instr, p->len);
19703+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
19704 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
19705 (unsigned long)p->instr, p->len);
19706
19707@@ -516,7 +535,7 @@ void __init alternative_instructions(void)
19708 if (!uniproc_patched || num_possible_cpus() == 1)
19709 free_init_pages("SMP alternatives",
19710 (unsigned long)__smp_locks,
19711- (unsigned long)__smp_locks_end);
19712+ PAGE_ALIGN((unsigned long)__smp_locks_end));
19713 #endif
19714
19715 apply_paravirt(__parainstructions, __parainstructions_end);
19716@@ -536,13 +555,17 @@ void __init alternative_instructions(void)
19717 * instructions. And on the local CPU you need to be protected again NMI or MCE
19718 * handlers seeing an inconsistent instruction while you patch.
19719 */
19720-void *__init_or_module text_poke_early(void *addr, const void *opcode,
19721+void *__kprobes text_poke_early(void *addr, const void *opcode,
19722 size_t len)
19723 {
19724 unsigned long flags;
19725 local_irq_save(flags);
19726- memcpy(addr, opcode, len);
19727+
19728+ pax_open_kernel();
19729+ memcpy(ktla_ktva(addr), opcode, len);
19730 sync_core();
19731+ pax_close_kernel();
19732+
19733 local_irq_restore(flags);
19734 /* Could also do a CLFLUSH here to speed up CPU recovery; but
19735 that causes hangs on some VIA CPUs. */
19736@@ -564,36 +587,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
19737 */
19738 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
19739 {
19740- unsigned long flags;
19741- char *vaddr;
19742+ unsigned char *vaddr = ktla_ktva(addr);
19743 struct page *pages[2];
19744- int i;
19745+ size_t i;
19746
19747 if (!core_kernel_text((unsigned long)addr)) {
19748- pages[0] = vmalloc_to_page(addr);
19749- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
19750+ pages[0] = vmalloc_to_page(vaddr);
19751+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
19752 } else {
19753- pages[0] = virt_to_page(addr);
19754+ pages[0] = virt_to_page(vaddr);
19755 WARN_ON(!PageReserved(pages[0]));
19756- pages[1] = virt_to_page(addr + PAGE_SIZE);
19757+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
19758 }
19759 BUG_ON(!pages[0]);
19760- local_irq_save(flags);
19761- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
19762- if (pages[1])
19763- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
19764- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
19765- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
19766- clear_fixmap(FIX_TEXT_POKE0);
19767- if (pages[1])
19768- clear_fixmap(FIX_TEXT_POKE1);
19769- local_flush_tlb();
19770- sync_core();
19771- /* Could also do a CLFLUSH here to speed up CPU recovery; but
19772- that causes hangs on some VIA CPUs. */
19773+ text_poke_early(addr, opcode, len);
19774 for (i = 0; i < len; i++)
19775- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
19776- local_irq_restore(flags);
19777+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
19778 return addr;
19779 }
19780
19781@@ -613,7 +622,7 @@ int poke_int3_handler(struct pt_regs *regs)
19782 if (likely(!bp_patching_in_progress))
19783 return 0;
19784
19785- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
19786+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
19787 return 0;
19788
19789 /* set up the specified breakpoint handler */
19790@@ -647,7 +656,7 @@ int poke_int3_handler(struct pt_regs *regs)
19791 */
19792 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
19793 {
19794- unsigned char int3 = 0xcc;
19795+ const unsigned char int3 = 0xcc;
19796
19797 bp_int3_handler = handler;
19798 bp_int3_addr = (u8 *)addr + sizeof(int3);
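
Most of the churn in this file comes from ktla_ktva(): under 32-bit KERNEXEC, kernel text is reachable through two mappings — the executable one and a writable alias at a fixed offset — and every patching primitive (text_poke() and friends) must write through the alias while execution continues at the original address. A toy model of the translation; the delta below is purely illustrative, the real value falls out of the KERNEXEC segment layout:

#define KERNEXEC_TEXT_DELTA_SKETCH 0x10000000UL /* illustrative only */

static inline void *ktla_ktva_sketch(const void *text_addr)
{
        /* kernel text linear address -> writable virtual alias */
        return (void *)((unsigned long)text_addr + KERNEXEC_TEXT_DELTA_SKETCH);
}

static inline void *ktva_ktla_sketch(const void *alias_addr)
{
        /* writable virtual alias -> kernel text linear address */
        return (void *)((unsigned long)alias_addr - KERNEXEC_TEXT_DELTA_SKETCH);
}
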
19799diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
19800index a7eb82d..f6e52d4 100644
19801--- a/arch/x86/kernel/apic/apic.c
19802+++ b/arch/x86/kernel/apic/apic.c
19803@@ -190,7 +190,7 @@ int first_system_vector = 0xfe;
19804 /*
19805 * Debug level, exported for io_apic.c
19806 */
19807-unsigned int apic_verbosity;
19808+int apic_verbosity;
19809
19810 int pic_mode;
19811
19812@@ -1985,7 +1985,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
19813 apic_write(APIC_ESR, 0);
19814 v1 = apic_read(APIC_ESR);
19815 ack_APIC_irq();
19816- atomic_inc(&irq_err_count);
19817+ atomic_inc_unchecked(&irq_err_count);
19818
19819 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
19820 smp_processor_id(), v0 , v1);
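
atomic_inc_unchecked() exists because PaX's REFCOUNT hardening makes plain atomic_t increments trap on signed overflow; counters that are pure statistics — irq_err_count here, irq_mis_count below — switch to the _unchecked type to opt out. A behavioural model of the split using GCC's __atomic builtins (the real check is an inline-asm overflow trap, not C):

#include <limits.h>

typedef struct { int counter; } atomic_sketch_t;
typedef struct { int counter; } atomic_unchecked_sketch_t;

static inline void atomic_inc_sketch(atomic_sketch_t *v)
{
        int old = __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
        if (old == INT_MAX)             /* the add just wrapped to INT_MIN */
                __builtin_trap();       /* stand-in for the PaX handler */
}

static inline void atomic_inc_unchecked_sketch(atomic_unchecked_sketch_t *v)
{
        /* statistics counter: wrapping is harmless, skip the check */
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}
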
19821diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
19822index 00c77cf..2dc6a2d 100644
19823--- a/arch/x86/kernel/apic/apic_flat_64.c
19824+++ b/arch/x86/kernel/apic/apic_flat_64.c
19825@@ -157,7 +157,7 @@ static int flat_probe(void)
19826 return 1;
19827 }
19828
19829-static struct apic apic_flat = {
19830+static struct apic apic_flat __read_only = {
19831 .name = "flat",
19832 .probe = flat_probe,
19833 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
19834@@ -271,7 +271,7 @@ static int physflat_probe(void)
19835 return 0;
19836 }
19837
19838-static struct apic apic_physflat = {
19839+static struct apic apic_physflat __read_only = {
19840
19841 .name = "physical flat",
19842 .probe = physflat_probe,
19843diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
19844index e145f28..2752888 100644
19845--- a/arch/x86/kernel/apic/apic_noop.c
19846+++ b/arch/x86/kernel/apic/apic_noop.c
19847@@ -119,7 +119,7 @@ static void noop_apic_write(u32 reg, u32 v)
19848 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
19849 }
19850
19851-struct apic apic_noop = {
19852+struct apic apic_noop __read_only = {
19853 .name = "noop",
19854 .probe = noop_probe,
19855 .acpi_madt_oem_check = NULL,
19856diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
19857index d50e364..543bee3 100644
19858--- a/arch/x86/kernel/apic/bigsmp_32.c
19859+++ b/arch/x86/kernel/apic/bigsmp_32.c
19860@@ -152,7 +152,7 @@ static int probe_bigsmp(void)
19861 return dmi_bigsmp;
19862 }
19863
19864-static struct apic apic_bigsmp = {
19865+static struct apic apic_bigsmp __read_only = {
19866
19867 .name = "bigsmp",
19868 .probe = probe_bigsmp,
19869diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
19870index c552247..587a316 100644
19871--- a/arch/x86/kernel/apic/es7000_32.c
19872+++ b/arch/x86/kernel/apic/es7000_32.c
19873@@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
19874 return ret && es7000_apic_is_cluster();
19875 }
19876
19877-/* We've been warned by a false positive warning.Use __refdata to keep calm. */
19878-static struct apic __refdata apic_es7000_cluster = {
19879+static struct apic apic_es7000_cluster __read_only = {
19880
19881 .name = "es7000",
19882 .probe = probe_es7000,
19883@@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000_cluster = {
19884 .x86_32_early_logical_apicid = es7000_early_logical_apicid,
19885 };
19886
19887-static struct apic __refdata apic_es7000 = {
19888+static struct apic apic_es7000 __read_only = {
19889
19890 .name = "es7000",
19891 .probe = probe_es7000,
19892diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
19893index e63a5bd..c0babf8 100644
19894--- a/arch/x86/kernel/apic/io_apic.c
19895+++ b/arch/x86/kernel/apic/io_apic.c
19896@@ -1060,7 +1060,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
19897 }
19898 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
19899
19900-void lock_vector_lock(void)
19901+void lock_vector_lock(void) __acquires(vector_lock)
19902 {
19903 /* Used to the online set of cpus does not change
19904 * during assign_irq_vector.
19905@@ -1068,7 +1068,7 @@ void lock_vector_lock(void)
19906 raw_spin_lock(&vector_lock);
19907 }
19908
19909-void unlock_vector_lock(void)
19910+void unlock_vector_lock(void) __releases(vector_lock)
19911 {
19912 raw_spin_unlock(&vector_lock);
19913 }
19914@@ -2367,7 +2367,7 @@ static void ack_apic_edge(struct irq_data *data)
19915 ack_APIC_irq();
19916 }
19917
19918-atomic_t irq_mis_count;
19919+atomic_unchecked_t irq_mis_count;
19920
19921 #ifdef CONFIG_GENERIC_PENDING_IRQ
19922 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
19923@@ -2508,7 +2508,7 @@ static void ack_apic_level(struct irq_data *data)
19924 * at the cpu.
19925 */
19926 if (!(v & (1 << (i & 0x1f)))) {
19927- atomic_inc(&irq_mis_count);
19928+ atomic_inc_unchecked(&irq_mis_count);
19929
19930 eoi_ioapic_irq(irq, cfg);
19931 }
19932diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
19933index 1e42e8f..daacf44 100644
19934--- a/arch/x86/kernel/apic/numaq_32.c
19935+++ b/arch/x86/kernel/apic/numaq_32.c
19936@@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(void)
19937 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
19938 }
19939
19940-/* Use __refdata to keep false positive warning calm. */
19941-static struct apic __refdata apic_numaq = {
19942+static struct apic apic_numaq __read_only = {
19943
19944 .name = "NUMAQ",
19945 .probe = probe_numaq,
19946diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
19947index eb35ef9..f184a21 100644
19948--- a/arch/x86/kernel/apic/probe_32.c
19949+++ b/arch/x86/kernel/apic/probe_32.c
19950@@ -72,7 +72,7 @@ static int probe_default(void)
19951 return 1;
19952 }
19953
19954-static struct apic apic_default = {
19955+static struct apic apic_default __read_only = {
19956
19957 .name = "default",
19958 .probe = probe_default,
19959diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
19960index 77c95c0..434f8a4 100644
19961--- a/arch/x86/kernel/apic/summit_32.c
19962+++ b/arch/x86/kernel/apic/summit_32.c
19963@@ -486,7 +486,7 @@ void setup_summit(void)
19964 }
19965 #endif
19966
19967-static struct apic apic_summit = {
19968+static struct apic apic_summit __read_only = {
19969
19970 .name = "summit",
19971 .probe = probe_summit,
19972diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
19973index 140e29d..d88bc95 100644
19974--- a/arch/x86/kernel/apic/x2apic_cluster.c
19975+++ b/arch/x86/kernel/apic/x2apic_cluster.c
19976@@ -183,7 +183,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
19977 return notifier_from_errno(err);
19978 }
19979
19980-static struct notifier_block __refdata x2apic_cpu_notifier = {
19981+static struct notifier_block x2apic_cpu_notifier = {
19982 .notifier_call = update_clusterinfo,
19983 };
19984
19985@@ -235,7 +235,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
19986 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
19987 }
19988
19989-static struct apic apic_x2apic_cluster = {
19990+static struct apic apic_x2apic_cluster __read_only = {
19991
19992 .name = "cluster x2apic",
19993 .probe = x2apic_cluster_probe,
19994diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
19995index 562a76d..a003c0f 100644
19996--- a/arch/x86/kernel/apic/x2apic_phys.c
19997+++ b/arch/x86/kernel/apic/x2apic_phys.c
19998@@ -89,7 +89,7 @@ static int x2apic_phys_probe(void)
19999 return apic == &apic_x2apic_phys;
20000 }
20001
20002-static struct apic apic_x2apic_phys = {
20003+static struct apic apic_x2apic_phys __read_only = {
20004
20005 .name = "physical x2apic",
20006 .probe = x2apic_phys_probe,
20007diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
20008index a419814..1dd34a0 100644
20009--- a/arch/x86/kernel/apic/x2apic_uv_x.c
20010+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
20011@@ -357,7 +357,7 @@ static int uv_probe(void)
20012 return apic == &apic_x2apic_uv_x;
20013 }
20014
20015-static struct apic __refdata apic_x2apic_uv_x = {
20016+static struct apic apic_x2apic_uv_x __read_only = {
20017
20018 .name = "UV large system",
20019 .probe = uv_probe,
20020diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
20021index 3ab0343..814c4787 100644
20022--- a/arch/x86/kernel/apm_32.c
20023+++ b/arch/x86/kernel/apm_32.c
20024@@ -433,7 +433,7 @@ static DEFINE_MUTEX(apm_mutex);
20025 * This is for buggy BIOS's that refer to (real mode) segment 0x40
20026 * even though they are called in protected mode.
20027 */
20028-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
20029+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
20030 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
20031
20032 static const char driver_version[] = "1.16ac"; /* no spaces */
20033@@ -611,7 +611,10 @@ static long __apm_bios_call(void *_call)
20034 BUG_ON(cpu != 0);
20035 gdt = get_cpu_gdt_table(cpu);
20036 save_desc_40 = gdt[0x40 / 8];
20037+
20038+ pax_open_kernel();
20039 gdt[0x40 / 8] = bad_bios_desc;
20040+ pax_close_kernel();
20041
20042 apm_irq_save(flags);
20043 APM_DO_SAVE_SEGS;
20044@@ -620,7 +623,11 @@ static long __apm_bios_call(void *_call)
20045 &call->esi);
20046 APM_DO_RESTORE_SEGS;
20047 apm_irq_restore(flags);
20048+
20049+ pax_open_kernel();
20050 gdt[0x40 / 8] = save_desc_40;
20051+ pax_close_kernel();
20052+
20053 put_cpu();
20054
20055 return call->eax & 0xff;
20056@@ -687,7 +694,10 @@ static long __apm_bios_call_simple(void *_call)
20057 BUG_ON(cpu != 0);
20058 gdt = get_cpu_gdt_table(cpu);
20059 save_desc_40 = gdt[0x40 / 8];
20060+
20061+ pax_open_kernel();
20062 gdt[0x40 / 8] = bad_bios_desc;
20063+ pax_close_kernel();
20064
20065 apm_irq_save(flags);
20066 APM_DO_SAVE_SEGS;
20067@@ -695,7 +705,11 @@ static long __apm_bios_call_simple(void *_call)
20068 &call->eax);
20069 APM_DO_RESTORE_SEGS;
20070 apm_irq_restore(flags);
20071+
20072+ pax_open_kernel();
20073 gdt[0x40 / 8] = save_desc_40;
20074+ pax_close_kernel();
20075+
20076 put_cpu();
20077 return error;
20078 }
20079@@ -2362,12 +2376,15 @@ static int __init apm_init(void)
20080 * code to that CPU.
20081 */
20082 gdt = get_cpu_gdt_table(0);
20083+
20084+ pax_open_kernel();
20085 set_desc_base(&gdt[APM_CS >> 3],
20086 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
20087 set_desc_base(&gdt[APM_CS_16 >> 3],
20088 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
20089 set_desc_base(&gdt[APM_DS >> 3],
20090 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
20091+ pax_close_kernel();
20092
20093 proc_create("apm", 0, NULL, &apm_file_ops);
20094
20095diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
20096index 2861082..6d4718e 100644
20097--- a/arch/x86/kernel/asm-offsets.c
20098+++ b/arch/x86/kernel/asm-offsets.c
20099@@ -33,6 +33,8 @@ void common(void) {
20100 OFFSET(TI_status, thread_info, status);
20101 OFFSET(TI_addr_limit, thread_info, addr_limit);
20102 OFFSET(TI_preempt_count, thread_info, preempt_count);
20103+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
20104+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
20105
20106 BLANK();
20107 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
20108@@ -53,8 +55,26 @@ void common(void) {
20109 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
20110 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
20111 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
20112+
20113+#ifdef CONFIG_PAX_KERNEXEC
20114+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
20115 #endif
20116
20117+#ifdef CONFIG_PAX_MEMORY_UDEREF
20118+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
20119+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
20120+#ifdef CONFIG_X86_64
20121+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
20122+#endif
20123+#endif
20124+
20125+#endif
20126+
20127+ BLANK();
20128+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
20129+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
20130+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
20131+
20132 #ifdef CONFIG_XEN
20133 BLANK();
20134 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
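
asm-offsets.c is never linked into the kernel: it is compiled to assembly and
the OFFSET()/DEFINE() markers are scraped by Kbuild into the generated
asm-offsets.h. The new *_asm constants exist because PAGE_SIZE and friends
expand to expressions with C-only casts and UL suffixes that gas cannot
parse. The generated header ends up looking roughly like this (the offset
values are illustrative and depend on the configuration):

	#define TI_lowest_stack 32	/* offsetof(struct thread_info, lowest_stack) */
	#define PAGE_SIZE_asm 4096	/* PAGE_SIZE without the C suffix, usable from .S */
	#define THREAD_SIZE_asm 8192

pax_erase_kstack in the entry_32.S hunks below depends on these, e.g.
"and $THREAD_SIZE_asm - 1, %ecx".
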
20135diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
20136index e7c798b..2b2019b 100644
20137--- a/arch/x86/kernel/asm-offsets_64.c
20138+++ b/arch/x86/kernel/asm-offsets_64.c
20139@@ -77,6 +77,7 @@ int main(void)
20140 BLANK();
20141 #undef ENTRY
20142
20143+ DEFINE(TSS_size, sizeof(struct tss_struct));
20144 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
20145 BLANK();
20146
20147diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
20148index 47b56a7..efc2bc6 100644
20149--- a/arch/x86/kernel/cpu/Makefile
20150+++ b/arch/x86/kernel/cpu/Makefile
20151@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
20152 CFLAGS_REMOVE_perf_event.o = -pg
20153 endif
20154
20155-# Make sure load_percpu_segment has no stackprotector
20156-nostackp := $(call cc-option, -fno-stack-protector)
20157-CFLAGS_common.o := $(nostackp)
20158-
20159 obj-y := intel_cacheinfo.o scattered.o topology.o
20160 obj-y += proc.o capflags.o powerflags.o common.o
20161 obj-y += rdrand.o
20162diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
20163index 903a264..fc955f3 100644
20164--- a/arch/x86/kernel/cpu/amd.c
20165+++ b/arch/x86/kernel/cpu/amd.c
20166@@ -743,7 +743,7 @@ static void init_amd(struct cpuinfo_x86 *c)
20167 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
20168 {
20169 /* AMD errata T13 (order #21922) */
20170- if ((c->x86 == 6)) {
20171+ if (c->x86 == 6) {
20172 /* Duron Rev A0 */
20173 if (c->x86_model == 3 && c->x86_mask == 0)
20174 size = 64;
20175diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
20176index 2793d1f..b4f313a 100644
20177--- a/arch/x86/kernel/cpu/common.c
20178+++ b/arch/x86/kernel/cpu/common.c
20179@@ -88,60 +88,6 @@ static const struct cpu_dev default_cpu = {
20180
20181 static const struct cpu_dev *this_cpu = &default_cpu;
20182
20183-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
20184-#ifdef CONFIG_X86_64
20185- /*
20186- * We need valid kernel segments for data and code in long mode too
20187- * IRET will check the segment types kkeil 2000/10/28
20188- * Also sysret mandates a special GDT layout
20189- *
20190- * TLS descriptors are currently at a different place compared to i386.
20191- * Hopefully nobody expects them at a fixed place (Wine?)
20192- */
20193- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
20194- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
20195- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
20196- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
20197- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
20198- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
20199-#else
20200- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
20201- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
20202- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
20203- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
20204- /*
20205- * Segments used for calling PnP BIOS have byte granularity.
20206- * They code segments and data segments have fixed 64k limits,
20207- * the transfer segment sizes are set at run time.
20208- */
20209- /* 32-bit code */
20210- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
20211- /* 16-bit code */
20212- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
20213- /* 16-bit data */
20214- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
20215- /* 16-bit data */
20216- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
20217- /* 16-bit data */
20218- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
20219- /*
20220- * The APM segments have byte granularity and their bases
20221- * are set at run time. All have 64k limits.
20222- */
20223- /* 32-bit code */
20224- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
20225- /* 16-bit code */
20226- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
20227- /* data */
20228- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
20229-
20230- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
20231- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
20232- GDT_STACK_CANARY_INIT
20233-#endif
20234-} };
20235-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
20236-
20237 static int __init x86_xsave_setup(char *s)
20238 {
20239 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
20240@@ -288,6 +234,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
20241 set_in_cr4(X86_CR4_SMAP);
20242 }
20243
20244+#ifdef CONFIG_X86_64
20245+static __init int setup_disable_pcid(char *arg)
20246+{
20247+ setup_clear_cpu_cap(X86_FEATURE_PCID);
20248+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
20249+
20250+#ifdef CONFIG_PAX_MEMORY_UDEREF
20251+ if (clone_pgd_mask != ~(pgdval_t)0UL)
20252+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
20253+#endif
20254+
20255+ return 1;
20256+}
20257+__setup("nopcid", setup_disable_pcid);
20258+
20259+static void setup_pcid(struct cpuinfo_x86 *c)
20260+{
20261+ if (!cpu_has(c, X86_FEATURE_PCID)) {
20262+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
20263+
20264+#ifdef CONFIG_PAX_MEMORY_UDEREF
20265+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
20266+ pax_open_kernel();
20267+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
20268+ pax_close_kernel();
20269+ printk("PAX: slow and weak UDEREF enabled\n");
20270+ } else
20271+ printk("PAX: UDEREF disabled\n");
20272+#endif
20273+
20274+ return;
20275+ }
20276+
20277+ printk("PAX: PCID detected\n");
20278+ set_in_cr4(X86_CR4_PCIDE);
20279+
20280+#ifdef CONFIG_PAX_MEMORY_UDEREF
20281+ pax_open_kernel();
20282+ clone_pgd_mask = ~(pgdval_t)0UL;
20283+ pax_close_kernel();
20284+ if (pax_user_shadow_base)
20285+ printk("PAX: weak UDEREF enabled\n");
20286+ else {
20287+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
20288+ printk("PAX: strong UDEREF enabled\n");
20289+ }
20290+#endif
20291+
20292+ if (cpu_has(c, X86_FEATURE_INVPCID))
20293+ printk("PAX: INVPCID detected\n");
20294+}
20295+#endif
20296+
20297 /*
20298 * Some CPU features depend on higher CPUID levels, which may not always
20299 * be available due to CPUID level capping or broken virtualization
20300@@ -386,7 +385,7 @@ void switch_to_new_gdt(int cpu)
20301 {
20302 struct desc_ptr gdt_descr;
20303
20304- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
20305+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
20306 gdt_descr.size = GDT_SIZE - 1;
20307 load_gdt(&gdt_descr);
20308 /* Reload the per-cpu base */
20309@@ -875,6 +874,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
20310 setup_smep(c);
20311 setup_smap(c);
20312
20313+#ifdef CONFIG_X86_64
20314+ setup_pcid(c);
20315+#endif
20316+
20317 /*
20318 * The vendor-specific functions might have changed features.
20319 * Now we do "generic changes."
20320@@ -883,6 +886,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
20321 /* Filter out anything that depends on CPUID levels we don't have */
20322 filter_cpuid_features(c, true);
20323
20324+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
20325+ setup_clear_cpu_cap(X86_FEATURE_SEP);
20326+#endif
20327+
20328 /* If the model name is still unset, do table lookup. */
20329 if (!c->x86_model_id[0]) {
20330 const char *p;
20331@@ -1070,10 +1077,12 @@ static __init int setup_disablecpuid(char *arg)
20332 }
20333 __setup("clearcpuid=", setup_disablecpuid);
20334
20335+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
20336+EXPORT_PER_CPU_SYMBOL(current_tinfo);
20337+
20338 #ifdef CONFIG_X86_64
20339-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
20340-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
20341- (unsigned long) debug_idt_table };
20342+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
20343+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
20344
20345 DEFINE_PER_CPU_FIRST(union irq_stack_union,
20346 irq_stack_union) __aligned(PAGE_SIZE) __visible;
20347@@ -1087,7 +1096,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
20348 EXPORT_PER_CPU_SYMBOL(current_task);
20349
20350 DEFINE_PER_CPU(unsigned long, kernel_stack) =
20351- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
20352+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
20353 EXPORT_PER_CPU_SYMBOL(kernel_stack);
20354
20355 DEFINE_PER_CPU(char *, irq_stack_ptr) =
20356@@ -1232,7 +1241,7 @@ void cpu_init(void)
20357 load_ucode_ap();
20358
20359 cpu = stack_smp_processor_id();
20360- t = &per_cpu(init_tss, cpu);
20361+ t = init_tss + cpu;
20362 oist = &per_cpu(orig_ist, cpu);
20363
20364 #ifdef CONFIG_NUMA
20365@@ -1267,7 +1276,6 @@ void cpu_init(void)
20366 wrmsrl(MSR_KERNEL_GS_BASE, 0);
20367 barrier();
20368
20369- x86_configure_nx();
20370 enable_x2apic();
20371
20372 /*
20373@@ -1319,7 +1327,7 @@ void cpu_init(void)
20374 {
20375 int cpu = smp_processor_id();
20376 struct task_struct *curr = current;
20377- struct tss_struct *t = &per_cpu(init_tss, cpu);
20378+ struct tss_struct *t = init_tss + cpu;
20379 struct thread_struct *thread = &curr->thread;
20380
20381 show_ucode_info_early();
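
setup_disable_pcid() is wired to the kernel command line through __setup():
booting with "nopcid" clears the PCID/INVPCID feature bits before
identify_cpu() runs, which forces setup_pcid() down its fallback ("slow and
weak UDEREF") path. The same mechanism serves any early boot switch; a
generic sketch with made-up names:

	static bool example_disabled __initdata;

	static int __init setup_noexample(char *arg)
	{
		example_disabled = true;	/* parsed during early boot */
		return 1;			/* 1 = consumed, don't pass to init */
	}
	__setup("noexample", setup_noexample);
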
20382diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
20383index 1414c90..1159406 100644
20384--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
20385+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
20386@@ -1014,6 +1014,22 @@ static struct attribute *default_attrs[] = {
20387 };
20388
20389 #ifdef CONFIG_AMD_NB
20390+static struct attribute *default_attrs_amd_nb[] = {
20391+ &type.attr,
20392+ &level.attr,
20393+ &coherency_line_size.attr,
20394+ &physical_line_partition.attr,
20395+ &ways_of_associativity.attr,
20396+ &number_of_sets.attr,
20397+ &size.attr,
20398+ &shared_cpu_map.attr,
20399+ &shared_cpu_list.attr,
20400+ NULL,
20401+ NULL,
20402+ NULL,
20403+ NULL
20404+};
20405+
20406 static struct attribute **amd_l3_attrs(void)
20407 {
20408 static struct attribute **attrs;
20409@@ -1024,18 +1040,7 @@ static struct attribute **amd_l3_attrs(void)
20410
20411 n = ARRAY_SIZE(default_attrs);
20412
20413- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
20414- n += 2;
20415-
20416- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
20417- n += 1;
20418-
20419- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
20420- if (attrs == NULL)
20421- return attrs = default_attrs;
20422-
20423- for (n = 0; default_attrs[n]; n++)
20424- attrs[n] = default_attrs[n];
20425+ attrs = default_attrs_amd_nb;
20426
20427 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
20428 attrs[n++] = &cache_disable_0.attr;
20429@@ -1086,6 +1091,13 @@ static struct kobj_type ktype_cache = {
20430 .default_attrs = default_attrs,
20431 };
20432
20433+#ifdef CONFIG_AMD_NB
20434+static struct kobj_type ktype_cache_amd_nb = {
20435+ .sysfs_ops = &sysfs_ops,
20436+ .default_attrs = default_attrs_amd_nb,
20437+};
20438+#endif
20439+
20440 static struct kobj_type ktype_percpu_entry = {
20441 .sysfs_ops = &sysfs_ops,
20442 };
20443@@ -1151,20 +1163,26 @@ static int cache_add_dev(struct device *dev)
20444 return retval;
20445 }
20446
20447+#ifdef CONFIG_AMD_NB
20448+ amd_l3_attrs();
20449+#endif
20450+
20451 for (i = 0; i < num_cache_leaves; i++) {
20452+ struct kobj_type *ktype;
20453+
20454 this_object = INDEX_KOBJECT_PTR(cpu, i);
20455 this_object->cpu = cpu;
20456 this_object->index = i;
20457
20458 this_leaf = CPUID4_INFO_IDX(cpu, i);
20459
20460- ktype_cache.default_attrs = default_attrs;
20461+ ktype = &ktype_cache;
20462 #ifdef CONFIG_AMD_NB
20463 if (this_leaf->base.nb)
20464- ktype_cache.default_attrs = amd_l3_attrs();
20465+ ktype = &ktype_cache_amd_nb;
20466 #endif
20467 retval = kobject_init_and_add(&(this_object->kobj),
20468- &ktype_cache,
20469+ ktype,
20470 per_cpu(ici_cache_kobject, cpu),
20471 "index%1lu", i);
20472 if (unlikely(retval)) {
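
The cacheinfo rework above trades a kzalloc()'d attribute array for a static
one: default_attrs_amd_nb holds the nine attributes that are always present
plus four trailing NULL slots, so amd_l3_attrs() can patch in up to three
optional entries (cache_disable_0/1 and subcaches) while the final slot
always stays NULL as the list terminator. The shape of the pattern in
isolation, with hypothetical names:

	static struct attribute *attrs_sketch[] = {
		&always_present.attr,	/* unconditional entries first */
		NULL,			/* slot for optional attribute #1 */
		NULL,			/* slot for optional attribute #2 */
		NULL,			/* permanent NULL terminator */
	};

	/* fill the spare slots once, at init time, on capable hardware */
	if (has_optional_feature)
		attrs_sketch[1] = &optional.attr;

Besides dropping a runtime allocation (and its failure path), this lets the
AMD variant get its own static kobj_type instead of mutating the shared
ktype_cache at device-add time.
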
20473diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
20474index b3218cd..99a75de 100644
20475--- a/arch/x86/kernel/cpu/mcheck/mce.c
20476+++ b/arch/x86/kernel/cpu/mcheck/mce.c
20477@@ -45,6 +45,7 @@
20478 #include <asm/processor.h>
20479 #include <asm/mce.h>
20480 #include <asm/msr.h>
20481+#include <asm/local.h>
20482
20483 #include "mce-internal.h"
20484
20485@@ -258,7 +259,7 @@ static void print_mce(struct mce *m)
20486 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
20487 m->cs, m->ip);
20488
20489- if (m->cs == __KERNEL_CS)
20490+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
20491 print_symbol("{%s}", m->ip);
20492 pr_cont("\n");
20493 }
20494@@ -291,10 +292,10 @@ static void print_mce(struct mce *m)
20495
20496 #define PANIC_TIMEOUT 5 /* 5 seconds */
20497
20498-static atomic_t mce_paniced;
20499+static atomic_unchecked_t mce_paniced;
20500
20501 static int fake_panic;
20502-static atomic_t mce_fake_paniced;
20503+static atomic_unchecked_t mce_fake_paniced;
20504
20505 /* Panic in progress. Enable interrupts and wait for final IPI */
20506 static void wait_for_panic(void)
20507@@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
20508 /*
20509 * Make sure only one CPU runs in machine check panic
20510 */
20511- if (atomic_inc_return(&mce_paniced) > 1)
20512+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
20513 wait_for_panic();
20514 barrier();
20515
20516@@ -326,7 +327,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
20517 console_verbose();
20518 } else {
20519 /* Don't log too much for fake panic */
20520- if (atomic_inc_return(&mce_fake_paniced) > 1)
20521+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
20522 return;
20523 }
20524 /* First print corrected ones that are still unlogged */
20525@@ -365,7 +366,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
20526 if (!fake_panic) {
20527 if (panic_timeout == 0)
20528 panic_timeout = mca_cfg.panic_timeout;
20529- panic(msg);
20530+ panic("%s", msg);
20531 } else
20532 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
20533 }
20534@@ -695,7 +696,7 @@ static int mce_timed_out(u64 *t)
20535 * might have been modified by someone else.
20536 */
20537 rmb();
20538- if (atomic_read(&mce_paniced))
20539+ if (atomic_read_unchecked(&mce_paniced))
20540 wait_for_panic();
20541 if (!mca_cfg.monarch_timeout)
20542 goto out;
20543@@ -1666,7 +1667,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
20544 }
20545
20546 /* Call the installed machine check handler for this CPU setup. */
20547-void (*machine_check_vector)(struct pt_regs *, long error_code) =
20548+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
20549 unexpected_machine_check;
20550
20551 /*
20552@@ -1689,7 +1690,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
20553 return;
20554 }
20555
20556+ pax_open_kernel();
20557 machine_check_vector = do_machine_check;
20558+ pax_close_kernel();
20559
20560 __mcheck_cpu_init_generic();
20561 __mcheck_cpu_init_vendor(c);
20562@@ -1703,7 +1706,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
20563 */
20564
20565 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
20566-static int mce_chrdev_open_count; /* #times opened */
20567+static local_t mce_chrdev_open_count; /* #times opened */
20568 static int mce_chrdev_open_exclu; /* already open exclusive? */
20569
20570 static int mce_chrdev_open(struct inode *inode, struct file *file)
20571@@ -1711,7 +1714,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
20572 spin_lock(&mce_chrdev_state_lock);
20573
20574 if (mce_chrdev_open_exclu ||
20575- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
20576+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
20577 spin_unlock(&mce_chrdev_state_lock);
20578
20579 return -EBUSY;
20580@@ -1719,7 +1722,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
20581
20582 if (file->f_flags & O_EXCL)
20583 mce_chrdev_open_exclu = 1;
20584- mce_chrdev_open_count++;
20585+ local_inc(&mce_chrdev_open_count);
20586
20587 spin_unlock(&mce_chrdev_state_lock);
20588
20589@@ -1730,7 +1733,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
20590 {
20591 spin_lock(&mce_chrdev_state_lock);
20592
20593- mce_chrdev_open_count--;
20594+ local_dec(&mce_chrdev_open_count);
20595 mce_chrdev_open_exclu = 0;
20596
20597 spin_unlock(&mce_chrdev_state_lock);
20598@@ -2404,7 +2407,7 @@ static __init void mce_init_banks(void)
20599
20600 for (i = 0; i < mca_cfg.banks; i++) {
20601 struct mce_bank *b = &mce_banks[i];
20602- struct device_attribute *a = &b->attr;
20603+ device_attribute_no_const *a = &b->attr;
20604
20605 sysfs_attr_init(&a->attr);
20606 a->attr.name = b->attrname;
20607@@ -2472,7 +2475,7 @@ struct dentry *mce_get_debugfs_dir(void)
20608 static void mce_reset(void)
20609 {
20610 cpu_missing = 0;
20611- atomic_set(&mce_fake_paniced, 0);
20612+ atomic_set_unchecked(&mce_fake_paniced, 0);
20613 atomic_set(&mce_executing, 0);
20614 atomic_set(&mce_callin, 0);
20615 atomic_set(&global_nwo, 0);
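
Two different counter treatments appear above. mce_paniced becomes
atomic_unchecked_t because, under PAX_REFCOUNT, ordinary atomic_t arithmetic
traps on overflow; counters that are not object reference counts opt out
through the *_unchecked API. The chrdev open count becomes a local_t instead,
a long that can be updated with a single uninterruptible instruction on x86
(all accesses here sit under mce_chrdev_state_lock anyway). Basic local_t
usage, for reference:

	#include <asm/local.h>

	static local_t open_count = LOCAL_INIT(0);

	local_inc(&open_count);		/* one atomic-on-this-CPU instruction */
	if (local_read(&open_count) > 1)
		;			/* already open elsewhere */
	local_dec(&open_count);
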
20616diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
20617index 1c044b1..37a2a43 100644
20618--- a/arch/x86/kernel/cpu/mcheck/p5.c
20619+++ b/arch/x86/kernel/cpu/mcheck/p5.c
20620@@ -11,6 +11,7 @@
20621 #include <asm/processor.h>
20622 #include <asm/mce.h>
20623 #include <asm/msr.h>
20624+#include <asm/pgtable.h>
20625
20626 /* By default disabled */
20627 int mce_p5_enabled __read_mostly;
20628@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
20629 if (!cpu_has(c, X86_FEATURE_MCE))
20630 return;
20631
20632+ pax_open_kernel();
20633 machine_check_vector = pentium_machine_check;
20634+ pax_close_kernel();
20635 /* Make sure the vector pointer is visible before we enable MCEs: */
20636 wmb();
20637
20638diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
20639index e9a701a..35317d6 100644
20640--- a/arch/x86/kernel/cpu/mcheck/winchip.c
20641+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
20642@@ -10,6 +10,7 @@
20643 #include <asm/processor.h>
20644 #include <asm/mce.h>
20645 #include <asm/msr.h>
20646+#include <asm/pgtable.h>
20647
20648 /* Machine check handler for WinChip C6: */
20649 static void winchip_machine_check(struct pt_regs *regs, long error_code)
20650@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
20651 {
20652 u32 lo, hi;
20653
20654+ pax_open_kernel();
20655 machine_check_vector = winchip_machine_check;
20656+ pax_close_kernel();
20657 /* Make sure the vector pointer is visible before we enable MCEs: */
20658 wmb();
20659
20660diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
20661index f961de9..8a9d332 100644
20662--- a/arch/x86/kernel/cpu/mtrr/main.c
20663+++ b/arch/x86/kernel/cpu/mtrr/main.c
20664@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
20665 u64 size_or_mask, size_and_mask;
20666 static bool mtrr_aps_delayed_init;
20667
20668-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
20669+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
20670
20671 const struct mtrr_ops *mtrr_if;
20672
20673diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
20674index df5e41f..816c719 100644
20675--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
20676+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
20677@@ -25,7 +25,7 @@ struct mtrr_ops {
20678 int (*validate_add_page)(unsigned long base, unsigned long size,
20679 unsigned int type);
20680 int (*have_wrcomb)(void);
20681-};
20682+} __do_const;
20683
20684 extern int generic_get_free_region(unsigned long base, unsigned long size,
20685 int replace_reg);
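
__do_const is consumed by the PaX "constify" GCC plugin (without the plugin
it expands to nothing): every instance of a structure so marked is treated as
const and placed in .rodata, which is why the mtrr_ops table above can also
be tagged __read_only. The hand-written equivalent, with illustrative names:

	struct ops_sketch {
		int (*have_wrcomb)(void);
	};

	static int sketch_have_wrcomb(void)
	{
		return 0;
	}

	static const struct ops_sketch sketch_ops = {	/* const => .rodata */
		.have_wrcomb = sketch_have_wrcomb,
	};

Making ops tables immutable closes off a classic kernel-exploit technique of
overwriting a function pointer in a known writable structure.
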
20686diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
20687index 8a87a32..682a22a 100644
20688--- a/arch/x86/kernel/cpu/perf_event.c
20689+++ b/arch/x86/kernel/cpu/perf_event.c
20690@@ -1348,7 +1348,7 @@ static void __init pmu_check_apic(void)
20691 pr_info("no hardware sampling interrupt available.\n");
20692 }
20693
20694-static struct attribute_group x86_pmu_format_group = {
20695+static attribute_group_no_const x86_pmu_format_group = {
20696 .name = "format",
20697 .attrs = NULL,
20698 };
20699@@ -1447,7 +1447,7 @@ static struct attribute *events_attr[] = {
20700 NULL,
20701 };
20702
20703-static struct attribute_group x86_pmu_events_group = {
20704+static attribute_group_no_const x86_pmu_events_group = {
20705 .name = "events",
20706 .attrs = events_attr,
20707 };
20708@@ -1958,7 +1958,7 @@ static unsigned long get_segment_base(unsigned int segment)
20709 if (idx > GDT_ENTRIES)
20710 return 0;
20711
20712- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
20713+ desc = get_cpu_gdt_table(smp_processor_id());
20714 }
20715
20716 return get_desc_base(desc + idx);
20717@@ -2048,7 +2048,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
20718 break;
20719
20720 perf_callchain_store(entry, frame.return_address);
20721- fp = frame.next_frame;
20722+ fp = (const void __force_user *)frame.next_frame;
20723 }
20724 }
20725
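
The __force_user cast on frame.next_frame matters under PAX_MEMORY_UDEREF:
frame pointers harvested from a user-mode stack belong to the __user address
space and must never be dereferenced directly; the callchain walker copies
each frame in with the uaccess helpers instead. A sketch of that discipline
(3.12-era access_ok() still takes the VERIFY_READ argument):

	struct stack_frame_user {
		const void __user *next_frame;
		unsigned long return_address;
	};

	static int copy_user_frame(struct stack_frame_user *frame,
				   const void __user *fp)
	{
		if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
			return 0;
		/* may run from NMI context: must not fault, hence inatomic */
		return __copy_from_user_inatomic(frame, fp, sizeof(*frame)) == 0;
	}
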
20726diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
20727index 639d128..e92d7e5 100644
20728--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
20729+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
20730@@ -405,7 +405,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
20731 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
20732 {
20733 struct attribute **attrs;
20734- struct attribute_group *attr_group;
20735+ attribute_group_no_const *attr_group;
20736 int i = 0, j;
20737
20738 while (amd_iommu_v2_event_descs[i].attr.attr.name)
20739diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
20740index f31a165..7b46cd8 100644
20741--- a/arch/x86/kernel/cpu/perf_event_intel.c
20742+++ b/arch/x86/kernel/cpu/perf_event_intel.c
20743@@ -2247,10 +2247,10 @@ __init int intel_pmu_init(void)
20744 * v2 and above have a perf capabilities MSR
20745 */
20746 if (version > 1) {
20747- u64 capabilities;
20748+ u64 capabilities = x86_pmu.intel_cap.capabilities;
20749
20750- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
20751- x86_pmu.intel_cap.capabilities = capabilities;
20752+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
20753+ x86_pmu.intel_cap.capabilities = capabilities;
20754 }
20755
20756 intel_ds_init();
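
The intel_pmu_init() hunk is a robustness fix: rdmsrl() on
MSR_IA32_PERF_CAPABILITIES can raise #GP on hypervisors that advertise
architectural perfmon v2 but do not emulate the MSR. rdmsrl_safe() traps the
fault and returns non-zero, in which case the previously initialized
capabilities value is kept. The general pattern:

	u64 caps;

	if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &caps))
		caps = 0;	/* MSR faulted (e.g. under a hypervisor); fall back */
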
20757diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
20758index 4118f9f..f91d0ab 100644
20759--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
20760+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
20761@@ -3204,7 +3204,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
20762 static int __init uncore_type_init(struct intel_uncore_type *type)
20763 {
20764 struct intel_uncore_pmu *pmus;
20765- struct attribute_group *attr_group;
20766+ attribute_group_no_const *attr_group;
20767 struct attribute **attrs;
20768 int i, j;
20769
20770diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
20771index a80ab71..4089da5 100644
20772--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
20773+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
20774@@ -498,7 +498,7 @@ struct intel_uncore_box {
20775 struct uncore_event_desc {
20776 struct kobj_attribute attr;
20777 const char *config;
20778-};
20779+} __do_const;
20780
20781 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
20782 { \
20783diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
20784index 7d9481c..99c7e4b 100644
20785--- a/arch/x86/kernel/cpuid.c
20786+++ b/arch/x86/kernel/cpuid.c
20787@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
20788 return notifier_from_errno(err);
20789 }
20790
20791-static struct notifier_block __refdata cpuid_class_cpu_notifier =
20792+static struct notifier_block cpuid_class_cpu_notifier =
20793 {
20794 .notifier_call = cpuid_class_cpu_callback,
20795 };
20796diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
20797index 18677a9..f67c45b 100644
20798--- a/arch/x86/kernel/crash.c
20799+++ b/arch/x86/kernel/crash.c
20800@@ -58,10 +58,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
20801 {
20802 #ifdef CONFIG_X86_32
20803 struct pt_regs fixed_regs;
20804-#endif
20805
20806-#ifdef CONFIG_X86_32
20807- if (!user_mode_vm(regs)) {
20808+ if (!user_mode(regs)) {
20809 crash_fixup_ss_esp(&fixed_regs, regs);
20810 regs = &fixed_regs;
20811 }
20812diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
20813index afa64ad..dce67dd 100644
20814--- a/arch/x86/kernel/crash_dump_64.c
20815+++ b/arch/x86/kernel/crash_dump_64.c
20816@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
20817 return -ENOMEM;
20818
20819 if (userbuf) {
20820- if (copy_to_user(buf, vaddr + offset, csize)) {
20821+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
20822 iounmap(vaddr);
20823 return -EFAULT;
20824 }
20825diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
20826index 5d3fe8d..02e1429 100644
20827--- a/arch/x86/kernel/doublefault.c
20828+++ b/arch/x86/kernel/doublefault.c
20829@@ -13,7 +13,7 @@
20830
20831 #define DOUBLEFAULT_STACKSIZE (1024)
20832 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
20833-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
20834+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
20835
20836 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
20837
20838@@ -23,7 +23,7 @@ static void doublefault_fn(void)
20839 unsigned long gdt, tss;
20840
20841 native_store_gdt(&gdt_desc);
20842- gdt = gdt_desc.address;
20843+ gdt = (unsigned long)gdt_desc.address;
20844
20845 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
20846
20847@@ -60,10 +60,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
20848 /* 0x2 bit is always set */
20849 .flags = X86_EFLAGS_SF | 0x2,
20850 .sp = STACK_START,
20851- .es = __USER_DS,
20852+ .es = __KERNEL_DS,
20853 .cs = __KERNEL_CS,
20854 .ss = __KERNEL_DS,
20855- .ds = __USER_DS,
20856+ .ds = __KERNEL_DS,
20857 .fs = __KERNEL_PERCPU,
20858
20859 .__cr3 = __pa_nodebug(swapper_pg_dir),
20860diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
20861index deb6421..76bbc12 100644
20862--- a/arch/x86/kernel/dumpstack.c
20863+++ b/arch/x86/kernel/dumpstack.c
20864@@ -2,6 +2,9 @@
20865 * Copyright (C) 1991, 1992 Linus Torvalds
20866 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
20867 */
20868+#ifdef CONFIG_GRKERNSEC_HIDESYM
20869+#define __INCLUDED_BY_HIDESYM 1
20870+#endif
20871 #include <linux/kallsyms.h>
20872 #include <linux/kprobes.h>
20873 #include <linux/uaccess.h>
20874@@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
20875 static void
20876 print_ftrace_graph_addr(unsigned long addr, void *data,
20877 const struct stacktrace_ops *ops,
20878- struct thread_info *tinfo, int *graph)
20879+ struct task_struct *task, int *graph)
20880 {
20881- struct task_struct *task;
20882 unsigned long ret_addr;
20883 int index;
20884
20885 if (addr != (unsigned long)return_to_handler)
20886 return;
20887
20888- task = tinfo->task;
20889 index = task->curr_ret_stack;
20890
20891 if (!task->ret_stack || index < *graph)
20892@@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
20893 static inline void
20894 print_ftrace_graph_addr(unsigned long addr, void *data,
20895 const struct stacktrace_ops *ops,
20896- struct thread_info *tinfo, int *graph)
20897+ struct task_struct *task, int *graph)
20898 { }
20899 #endif
20900
20901@@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
20902 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
20903 */
20904
20905-static inline int valid_stack_ptr(struct thread_info *tinfo,
20906- void *p, unsigned int size, void *end)
20907+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
20908 {
20909- void *t = tinfo;
20910 if (end) {
20911 if (p < end && p >= (end-THREAD_SIZE))
20912 return 1;
20913@@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
20914 }
20915
20916 unsigned long
20917-print_context_stack(struct thread_info *tinfo,
20918+print_context_stack(struct task_struct *task, void *stack_start,
20919 unsigned long *stack, unsigned long bp,
20920 const struct stacktrace_ops *ops, void *data,
20921 unsigned long *end, int *graph)
20922 {
20923 struct stack_frame *frame = (struct stack_frame *)bp;
20924
20925- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
20926+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
20927 unsigned long addr;
20928
20929 addr = *stack;
20930@@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
20931 } else {
20932 ops->address(data, addr, 0);
20933 }
20934- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
20935+ print_ftrace_graph_addr(addr, data, ops, task, graph);
20936 }
20937 stack++;
20938 }
20939@@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
20940 EXPORT_SYMBOL_GPL(print_context_stack);
20941
20942 unsigned long
20943-print_context_stack_bp(struct thread_info *tinfo,
20944+print_context_stack_bp(struct task_struct *task, void *stack_start,
20945 unsigned long *stack, unsigned long bp,
20946 const struct stacktrace_ops *ops, void *data,
20947 unsigned long *end, int *graph)
20948@@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
20949 struct stack_frame *frame = (struct stack_frame *)bp;
20950 unsigned long *ret_addr = &frame->return_address;
20951
20952- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
20953+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
20954 unsigned long addr = *ret_addr;
20955
20956 if (!__kernel_text_address(addr))
20957@@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
20958 ops->address(data, addr, 1);
20959 frame = frame->next_frame;
20960 ret_addr = &frame->return_address;
20961- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
20962+ print_ftrace_graph_addr(addr, data, ops, task, graph);
20963 }
20964
20965 return (unsigned long)frame;
20966@@ -150,7 +149,7 @@ static int print_trace_stack(void *data, char *name)
20967 static void print_trace_address(void *data, unsigned long addr, int reliable)
20968 {
20969 touch_nmi_watchdog();
20970- printk(data);
20971+ printk("%s", (char *)data);
20972 printk_address(addr, reliable);
20973 }
20974
20975@@ -219,6 +218,8 @@ unsigned __kprobes long oops_begin(void)
20976 }
20977 EXPORT_SYMBOL_GPL(oops_begin);
20978
20979+extern void gr_handle_kernel_exploit(void);
20980+
20981 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
20982 {
20983 if (regs && kexec_should_crash(current))
20984@@ -240,7 +241,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
20985 panic("Fatal exception in interrupt");
20986 if (panic_on_oops)
20987 panic("Fatal exception");
20988- do_exit(signr);
20989+
20990+ gr_handle_kernel_exploit();
20991+
20992+ do_group_exit(signr);
20993 }
20994
20995 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
20996@@ -268,7 +272,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
20997 print_modules();
20998 show_regs(regs);
20999 #ifdef CONFIG_X86_32
21000- if (user_mode_vm(regs)) {
21001+ if (user_mode(regs)) {
21002 sp = regs->sp;
21003 ss = regs->ss & 0xffff;
21004 } else {
21005@@ -296,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
21006 unsigned long flags = oops_begin();
21007 int sig = SIGSEGV;
21008
21009- if (!user_mode_vm(regs))
21010+ if (!user_mode(regs))
21011 report_bug(regs->ip, regs);
21012
21013 if (__die(str, regs, err))
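
The dumpstack rework swaps the thread_info-based bounds for an explicit
stack_start pointer, because this patch moves thread_info off the kernel
stack (see the per-cpu current_tinfo added to common.c above). The bounds
test itself stays simple; roughly:

	/* p is a plausible stack slot iff [p, p+size) lies inside the stack */
	static int valid_stack_ptr_sketch(const void *stack_start,
					  const void *p, unsigned int size)
	{
		return p >= stack_start &&
		       (const char *)p + size <=
		       (const char *)stack_start + THREAD_SIZE;
	}

Separately, oops_end() now calls gr_handle_kernel_exploit() and takes the
whole thread group down with do_group_exit(): grsecurity's deterrent against
brute-forcing a kernel bug from user space one crashed thread at a time.
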
21014diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
21015index f2a1770..540657f 100644
21016--- a/arch/x86/kernel/dumpstack_32.c
21017+++ b/arch/x86/kernel/dumpstack_32.c
21018@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21019 bp = stack_frame(task, regs);
21020
21021 for (;;) {
21022- struct thread_info *context;
21023+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
21024
21025- context = (struct thread_info *)
21026- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
21027- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
21028+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
21029
21030- stack = (unsigned long *)context->previous_esp;
21031- if (!stack)
21032+ if (stack_start == task_stack_page(task))
21033 break;
21034+ stack = *(unsigned long **)stack_start;
21035 if (ops->stack(data, "IRQ") < 0)
21036 break;
21037 touch_nmi_watchdog();
21038@@ -87,27 +85,28 @@ void show_regs(struct pt_regs *regs)
21039 int i;
21040
21041 show_regs_print_info(KERN_EMERG);
21042- __show_regs(regs, !user_mode_vm(regs));
21043+ __show_regs(regs, !user_mode(regs));
21044
21045 /*
21046 * When in-kernel, we also print out the stack and code at the
21047 * time of the fault..
21048 */
21049- if (!user_mode_vm(regs)) {
21050+ if (!user_mode(regs)) {
21051 unsigned int code_prologue = code_bytes * 43 / 64;
21052 unsigned int code_len = code_bytes;
21053 unsigned char c;
21054 u8 *ip;
21055+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
21056
21057 pr_emerg("Stack:\n");
21058 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
21059
21060 pr_emerg("Code:");
21061
21062- ip = (u8 *)regs->ip - code_prologue;
21063+ ip = (u8 *)regs->ip - code_prologue + cs_base;
21064 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
21065 /* try starting at IP */
21066- ip = (u8 *)regs->ip;
21067+ ip = (u8 *)regs->ip + cs_base;
21068 code_len = code_len - code_prologue + 1;
21069 }
21070 for (i = 0; i < code_len; i++, ip++) {
21071@@ -116,7 +115,7 @@ void show_regs(struct pt_regs *regs)
21072 pr_cont(" Bad EIP value.");
21073 break;
21074 }
21075- if (ip == (u8 *)regs->ip)
21076+ if (ip == (u8 *)regs->ip + cs_base)
21077 pr_cont(" <%02x>", c);
21078 else
21079 pr_cont(" %02x", c);
21080@@ -129,6 +128,7 @@ int is_valid_bugaddr(unsigned long ip)
21081 {
21082 unsigned short ud2;
21083
21084+ ip = ktla_ktva(ip);
21085 if (ip < PAGE_OFFSET)
21086 return 0;
21087 if (probe_kernel_address((unsigned short *)ip, ud2))
21088@@ -136,3 +136,15 @@ int is_valid_bugaddr(unsigned long ip)
21089
21090 return ud2 == 0x0b0f;
21091 }
21092+
21093+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
21094+void pax_check_alloca(unsigned long size)
21095+{
21096+ unsigned long sp = (unsigned long)&sp, stack_left;
21097+
21098+ /* all kernel stacks are of the same size */
21099+ stack_left = sp & (THREAD_SIZE - 1);
21100+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
21101+}
21102+EXPORT_SYMBOL(pax_check_alloca);
21103+#endif
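
Because 32-bit kernel stacks are THREAD_SIZE-aligned and grow down,
sp & (THREAD_SIZE - 1) is exactly the number of bytes still free below the
current stack pointer. Worked example with THREAD_SIZE = 8 KiB: for
sp = 0xc12345a0, sp & 0x1fff = 0x5a0 = 1440 bytes left, so pax_check_alloca()
permits allocations up to 1183 bytes (size must stay below 1440 - 256) and
BUG()s on anything larger, keeping a 256-byte red zone below the allocation.
The STACKLEAK GCC plugin inserts these calls at every
alloca/variable-length-array site it instruments.
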
21104diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
21105index addb207..99635fa 100644
21106--- a/arch/x86/kernel/dumpstack_64.c
21107+++ b/arch/x86/kernel/dumpstack_64.c
21108@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21109 unsigned long *irq_stack_end =
21110 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
21111 unsigned used = 0;
21112- struct thread_info *tinfo;
21113 int graph = 0;
21114 unsigned long dummy;
21115+ void *stack_start;
21116
21117 if (!task)
21118 task = current;
21119@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21120 * current stack address. If the stacks consist of nested
21121 * exceptions
21122 */
21123- tinfo = task_thread_info(task);
21124 for (;;) {
21125 char *id;
21126 unsigned long *estack_end;
21127+
21128 estack_end = in_exception_stack(cpu, (unsigned long)stack,
21129 &used, &id);
21130
21131@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21132 if (ops->stack(data, id) < 0)
21133 break;
21134
21135- bp = ops->walk_stack(tinfo, stack, bp, ops,
21136+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
21137 data, estack_end, &graph);
21138 ops->stack(data, "<EOE>");
21139 /*
21140@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21141 * second-to-last pointer (index -2 to end) in the
21142 * exception stack:
21143 */
21144+ if ((u16)estack_end[-1] != __KERNEL_DS)
21145+ goto out;
21146 stack = (unsigned long *) estack_end[-2];
21147 continue;
21148 }
21149@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21150 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
21151 if (ops->stack(data, "IRQ") < 0)
21152 break;
21153- bp = ops->walk_stack(tinfo, stack, bp,
21154+ bp = ops->walk_stack(task, irq_stack, stack, bp,
21155 ops, data, irq_stack_end, &graph);
21156 /*
21157 * We link to the next stack (which would be
21158@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21159 /*
21160 * This handles the process stack:
21161 */
21162- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
21163+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
21164+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
21165+out:
21166 put_cpu();
21167 }
21168 EXPORT_SYMBOL(dump_trace);
21169@@ -300,3 +304,50 @@ int is_valid_bugaddr(unsigned long ip)
21170
21171 return ud2 == 0x0b0f;
21172 }
21173+
21174+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
21175+void pax_check_alloca(unsigned long size)
21176+{
21177+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
21178+ unsigned cpu, used;
21179+ char *id;
21180+
21181+ /* check the process stack first */
21182+ stack_start = (unsigned long)task_stack_page(current);
21183+ stack_end = stack_start + THREAD_SIZE;
21184+ if (likely(stack_start <= sp && sp < stack_end)) {
21185+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
21186+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
21187+ return;
21188+ }
21189+
21190+ cpu = get_cpu();
21191+
21192+ /* check the irq stacks */
21193+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
21194+ stack_start = stack_end - IRQ_STACK_SIZE;
21195+ if (stack_start <= sp && sp < stack_end) {
21196+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
21197+ put_cpu();
21198+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
21199+ return;
21200+ }
21201+
21202+ /* check the exception stacks */
21203+ used = 0;
21204+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
21205+ stack_start = stack_end - EXCEPTION_STKSZ;
21206+ if (stack_end && stack_start <= sp && sp < stack_end) {
21207+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
21208+ put_cpu();
21209+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
21210+ return;
21211+ }
21212+
21213+ put_cpu();
21214+
21215+ /* unknown stack */
21216+ BUG();
21217+}
21218+EXPORT_SYMBOL(pax_check_alloca);
21219+#endif
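
The 64-bit variant has to cover more ground: a task's stack pointer can live
on the THREAD_SIZE process stack, the per-cpu IRQ stack, or one of the
per-type exception stacks, so pax_check_alloca() probes each range in turn
and BUG()s only if sp matches none of them. Each probe is the same half-open
interval test:

	/* does sp fall within [start, start + size)? */
	static bool on_stack(unsigned long sp, unsigned long start,
			     unsigned long size)
	{
		return start <= sp && sp < start + size;
	}

The related dump_trace() change, comparing (u16)estack_end[-1] against
__KERNEL_DS, sanity-checks the saved SS slot at the top of an exception stack
before trusting the back-link stored at estack_end[-2].
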
21220diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
21221index 174da5f..5e55606 100644
21222--- a/arch/x86/kernel/e820.c
21223+++ b/arch/x86/kernel/e820.c
21224@@ -803,8 +803,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
21225
21226 static void early_panic(char *msg)
21227 {
21228- early_printk(msg);
21229- panic(msg);
21230+ early_printk("%s", msg);
21231+ panic("%s", msg);
21232 }
21233
21234 static int userdef __initdata;
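
The early_panic() change is a format-string fix: passing a non-literal string
as the format argument means any '%' inside it is parsed as a conversion that
consumes nonexistent varargs. Routing it through an explicit "%s" is the
standard remedy:

	char msg[] = "e820: 100% of range overlaps";	/* data-derived text */

	printk(msg);		/* BAD: "% o" parses as a conversion, reads a bogus vararg */
	printk("%s", msg);	/* GOOD: msg is printed verbatim */

The same "%s" treatment shows up all over this patch wherever panic(),
printk() or early_printk() used to receive a caller-supplied buffer directly.
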
21235diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
21236index d15f575..d692043 100644
21237--- a/arch/x86/kernel/early_printk.c
21238+++ b/arch/x86/kernel/early_printk.c
21239@@ -7,6 +7,7 @@
21240 #include <linux/pci_regs.h>
21241 #include <linux/pci_ids.h>
21242 #include <linux/errno.h>
21243+#include <linux/sched.h>
21244 #include <asm/io.h>
21245 #include <asm/processor.h>
21246 #include <asm/fcntl.h>
21247diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
21248index f0dcb0c..9f39b80 100644
21249--- a/arch/x86/kernel/entry_32.S
21250+++ b/arch/x86/kernel/entry_32.S
21251@@ -177,13 +177,153 @@
21252 /*CFI_REL_OFFSET gs, PT_GS*/
21253 .endm
21254 .macro SET_KERNEL_GS reg
21255+
21256+#ifdef CONFIG_CC_STACKPROTECTOR
21257 movl $(__KERNEL_STACK_CANARY), \reg
21258+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
21259+ movl $(__USER_DS), \reg
21260+#else
21261+ xorl \reg, \reg
21262+#endif
21263+
21264 movl \reg, %gs
21265 .endm
21266
21267 #endif /* CONFIG_X86_32_LAZY_GS */
21268
21269-.macro SAVE_ALL
21270+.macro pax_enter_kernel
21271+#ifdef CONFIG_PAX_KERNEXEC
21272+ call pax_enter_kernel
21273+#endif
21274+.endm
21275+
21276+.macro pax_exit_kernel
21277+#ifdef CONFIG_PAX_KERNEXEC
21278+ call pax_exit_kernel
21279+#endif
21280+.endm
21281+
21282+#ifdef CONFIG_PAX_KERNEXEC
21283+ENTRY(pax_enter_kernel)
21284+#ifdef CONFIG_PARAVIRT
21285+ pushl %eax
21286+ pushl %ecx
21287+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
21288+ mov %eax, %esi
21289+#else
21290+ mov %cr0, %esi
21291+#endif
21292+ bts $16, %esi
21293+ jnc 1f
21294+ mov %cs, %esi
21295+ cmp $__KERNEL_CS, %esi
21296+ jz 3f
21297+ ljmp $__KERNEL_CS, $3f
21298+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
21299+2:
21300+#ifdef CONFIG_PARAVIRT
21301+ mov %esi, %eax
21302+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
21303+#else
21304+ mov %esi, %cr0
21305+#endif
21306+3:
21307+#ifdef CONFIG_PARAVIRT
21308+ popl %ecx
21309+ popl %eax
21310+#endif
21311+ ret
21312+ENDPROC(pax_enter_kernel)
21313+
21314+ENTRY(pax_exit_kernel)
21315+#ifdef CONFIG_PARAVIRT
21316+ pushl %eax
21317+ pushl %ecx
21318+#endif
21319+ mov %cs, %esi
21320+ cmp $__KERNEXEC_KERNEL_CS, %esi
21321+ jnz 2f
21322+#ifdef CONFIG_PARAVIRT
21323+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
21324+ mov %eax, %esi
21325+#else
21326+ mov %cr0, %esi
21327+#endif
21328+ btr $16, %esi
21329+ ljmp $__KERNEL_CS, $1f
21330+1:
21331+#ifdef CONFIG_PARAVIRT
21332+ mov %esi, %eax
21333+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
21334+#else
21335+ mov %esi, %cr0
21336+#endif
21337+2:
21338+#ifdef CONFIG_PARAVIRT
21339+ popl %ecx
21340+ popl %eax
21341+#endif
21342+ ret
21343+ENDPROC(pax_exit_kernel)
21344+#endif
21345+
21346+ .macro pax_erase_kstack
21347+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
21348+ call pax_erase_kstack
21349+#endif
21350+ .endm
21351+
21352+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
21353+/*
21354+ * ebp: thread_info
21355+ */
21356+ENTRY(pax_erase_kstack)
21357+ pushl %edi
21358+ pushl %ecx
21359+ pushl %eax
21360+
21361+ mov TI_lowest_stack(%ebp), %edi
21362+ mov $-0xBEEF, %eax
21363+ std
21364+
21365+1: mov %edi, %ecx
21366+ and $THREAD_SIZE_asm - 1, %ecx
21367+ shr $2, %ecx
21368+ repne scasl
21369+ jecxz 2f
21370+
21371+ cmp $2*16, %ecx
21372+ jc 2f
21373+
21374+ mov $2*16, %ecx
21375+ repe scasl
21376+ jecxz 2f
21377+ jne 1b
21378+
21379+2: cld
21380+ mov %esp, %ecx
21381+ sub %edi, %ecx
21382+
21383+ cmp $THREAD_SIZE_asm, %ecx
21384+ jb 3f
21385+ ud2
21386+3:
21387+
21388+ shr $2, %ecx
21389+ rep stosl
21390+
21391+ mov TI_task_thread_sp0(%ebp), %edi
21392+ sub $128, %edi
21393+ mov %edi, TI_lowest_stack(%ebp)
21394+
21395+ popl %eax
21396+ popl %ecx
21397+ popl %edi
21398+ ret
21399+ENDPROC(pax_erase_kstack)
21400+#endif
21401+
21402+.macro __SAVE_ALL _DS
21403 cld
21404 PUSH_GS
21405 pushl_cfi %fs
21406@@ -206,7 +346,7 @@
21407 CFI_REL_OFFSET ecx, 0
21408 pushl_cfi %ebx
21409 CFI_REL_OFFSET ebx, 0
21410- movl $(__USER_DS), %edx
21411+ movl $\_DS, %edx
21412 movl %edx, %ds
21413 movl %edx, %es
21414 movl $(__KERNEL_PERCPU), %edx
21415@@ -214,6 +354,15 @@
21416 SET_KERNEL_GS %edx
21417 .endm
21418
21419+.macro SAVE_ALL
21420+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
21421+ __SAVE_ALL __KERNEL_DS
21422+ pax_enter_kernel
21423+#else
21424+ __SAVE_ALL __USER_DS
21425+#endif
21426+.endm
21427+
21428 .macro RESTORE_INT_REGS
21429 popl_cfi %ebx
21430 CFI_RESTORE ebx
21431@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
21432 popfl_cfi
21433 jmp syscall_exit
21434 CFI_ENDPROC
21435-END(ret_from_fork)
21436+ENDPROC(ret_from_fork)
21437
21438 ENTRY(ret_from_kernel_thread)
21439 CFI_STARTPROC
21440@@ -344,7 +493,15 @@ ret_from_intr:
21441 andl $SEGMENT_RPL_MASK, %eax
21442 #endif
21443 cmpl $USER_RPL, %eax
21444+
21445+#ifdef CONFIG_PAX_KERNEXEC
21446+ jae resume_userspace
21447+
21448+ pax_exit_kernel
21449+ jmp resume_kernel
21450+#else
21451 jb resume_kernel # not returning to v8086 or userspace
21452+#endif
21453
21454 ENTRY(resume_userspace)
21455 LOCKDEP_SYS_EXIT
21456@@ -356,8 +513,8 @@ ENTRY(resume_userspace)
21457 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
21458 # int/exception return?
21459 jne work_pending
21460- jmp restore_all
21461-END(ret_from_exception)
21462+ jmp restore_all_pax
21463+ENDPROC(ret_from_exception)
21464
21465 #ifdef CONFIG_PREEMPT
21466 ENTRY(resume_kernel)
21467@@ -372,7 +529,7 @@ need_resched:
21468 jz restore_all
21469 call preempt_schedule_irq
21470 jmp need_resched
21471-END(resume_kernel)
21472+ENDPROC(resume_kernel)
21473 #endif
21474 CFI_ENDPROC
21475 /*
21476@@ -406,30 +563,45 @@ sysenter_past_esp:
21477 /*CFI_REL_OFFSET cs, 0*/
21478 /*
21479 * Push current_thread_info()->sysenter_return to the stack.
21480- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
21481- * pushed above; +8 corresponds to copy_thread's esp0 setting.
21482 */
21483- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
21484+ pushl_cfi $0
21485 CFI_REL_OFFSET eip, 0
21486
21487 pushl_cfi %eax
21488 SAVE_ALL
21489+ GET_THREAD_INFO(%ebp)
21490+ movl TI_sysenter_return(%ebp),%ebp
21491+ movl %ebp,PT_EIP(%esp)
21492 ENABLE_INTERRUPTS(CLBR_NONE)
21493
21494 /*
21495 * Load the potential sixth argument from user stack.
21496 * Careful about security.
21497 */
21498+ movl PT_OLDESP(%esp),%ebp
21499+
21500+#ifdef CONFIG_PAX_MEMORY_UDEREF
21501+ mov PT_OLDSS(%esp),%ds
21502+1: movl %ds:(%ebp),%ebp
21503+ push %ss
21504+ pop %ds
21505+#else
21506 cmpl $__PAGE_OFFSET-3,%ebp
21507 jae syscall_fault
21508 ASM_STAC
21509 1: movl (%ebp),%ebp
21510 ASM_CLAC
21511+#endif
21512+
21513 movl %ebp,PT_EBP(%esp)
21514 _ASM_EXTABLE(1b,syscall_fault)
21515
21516 GET_THREAD_INFO(%ebp)
21517
21518+#ifdef CONFIG_PAX_RANDKSTACK
21519+ pax_erase_kstack
21520+#endif
21521+
21522 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
21523 jnz sysenter_audit
21524 sysenter_do_call:
21525@@ -444,12 +616,24 @@ sysenter_do_call:
21526 testl $_TIF_ALLWORK_MASK, %ecx
21527 jne sysexit_audit
21528 sysenter_exit:
21529+
21530+#ifdef CONFIG_PAX_RANDKSTACK
21531+ pushl_cfi %eax
21532+ movl %esp, %eax
21533+ call pax_randomize_kstack
21534+ popl_cfi %eax
21535+#endif
21536+
21537+ pax_erase_kstack
21538+
21539 /* if something modifies registers it must also disable sysexit */
21540 movl PT_EIP(%esp), %edx
21541 movl PT_OLDESP(%esp), %ecx
21542 xorl %ebp,%ebp
21543 TRACE_IRQS_ON
21544 1: mov PT_FS(%esp), %fs
21545+2: mov PT_DS(%esp), %ds
21546+3: mov PT_ES(%esp), %es
21547 PTGS_TO_GS
21548 ENABLE_INTERRUPTS_SYSEXIT
21549
21550@@ -466,6 +650,9 @@ sysenter_audit:
21551 movl %eax,%edx /* 2nd arg: syscall number */
21552 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
21553 call __audit_syscall_entry
21554+
21555+ pax_erase_kstack
21556+
21557 pushl_cfi %ebx
21558 movl PT_EAX(%esp),%eax /* reload syscall number */
21559 jmp sysenter_do_call
21560@@ -491,10 +678,16 @@ sysexit_audit:
21561
21562 CFI_ENDPROC
21563 .pushsection .fixup,"ax"
21564-2: movl $0,PT_FS(%esp)
21565+4: movl $0,PT_FS(%esp)
21566+ jmp 1b
21567+5: movl $0,PT_DS(%esp)
21568+ jmp 1b
21569+6: movl $0,PT_ES(%esp)
21570 jmp 1b
21571 .popsection
21572- _ASM_EXTABLE(1b,2b)
21573+ _ASM_EXTABLE(1b,4b)
21574+ _ASM_EXTABLE(2b,5b)
21575+ _ASM_EXTABLE(3b,6b)
21576 PTGS_TO_GS_EX
21577 ENDPROC(ia32_sysenter_target)
21578
21579@@ -509,6 +702,11 @@ ENTRY(system_call)
21580 pushl_cfi %eax # save orig_eax
21581 SAVE_ALL
21582 GET_THREAD_INFO(%ebp)
21583+
21584+#ifdef CONFIG_PAX_RANDKSTACK
21585+ pax_erase_kstack
21586+#endif
21587+
21588 # system call tracing in operation / emulation
21589 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
21590 jnz syscall_trace_entry
21591@@ -527,6 +725,15 @@ syscall_exit:
21592 testl $_TIF_ALLWORK_MASK, %ecx # current->work
21593 jne syscall_exit_work
21594
21595+restore_all_pax:
21596+
21597+#ifdef CONFIG_PAX_RANDKSTACK
21598+ movl %esp, %eax
21599+ call pax_randomize_kstack
21600+#endif
21601+
21602+ pax_erase_kstack
21603+
21604 restore_all:
21605 TRACE_IRQS_IRET
21606 restore_all_notrace:
21607@@ -583,14 +790,34 @@ ldt_ss:
21608 * compensating for the offset by changing to the ESPFIX segment with
21609 * a base address that matches for the difference.
21610 */
21611-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
21612+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
21613 mov %esp, %edx /* load kernel esp */
21614 mov PT_OLDESP(%esp), %eax /* load userspace esp */
21615 mov %dx, %ax /* eax: new kernel esp */
21616 sub %eax, %edx /* offset (low word is 0) */
21617+#ifdef CONFIG_SMP
21618+ movl PER_CPU_VAR(cpu_number), %ebx
21619+ shll $PAGE_SHIFT_asm, %ebx
21620+ addl $cpu_gdt_table, %ebx
21621+#else
21622+ movl $cpu_gdt_table, %ebx
21623+#endif
21624 shr $16, %edx
21625- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
21626- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
21627+
21628+#ifdef CONFIG_PAX_KERNEXEC
21629+ mov %cr0, %esi
21630+ btr $16, %esi
21631+ mov %esi, %cr0
21632+#endif
21633+
21634+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
21635+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
21636+
21637+#ifdef CONFIG_PAX_KERNEXEC
21638+ bts $16, %esi
21639+ mov %esi, %cr0
21640+#endif
21641+
21642 pushl_cfi $__ESPFIX_SS
21643 pushl_cfi %eax /* new kernel esp */
21644 /* Disable interrupts, but do not irqtrace this section: we
21645@@ -619,20 +846,18 @@ work_resched:
21646 movl TI_flags(%ebp), %ecx
21647 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
21648 # than syscall tracing?
21649- jz restore_all
21650+ jz restore_all_pax
21651 testb $_TIF_NEED_RESCHED, %cl
21652 jnz work_resched
21653
21654 work_notifysig: # deal with pending signals and
21655 # notify-resume requests
21656+ movl %esp, %eax
21657 #ifdef CONFIG_VM86
21658 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
21659- movl %esp, %eax
21660 jne work_notifysig_v86 # returning to kernel-space or
21661 # vm86-space
21662 1:
21663-#else
21664- movl %esp, %eax
21665 #endif
21666 TRACE_IRQS_ON
21667 ENABLE_INTERRUPTS(CLBR_NONE)
21668@@ -653,7 +878,7 @@ work_notifysig_v86:
21669 movl %eax, %esp
21670 jmp 1b
21671 #endif
21672-END(work_pending)
21673+ENDPROC(work_pending)
21674
21675 # perform syscall exit tracing
21676 ALIGN
21677@@ -661,11 +886,14 @@ syscall_trace_entry:
21678 movl $-ENOSYS,PT_EAX(%esp)
21679 movl %esp, %eax
21680 call syscall_trace_enter
21681+
21682+ pax_erase_kstack
21683+
21684 /* What it returned is what we'll actually use. */
21685 cmpl $(NR_syscalls), %eax
21686 jnae syscall_call
21687 jmp syscall_exit
21688-END(syscall_trace_entry)
21689+ENDPROC(syscall_trace_entry)
21690
21691 # perform syscall exit tracing
21692 ALIGN
21693@@ -678,21 +906,25 @@ syscall_exit_work:
21694 movl %esp, %eax
21695 call syscall_trace_leave
21696 jmp resume_userspace
21697-END(syscall_exit_work)
21698+ENDPROC(syscall_exit_work)
21699 CFI_ENDPROC
21700
21701 RING0_INT_FRAME # can't unwind into user space anyway
21702 syscall_fault:
21703+#ifdef CONFIG_PAX_MEMORY_UDEREF
21704+ push %ss
21705+ pop %ds
21706+#endif
21707 ASM_CLAC
21708 GET_THREAD_INFO(%ebp)
21709 movl $-EFAULT,PT_EAX(%esp)
21710 jmp resume_userspace
21711-END(syscall_fault)
21712+ENDPROC(syscall_fault)
21713
21714 syscall_badsys:
21715 movl $-ENOSYS,PT_EAX(%esp)
21716 jmp resume_userspace
21717-END(syscall_badsys)
21718+ENDPROC(syscall_badsys)
21719 CFI_ENDPROC
21720 /*
21721 * End of kprobes section
21722@@ -708,8 +940,15 @@ END(syscall_badsys)
21723 * normal stack and adjusts ESP with the matching offset.
21724 */
21725 /* fixup the stack */
21726- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
21727- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
21728+#ifdef CONFIG_SMP
21729+ movl PER_CPU_VAR(cpu_number), %ebx
21730+ shll $PAGE_SHIFT_asm, %ebx
21731+ addl $cpu_gdt_table, %ebx
21732+#else
21733+ movl $cpu_gdt_table, %ebx
21734+#endif
21735+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
21736+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
21737 shl $16, %eax
21738 addl %esp, %eax /* the adjusted stack pointer */
21739 pushl_cfi $__KERNEL_DS
21740@@ -762,7 +1001,7 @@ vector=vector+1
21741 .endr
21742 2: jmp common_interrupt
21743 .endr
21744-END(irq_entries_start)
21745+ENDPROC(irq_entries_start)
21746
21747 .previous
21748 END(interrupt)
21749@@ -823,7 +1062,7 @@ ENTRY(coprocessor_error)
21750 pushl_cfi $do_coprocessor_error
21751 jmp error_code
21752 CFI_ENDPROC
21753-END(coprocessor_error)
21754+ENDPROC(coprocessor_error)
21755
21756 ENTRY(simd_coprocessor_error)
21757 RING0_INT_FRAME
21758@@ -836,7 +1075,7 @@ ENTRY(simd_coprocessor_error)
21759 .section .altinstructions,"a"
21760 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
21761 .previous
21762-.section .altinstr_replacement,"ax"
21763+.section .altinstr_replacement,"a"
21764 663: pushl $do_simd_coprocessor_error
21765 664:
21766 .previous
21767@@ -845,7 +1084,7 @@ ENTRY(simd_coprocessor_error)
21768 #endif
21769 jmp error_code
21770 CFI_ENDPROC
21771-END(simd_coprocessor_error)
21772+ENDPROC(simd_coprocessor_error)
21773
21774 ENTRY(device_not_available)
21775 RING0_INT_FRAME
21776@@ -854,18 +1093,18 @@ ENTRY(device_not_available)
21777 pushl_cfi $do_device_not_available
21778 jmp error_code
21779 CFI_ENDPROC
21780-END(device_not_available)
21781+ENDPROC(device_not_available)
21782
21783 #ifdef CONFIG_PARAVIRT
21784 ENTRY(native_iret)
21785 iret
21786 _ASM_EXTABLE(native_iret, iret_exc)
21787-END(native_iret)
21788+ENDPROC(native_iret)
21789
21790 ENTRY(native_irq_enable_sysexit)
21791 sti
21792 sysexit
21793-END(native_irq_enable_sysexit)
21794+ENDPROC(native_irq_enable_sysexit)
21795 #endif
21796
21797 ENTRY(overflow)
21798@@ -875,7 +1114,7 @@ ENTRY(overflow)
21799 pushl_cfi $do_overflow
21800 jmp error_code
21801 CFI_ENDPROC
21802-END(overflow)
21803+ENDPROC(overflow)
21804
21805 ENTRY(bounds)
21806 RING0_INT_FRAME
21807@@ -884,7 +1123,7 @@ ENTRY(bounds)
21808 pushl_cfi $do_bounds
21809 jmp error_code
21810 CFI_ENDPROC
21811-END(bounds)
21812+ENDPROC(bounds)
21813
21814 ENTRY(invalid_op)
21815 RING0_INT_FRAME
21816@@ -893,7 +1132,7 @@ ENTRY(invalid_op)
21817 pushl_cfi $do_invalid_op
21818 jmp error_code
21819 CFI_ENDPROC
21820-END(invalid_op)
21821+ENDPROC(invalid_op)
21822
21823 ENTRY(coprocessor_segment_overrun)
21824 RING0_INT_FRAME
21825@@ -902,7 +1141,7 @@ ENTRY(coprocessor_segment_overrun)
21826 pushl_cfi $do_coprocessor_segment_overrun
21827 jmp error_code
21828 CFI_ENDPROC
21829-END(coprocessor_segment_overrun)
21830+ENDPROC(coprocessor_segment_overrun)
21831
21832 ENTRY(invalid_TSS)
21833 RING0_EC_FRAME
21834@@ -910,7 +1149,7 @@ ENTRY(invalid_TSS)
21835 pushl_cfi $do_invalid_TSS
21836 jmp error_code
21837 CFI_ENDPROC
21838-END(invalid_TSS)
21839+ENDPROC(invalid_TSS)
21840
21841 ENTRY(segment_not_present)
21842 RING0_EC_FRAME
21843@@ -918,7 +1157,7 @@ ENTRY(segment_not_present)
21844 pushl_cfi $do_segment_not_present
21845 jmp error_code
21846 CFI_ENDPROC
21847-END(segment_not_present)
21848+ENDPROC(segment_not_present)
21849
21850 ENTRY(stack_segment)
21851 RING0_EC_FRAME
21852@@ -926,7 +1165,7 @@ ENTRY(stack_segment)
21853 pushl_cfi $do_stack_segment
21854 jmp error_code
21855 CFI_ENDPROC
21856-END(stack_segment)
21857+ENDPROC(stack_segment)
21858
21859 ENTRY(alignment_check)
21860 RING0_EC_FRAME
21861@@ -934,7 +1173,7 @@ ENTRY(alignment_check)
21862 pushl_cfi $do_alignment_check
21863 jmp error_code
21864 CFI_ENDPROC
21865-END(alignment_check)
21866+ENDPROC(alignment_check)
21867
21868 ENTRY(divide_error)
21869 RING0_INT_FRAME
21870@@ -943,7 +1182,7 @@ ENTRY(divide_error)
21871 pushl_cfi $do_divide_error
21872 jmp error_code
21873 CFI_ENDPROC
21874-END(divide_error)
21875+ENDPROC(divide_error)
21876
21877 #ifdef CONFIG_X86_MCE
21878 ENTRY(machine_check)
21879@@ -953,7 +1192,7 @@ ENTRY(machine_check)
21880 pushl_cfi machine_check_vector
21881 jmp error_code
21882 CFI_ENDPROC
21883-END(machine_check)
21884+ENDPROC(machine_check)
21885 #endif
21886
21887 ENTRY(spurious_interrupt_bug)
21888@@ -963,7 +1202,7 @@ ENTRY(spurious_interrupt_bug)
21889 pushl_cfi $do_spurious_interrupt_bug
21890 jmp error_code
21891 CFI_ENDPROC
21892-END(spurious_interrupt_bug)
21893+ENDPROC(spurious_interrupt_bug)
21894 /*
21895 * End of kprobes section
21896 */
21897@@ -1073,7 +1312,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
21898
21899 ENTRY(mcount)
21900 ret
21901-END(mcount)
21902+ENDPROC(mcount)
21903
21904 ENTRY(ftrace_caller)
21905 cmpl $0, function_trace_stop
21906@@ -1106,7 +1345,7 @@ ftrace_graph_call:
21907 .globl ftrace_stub
21908 ftrace_stub:
21909 ret
21910-END(ftrace_caller)
21911+ENDPROC(ftrace_caller)
21912
21913 ENTRY(ftrace_regs_caller)
21914 pushf /* push flags before compare (in cs location) */
21915@@ -1210,7 +1449,7 @@ trace:
21916 popl %ecx
21917 popl %eax
21918 jmp ftrace_stub
21919-END(mcount)
21920+ENDPROC(mcount)
21921 #endif /* CONFIG_DYNAMIC_FTRACE */
21922 #endif /* CONFIG_FUNCTION_TRACER */
21923
21924@@ -1228,7 +1467,7 @@ ENTRY(ftrace_graph_caller)
21925 popl %ecx
21926 popl %eax
21927 ret
21928-END(ftrace_graph_caller)
21929+ENDPROC(ftrace_graph_caller)
21930
21931 .globl return_to_handler
21932 return_to_handler:
21933@@ -1284,15 +1523,18 @@ error_code:
21934 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
21935 REG_TO_PTGS %ecx
21936 SET_KERNEL_GS %ecx
21937- movl $(__USER_DS), %ecx
21938+ movl $(__KERNEL_DS), %ecx
21939 movl %ecx, %ds
21940 movl %ecx, %es
21941+
21942+ pax_enter_kernel
21943+
21944 TRACE_IRQS_OFF
21945 movl %esp,%eax # pt_regs pointer
21946 call *%edi
21947 jmp ret_from_exception
21948 CFI_ENDPROC
21949-END(page_fault)
21950+ENDPROC(page_fault)
21951
21952 /*
21953 * Debug traps and NMI can happen at the one SYSENTER instruction
21954@@ -1335,7 +1577,7 @@ debug_stack_correct:
21955 call do_debug
21956 jmp ret_from_exception
21957 CFI_ENDPROC
21958-END(debug)
21959+ENDPROC(debug)
21960
21961 /*
21962 * NMI is doubly nasty. It can happen _while_ we're handling
21963@@ -1373,6 +1615,9 @@ nmi_stack_correct:
21964 xorl %edx,%edx # zero error code
21965 movl %esp,%eax # pt_regs pointer
21966 call do_nmi
21967+
21968+ pax_exit_kernel
21969+
21970 jmp restore_all_notrace
21971 CFI_ENDPROC
21972
21973@@ -1409,12 +1654,15 @@ nmi_espfix_stack:
21974 FIXUP_ESPFIX_STACK # %eax == %esp
21975 xorl %edx,%edx # zero error code
21976 call do_nmi
21977+
21978+ pax_exit_kernel
21979+
21980 RESTORE_REGS
21981 lss 12+4(%esp), %esp # back to espfix stack
21982 CFI_ADJUST_CFA_OFFSET -24
21983 jmp irq_return
21984 CFI_ENDPROC
21985-END(nmi)
21986+ENDPROC(nmi)
21987
21988 ENTRY(int3)
21989 RING0_INT_FRAME
21990@@ -1427,14 +1675,14 @@ ENTRY(int3)
21991 call do_int3
21992 jmp ret_from_exception
21993 CFI_ENDPROC
21994-END(int3)
21995+ENDPROC(int3)
21996
21997 ENTRY(general_protection)
21998 RING0_EC_FRAME
21999 pushl_cfi $do_general_protection
22000 jmp error_code
22001 CFI_ENDPROC
22002-END(general_protection)
22003+ENDPROC(general_protection)
22004
22005 #ifdef CONFIG_KVM_GUEST
22006 ENTRY(async_page_fault)
22007@@ -1443,7 +1691,7 @@ ENTRY(async_page_fault)
22008 pushl_cfi $do_async_page_fault
22009 jmp error_code
22010 CFI_ENDPROC
22011-END(async_page_fault)
22012+ENDPROC(async_page_fault)
22013 #endif
22014
22015 /*
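The entry_32.S diff ends here. Its recurring edits are END() -> ENDPROC(), which additionally emits ".type name, @function" so code symbols stay distinguishable from data, and the error_code path now loading __KERNEL_DS instead of __USER_DS into %ds/%es so that, with UDEREF, stray kernel dereferences no longer reach userland through the default segments. For reference, a quick selector decoder (standard x86 selector layout; the two constants are the 32-bit kernel's values, and this is a checking aid, not kernel code):

#include <stdio.h>
#include <stdint.h>

/* A selector is a GDT/LDT index plus a table bit and the RPL in bits 0-1. */
static void decode_selector(uint16_t sel)
{
    printf("%#x: index=%u ti=%u rpl=%u\n", sel, sel >> 3, (sel >> 2) & 1, sel & 3);
}

int main(void)
{
    decode_selector(0x7b);  /* __USER_DS:   GDT entry 15, RPL 3 */
    decode_selector(0x68);  /* __KERNEL_DS: GDT entry 13, RPL 0 */
    return 0;
}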
22016diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
22017index b077f4c..8e0df9f 100644
22018--- a/arch/x86/kernel/entry_64.S
22019+++ b/arch/x86/kernel/entry_64.S
22020@@ -59,6 +59,8 @@
22021 #include <asm/context_tracking.h>
22022 #include <asm/smap.h>
22023 #include <linux/err.h>
22024+#include <asm/pgtable.h>
22025+#include <asm/alternative-asm.h>
22026
22027 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
22028 #include <linux/elf-em.h>
22029@@ -80,8 +82,9 @@
22030 #ifdef CONFIG_DYNAMIC_FTRACE
22031
22032 ENTRY(function_hook)
22033+ pax_force_retaddr
22034 retq
22035-END(function_hook)
22036+ENDPROC(function_hook)
22037
22038 /* skip is set if stack has been adjusted */
22039 .macro ftrace_caller_setup skip=0
22040@@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
22041 #endif
22042
22043 GLOBAL(ftrace_stub)
22044+ pax_force_retaddr
22045 retq
22046-END(ftrace_caller)
22047+ENDPROC(ftrace_caller)
22048
22049 ENTRY(ftrace_regs_caller)
22050 /* Save the current flags before compare (in SS location)*/
22051@@ -191,7 +195,7 @@ ftrace_restore_flags:
22052 popfq
22053 jmp ftrace_stub
22054
22055-END(ftrace_regs_caller)
22056+ENDPROC(ftrace_regs_caller)
22057
22058
22059 #else /* ! CONFIG_DYNAMIC_FTRACE */
22060@@ -212,6 +216,7 @@ ENTRY(function_hook)
22061 #endif
22062
22063 GLOBAL(ftrace_stub)
22064+ pax_force_retaddr
22065 retq
22066
22067 trace:
22068@@ -225,12 +230,13 @@ trace:
22069 #endif
22070 subq $MCOUNT_INSN_SIZE, %rdi
22071
22072+ pax_force_fptr ftrace_trace_function
22073 call *ftrace_trace_function
22074
22075 MCOUNT_RESTORE_FRAME
22076
22077 jmp ftrace_stub
22078-END(function_hook)
22079+ENDPROC(function_hook)
22080 #endif /* CONFIG_DYNAMIC_FTRACE */
22081 #endif /* CONFIG_FUNCTION_TRACER */
22082
22083@@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
22084
22085 MCOUNT_RESTORE_FRAME
22086
22087+ pax_force_retaddr
22088 retq
22089-END(ftrace_graph_caller)
22090+ENDPROC(ftrace_graph_caller)
22091
22092 GLOBAL(return_to_handler)
22093 subq $24, %rsp
22094@@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
22095 movq 8(%rsp), %rdx
22096 movq (%rsp), %rax
22097 addq $24, %rsp
22098+ pax_force_fptr %rdi
22099 jmp *%rdi
22100+ENDPROC(return_to_handler)
22101 #endif
22102
22103
22104@@ -284,6 +293,430 @@ ENTRY(native_usergs_sysret64)
22105 ENDPROC(native_usergs_sysret64)
22106 #endif /* CONFIG_PARAVIRT */
22107
22108+ .macro ljmpq sel, off
22109+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
22110+ .byte 0x48; ljmp *1234f(%rip)
22111+ .pushsection .rodata
22112+ .align 16
22113+ 1234: .quad \off; .word \sel
22114+ .popsection
22115+#else
22116+ pushq $\sel
22117+ pushq $\off
22118+ lretq
22119+#endif
22120+ .endm
22121+
22122+ .macro pax_enter_kernel
22123+ pax_set_fptr_mask
22124+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22125+ call pax_enter_kernel
22126+#endif
22127+ .endm
22128+
22129+ .macro pax_exit_kernel
22130+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22131+ call pax_exit_kernel
22132+#endif
22133+
22134+ .endm
22135+
22136+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22137+ENTRY(pax_enter_kernel)
22138+ pushq %rdi
22139+
22140+#ifdef CONFIG_PARAVIRT
22141+ PV_SAVE_REGS(CLBR_RDI)
22142+#endif
22143+
22144+#ifdef CONFIG_PAX_KERNEXEC
22145+ GET_CR0_INTO_RDI
22146+ bts $16,%rdi
22147+ jnc 3f
22148+ mov %cs,%edi
22149+ cmp $__KERNEL_CS,%edi
22150+ jnz 2f
22151+1:
22152+#endif
22153+
22154+#ifdef CONFIG_PAX_MEMORY_UDEREF
22155+ 661: jmp 111f
22156+ .pushsection .altinstr_replacement, "a"
22157+ 662: ASM_NOP2
22158+ .popsection
22159+ .pushsection .altinstructions, "a"
22160+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
22161+ .popsection
22162+ GET_CR3_INTO_RDI
22163+ cmp $0,%dil
22164+ jnz 112f
22165+ mov $__KERNEL_DS,%edi
22166+ mov %edi,%ss
22167+ jmp 111f
22168+112: cmp $1,%dil
22169+ jz 113f
22170+ ud2
22171+113: sub $4097,%rdi
22172+ bts $63,%rdi
22173+ SET_RDI_INTO_CR3
22174+ mov $__UDEREF_KERNEL_DS,%edi
22175+ mov %edi,%ss
22176+111:
22177+#endif
22178+
22179+#ifdef CONFIG_PARAVIRT
22180+ PV_RESTORE_REGS(CLBR_RDI)
22181+#endif
22182+
22183+ popq %rdi
22184+ pax_force_retaddr
22185+ retq
22186+
22187+#ifdef CONFIG_PAX_KERNEXEC
22188+2: ljmpq __KERNEL_CS,1b
22189+3: ljmpq __KERNEXEC_KERNEL_CS,4f
22190+4: SET_RDI_INTO_CR0
22191+ jmp 1b
22192+#endif
22193+ENDPROC(pax_enter_kernel)
22194+
22195+ENTRY(pax_exit_kernel)
22196+ pushq %rdi
22197+
22198+#ifdef CONFIG_PARAVIRT
22199+ PV_SAVE_REGS(CLBR_RDI)
22200+#endif
22201+
22202+#ifdef CONFIG_PAX_KERNEXEC
22203+ mov %cs,%rdi
22204+ cmp $__KERNEXEC_KERNEL_CS,%edi
22205+ jz 2f
22206+ GET_CR0_INTO_RDI
22207+ bts $16,%rdi
22208+ jnc 4f
22209+1:
22210+#endif
22211+
22212+#ifdef CONFIG_PAX_MEMORY_UDEREF
22213+ 661: jmp 111f
22214+ .pushsection .altinstr_replacement, "a"
22215+ 662: ASM_NOP2
22216+ .popsection
22217+ .pushsection .altinstructions, "a"
22218+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
22219+ .popsection
22220+ mov %ss,%edi
22221+ cmp $__UDEREF_KERNEL_DS,%edi
22222+ jnz 111f
22223+ GET_CR3_INTO_RDI
22224+ cmp $0,%dil
22225+ jz 112f
22226+ ud2
22227+112: add $4097,%rdi
22228+ bts $63,%rdi
22229+ SET_RDI_INTO_CR3
22230+ mov $__KERNEL_DS,%edi
22231+ mov %edi,%ss
22232+111:
22233+#endif
22234+
22235+#ifdef CONFIG_PARAVIRT
22236+ PV_RESTORE_REGS(CLBR_RDI);
22237+#endif
22238+
22239+ popq %rdi
22240+ pax_force_retaddr
22241+ retq
22242+
22243+#ifdef CONFIG_PAX_KERNEXEC
22244+2: GET_CR0_INTO_RDI
22245+ btr $16,%rdi
22246+ jnc 4f
22247+ ljmpq __KERNEL_CS,3f
22248+3: SET_RDI_INTO_CR0
22249+ jmp 1b
22250+4: ud2
22251+ jmp 4b
22252+#endif
22253+ENDPROC(pax_exit_kernel)
22254+#endif
22255+
22256+ .macro pax_enter_kernel_user
22257+ pax_set_fptr_mask
22258+#ifdef CONFIG_PAX_MEMORY_UDEREF
22259+ call pax_enter_kernel_user
22260+#endif
22261+ .endm
22262+
22263+ .macro pax_exit_kernel_user
22264+#ifdef CONFIG_PAX_MEMORY_UDEREF
22265+ call pax_exit_kernel_user
22266+#endif
22267+#ifdef CONFIG_PAX_RANDKSTACK
22268+ pushq %rax
22269+ pushq %r11
22270+ call pax_randomize_kstack
22271+ popq %r11
22272+ popq %rax
22273+#endif
22274+ .endm
22275+
22276+#ifdef CONFIG_PAX_MEMORY_UDEREF
22277+ENTRY(pax_enter_kernel_user)
22278+ pushq %rdi
22279+ pushq %rbx
22280+
22281+#ifdef CONFIG_PARAVIRT
22282+ PV_SAVE_REGS(CLBR_RDI)
22283+#endif
22284+
22285+ 661: jmp 111f
22286+ .pushsection .altinstr_replacement, "a"
22287+ 662: ASM_NOP2
22288+ .popsection
22289+ .pushsection .altinstructions, "a"
22290+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
22291+ .popsection
22292+ GET_CR3_INTO_RDI
22293+ cmp $1,%dil
22294+ jnz 4f
22295+ sub $4097,%rdi
22296+ bts $63,%rdi
22297+ SET_RDI_INTO_CR3
22298+ jmp 3f
22299+111:
22300+
22301+ GET_CR3_INTO_RDI
22302+ mov %rdi,%rbx
22303+ add $__START_KERNEL_map,%rbx
22304+ sub phys_base(%rip),%rbx
22305+
22306+#ifdef CONFIG_PARAVIRT
22307+ cmpl $0, pv_info+PARAVIRT_enabled
22308+ jz 1f
22309+ pushq %rdi
22310+ i = 0
22311+ .rept USER_PGD_PTRS
22312+ mov i*8(%rbx),%rsi
22313+ mov $0,%sil
22314+ lea i*8(%rbx),%rdi
22315+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
22316+ i = i + 1
22317+ .endr
22318+ popq %rdi
22319+ jmp 2f
22320+1:
22321+#endif
22322+
22323+ i = 0
22324+ .rept USER_PGD_PTRS
22325+ movb $0,i*8(%rbx)
22326+ i = i + 1
22327+ .endr
22328+
22329+2: SET_RDI_INTO_CR3
22330+
22331+#ifdef CONFIG_PAX_KERNEXEC
22332+ GET_CR0_INTO_RDI
22333+ bts $16,%rdi
22334+ SET_RDI_INTO_CR0
22335+#endif
22336+
22337+3:
22338+
22339+#ifdef CONFIG_PARAVIRT
22340+ PV_RESTORE_REGS(CLBR_RDI)
22341+#endif
22342+
22343+ popq %rbx
22344+ popq %rdi
22345+ pax_force_retaddr
22346+ retq
22347+4: ud2
22348+ENDPROC(pax_enter_kernel_user)
22349+
22350+ENTRY(pax_exit_kernel_user)
22351+ pushq %rdi
22352+ pushq %rbx
22353+
22354+#ifdef CONFIG_PARAVIRT
22355+ PV_SAVE_REGS(CLBR_RDI)
22356+#endif
22357+
22358+ GET_CR3_INTO_RDI
22359+ 661: jmp 1f
22360+ .pushsection .altinstr_replacement, "a"
22361+ 662: ASM_NOP2
22362+ .popsection
22363+ .pushsection .altinstructions, "a"
22364+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
22365+ .popsection
22366+ cmp $0,%dil
22367+ jnz 3f
22368+ add $4097,%rdi
22369+ bts $63,%rdi
22370+ SET_RDI_INTO_CR3
22371+ jmp 2f
22372+1:
22373+
22374+ mov %rdi,%rbx
22375+
22376+#ifdef CONFIG_PAX_KERNEXEC
22377+ GET_CR0_INTO_RDI
22378+ btr $16,%rdi
22379+ jnc 3f
22380+ SET_RDI_INTO_CR0
22381+#endif
22382+
22383+ add $__START_KERNEL_map,%rbx
22384+ sub phys_base(%rip),%rbx
22385+
22386+#ifdef CONFIG_PARAVIRT
22387+ cmpl $0, pv_info+PARAVIRT_enabled
22388+ jz 1f
22389+ i = 0
22390+ .rept USER_PGD_PTRS
22391+ mov i*8(%rbx),%rsi
22392+ mov $0x67,%sil
22393+ lea i*8(%rbx),%rdi
22394+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
22395+ i = i + 1
22396+ .endr
22397+ jmp 2f
22398+1:
22399+#endif
22400+
22401+ i = 0
22402+ .rept USER_PGD_PTRS
22403+ movb $0x67,i*8(%rbx)
22404+ i = i + 1
22405+ .endr
22406+2:
22407+
22408+#ifdef CONFIG_PARAVIRT
22409+ PV_RESTORE_REGS(CLBR_RDI)
22410+#endif
22411+
22412+ popq %rbx
22413+ popq %rdi
22414+ pax_force_retaddr
22415+ retq
22416+3: ud2
22417+ENDPROC(pax_exit_kernel_user)
22418+#endif
22419+
22420+ .macro pax_enter_kernel_nmi
22421+ pax_set_fptr_mask
22422+
22423+#ifdef CONFIG_PAX_KERNEXEC
22424+ GET_CR0_INTO_RDI
22425+ bts $16,%rdi
22426+ jc 110f
22427+ SET_RDI_INTO_CR0
22428+ or $2,%ebx
22429+110:
22430+#endif
22431+
22432+#ifdef CONFIG_PAX_MEMORY_UDEREF
22433+ 661: jmp 111f
22434+ .pushsection .altinstr_replacement, "a"
22435+ 662: ASM_NOP2
22436+ .popsection
22437+ .pushsection .altinstructions, "a"
22438+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
22439+ .popsection
22440+ GET_CR3_INTO_RDI
22441+ cmp $0,%dil
22442+ jz 111f
22443+ sub $4097,%rdi
22444+ or $4,%ebx
22445+ bts $63,%rdi
22446+ SET_RDI_INTO_CR3
22447+ mov $__UDEREF_KERNEL_DS,%edi
22448+ mov %edi,%ss
22449+111:
22450+#endif
22451+ .endm
22452+
22453+ .macro pax_exit_kernel_nmi
22454+#ifdef CONFIG_PAX_KERNEXEC
22455+ btr $1,%ebx
22456+ jnc 110f
22457+ GET_CR0_INTO_RDI
22458+ btr $16,%rdi
22459+ SET_RDI_INTO_CR0
22460+110:
22461+#endif
22462+
22463+#ifdef CONFIG_PAX_MEMORY_UDEREF
22464+ btr $2,%ebx
22465+ jnc 111f
22466+ GET_CR3_INTO_RDI
22467+ add $4097,%rdi
22468+ bts $63,%rdi
22469+ SET_RDI_INTO_CR3
22470+ mov $__KERNEL_DS,%edi
22471+ mov %edi,%ss
22472+111:
22473+#endif
22474+ .endm
22475+
22476+ .macro pax_erase_kstack
22477+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22478+ call pax_erase_kstack
22479+#endif
22480+ .endm
22481+
22482+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22483+ENTRY(pax_erase_kstack)
22484+ pushq %rdi
22485+ pushq %rcx
22486+ pushq %rax
22487+ pushq %r11
22488+
22489+ GET_THREAD_INFO(%r11)
22490+ mov TI_lowest_stack(%r11), %rdi
22491+ mov $-0xBEEF, %rax
22492+ std
22493+
22494+1: mov %edi, %ecx
22495+ and $THREAD_SIZE_asm - 1, %ecx
22496+ shr $3, %ecx
22497+ repne scasq
22498+ jecxz 2f
22499+
22500+ cmp $2*8, %ecx
22501+ jc 2f
22502+
22503+ mov $2*8, %ecx
22504+ repe scasq
22505+ jecxz 2f
22506+ jne 1b
22507+
22508+2: cld
22509+ mov %esp, %ecx
22510+ sub %edi, %ecx
22511+
22512+ cmp $THREAD_SIZE_asm, %rcx
22513+ jb 3f
22514+ ud2
22515+3:
22516+
22517+ shr $3, %ecx
22518+ rep stosq
22519+
22520+ mov TI_task_thread_sp0(%r11), %rdi
22521+ sub $256, %rdi
22522+ mov %rdi, TI_lowest_stack(%r11)
22523+
22524+ popq %r11
22525+ popq %rax
22526+ popq %rcx
22527+ popq %rdi
22528+ pax_force_retaddr
22529+ ret
22530+ENDPROC(pax_erase_kstack)
22531+#endif
22532
22533 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
22534 #ifdef CONFIG_TRACE_IRQFLAGS
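The pax_enter_kernel path in the hunk above leans on a BTS idiom worth spelling out: "bts $16,%rdi" sets CR0.WP in the copied register and leaves the bit's previous value in CF, so "jnc" branches exactly when write-protection had been opened and CR0 must be written back (via the far jump through the alternate __KERNEXEC_KERNEL_CS segment). A small C model of that flag behavior follows; it is a reading of the assembly, not kernel code, and the CR0 value is merely a typical one.

#include <stdio.h>
#include <stdint.h>

#define X86_CR0_WP (1ULL << 16)

/* bts: set the bit, return its previous value (what the CPU leaves in CF) */
static int bts(uint64_t *word, unsigned bit)
{
    int old = (int)((*word >> bit) & 1);
    *word |= 1ULL << bit;
    return old;
}

int main(void)
{
    uint64_t cr0 = 0x80050033;               /* PG|AM|WP|NE|ET|MP|PE */
    printf("WP was %d\n", bts(&cr0, 16));    /* 1: fast path, skip CR0 reload */
    cr0 &= ~X86_CR0_WP;                      /* a KERNEXEC write window was open */
    printf("WP was %d\n", bts(&cr0, 16));    /* 0: jnc taken, CR0 written back */
    return 0;
}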
22535@@ -320,7 +753,7 @@ ENDPROC(native_usergs_sysret64)
22536 .endm
22537
22538 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
22539- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
22540+ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
22541 jnc 1f
22542 TRACE_IRQS_ON_DEBUG
22543 1:
22544@@ -358,27 +791,6 @@ ENDPROC(native_usergs_sysret64)
22545 movq \tmp,R11+\offset(%rsp)
22546 .endm
22547
22548- .macro FAKE_STACK_FRAME child_rip
22549- /* push in order ss, rsp, eflags, cs, rip */
22550- xorl %eax, %eax
22551- pushq_cfi $__KERNEL_DS /* ss */
22552- /*CFI_REL_OFFSET ss,0*/
22553- pushq_cfi %rax /* rsp */
22554- CFI_REL_OFFSET rsp,0
22555- pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
22556- /*CFI_REL_OFFSET rflags,0*/
22557- pushq_cfi $__KERNEL_CS /* cs */
22558- /*CFI_REL_OFFSET cs,0*/
22559- pushq_cfi \child_rip /* rip */
22560- CFI_REL_OFFSET rip,0
22561- pushq_cfi %rax /* orig rax */
22562- .endm
22563-
22564- .macro UNFAKE_STACK_FRAME
22565- addq $8*6, %rsp
22566- CFI_ADJUST_CFA_OFFSET -(6*8)
22567- .endm
22568-
22569 /*
22570 * initial frame state for interrupts (and exceptions without error code)
22571 */
22572@@ -445,25 +857,26 @@ ENDPROC(native_usergs_sysret64)
22573 /* save partial stack frame */
22574 .macro SAVE_ARGS_IRQ
22575 cld
22576- /* start from rbp in pt_regs and jump over */
22577- movq_cfi rdi, (RDI-RBP)
22578- movq_cfi rsi, (RSI-RBP)
22579- movq_cfi rdx, (RDX-RBP)
22580- movq_cfi rcx, (RCX-RBP)
22581- movq_cfi rax, (RAX-RBP)
22582- movq_cfi r8, (R8-RBP)
22583- movq_cfi r9, (R9-RBP)
22584- movq_cfi r10, (R10-RBP)
22585- movq_cfi r11, (R11-RBP)
22586+ /* start from r15 in pt_regs and jump over */
22587+ movq_cfi rdi, RDI
22588+ movq_cfi rsi, RSI
22589+ movq_cfi rdx, RDX
22590+ movq_cfi rcx, RCX
22591+ movq_cfi rax, RAX
22592+ movq_cfi r8, R8
22593+ movq_cfi r9, R9
22594+ movq_cfi r10, R10
22595+ movq_cfi r11, R11
22596+ movq_cfi r12, R12
22597
22598 /* Save rbp so that we can unwind from get_irq_regs() */
22599- movq_cfi rbp, 0
22600+ movq_cfi rbp, RBP
22601
22602 /* Save previous stack value */
22603 movq %rsp, %rsi
22604
22605- leaq -RBP(%rsp),%rdi /* arg1 for handler */
22606- testl $3, CS-RBP(%rsi)
22607+ movq %rsp,%rdi /* arg1 for handler */
22608+ testb $3, CS(%rsi)
22609 je 1f
22610 SWAPGS
22611 /*
22612@@ -514,9 +927,10 @@ ENTRY(save_paranoid)
22613 js 1f /* negative -> in kernel */
22614 SWAPGS
22615 xorl %ebx,%ebx
22616-1: ret
22617+1: pax_force_retaddr_bts
22618+ ret
22619 CFI_ENDPROC
22620-END(save_paranoid)
22621+ENDPROC(save_paranoid)
22622 .popsection
22623
22624 /*
22625@@ -538,7 +952,7 @@ ENTRY(ret_from_fork)
22626
22627 RESTORE_REST
22628
22629- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
22630+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
22631 jz 1f
22632
22633 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
22634@@ -548,15 +962,13 @@ ENTRY(ret_from_fork)
22635 jmp ret_from_sys_call # go to the SYSRET fastpath
22636
22637 1:
22638- subq $REST_SKIP, %rsp # leave space for volatiles
22639- CFI_ADJUST_CFA_OFFSET REST_SKIP
22640 movq %rbp, %rdi
22641 call *%rbx
22642 movl $0, RAX(%rsp)
22643 RESTORE_REST
22644 jmp int_ret_from_sys_call
22645 CFI_ENDPROC
22646-END(ret_from_fork)
22647+ENDPROC(ret_from_fork)
22648
22649 /*
22650 * System call entry. Up to 6 arguments in registers are supported.
22651@@ -593,7 +1005,7 @@ END(ret_from_fork)
22652 ENTRY(system_call)
22653 CFI_STARTPROC simple
22654 CFI_SIGNAL_FRAME
22655- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
22656+ CFI_DEF_CFA rsp,0
22657 CFI_REGISTER rip,rcx
22658 /*CFI_REGISTER rflags,r11*/
22659 SWAPGS_UNSAFE_STACK
22660@@ -606,16 +1018,23 @@ GLOBAL(system_call_after_swapgs)
22661
22662 movq %rsp,PER_CPU_VAR(old_rsp)
22663 movq PER_CPU_VAR(kernel_stack),%rsp
22664+ SAVE_ARGS 8*6,0
22665+ pax_enter_kernel_user
22666+
22667+#ifdef CONFIG_PAX_RANDKSTACK
22668+ pax_erase_kstack
22669+#endif
22670+
22671 /*
22672 * No need to follow this irqs off/on section - it's straight
22673 * and short:
22674 */
22675 ENABLE_INTERRUPTS(CLBR_NONE)
22676- SAVE_ARGS 8,0
22677 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
22678 movq %rcx,RIP-ARGOFFSET(%rsp)
22679 CFI_REL_OFFSET rip,RIP-ARGOFFSET
22680- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
22681+ GET_THREAD_INFO(%rcx)
22682+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
22683 jnz tracesys
22684 system_call_fastpath:
22685 #if __SYSCALL_MASK == ~0
22686@@ -639,10 +1058,13 @@ sysret_check:
22687 LOCKDEP_SYS_EXIT
22688 DISABLE_INTERRUPTS(CLBR_NONE)
22689 TRACE_IRQS_OFF
22690- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
22691+ GET_THREAD_INFO(%rcx)
22692+ movl TI_flags(%rcx),%edx
22693 andl %edi,%edx
22694 jnz sysret_careful
22695 CFI_REMEMBER_STATE
22696+ pax_exit_kernel_user
22697+ pax_erase_kstack
22698 /*
22699 * sysretq will re-enable interrupts:
22700 */
22701@@ -701,6 +1123,9 @@ auditsys:
22702 movq %rax,%rsi /* 2nd arg: syscall number */
22703 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
22704 call __audit_syscall_entry
22705+
22706+ pax_erase_kstack
22707+
22708 LOAD_ARGS 0 /* reload call-clobbered registers */
22709 jmp system_call_fastpath
22710
22711@@ -722,7 +1147,7 @@ sysret_audit:
22712 /* Do syscall tracing */
22713 tracesys:
22714 #ifdef CONFIG_AUDITSYSCALL
22715- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
22716+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
22717 jz auditsys
22718 #endif
22719 SAVE_REST
22720@@ -730,12 +1155,15 @@ tracesys:
22721 FIXUP_TOP_OF_STACK %rdi
22722 movq %rsp,%rdi
22723 call syscall_trace_enter
22724+
22725+ pax_erase_kstack
22726+
22727 /*
22728 * Reload arg registers from stack in case ptrace changed them.
22729 * We don't reload %rax because syscall_trace_enter() returned
22730 * the value it wants us to use in the table lookup.
22731 */
22732- LOAD_ARGS ARGOFFSET, 1
22733+ LOAD_ARGS 1
22734 RESTORE_REST
22735 #if __SYSCALL_MASK == ~0
22736 cmpq $__NR_syscall_max,%rax
22737@@ -765,7 +1193,9 @@ GLOBAL(int_with_check)
22738 andl %edi,%edx
22739 jnz int_careful
22740 andl $~TS_COMPAT,TI_status(%rcx)
22741- jmp retint_swapgs
22742+ pax_exit_kernel_user
22743+ pax_erase_kstack
22744+ jmp retint_swapgs_pax
22745
22746 /* Either reschedule or signal or syscall exit tracking needed. */
22747 /* First do a reschedule test. */
22748@@ -811,7 +1241,7 @@ int_restore_rest:
22749 TRACE_IRQS_OFF
22750 jmp int_with_check
22751 CFI_ENDPROC
22752-END(system_call)
22753+ENDPROC(system_call)
22754
22755 .macro FORK_LIKE func
22756 ENTRY(stub_\func)
22757@@ -824,9 +1254,10 @@ ENTRY(stub_\func)
22758 DEFAULT_FRAME 0 8 /* offset 8: return address */
22759 call sys_\func
22760 RESTORE_TOP_OF_STACK %r11, 8
22761- ret $REST_SKIP /* pop extended registers */
22762+ pax_force_retaddr
22763+ ret
22764 CFI_ENDPROC
22765-END(stub_\func)
22766+ENDPROC(stub_\func)
22767 .endm
22768
22769 .macro FIXED_FRAME label,func
22770@@ -836,9 +1267,10 @@ ENTRY(\label)
22771 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
22772 call \func
22773 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
22774+ pax_force_retaddr
22775 ret
22776 CFI_ENDPROC
22777-END(\label)
22778+ENDPROC(\label)
22779 .endm
22780
22781 FORK_LIKE clone
22782@@ -846,19 +1278,6 @@ END(\label)
22783 FORK_LIKE vfork
22784 FIXED_FRAME stub_iopl, sys_iopl
22785
22786-ENTRY(ptregscall_common)
22787- DEFAULT_FRAME 1 8 /* offset 8: return address */
22788- RESTORE_TOP_OF_STACK %r11, 8
22789- movq_cfi_restore R15+8, r15
22790- movq_cfi_restore R14+8, r14
22791- movq_cfi_restore R13+8, r13
22792- movq_cfi_restore R12+8, r12
22793- movq_cfi_restore RBP+8, rbp
22794- movq_cfi_restore RBX+8, rbx
22795- ret $REST_SKIP /* pop extended registers */
22796- CFI_ENDPROC
22797-END(ptregscall_common)
22798-
22799 ENTRY(stub_execve)
22800 CFI_STARTPROC
22801 addq $8, %rsp
22802@@ -870,7 +1289,7 @@ ENTRY(stub_execve)
22803 RESTORE_REST
22804 jmp int_ret_from_sys_call
22805 CFI_ENDPROC
22806-END(stub_execve)
22807+ENDPROC(stub_execve)
22808
22809 /*
22810 * sigreturn is special because it needs to restore all registers on return.
22811@@ -887,7 +1306,7 @@ ENTRY(stub_rt_sigreturn)
22812 RESTORE_REST
22813 jmp int_ret_from_sys_call
22814 CFI_ENDPROC
22815-END(stub_rt_sigreturn)
22816+ENDPROC(stub_rt_sigreturn)
22817
22818 #ifdef CONFIG_X86_X32_ABI
22819 ENTRY(stub_x32_rt_sigreturn)
22820@@ -901,7 +1320,7 @@ ENTRY(stub_x32_rt_sigreturn)
22821 RESTORE_REST
22822 jmp int_ret_from_sys_call
22823 CFI_ENDPROC
22824-END(stub_x32_rt_sigreturn)
22825+ENDPROC(stub_x32_rt_sigreturn)
22826
22827 ENTRY(stub_x32_execve)
22828 CFI_STARTPROC
22829@@ -915,7 +1334,7 @@ ENTRY(stub_x32_execve)
22830 RESTORE_REST
22831 jmp int_ret_from_sys_call
22832 CFI_ENDPROC
22833-END(stub_x32_execve)
22834+ENDPROC(stub_x32_execve)
22835
22836 #endif
22837
22838@@ -952,7 +1371,7 @@ vector=vector+1
22839 2: jmp common_interrupt
22840 .endr
22841 CFI_ENDPROC
22842-END(irq_entries_start)
22843+ENDPROC(irq_entries_start)
22844
22845 .previous
22846 END(interrupt)
22847@@ -969,9 +1388,19 @@ END(interrupt)
22848 /* 0(%rsp): ~(interrupt number) */
22849 .macro interrupt func
22850 /* reserve pt_regs for scratch regs and rbp */
22851- subq $ORIG_RAX-RBP, %rsp
22852- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
22853+ subq $ORIG_RAX, %rsp
22854+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
22855 SAVE_ARGS_IRQ
22856+#ifdef CONFIG_PAX_MEMORY_UDEREF
22857+ testb $3, CS(%rdi)
22858+ jnz 1f
22859+ pax_enter_kernel
22860+ jmp 2f
22861+1: pax_enter_kernel_user
22862+2:
22863+#else
22864+ pax_enter_kernel
22865+#endif
22866 call \func
22867 .endm
22868
22869@@ -997,14 +1426,14 @@ ret_from_intr:
22870
22871 /* Restore saved previous stack */
22872 popq %rsi
22873- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
22874- leaq ARGOFFSET-RBP(%rsi), %rsp
22875+ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
22876+ movq %rsi, %rsp
22877 CFI_DEF_CFA_REGISTER rsp
22878- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
22879+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
22880
22881 exit_intr:
22882 GET_THREAD_INFO(%rcx)
22883- testl $3,CS-ARGOFFSET(%rsp)
22884+ testb $3,CS-ARGOFFSET(%rsp)
22885 je retint_kernel
22886
22887 /* Interrupt came from user space */
22888@@ -1026,12 +1455,16 @@ retint_swapgs: /* return to user-space */
22889 * The iretq could re-enable interrupts:
22890 */
22891 DISABLE_INTERRUPTS(CLBR_ANY)
22892+ pax_exit_kernel_user
22893+retint_swapgs_pax:
22894 TRACE_IRQS_IRETQ
22895 SWAPGS
22896 jmp restore_args
22897
22898 retint_restore_args: /* return to kernel space */
22899 DISABLE_INTERRUPTS(CLBR_ANY)
22900+ pax_exit_kernel
22901+ pax_force_retaddr (RIP-ARGOFFSET)
22902 /*
22903 * The iretq could re-enable interrupts:
22904 */
22905@@ -1114,7 +1547,7 @@ ENTRY(retint_kernel)
22906 #endif
22907
22908 CFI_ENDPROC
22909-END(common_interrupt)
22910+ENDPROC(common_interrupt)
22911 /*
22912 * End of kprobes section
22913 */
22914@@ -1132,7 +1565,7 @@ ENTRY(\sym)
22915 interrupt \do_sym
22916 jmp ret_from_intr
22917 CFI_ENDPROC
22918-END(\sym)
22919+ENDPROC(\sym)
22920 .endm
22921
22922 #ifdef CONFIG_TRACING
22923@@ -1215,12 +1648,22 @@ ENTRY(\sym)
22924 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
22925 call error_entry
22926 DEFAULT_FRAME 0
22927+#ifdef CONFIG_PAX_MEMORY_UDEREF
22928+ testb $3, CS(%rsp)
22929+ jnz 1f
22930+ pax_enter_kernel
22931+ jmp 2f
22932+1: pax_enter_kernel_user
22933+2:
22934+#else
22935+ pax_enter_kernel
22936+#endif
22937 movq %rsp,%rdi /* pt_regs pointer */
22938 xorl %esi,%esi /* no error code */
22939 call \do_sym
22940 jmp error_exit /* %ebx: no swapgs flag */
22941 CFI_ENDPROC
22942-END(\sym)
22943+ENDPROC(\sym)
22944 .endm
22945
22946 .macro paranoidzeroentry sym do_sym
22947@@ -1233,15 +1676,25 @@ ENTRY(\sym)
22948 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
22949 call save_paranoid
22950 TRACE_IRQS_OFF
22951+#ifdef CONFIG_PAX_MEMORY_UDEREF
22952+ testb $3, CS(%rsp)
22953+ jnz 1f
22954+ pax_enter_kernel
22955+ jmp 2f
22956+1: pax_enter_kernel_user
22957+2:
22958+#else
22959+ pax_enter_kernel
22960+#endif
22961 movq %rsp,%rdi /* pt_regs pointer */
22962 xorl %esi,%esi /* no error code */
22963 call \do_sym
22964 jmp paranoid_exit /* %ebx: no swapgs flag */
22965 CFI_ENDPROC
22966-END(\sym)
22967+ENDPROC(\sym)
22968 .endm
22969
22970-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
22971+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
22972 .macro paranoidzeroentry_ist sym do_sym ist
22973 ENTRY(\sym)
22974 INTR_FRAME
22975@@ -1252,14 +1705,30 @@ ENTRY(\sym)
22976 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
22977 call save_paranoid
22978 TRACE_IRQS_OFF_DEBUG
22979+#ifdef CONFIG_PAX_MEMORY_UDEREF
22980+ testb $3, CS(%rsp)
22981+ jnz 1f
22982+ pax_enter_kernel
22983+ jmp 2f
22984+1: pax_enter_kernel_user
22985+2:
22986+#else
22987+ pax_enter_kernel
22988+#endif
22989 movq %rsp,%rdi /* pt_regs pointer */
22990 xorl %esi,%esi /* no error code */
22991+#ifdef CONFIG_SMP
22992+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
22993+ lea init_tss(%r13), %r13
22994+#else
22995+ lea init_tss(%rip), %r13
22996+#endif
22997 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
22998 call \do_sym
22999 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
23000 jmp paranoid_exit /* %ebx: no swapgs flag */
23001 CFI_ENDPROC
23002-END(\sym)
23003+ENDPROC(\sym)
23004 .endm
23005
23006 .macro errorentry sym do_sym
23007@@ -1271,13 +1740,23 @@ ENTRY(\sym)
23008 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
23009 call error_entry
23010 DEFAULT_FRAME 0
23011+#ifdef CONFIG_PAX_MEMORY_UDEREF
23012+ testb $3, CS(%rsp)
23013+ jnz 1f
23014+ pax_enter_kernel
23015+ jmp 2f
23016+1: pax_enter_kernel_user
23017+2:
23018+#else
23019+ pax_enter_kernel
23020+#endif
23021 movq %rsp,%rdi /* pt_regs pointer */
23022 movq ORIG_RAX(%rsp),%rsi /* get error code */
23023 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
23024 call \do_sym
23025 jmp error_exit /* %ebx: no swapgs flag */
23026 CFI_ENDPROC
23027-END(\sym)
23028+ENDPROC(\sym)
23029 .endm
23030
23031 /* error code is on the stack already */
23032@@ -1291,13 +1770,23 @@ ENTRY(\sym)
23033 call save_paranoid
23034 DEFAULT_FRAME 0
23035 TRACE_IRQS_OFF
23036+#ifdef CONFIG_PAX_MEMORY_UDEREF
23037+ testb $3, CS(%rsp)
23038+ jnz 1f
23039+ pax_enter_kernel
23040+ jmp 2f
23041+1: pax_enter_kernel_user
23042+2:
23043+#else
23044+ pax_enter_kernel
23045+#endif
23046 movq %rsp,%rdi /* pt_regs pointer */
23047 movq ORIG_RAX(%rsp),%rsi /* get error code */
23048 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
23049 call \do_sym
23050 jmp paranoid_exit /* %ebx: no swapgs flag */
23051 CFI_ENDPROC
23052-END(\sym)
23053+ENDPROC(\sym)
23054 .endm
23055
23056 zeroentry divide_error do_divide_error
23057@@ -1327,9 +1816,10 @@ gs_change:
23058 2: mfence /* workaround */
23059 SWAPGS
23060 popfq_cfi
23061+ pax_force_retaddr
23062 ret
23063 CFI_ENDPROC
23064-END(native_load_gs_index)
23065+ENDPROC(native_load_gs_index)
23066
23067 _ASM_EXTABLE(gs_change,bad_gs)
23068 .section .fixup,"ax"
23069@@ -1357,9 +1847,10 @@ ENTRY(call_softirq)
23070 CFI_DEF_CFA_REGISTER rsp
23071 CFI_ADJUST_CFA_OFFSET -8
23072 decl PER_CPU_VAR(irq_count)
23073+ pax_force_retaddr
23074 ret
23075 CFI_ENDPROC
23076-END(call_softirq)
23077+ENDPROC(call_softirq)
23078
23079 #ifdef CONFIG_XEN
23080 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
23081@@ -1397,7 +1888,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
23082 decl PER_CPU_VAR(irq_count)
23083 jmp error_exit
23084 CFI_ENDPROC
23085-END(xen_do_hypervisor_callback)
23086+ENDPROC(xen_do_hypervisor_callback)
23087
23088 /*
23089 * Hypervisor uses this for application faults while it executes.
23090@@ -1456,7 +1947,7 @@ ENTRY(xen_failsafe_callback)
23091 SAVE_ALL
23092 jmp error_exit
23093 CFI_ENDPROC
23094-END(xen_failsafe_callback)
23095+ENDPROC(xen_failsafe_callback)
23096
23097 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
23098 xen_hvm_callback_vector xen_evtchn_do_upcall
23099@@ -1508,18 +1999,33 @@ ENTRY(paranoid_exit)
23100 DEFAULT_FRAME
23101 DISABLE_INTERRUPTS(CLBR_NONE)
23102 TRACE_IRQS_OFF_DEBUG
23103- testl %ebx,%ebx /* swapgs needed? */
23104+ testl $1,%ebx /* swapgs needed? */
23105 jnz paranoid_restore
23106- testl $3,CS(%rsp)
23107+ testb $3,CS(%rsp)
23108 jnz paranoid_userspace
23109+#ifdef CONFIG_PAX_MEMORY_UDEREF
23110+ pax_exit_kernel
23111+ TRACE_IRQS_IRETQ 0
23112+ SWAPGS_UNSAFE_STACK
23113+ RESTORE_ALL 8
23114+ pax_force_retaddr_bts
23115+ jmp irq_return
23116+#endif
23117 paranoid_swapgs:
23118+#ifdef CONFIG_PAX_MEMORY_UDEREF
23119+ pax_exit_kernel_user
23120+#else
23121+ pax_exit_kernel
23122+#endif
23123 TRACE_IRQS_IRETQ 0
23124 SWAPGS_UNSAFE_STACK
23125 RESTORE_ALL 8
23126 jmp irq_return
23127 paranoid_restore:
23128+ pax_exit_kernel
23129 TRACE_IRQS_IRETQ_DEBUG 0
23130 RESTORE_ALL 8
23131+ pax_force_retaddr_bts
23132 jmp irq_return
23133 paranoid_userspace:
23134 GET_THREAD_INFO(%rcx)
23135@@ -1548,7 +2054,7 @@ paranoid_schedule:
23136 TRACE_IRQS_OFF
23137 jmp paranoid_userspace
23138 CFI_ENDPROC
23139-END(paranoid_exit)
23140+ENDPROC(paranoid_exit)
23141
23142 /*
23143 * Exception entry point. This expects an error code/orig_rax on the stack.
23144@@ -1575,12 +2081,13 @@ ENTRY(error_entry)
23145 movq_cfi r14, R14+8
23146 movq_cfi r15, R15+8
23147 xorl %ebx,%ebx
23148- testl $3,CS+8(%rsp)
23149+ testb $3,CS+8(%rsp)
23150 je error_kernelspace
23151 error_swapgs:
23152 SWAPGS
23153 error_sti:
23154 TRACE_IRQS_OFF
23155+ pax_force_retaddr_bts
23156 ret
23157
23158 /*
23159@@ -1607,7 +2114,7 @@ bstep_iret:
23160 movq %rcx,RIP+8(%rsp)
23161 jmp error_swapgs
23162 CFI_ENDPROC
23163-END(error_entry)
23164+ENDPROC(error_entry)
23165
23166
23167 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
23168@@ -1618,7 +2125,7 @@ ENTRY(error_exit)
23169 DISABLE_INTERRUPTS(CLBR_NONE)
23170 TRACE_IRQS_OFF
23171 GET_THREAD_INFO(%rcx)
23172- testl %eax,%eax
23173+ testl $1,%eax
23174 jne retint_kernel
23175 LOCKDEP_SYS_EXIT_IRQ
23176 movl TI_flags(%rcx),%edx
23177@@ -1627,7 +2134,7 @@ ENTRY(error_exit)
23178 jnz retint_careful
23179 jmp retint_swapgs
23180 CFI_ENDPROC
23181-END(error_exit)
23182+ENDPROC(error_exit)
23183
23184 /*
23185 * Test if a given stack is an NMI stack or not.
23186@@ -1685,9 +2192,11 @@ ENTRY(nmi)
23187 * If %cs was not the kernel segment, then the NMI triggered in user
23188 * space, which means it is definitely not nested.
23189 */
23190+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
23191+ je 1f
23192 cmpl $__KERNEL_CS, 16(%rsp)
23193 jne first_nmi
23194-
23195+1:
23196 /*
23197 * Check the special variable on the stack to see if NMIs are
23198 * executing.
23199@@ -1721,8 +2230,7 @@ nested_nmi:
23200
23201 1:
23202 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
23203- leaq -1*8(%rsp), %rdx
23204- movq %rdx, %rsp
23205+ subq $8, %rsp
23206 CFI_ADJUST_CFA_OFFSET 1*8
23207 leaq -10*8(%rsp), %rdx
23208 pushq_cfi $__KERNEL_DS
23209@@ -1740,6 +2248,7 @@ nested_nmi_out:
23210 CFI_RESTORE rdx
23211
23212 /* No need to check faults here */
23213+# pax_force_retaddr_bts
23214 INTERRUPT_RETURN
23215
23216 CFI_RESTORE_STATE
23217@@ -1852,9 +2361,11 @@ end_repeat_nmi:
23218 * NMI itself takes a page fault, the page fault that was preempted
23219 * will read the information from the NMI page fault and not the
23220 * origin fault. Save it off and restore it if it changes.
23221- * Use the r12 callee-saved register.
23222+ * Use the r13 callee-saved register.
23223 */
23224- movq %cr2, %r12
23225+ movq %cr2, %r13
23226+
23227+ pax_enter_kernel_nmi
23228
23229 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
23230 movq %rsp,%rdi
23231@@ -1863,31 +2374,36 @@ end_repeat_nmi:
23232
23233 /* Did the NMI take a page fault? Restore cr2 if it did */
23234 movq %cr2, %rcx
23235- cmpq %rcx, %r12
23236+ cmpq %rcx, %r13
23237 je 1f
23238- movq %r12, %cr2
23239+ movq %r13, %cr2
23240 1:
23241
23242- testl %ebx,%ebx /* swapgs needed? */
23243+ testl $1,%ebx /* swapgs needed? */
23244 jnz nmi_restore
23245 nmi_swapgs:
23246 SWAPGS_UNSAFE_STACK
23247 nmi_restore:
23248+ pax_exit_kernel_nmi
23249 /* Pop the extra iret frame at once */
23250 RESTORE_ALL 6*8
23251+ testb $3, 8(%rsp)
23252+ jnz 1f
23253+ pax_force_retaddr_bts
23254+1:
23255
23256 /* Clear the NMI executing stack variable */
23257 movq $0, 5*8(%rsp)
23258 jmp irq_return
23259 CFI_ENDPROC
23260-END(nmi)
23261+ENDPROC(nmi)
23262
23263 ENTRY(ignore_sysret)
23264 CFI_STARTPROC
23265 mov $-ENOSYS,%eax
23266 sysret
23267 CFI_ENDPROC
23268-END(ignore_sysret)
23269+ENDPROC(ignore_sysret)
23270
23271 /*
23272 * End of kprobes section
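The entry_64.S diff above adds the PaX entry/exit plumbing: KERNEXEC re-asserts CR0.WP, STACKLEAK's pax_erase_kstack poisons the used kernel stack with a sentinel, and UDEREF switches CR3 between a kernel PGD and an adjacent shadow user PGD. The CR3 convention is compact enough to model: judging from the "sub $4097" / "add $4097" pairs and the "bts $63", the user CR3 appears to be the kernel PGD's physical address plus one page, tagged with 1 in its low byte, with bit 63 acting as the PCID no-flush hint. A sketch under that assumption (not the canonical implementation):

#include <stdint.h>
#include <assert.h>

#define PAGE_SIZE   4096ULL
#define CR3_NOFLUSH (1ULL << 63)   /* with PCID: don't flush TLB on load */

/* Kernel PGD at P, shadow user PGD at P + PAGE_SIZE; the low-byte tag
 * lets the entry code tell which world it interrupted. */
static uint64_t user_cr3(uint64_t kernel_pgd_pa)
{
    return kernel_pgd_pa + PAGE_SIZE + 1;
}

static uint64_t kernel_cr3_from_user(uint64_t cr3)
{
    assert((cr3 & 0xff) == 1);                /* the ud2 paths guard this */
    return (cr3 - (PAGE_SIZE + 1)) | CR3_NOFLUSH;
}

int main(void)
{
    uint64_t kpgd = 0x1000000;                /* hypothetical PGD address */
    assert((kernel_cr3_from_user(user_cr3(kpgd)) & ~CR3_NOFLUSH) == kpgd);
    return 0;
}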
23273diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
23274index d4bdd25..912664c 100644
23275--- a/arch/x86/kernel/ftrace.c
23276+++ b/arch/x86/kernel/ftrace.c
23277@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
23278 {
23279 unsigned char replaced[MCOUNT_INSN_SIZE];
23280
23281+ ip = ktla_ktva(ip);
23282+
23283 /*
23284 * Note: Due to modules and __init, code can
23285 * disappear and change, we need to protect against faulting
23286@@ -227,7 +229,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
23287 unsigned char old[MCOUNT_INSN_SIZE], *new;
23288 int ret;
23289
23290- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
23291+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
23292 new = ftrace_call_replace(ip, (unsigned long)func);
23293
23294 /* See comment above by declaration of modifying_ftrace_code */
23295@@ -238,7 +240,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
23296 /* Also update the regs callback function */
23297 if (!ret) {
23298 ip = (unsigned long)(&ftrace_regs_call);
23299- memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
23300+ memcpy(old, ktla_ktva((void *)&ftrace_regs_call), MCOUNT_INSN_SIZE);
23301 new = ftrace_call_replace(ip, (unsigned long)func);
23302 ret = ftrace_modify_code(ip, old, new);
23303 }
23304@@ -291,7 +293,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
23305 * kernel identity mapping to modify code.
23306 */
23307 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
23308- ip = (unsigned long)__va(__pa_symbol(ip));
23309+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
23310
23311 return probe_kernel_write((void *)ip, val, size);
23312 }
23313@@ -301,7 +303,7 @@ static int add_break(unsigned long ip, const char *old)
23314 unsigned char replaced[MCOUNT_INSN_SIZE];
23315 unsigned char brk = BREAKPOINT_INSTRUCTION;
23316
23317- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
23318+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
23319 return -EFAULT;
23320
23321 /* Make sure it is what we expect it to be */
23322@@ -649,7 +651,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
23323 return ret;
23324
23325 fail_update:
23326- probe_kernel_write((void *)ip, &old_code[0], 1);
23327+ probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
23328 goto out;
23329 }
23330
23331@@ -682,6 +684,8 @@ static int ftrace_mod_jmp(unsigned long ip,
23332 {
23333 unsigned char code[MCOUNT_INSN_SIZE];
23334
23335+ ip = ktla_ktva(ip);
23336+
23337 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
23338 return -EFAULT;
23339
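Every hunk in this ftrace.c diff routes a text address through ktla_ktva() before reading or patching it. Under KERNEXEC the kernel image is reachable through more than one mapping, and code patching has to use the alias that is actually writable rather than the one instructions execute from. A rough model of the helper pair, with a made-up delta (the real macros are arch-specific and not shown in this patch excerpt):

#include <stdint.h>
#include <assert.h>

/* Hypothetical constant offset between the two text mappings. */
static const uintptr_t KERNEXEC_TEXT_DELTA = 0x01000000;

static uintptr_t ktla_ktva(uintptr_t addr) { return addr + KERNEXEC_TEXT_DELTA; }
static uintptr_t ktva_ktla(uintptr_t addr) { return addr - KERNEXEC_TEXT_DELTA; }

int main(void)
{
    uintptr_t ip = 0xc1000000;                   /* some text address */
    assert(ktva_ktla(ktla_ktva(ip)) == ip);      /* the two are inverses */
    return 0;
}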
23340diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
23341index 1be8e43..d9b9ef6 100644
23342--- a/arch/x86/kernel/head64.c
23343+++ b/arch/x86/kernel/head64.c
23344@@ -67,12 +67,12 @@ again:
23345 pgd = *pgd_p;
23346
23347 /*
23348- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
23349- * critical -- __PAGE_OFFSET would point us back into the dynamic
23350+ * The use of __early_va rather than __va here is critical:
23351+ * __va would point us back into the dynamic
23352 * range and we might end up looping forever...
23353 */
23354 if (pgd)
23355- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
23356+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
23357 else {
23358 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
23359 reset_early_page_tables();
23360@@ -82,13 +82,13 @@ again:
23361 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
23362 for (i = 0; i < PTRS_PER_PUD; i++)
23363 pud_p[i] = 0;
23364- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
23365+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
23366 }
23367 pud_p += pud_index(address);
23368 pud = *pud_p;
23369
23370 if (pud)
23371- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
23372+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
23373 else {
23374 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
23375 reset_early_page_tables();
23376@@ -98,7 +98,7 @@ again:
23377 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
23378 for (i = 0; i < PTRS_PER_PMD; i++)
23379 pmd_p[i] = 0;
23380- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
23381+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
23382 }
23383 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
23384 pmd_p[pmd_index(address)] = pmd;
23385@@ -175,7 +175,6 @@ asmlinkage void __init x86_64_start_kernel(char * real_mode_data)
23386 if (console_loglevel == 10)
23387 early_printk("Kernel alive\n");
23388
23389- clear_page(init_level4_pgt);
23390 /* set init_level4_pgt kernel high mapping*/
23391 init_level4_pgt[511] = early_level4_pgt[511];
23392
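The head64.c change is pure arithmetic cleanup: for an address inside the kernel's high mapping, virt = phys + __START_KERNEL_map - phys_base, and the open-coded form is simply replaced by the __early_va()/__pa() helpers that encapsulate it (which is also why the comment now warns about __va's dynamic range instead of __PAGE_OFFSET). A minimal model with illustrative constants:

#include <stdint.h>
#include <assert.h>

#define START_KERNEL_MAP 0xffffffff80000000ULL

static uint64_t phys_base = 0x1000000;  /* where the kernel actually landed */

static uint64_t early_va(uint64_t pa) { return pa + START_KERNEL_MAP - phys_base; }
static uint64_t early_pa(uint64_t va) { return va - START_KERNEL_MAP + phys_base; }

int main(void)
{
    uint64_t pud_table = 0x2345000;                      /* hypothetical table PA */
    assert(early_pa(early_va(pud_table)) == pud_table);  /* round trip */
    return 0;
}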
23393diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
23394index 81ba276..30c5411 100644
23395--- a/arch/x86/kernel/head_32.S
23396+++ b/arch/x86/kernel/head_32.S
23397@@ -26,6 +26,12 @@
23398 /* Physical address */
23399 #define pa(X) ((X) - __PAGE_OFFSET)
23400
23401+#ifdef CONFIG_PAX_KERNEXEC
23402+#define ta(X) (X)
23403+#else
23404+#define ta(X) ((X) - __PAGE_OFFSET)
23405+#endif
23406+
23407 /*
23408 * References to members of the new_cpu_data structure.
23409 */
23410@@ -55,11 +61,7 @@
23411 * and smaller than max_low_pfn, otherwise we will waste some page table entries
23412 */
23413
23414-#if PTRS_PER_PMD > 1
23415-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
23416-#else
23417-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
23418-#endif
23419+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
23420
23421 /* Number of possible pages in the lowmem region */
23422 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
23423@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
23424 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
23425
23426 /*
23427+ * Real beginning of normal "text" segment
23428+ */
23429+ENTRY(stext)
23430+ENTRY(_stext)
23431+
23432+/*
23433 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
23434 * %esi points to the real-mode code as a 32-bit pointer.
23435 * CS and DS must be 4 GB flat segments, but we don't depend on
23436@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
23437 * can.
23438 */
23439 __HEAD
23440+
23441+#ifdef CONFIG_PAX_KERNEXEC
23442+ jmp startup_32
23443+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
23444+.fill PAGE_SIZE-5,1,0xcc
23445+#endif
23446+
23447 ENTRY(startup_32)
23448 movl pa(stack_start),%ecx
23449
23450@@ -106,6 +121,59 @@ ENTRY(startup_32)
23451 2:
23452 leal -__PAGE_OFFSET(%ecx),%esp
23453
23454+#ifdef CONFIG_SMP
23455+ movl $pa(cpu_gdt_table),%edi
23456+ movl $__per_cpu_load,%eax
23457+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
23458+ rorl $16,%eax
23459+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
23460+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
23461+ movl $__per_cpu_end - 1,%eax
23462+ subl $__per_cpu_start,%eax
23463+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
23464+#endif
23465+
23466+#ifdef CONFIG_PAX_MEMORY_UDEREF
23467+ movl $NR_CPUS,%ecx
23468+ movl $pa(cpu_gdt_table),%edi
23469+1:
23470+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
23471+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
23472+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
23473+ addl $PAGE_SIZE_asm,%edi
23474+ loop 1b
23475+#endif
23476+
23477+#ifdef CONFIG_PAX_KERNEXEC
23478+ movl $pa(boot_gdt),%edi
23479+ movl $__LOAD_PHYSICAL_ADDR,%eax
23480+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
23481+ rorl $16,%eax
23482+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
23483+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
23484+ rorl $16,%eax
23485+
23486+ ljmp $(__BOOT_CS),$1f
23487+1:
23488+
23489+ movl $NR_CPUS,%ecx
23490+ movl $pa(cpu_gdt_table),%edi
23491+ addl $__PAGE_OFFSET,%eax
23492+1:
23493+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
23494+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
23495+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
23496+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
23497+ rorl $16,%eax
23498+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
23499+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
23500+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
23501+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
23502+ rorl $16,%eax
23503+ addl $PAGE_SIZE_asm,%edi
23504+ loop 1b
23505+#endif
23506+
23507 /*
23508 * Clear BSS first so that there are no surprises...
23509 */
23510@@ -201,8 +269,11 @@ ENTRY(startup_32)
23511 movl %eax, pa(max_pfn_mapped)
23512
23513 /* Do early initialization of the fixmap area */
23514- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
23515- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
23516+#ifdef CONFIG_COMPAT_VDSO
23517+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
23518+#else
23519+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
23520+#endif
23521 #else /* Not PAE */
23522
23523 page_pde_offset = (__PAGE_OFFSET >> 20);
23524@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
23525 movl %eax, pa(max_pfn_mapped)
23526
23527 /* Do early initialization of the fixmap area */
23528- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
23529- movl %eax,pa(initial_page_table+0xffc)
23530+#ifdef CONFIG_COMPAT_VDSO
23531+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
23532+#else
23533+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
23534+#endif
23535 #endif
23536
23537 #ifdef CONFIG_PARAVIRT
23538@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
23539 cmpl $num_subarch_entries, %eax
23540 jae bad_subarch
23541
23542- movl pa(subarch_entries)(,%eax,4), %eax
23543- subl $__PAGE_OFFSET, %eax
23544- jmp *%eax
23545+ jmp *pa(subarch_entries)(,%eax,4)
23546
23547 bad_subarch:
23548 WEAK(lguest_entry)
23549@@ -261,10 +333,10 @@ WEAK(xen_entry)
23550 __INITDATA
23551
23552 subarch_entries:
23553- .long default_entry /* normal x86/PC */
23554- .long lguest_entry /* lguest hypervisor */
23555- .long xen_entry /* Xen hypervisor */
23556- .long default_entry /* Moorestown MID */
23557+ .long ta(default_entry) /* normal x86/PC */
23558+ .long ta(lguest_entry) /* lguest hypervisor */
23559+ .long ta(xen_entry) /* Xen hypervisor */
23560+ .long ta(default_entry) /* Moorestown MID */
23561 num_subarch_entries = (. - subarch_entries) / 4
23562 .previous
23563 #else
23564@@ -354,6 +426,7 @@ default_entry:
23565 movl pa(mmu_cr4_features),%eax
23566 movl %eax,%cr4
23567
23568+#ifdef CONFIG_X86_PAE
23569 testb $X86_CR4_PAE, %al # check if PAE is enabled
23570 jz enable_paging
23571
23572@@ -382,6 +455,9 @@ default_entry:
23573 /* Make changes effective */
23574 wrmsr
23575
23576+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
23577+#endif
23578+
23579 enable_paging:
23580
23581 /*
23582@@ -449,14 +525,20 @@ is486:
23583 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
23584 movl %eax,%ss # after changing gdt.
23585
23586- movl $(__USER_DS),%eax # DS/ES contains default USER segment
23587+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
23588 movl %eax,%ds
23589 movl %eax,%es
23590
23591 movl $(__KERNEL_PERCPU), %eax
23592 movl %eax,%fs # set this cpu's percpu
23593
23594+#ifdef CONFIG_CC_STACKPROTECTOR
23595 movl $(__KERNEL_STACK_CANARY),%eax
23596+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
23597+ movl $(__USER_DS),%eax
23598+#else
23599+ xorl %eax,%eax
23600+#endif
23601 movl %eax,%gs
23602
23603 xorl %eax,%eax # Clear LDT
23604@@ -512,8 +594,11 @@ setup_once:
23605 * relocation. Manually set base address in stack canary
23606 * segment descriptor.
23607 */
23608- movl $gdt_page,%eax
23609+ movl $cpu_gdt_table,%eax
23610 movl $stack_canary,%ecx
23611+#ifdef CONFIG_SMP
23612+ addl $__per_cpu_load,%ecx
23613+#endif
23614 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
23615 shrl $16, %ecx
23616 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
23617@@ -544,7 +629,7 @@ ENDPROC(early_idt_handlers)
23618 /* This is global to keep gas from relaxing the jumps */
23619 ENTRY(early_idt_handler)
23620 cld
23621- cmpl $2,%ss:early_recursion_flag
23622+ cmpl $1,%ss:early_recursion_flag
23623 je hlt_loop
23624 incl %ss:early_recursion_flag
23625
23626@@ -582,8 +667,8 @@ ENTRY(early_idt_handler)
23627 pushl (20+6*4)(%esp) /* trapno */
23628 pushl $fault_msg
23629 call printk
23630-#endif
23631 call dump_stack
23632+#endif
23633 hlt_loop:
23634 hlt
23635 jmp hlt_loop
23636@@ -602,8 +687,11 @@ ENDPROC(early_idt_handler)
23637 /* This is the default interrupt "handler" :-) */
23638 ALIGN
23639 ignore_int:
23640- cld
23641 #ifdef CONFIG_PRINTK
23642+ cmpl $2,%ss:early_recursion_flag
23643+ je hlt_loop
23644+ incl %ss:early_recursion_flag
23645+ cld
23646 pushl %eax
23647 pushl %ecx
23648 pushl %edx
23649@@ -612,9 +700,6 @@ ignore_int:
23650 movl $(__KERNEL_DS),%eax
23651 movl %eax,%ds
23652 movl %eax,%es
23653- cmpl $2,early_recursion_flag
23654- je hlt_loop
23655- incl early_recursion_flag
23656 pushl 16(%esp)
23657 pushl 24(%esp)
23658 pushl 32(%esp)
23659@@ -648,29 +733,34 @@ ENTRY(setup_once_ref)
23660 /*
23661 * BSS section
23662 */
23663-__PAGE_ALIGNED_BSS
23664- .align PAGE_SIZE
23665 #ifdef CONFIG_X86_PAE
23666+.section .initial_pg_pmd,"a",@progbits
23667 initial_pg_pmd:
23668 .fill 1024*KPMDS,4,0
23669 #else
23670+.section .initial_page_table,"a",@progbits
23671 ENTRY(initial_page_table)
23672 .fill 1024,4,0
23673 #endif
23674+.section .initial_pg_fixmap,"a",@progbits
23675 initial_pg_fixmap:
23676 .fill 1024,4,0
23677+.section .empty_zero_page,"a",@progbits
23678 ENTRY(empty_zero_page)
23679 .fill 4096,1,0
23680+.section .swapper_pg_dir,"a",@progbits
23681 ENTRY(swapper_pg_dir)
23682+#ifdef CONFIG_X86_PAE
23683+ .fill 4,8,0
23684+#else
23685 .fill 1024,4,0
23686+#endif
23687
23688 /*
23689 * This starts the data section.
23690 */
23691 #ifdef CONFIG_X86_PAE
23692-__PAGE_ALIGNED_DATA
23693- /* Page-aligned for the benefit of paravirt? */
23694- .align PAGE_SIZE
23695+.section .initial_page_table,"a",@progbits
23696 ENTRY(initial_page_table)
23697 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
23698 # if KPMDS == 3
23699@@ -689,12 +779,20 @@ ENTRY(initial_page_table)
23700 # error "Kernel PMDs should be 1, 2 or 3"
23701 # endif
23702 .align PAGE_SIZE /* needs to be page-sized too */
23703+
23704+#ifdef CONFIG_PAX_PER_CPU_PGD
23705+ENTRY(cpu_pgd)
23706+ .rept 2*NR_CPUS
23707+ .fill 4,8,0
23708+ .endr
23709+#endif
23710+
23711 #endif
23712
23713 .data
23714 .balign 4
23715 ENTRY(stack_start)
23716- .long init_thread_union+THREAD_SIZE
23717+ .long init_thread_union+THREAD_SIZE-8
23718
23719 __INITRODATA
23720 int_msg:
23721@@ -722,7 +820,7 @@ fault_msg:
23722 * segment size, and 32-bit linear address value:
23723 */
23724
23725- .data
23726+.section .rodata,"a",@progbits
23727 .globl boot_gdt_descr
23728 .globl idt_descr
23729
23730@@ -731,7 +829,7 @@ fault_msg:
23731 .word 0 # 32 bit align gdt_desc.address
23732 boot_gdt_descr:
23733 .word __BOOT_DS+7
23734- .long boot_gdt - __PAGE_OFFSET
23735+ .long pa(boot_gdt)
23736
23737 .word 0 # 32-bit align idt_desc.address
23738 idt_descr:
23739@@ -742,7 +840,7 @@ idt_descr:
23740 .word 0 # 32 bit align gdt_desc.address
23741 ENTRY(early_gdt_descr)
23742 .word GDT_ENTRIES*8-1
23743- .long gdt_page /* Overwritten for secondary CPUs */
23744+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
23745
23746 /*
23747 * The boot_gdt must mirror the equivalent in setup.S and is
23748@@ -751,5 +849,65 @@ ENTRY(early_gdt_descr)
23749 .align L1_CACHE_BYTES
23750 ENTRY(boot_gdt)
23751 .fill GDT_ENTRY_BOOT_CS,8,0
23752- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
23753- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
23754+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
23755+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
23756+
23757+ .align PAGE_SIZE_asm
23758+ENTRY(cpu_gdt_table)
23759+ .rept NR_CPUS
23760+ .quad 0x0000000000000000 /* NULL descriptor */
23761+ .quad 0x0000000000000000 /* 0x0b reserved */
23762+ .quad 0x0000000000000000 /* 0x13 reserved */
23763+ .quad 0x0000000000000000 /* 0x1b reserved */
23764+
23765+#ifdef CONFIG_PAX_KERNEXEC
23766+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
23767+#else
23768+ .quad 0x0000000000000000 /* 0x20 unused */
23769+#endif
23770+
23771+ .quad 0x0000000000000000 /* 0x28 unused */
23772+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
23773+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
23774+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
23775+ .quad 0x0000000000000000 /* 0x4b reserved */
23776+ .quad 0x0000000000000000 /* 0x53 reserved */
23777+ .quad 0x0000000000000000 /* 0x5b reserved */
23778+
23779+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
23780+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
23781+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
23782+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
23783+
23784+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
23785+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
23786+
23787+ /*
23788+ * Segments used for calling PnP BIOS have byte granularity.
23789+ * The code segments and data segments have fixed 64k limits,
23790+ * the transfer segment sizes are set at run time.
23791+ */
23792+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
23793+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
23794+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
23795+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
23796+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
23797+
23798+ /*
23799+ * The APM segments have byte granularity and their bases
23800+ * are set at run time. All have 64k limits.
23801+ */
23802+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
23803+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
23804+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
23805+
23806+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
23807+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
23808+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
23809+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
23810+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
23811+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
23812+
23813+ /* Be sure this is zeroed to avoid false validations in Xen */
23814+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
23815+ .endr
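The per-CPU cpu_gdt_table added above is easiest to audit with the quads unpacked. The 8-byte descriptor encoding is standard x86 (base and limit are scattered across the quad; the G bit in the flags nibble scales the limit by 4 KiB), so a tiny decoder is enough to verify entries like the kernel/user 4GB segments; this is a checking aid, not kernel code:

#include <stdio.h>
#include <stdint.h>

static void decode_gdt(uint64_t d)
{
    unsigned base   = (unsigned)(((d >> 16) & 0xffffff) | ((d >> 32) & 0xff000000));
    unsigned limit  = (unsigned)((d & 0xffff) | ((d >> 32) & 0xf0000));
    unsigned access = (unsigned)((d >> 40) & 0xff);   /* P, DPL, S, type */
    unsigned flags  = (unsigned)((d >> 52) & 0xf);    /* G, D/B, L, AVL */
    if (flags & 0x8)                                  /* G: 4 KiB granularity */
        limit = (limit << 12) | 0xfff;
    printf("base=%#010x limit=%#010x access=%#04x flags=%#x\n",
           base, limit, access, flags);
}

int main(void)
{
    decode_gdt(0x00cf9b000000ffffULL);   /* 0x60 kernel 4GB code, DPL 0 */
    decode_gdt(0x00cff3000000ffffULL);   /* 0x7b user 4GB data, DPL 3 */
    decode_gdt(0x00c0930000000000ULL);   /* 0xd0 ESPFIX SS, base set at runtime */
    return 0;
}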
23816diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
23817index e1aabdb..fee4fee 100644
23818--- a/arch/x86/kernel/head_64.S
23819+++ b/arch/x86/kernel/head_64.S
23820@@ -20,6 +20,8 @@
23821 #include <asm/processor-flags.h>
23822 #include <asm/percpu.h>
23823 #include <asm/nops.h>
23824+#include <asm/cpufeature.h>
23825+#include <asm/alternative-asm.h>
23826
23827 #ifdef CONFIG_PARAVIRT
23828 #include <asm/asm-offsets.h>
23829@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
23830 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
23831 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
23832 L3_START_KERNEL = pud_index(__START_KERNEL_map)
23833+L4_VMALLOC_START = pgd_index(VMALLOC_START)
23834+L3_VMALLOC_START = pud_index(VMALLOC_START)
23835+L4_VMALLOC_END = pgd_index(VMALLOC_END)
23836+L3_VMALLOC_END = pud_index(VMALLOC_END)
23837+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
23838+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
23839
23840 .text
23841 __HEAD
23842@@ -89,11 +97,24 @@ startup_64:
23843 * Fixup the physical addresses in the page table
23844 */
23845 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
23846+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
23847+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
23848+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
23849+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
23850+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
23851
23852- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
23853- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
23854+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
23855+#ifndef CONFIG_XEN
23856+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
23857+#endif
23858+
23859+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
23860+
23861+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
23862+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
23863
23864 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
23865+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
23866
23867 /*
23868 * Set up the identity mapping for the switchover. These
23869@@ -177,8 +198,8 @@ ENTRY(secondary_startup_64)
23870 movq $(init_level4_pgt - __START_KERNEL_map), %rax
23871 1:
23872
23873- /* Enable PAE mode and PGE */
23874- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
23875+ /* Enable PAE mode and PSE/PGE */
23876+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
23877 movq %rcx, %cr4
23878
23879 /* Setup early boot stage 4 level pagetables. */
23880@@ -199,10 +220,19 @@ ENTRY(secondary_startup_64)
23881 movl $MSR_EFER, %ecx
23882 rdmsr
23883 btsl $_EFER_SCE, %eax /* Enable System Call */
23884- btl $20,%edi /* No Execute supported? */
23885+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
23886 jnc 1f
23887 btsl $_EFER_NX, %eax
23888 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
23889+#ifndef CONFIG_EFI
23890+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
23891+#endif
23892+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
23893+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
23894+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
23895+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
23896+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
23897+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
23898 1: wrmsr /* Make changes effective */
23899
23900 /* Setup cr0 */
23901@@ -282,6 +312,7 @@ ENTRY(secondary_startup_64)
23902 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
23903 * address given in m16:64.
23904 */
23905+ pax_set_fptr_mask
23906 movq initial_code(%rip),%rax
23907 pushq $0 # fake return address to stop unwinder
23908 pushq $__KERNEL_CS # set correct cs
23909@@ -388,7 +419,7 @@ ENTRY(early_idt_handler)
23910 call dump_stack
23911 #ifdef CONFIG_KALLSYMS
23912 leaq early_idt_ripmsg(%rip),%rdi
23913- movq 40(%rsp),%rsi # %rip again
23914+ movq 88(%rsp),%rsi # %rip again
23915 call __print_symbol
23916 #endif
23917 #endif /* EARLY_PRINTK */
23918@@ -416,6 +447,7 @@ ENDPROC(early_idt_handler)
23919 early_recursion_flag:
23920 .long 0
23921
23922+ .section .rodata,"a",@progbits
23923 #ifdef CONFIG_EARLY_PRINTK
23924 early_idt_msg:
23925 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
23926@@ -443,29 +475,52 @@ NEXT_PAGE(early_level4_pgt)
23927 NEXT_PAGE(early_dynamic_pgts)
23928 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
23929
23930- .data
23931+ .section .rodata,"a",@progbits
23932
23933-#ifndef CONFIG_XEN
23934 NEXT_PAGE(init_level4_pgt)
23935- .fill 512,8,0
23936-#else
23937-NEXT_PAGE(init_level4_pgt)
23938- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
23939 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
23940 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
23941+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
23942+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
23943+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
23944+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
23945+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
23946+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
23947 .org init_level4_pgt + L4_START_KERNEL*8, 0
23948 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
23949 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
23950
23951+#ifdef CONFIG_PAX_PER_CPU_PGD
23952+NEXT_PAGE(cpu_pgd)
23953+ .rept 2*NR_CPUS
23954+ .fill 512,8,0
23955+ .endr
23956+#endif
23957+
23958 NEXT_PAGE(level3_ident_pgt)
23959 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
23960+#ifdef CONFIG_XEN
23961 .fill 511, 8, 0
23962+#else
23963+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
23964+ .fill 510,8,0
23965+#endif
23966+
23967+NEXT_PAGE(level3_vmalloc_start_pgt)
23968+ .fill 512,8,0
23969+
23970+NEXT_PAGE(level3_vmalloc_end_pgt)
23971+ .fill 512,8,0
23972+
23973+NEXT_PAGE(level3_vmemmap_pgt)
23974+ .fill L3_VMEMMAP_START,8,0
23975+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
23976+
23977 NEXT_PAGE(level2_ident_pgt)
23978- /* Since I easily can, map the first 1G.
23979+ /* Since I easily can, map the first 2G.
23980 * Don't set NX because code runs from these pages.
23981 */
23982- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
23983-#endif
23984+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
23985
23986 NEXT_PAGE(level3_kernel_pgt)
23987 .fill L3_START_KERNEL,8,0
23988@@ -473,6 +528,9 @@ NEXT_PAGE(level3_kernel_pgt)
23989 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
23990 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
23991
23992+NEXT_PAGE(level2_vmemmap_pgt)
23993+ .fill 512,8,0
23994+
23995 NEXT_PAGE(level2_kernel_pgt)
23996 /*
23997 * 512 MB kernel mapping. We spend a full page on this pagetable
23998@@ -490,28 +548,64 @@ NEXT_PAGE(level2_kernel_pgt)
23999 NEXT_PAGE(level2_fixmap_pgt)
24000 .fill 506,8,0
24001 .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
24002- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
24003- .fill 5,8,0
24004+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
24005+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
24006+ .fill 4,8,0
24007
24008 NEXT_PAGE(level1_fixmap_pgt)
24009 .fill 512,8,0
24010
24011+NEXT_PAGE(level1_vsyscall_pgt)
24012+ .fill 512,8,0
24013+
24014 #undef PMDS
24015
24016- .data
24017+ .align PAGE_SIZE
24018+ENTRY(cpu_gdt_table)
24019+ .rept NR_CPUS
24020+ .quad 0x0000000000000000 /* NULL descriptor */
24021+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
24022+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
24023+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
24024+ .quad 0x00cffb000000ffff /* __USER32_CS */
24025+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
24026+ .quad 0x00affb000000ffff /* __USER_CS */
24027+
24028+#ifdef CONFIG_PAX_KERNEXEC
24029+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
24030+#else
24031+ .quad 0x0 /* unused */
24032+#endif
24033+
24034+ .quad 0,0 /* TSS */
24035+ .quad 0,0 /* LDT */
24036+ .quad 0,0,0 /* three TLS descriptors */
24037+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
24038+ /* asm/segment.h:GDT_ENTRIES must match this */
24039+
24040+#ifdef CONFIG_PAX_MEMORY_UDEREF
24041+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
24042+#else
24043+ .quad 0x0 /* unused */
24044+#endif
24045+
24046+ /* zero the remaining page */
24047+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
24048+ .endr
24049+
24050 .align 16
24051 .globl early_gdt_descr
24052 early_gdt_descr:
24053 .word GDT_ENTRIES*8-1
24054 early_gdt_descr_base:
24055- .quad INIT_PER_CPU_VAR(gdt_page)
24056+ .quad cpu_gdt_table
24057
24058 ENTRY(phys_base)
24059 /* This must match the first entry in level2_kernel_pgt */
24060 .quad 0x0000000000000000
24061
24062 #include "../../x86/xen/xen-head.S"
24063-
24064- __PAGE_ALIGNED_BSS
24065+
24066+ .section .rodata,"a",@progbits
24067 NEXT_PAGE(empty_zero_page)
24068 .skip PAGE_SIZE
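The .org directives in the init_level4_pgt block above place each entry at the slot pgd_index() computes for the region's start address. A short sketch of that arithmetic, using the v3.12-era x86_64 layout constants (assumed here, not quoted from the patch), reproduces the slot numbers, including the 511 noted in the level3_kernel_pgt comment.

    #include <stdint.h>
    #include <stdio.h>

    #define PGDIR_SHIFT  39  /* 4-level paging: each PGD slot maps 512GB */
    #define PTRS_PER_PGD 512

    static unsigned pgd_index(uint64_t addr)
    {
        return (unsigned)((addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1));
    }

    int main(void)
    {
        /* v3.12-era x86_64 memory map constants (assumption) */
        printf("PAGE_OFFSET        -> slot %u\n", pgd_index(0xffff880000000000ULL)); /* 272 */
        printf("VMALLOC_START      -> slot %u\n", pgd_index(0xffffc90000000000ULL)); /* 402 */
        printf("VMEMMAP_START      -> slot %u\n", pgd_index(0xffffea0000000000ULL)); /* 468 */
        printf("__START_KERNEL_map -> slot %u\n", pgd_index(0xffffffff80000000ULL)); /* 511 */
        return 0;
    }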
24069diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
24070index 0fa6912..b37438b 100644
24071--- a/arch/x86/kernel/i386_ksyms_32.c
24072+++ b/arch/x86/kernel/i386_ksyms_32.c
24073@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
24074 EXPORT_SYMBOL(cmpxchg8b_emu);
24075 #endif
24076
24077+EXPORT_SYMBOL_GPL(cpu_gdt_table);
24078+
24079 /* Networking helper routines. */
24080 EXPORT_SYMBOL(csum_partial_copy_generic);
24081+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
24082+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
24083
24084 EXPORT_SYMBOL(__get_user_1);
24085 EXPORT_SYMBOL(__get_user_2);
24086@@ -37,3 +41,11 @@ EXPORT_SYMBOL(strstr);
24087
24088 EXPORT_SYMBOL(csum_partial);
24089 EXPORT_SYMBOL(empty_zero_page);
24090+
24091+#ifdef CONFIG_PAX_KERNEXEC
24092+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
24093+#endif
24094+
24095+#ifdef CONFIG_PAX_PER_CPU_PGD
24096+EXPORT_SYMBOL(cpu_pgd);
24097+#endif
24098diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
24099index 5d576ab..1403a03 100644
24100--- a/arch/x86/kernel/i387.c
24101+++ b/arch/x86/kernel/i387.c
24102@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
24103 static inline bool interrupted_user_mode(void)
24104 {
24105 struct pt_regs *regs = get_irq_regs();
24106- return regs && user_mode_vm(regs);
24107+ return regs && user_mode(regs);
24108 }
24109
24110 /*
24111diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
24112index 9a5c460..84868423 100644
24113--- a/arch/x86/kernel/i8259.c
24114+++ b/arch/x86/kernel/i8259.c
24115@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
24116 static void make_8259A_irq(unsigned int irq)
24117 {
24118 disable_irq_nosync(irq);
24119- io_apic_irqs &= ~(1<<irq);
24120+ io_apic_irqs &= ~(1UL<<irq);
24121 irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
24122 i8259A_chip.name);
24123 enable_irq(irq);
24124@@ -209,7 +209,7 @@ spurious_8259A_irq:
24125 "spurious 8259A interrupt: IRQ%d.\n", irq);
24126 spurious_irq_mask |= irqmask;
24127 }
24128- atomic_inc(&irq_err_count);
24129+ atomic_inc_unchecked(&irq_err_count);
24130 /*
24131 * Theoretically we do not have to handle this IRQ,
24132 * but in Linux this does not cause problems and is
24133@@ -333,14 +333,16 @@ static void init_8259A(int auto_eoi)
24134 /* (slave's support for AEOI in flat mode is to be investigated) */
24135 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
24136
24137+ pax_open_kernel();
24138 if (auto_eoi)
24139 /*
24140 * In AEOI mode we just have to mask the interrupt
24141 * when acking.
24142 */
24143- i8259A_chip.irq_mask_ack = disable_8259A_irq;
24144+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
24145 else
24146- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
24147+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
24148+ pax_close_kernel();
24149
24150 udelay(100); /* wait for 8259A to initialize */
24151
24152diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
24153index a979b5b..1d6db75 100644
24154--- a/arch/x86/kernel/io_delay.c
24155+++ b/arch/x86/kernel/io_delay.c
24156@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
24157 * Quirk table for systems that misbehave (lock up, etc.) if port
24158 * 0x80 is used:
24159 */
24160-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
24161+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
24162 {
24163 .callback = dmi_io_delay_0xed_port,
24164 .ident = "Compaq Presario V6000",
24165diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
24166index 4ddaf66..49d5c18 100644
24167--- a/arch/x86/kernel/ioport.c
24168+++ b/arch/x86/kernel/ioport.c
24169@@ -6,6 +6,7 @@
24170 #include <linux/sched.h>
24171 #include <linux/kernel.h>
24172 #include <linux/capability.h>
24173+#include <linux/security.h>
24174 #include <linux/errno.h>
24175 #include <linux/types.h>
24176 #include <linux/ioport.h>
24177@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
24178 return -EINVAL;
24179 if (turn_on && !capable(CAP_SYS_RAWIO))
24180 return -EPERM;
24181+#ifdef CONFIG_GRKERNSEC_IO
24182+ if (turn_on && grsec_disable_privio) {
24183+ gr_handle_ioperm();
24184+ return -ENODEV;
24185+ }
24186+#endif
24187
24188 /*
24189 * If it's the first ioperm() call in this thread's lifetime, set the
24190@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
24191 * because the ->io_bitmap_max value must match the bitmap
24192 * contents:
24193 */
24194- tss = &per_cpu(init_tss, get_cpu());
24195+ tss = init_tss + get_cpu();
24196
24197 if (turn_on)
24198 bitmap_clear(t->io_bitmap_ptr, from, num);
24199@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
24200 if (level > old) {
24201 if (!capable(CAP_SYS_RAWIO))
24202 return -EPERM;
24203+#ifdef CONFIG_GRKERNSEC_IO
24204+ if (grsec_disable_privio) {
24205+ gr_handle_iopl();
24206+ return -ENODEV;
24207+ }
24208+#endif
24209 }
24210 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
24211 t->iopl = level << 12;
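Apart from the new grsec_disable_privio gate, the iopl path is unchanged: the requested privilege level is packed into EFLAGS bits 12-13. A worked user-space example of the masking in the line above:

    #include <stdio.h>

    #define X86_EFLAGS_IOPL 0x3000UL /* EFLAGS bits 12-13 */

    int main(void)
    {
        unsigned long flags = 0x246; /* typical EFLAGS: IF|ZF|PF, IOPL=0 */
        unsigned int level = 3;      /* iopl(3): port I/O legal from ring 3 */

        flags = (flags & ~X86_EFLAGS_IOPL) | ((unsigned long)level << 12);
        printf("flags=%#lx iopl=%lu\n", flags, (flags >> 12) & 3); /* 0x3246 3 */
        return 0;
    }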
24212diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
24213index 22d0687..e07b2a5 100644
24214--- a/arch/x86/kernel/irq.c
24215+++ b/arch/x86/kernel/irq.c
24216@@ -21,7 +21,7 @@
24217 #define CREATE_TRACE_POINTS
24218 #include <asm/trace/irq_vectors.h>
24219
24220-atomic_t irq_err_count;
24221+atomic_unchecked_t irq_err_count;
24222
24223 /* Function pointer for generic interrupt vector handling */
24224 void (*x86_platform_ipi_callback)(void) = NULL;
24225@@ -125,9 +125,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
24226 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
24227 seq_printf(p, " Machine check polls\n");
24228 #endif
24229- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
24230+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
24231 #if defined(CONFIG_X86_IO_APIC)
24232- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
24233+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
24234 #endif
24235 return 0;
24236 }
24237@@ -167,7 +167,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
24238
24239 u64 arch_irq_stat(void)
24240 {
24241- u64 sum = atomic_read(&irq_err_count);
24242+ u64 sum = atomic_read_unchecked(&irq_err_count);
24243 return sum;
24244 }
24245
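irq_err_count is a pure statistics counter, so the patch moves it to atomic_unchecked_t, the PaX companion type whose operations are exempt from the overflow trap that the REFCOUNT feature adds to plain atomic_t. A conceptual sketch of the split (assumed shape, using GCC builtins rather than the real per-arch implementation):

    /* Sketch (assumption): atomic_t increments trap on signed overflow so
     * refcount bugs fault early; counters that may legitimately wrap use
     * the *_unchecked variants, which carry no such check. */
    typedef struct { int counter; } atomic_unchecked_t;

    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED); /* no trap */
    }

    static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
    {
        return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
    }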
24246diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
24247index 4186755..784efa0 100644
24248--- a/arch/x86/kernel/irq_32.c
24249+++ b/arch/x86/kernel/irq_32.c
24250@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
24251 __asm__ __volatile__("andl %%esp,%0" :
24252 "=r" (sp) : "0" (THREAD_SIZE - 1));
24253
24254- return sp < (sizeof(struct thread_info) + STACK_WARN);
24255+ return sp < STACK_WARN;
24256 }
24257
24258 static void print_stack_overflow(void)
24259@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
24260 * per-CPU IRQ handling contexts (thread information and stack)
24261 */
24262 union irq_ctx {
24263- struct thread_info tinfo;
24264- u32 stack[THREAD_SIZE/sizeof(u32)];
24265+ unsigned long previous_esp;
24266+ u32 stack[THREAD_SIZE/sizeof(u32)];
24267 } __attribute__((aligned(THREAD_SIZE)));
24268
24269 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
24270@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
24271 static inline int
24272 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
24273 {
24274- union irq_ctx *curctx, *irqctx;
24275+ union irq_ctx *irqctx;
24276 u32 *isp, arg1, arg2;
24277
24278- curctx = (union irq_ctx *) current_thread_info();
24279 irqctx = __this_cpu_read(hardirq_ctx);
24280
24281 /*
24282@@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
24283 * handler) we can't do that and just have to keep using the
24284 * current stack (which is the irq stack already after all)
24285 */
24286- if (unlikely(curctx == irqctx))
24287+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
24288 return 0;
24289
24290 /* build the stack frame on the IRQ stack */
24291- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
24292- irqctx->tinfo.task = curctx->tinfo.task;
24293- irqctx->tinfo.previous_esp = current_stack_pointer;
24294+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
24295+ irqctx->previous_esp = current_stack_pointer;
24296
24297- /* Copy the preempt_count so that the [soft]irq checks work. */
24298- irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
24299+#ifdef CONFIG_PAX_MEMORY_UDEREF
24300+ __set_fs(MAKE_MM_SEG(0));
24301+#endif
24302
24303 if (unlikely(overflow))
24304 call_on_stack(print_stack_overflow, isp);
24305@@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
24306 : "0" (irq), "1" (desc), "2" (isp),
24307 "D" (desc->handle_irq)
24308 : "memory", "cc", "ecx");
24309+
24310+#ifdef CONFIG_PAX_MEMORY_UDEREF
24311+ __set_fs(current_thread_info()->addr_limit);
24312+#endif
24313+
24314 return 1;
24315 }
24316
24317@@ -121,29 +125,12 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
24318 */
24319 void irq_ctx_init(int cpu)
24320 {
24321- union irq_ctx *irqctx;
24322-
24323 if (per_cpu(hardirq_ctx, cpu))
24324 return;
24325
24326- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
24327- THREADINFO_GFP,
24328- THREAD_SIZE_ORDER));
24329- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
24330- irqctx->tinfo.cpu = cpu;
24331- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
24332- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
24333+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
24334+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
24335
24336- per_cpu(hardirq_ctx, cpu) = irqctx;
24337-
24338- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
24339- THREADINFO_GFP,
24340- THREAD_SIZE_ORDER));
24341- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
24342- irqctx->tinfo.cpu = cpu;
24343- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
24344-
24345- per_cpu(softirq_ctx, cpu) = irqctx;
24348
24349 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
24350 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
24351@@ -152,7 +139,6 @@ void irq_ctx_init(int cpu)
24352 asmlinkage void do_softirq(void)
24353 {
24354 unsigned long flags;
24355- struct thread_info *curctx;
24356 union irq_ctx *irqctx;
24357 u32 *isp;
24358
24359@@ -162,15 +148,22 @@ asmlinkage void do_softirq(void)
24360 local_irq_save(flags);
24361
24362 if (local_softirq_pending()) {
24363- curctx = current_thread_info();
24364 irqctx = __this_cpu_read(softirq_ctx);
24365- irqctx->tinfo.task = curctx->task;
24366- irqctx->tinfo.previous_esp = current_stack_pointer;
24367+ irqctx->previous_esp = current_stack_pointer;
24368
24369 /* build the stack frame on the softirq stack */
24370- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
24371+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
24372+
24373+#ifdef CONFIG_PAX_MEMORY_UDEREF
24374+ __set_fs(MAKE_MM_SEG(0));
24375+#endif
24376
24377 call_on_stack(__do_softirq, isp);
24378+
24379+#ifdef CONFIG_PAX_MEMORY_UDEREF
24380+ __set_fs(current_thread_info()->addr_limit);
24381+#endif
24382+
24383 /*
24384 * Shouldn't happen, we returned above if in_interrupt():
24385 */
24386@@ -191,7 +184,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
24387 if (unlikely(!desc))
24388 return false;
24389
24390- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
24391+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
24392 if (unlikely(overflow))
24393 print_stack_overflow();
24394 desc->handle_irq(irq, desc);
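With struct thread_info gone from the base of the IRQ stack, "are we already on the IRQ stack?" can no longer be a pointer-equality test against current_thread_info(), so the hunk above replaces it with a distance check. A small sketch of that unsigned-compare idiom (the addresses are made up):

    #include <stdint.h>
    #include <stdio.h>

    #define THREAD_SIZE 8192UL /* assumed i386 value: two pages */

    /* On the IRQ stack iff sp lies in [irqctx, irqctx + THREAD_SIZE);
     * the unsigned subtraction wraps for sp < irqctx, rejecting it too. */
    static int on_irq_stack(uintptr_t sp, uintptr_t irqctx)
    {
        return sp - irqctx < THREAD_SIZE;
    }

    int main(void)
    {
        uintptr_t irqctx = 0xc2000000; /* made up, THREAD_SIZE aligned */

        printf("%d\n", on_irq_stack(0xc2001f00, irqctx)); /* 1: inside */
        printf("%d\n", on_irq_stack(0xc3000f00, irqctx)); /* 0: other stack */
        printf("%d\n", on_irq_stack(0xc1fff000, irqctx)); /* 0: below, wraps */
        return 0;
    }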
24395diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
24396index d04d3ec..ea4b374 100644
24397--- a/arch/x86/kernel/irq_64.c
24398+++ b/arch/x86/kernel/irq_64.c
24399@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
24400 u64 estack_top, estack_bottom;
24401 u64 curbase = (u64)task_stack_page(current);
24402
24403- if (user_mode_vm(regs))
24404+ if (user_mode(regs))
24405 return;
24406
24407 if (regs->sp >= curbase + sizeof(struct thread_info) +
24408diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
24409index ee11b7d..4df4d0c 100644
24410--- a/arch/x86/kernel/jump_label.c
24411+++ b/arch/x86/kernel/jump_label.c
24412@@ -49,7 +49,7 @@ static void __jump_label_transform(struct jump_entry *entry,
24413 * We are enabling this jump label. If it is not a nop
24414 * then something must have gone wrong.
24415 */
24416- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5) != 0))
24417+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5) != 0))
24418 bug_at((void *)entry->code, __LINE__);
24419
24420 code.jump = 0xe9;
24421@@ -64,13 +64,13 @@ static void __jump_label_transform(struct jump_entry *entry,
24422 */
24423 if (init) {
24424 const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
24425- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
24426+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
24427 bug_at((void *)entry->code, __LINE__);
24428 } else {
24429 code.jump = 0xe9;
24430 code.offset = entry->target -
24431 (entry->code + JUMP_LABEL_NOP_SIZE);
24432- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
24433+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
24434 bug_at((void *)entry->code, __LINE__);
24435 }
24436 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
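All three memcmp() sites now read the jump site through ktla_ktva(). Under KERNEXEC on i386 the kernel text is reachable at two addresses, and ktla_ktva()/ktva_ktla() convert between the address the code executes at and the alias it is read or patched through; on configurations without KERNEXEC both collapse to the identity. A sketch with a hypothetical displacement (the real value depends on the configuration):

    /* Sketch (assumption): constant-offset translation between the two
     * mappings of the kernel text under KERNEXEC. */
    #define KTXT_ALIAS_OFFSET 0x40000000UL /* hypothetical */

    #define ktla_ktva(addr) ((addr) + KTXT_ALIAS_OFFSET) /* exec -> alias */
    #define ktva_ktla(addr) ((addr) - KTXT_ALIAS_OFFSET) /* alias -> exec */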
24437diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
24438index 836f832..a8bda67 100644
24439--- a/arch/x86/kernel/kgdb.c
24440+++ b/arch/x86/kernel/kgdb.c
24441@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
24442 #ifdef CONFIG_X86_32
24443 switch (regno) {
24444 case GDB_SS:
24445- if (!user_mode_vm(regs))
24446+ if (!user_mode(regs))
24447 *(unsigned long *)mem = __KERNEL_DS;
24448 break;
24449 case GDB_SP:
24450- if (!user_mode_vm(regs))
24451+ if (!user_mode(regs))
24452 *(unsigned long *)mem = kernel_stack_pointer(regs);
24453 break;
24454 case GDB_GS:
24455@@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
24456 bp->attr.bp_addr = breakinfo[breakno].addr;
24457 bp->attr.bp_len = breakinfo[breakno].len;
24458 bp->attr.bp_type = breakinfo[breakno].type;
24459- info->address = breakinfo[breakno].addr;
24460+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
24461+ info->address = ktla_ktva(breakinfo[breakno].addr);
24462+ else
24463+ info->address = breakinfo[breakno].addr;
24464 info->len = breakinfo[breakno].len;
24465 info->type = breakinfo[breakno].type;
24466 val = arch_install_hw_breakpoint(bp);
24467@@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
24468 case 'k':
24469 /* clear the trace bit */
24470 linux_regs->flags &= ~X86_EFLAGS_TF;
24471- atomic_set(&kgdb_cpu_doing_single_step, -1);
24472+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
24473
24474 /* set the trace bit if we're stepping */
24475 if (remcomInBuffer[0] == 's') {
24476 linux_regs->flags |= X86_EFLAGS_TF;
24477- atomic_set(&kgdb_cpu_doing_single_step,
24478+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
24479 raw_smp_processor_id());
24480 }
24481
24482@@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
24483
24484 switch (cmd) {
24485 case DIE_DEBUG:
24486- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
24487+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
24488 if (user_mode(regs))
24489 return single_step_cont(regs, args);
24490 break;
24491@@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
24492 #endif /* CONFIG_DEBUG_RODATA */
24493
24494 bpt->type = BP_BREAKPOINT;
24495- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
24496+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
24497 BREAK_INSTR_SIZE);
24498 if (err)
24499 return err;
24500- err = probe_kernel_write((char *)bpt->bpt_addr,
24501+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
24502 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
24503 #ifdef CONFIG_DEBUG_RODATA
24504 if (!err)
24505@@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
24506 return -EBUSY;
24507 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
24508 BREAK_INSTR_SIZE);
24509- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
24510+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
24511 if (err)
24512 return err;
24513 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
24514@@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
24515 if (mutex_is_locked(&text_mutex))
24516 goto knl_write;
24517 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
24518- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
24519+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
24520 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
24521 goto knl_write;
24522 return err;
24523 knl_write:
24524 #endif /* CONFIG_DEBUG_RODATA */
24525- return probe_kernel_write((char *)bpt->bpt_addr,
24526+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
24527 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
24528 }
24529
24530diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
24531index 79a3f96..6ba030a 100644
24532--- a/arch/x86/kernel/kprobes/core.c
24533+++ b/arch/x86/kernel/kprobes/core.c
24534@@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
24535 s32 raddr;
24536 } __packed *insn;
24537
24538- insn = (struct __arch_relative_insn *)from;
24539+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
24540+
24541+ pax_open_kernel();
24542 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
24543 insn->op = op;
24544+ pax_close_kernel();
24545 }
24546
24547 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
24548@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
24549 kprobe_opcode_t opcode;
24550 kprobe_opcode_t *orig_opcodes = opcodes;
24551
24552- if (search_exception_tables((unsigned long)opcodes))
24553+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
24554 return 0; /* Page fault may occur on this address. */
24555
24556 retry:
24557@@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
24558 * for the first byte, we can recover the original instruction
24559 * from it and kp->opcode.
24560 */
24561- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
24562+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
24563 buf[0] = kp->opcode;
24564- return (unsigned long)buf;
24565+ return ktva_ktla((unsigned long)buf);
24566 }
24567
24568 /*
24569@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
24570 /* Another subsystem puts a breakpoint, failed to recover */
24571 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
24572 return 0;
24573+ pax_open_kernel();
24574 memcpy(dest, insn.kaddr, insn.length);
24575+ pax_close_kernel();
24576
24577 #ifdef CONFIG_X86_64
24578 if (insn_rip_relative(&insn)) {
24579@@ -359,7 +364,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
24580 return 0;
24581 }
24582 disp = (u8 *) dest + insn_offset_displacement(&insn);
24583+ pax_open_kernel();
24584 *(s32 *) disp = (s32) newdisp;
24585+ pax_close_kernel();
24586 }
24587 #endif
24588 return insn.length;
24589@@ -498,7 +505,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
24590 * nor set current_kprobe, because it doesn't use single
24591 * stepping.
24592 */
24593- regs->ip = (unsigned long)p->ainsn.insn;
24594+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
24595 preempt_enable_no_resched();
24596 return;
24597 }
24598@@ -515,9 +522,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
24599 regs->flags &= ~X86_EFLAGS_IF;
24600 /* single step inline if the instruction is an int3 */
24601 if (p->opcode == BREAKPOINT_INSTRUCTION)
24602- regs->ip = (unsigned long)p->addr;
24603+ regs->ip = ktla_ktva((unsigned long)p->addr);
24604 else
24605- regs->ip = (unsigned long)p->ainsn.insn;
24606+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
24607 }
24608
24609 /*
24610@@ -596,7 +603,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
24611 setup_singlestep(p, regs, kcb, 0);
24612 return 1;
24613 }
24614- } else if (*addr != BREAKPOINT_INSTRUCTION) {
24615+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
24616 /*
24617 * The breakpoint instruction was removed right
24618 * after we hit it. Another cpu has removed
24619@@ -642,6 +649,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
24620 " movq %rax, 152(%rsp)\n"
24621 RESTORE_REGS_STRING
24622 " popfq\n"
24623+#ifdef KERNEXEC_PLUGIN
24624+ " btsq $63,(%rsp)\n"
24625+#endif
24626 #else
24627 " pushf\n"
24628 SAVE_REGS_STRING
24629@@ -779,7 +789,7 @@ static void __kprobes
24630 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
24631 {
24632 unsigned long *tos = stack_addr(regs);
24633- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
24634+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
24635 unsigned long orig_ip = (unsigned long)p->addr;
24636 kprobe_opcode_t *insn = p->ainsn.insn;
24637
24638@@ -961,7 +971,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
24639 struct die_args *args = data;
24640 int ret = NOTIFY_DONE;
24641
24642- if (args->regs && user_mode_vm(args->regs))
24643+ if (args->regs && user_mode(args->regs))
24644 return ret;
24645
24646 switch (val) {
24647diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
24648index 898160b..758cde8 100644
24649--- a/arch/x86/kernel/kprobes/opt.c
24650+++ b/arch/x86/kernel/kprobes/opt.c
24651@@ -79,6 +79,7 @@ found:
24652 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
24653 static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
24654 {
24655+ pax_open_kernel();
24656 #ifdef CONFIG_X86_64
24657 *addr++ = 0x48;
24658 *addr++ = 0xbf;
24659@@ -86,6 +87,7 @@ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long v
24660 *addr++ = 0xb8;
24661 #endif
24662 *(unsigned long *)addr = val;
24663+ pax_close_kernel();
24664 }
24665
24666 asm (
24667@@ -335,7 +337,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
24668 * Verify if the address gap is in 2GB range, because this uses
24669 * a relative jump.
24670 */
24671- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
24672+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
24673 if (abs(rel) > 0x7fffffff)
24674 return -ERANGE;
24675
24676@@ -350,16 +352,18 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
24677 op->optinsn.size = ret;
24678
24679 /* Copy arch-dep-instance from template */
24680- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
24681+ pax_open_kernel();
24682+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
24683+ pax_close_kernel();
24684
24685 /* Set probe information */
24686 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
24687
24688 /* Set probe function call */
24689- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
24690+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
24691
24692 /* Set returning jmp instruction at the tail of out-of-line buffer */
24693- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
24694+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
24695 (u8 *)op->kp.addr + op->optinsn.size);
24696
24697 flush_icache_range((unsigned long) buf,
24698@@ -384,7 +388,7 @@ void __kprobes arch_optimize_kprobes(struct list_head *oplist)
24699 WARN_ON(kprobe_disabled(&op->kp));
24700
24701 /* Backup instructions which will be replaced by jump address */
24702- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
24703+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
24704 RELATIVE_ADDR_SIZE);
24705
24706 insn_buf[0] = RELATIVEJUMP_OPCODE;
24707@@ -433,7 +437,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
24708 /* This kprobe is really able to run optimized path. */
24709 op = container_of(p, struct optimized_kprobe, kp);
24710 /* Detour through copied instructions */
24711- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
24712+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
24713 if (!reenter)
24714 reset_current_kprobe();
24715 preempt_enable_no_resched();
24716diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
24717index ebc9873..1b9724b 100644
24718--- a/arch/x86/kernel/ldt.c
24719+++ b/arch/x86/kernel/ldt.c
24720@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
24721 if (reload) {
24722 #ifdef CONFIG_SMP
24723 preempt_disable();
24724- load_LDT(pc);
24725+ load_LDT_nolock(pc);
24726 if (!cpumask_equal(mm_cpumask(current->mm),
24727 cpumask_of(smp_processor_id())))
24728 smp_call_function(flush_ldt, current->mm, 1);
24729 preempt_enable();
24730 #else
24731- load_LDT(pc);
24732+ load_LDT_nolock(pc);
24733 #endif
24734 }
24735 if (oldsize) {
24736@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
24737 return err;
24738
24739 for (i = 0; i < old->size; i++)
24740- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
24741+ write_ldt_entry(new->ldt, i, old->ldt + i);
24742 return 0;
24743 }
24744
24745@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
24746 retval = copy_ldt(&mm->context, &old_mm->context);
24747 mutex_unlock(&old_mm->context.lock);
24748 }
24749+
24750+ if (tsk == current) {
24751+ mm->context.vdso = 0;
24752+
24753+#ifdef CONFIG_X86_32
24754+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24755+ mm->context.user_cs_base = 0UL;
24756+ mm->context.user_cs_limit = ~0UL;
24757+
24758+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
24759+ cpus_clear(mm->context.cpu_user_cs_mask);
24760+#endif
24761+
24762+#endif
24763+#endif
24764+
24765+ }
24766+
24767 return retval;
24768 }
24769
24770@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
24771 }
24772 }
24773
24774+#ifdef CONFIG_PAX_SEGMEXEC
24775+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
24776+ error = -EINVAL;
24777+ goto out_unlock;
24778+ }
24779+#endif
24780+
24781 fill_ldt(&ldt, &ldt_info);
24782 if (oldmode)
24783 ldt.avl = 0;
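The copy_ldt() change drops the "* LDT_ENTRY_SIZE" scaling: assuming old->ldt points to struct desc_struct entries of LDT_ENTRY_SIZE bytes each, C pointer arithmetic already advances by whole entries, so the removed expression scaled the offset twice and walked past the intended slot. A two-printf illustration:

    #include <stdio.h>

    struct desc_struct { unsigned int a, b; }; /* 8 bytes: one LDT entry */
    #define LDT_ENTRY_SIZE 8

    int main(void)
    {
        struct desc_struct ldt[32];
        int i = 3;

        /* pointer arithmetic scales by sizeof(*ldt) on its own: */
        printf("%td\n", (char *)(ldt + i) - (char *)ldt); /* 24 */
        /* the removed form scaled twice, overshooting entry 3: */
        printf("%td\n", (char *)(ldt + i * LDT_ENTRY_SIZE) - (char *)ldt); /* 192 */
        return 0;
    }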
24784diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
24785index 5b19e4d..6476a76 100644
24786--- a/arch/x86/kernel/machine_kexec_32.c
24787+++ b/arch/x86/kernel/machine_kexec_32.c
24788@@ -26,7 +26,7 @@
24789 #include <asm/cacheflush.h>
24790 #include <asm/debugreg.h>
24791
24792-static void set_idt(void *newidt, __u16 limit)
24793+static void set_idt(struct desc_struct *newidt, __u16 limit)
24794 {
24795 struct desc_ptr curidt;
24796
24797@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
24798 }
24799
24800
24801-static void set_gdt(void *newgdt, __u16 limit)
24802+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
24803 {
24804 struct desc_ptr curgdt;
24805
24806@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
24807 }
24808
24809 control_page = page_address(image->control_code_page);
24810- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
24811+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
24812
24813 relocate_kernel_ptr = control_page;
24814 page_list[PA_CONTROL_PAGE] = __pa(control_page);
24815diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
24816index 15c9876..0a43909 100644
24817--- a/arch/x86/kernel/microcode_core.c
24818+++ b/arch/x86/kernel/microcode_core.c
24819@@ -513,7 +513,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
24820 return NOTIFY_OK;
24821 }
24822
24823-static struct notifier_block __refdata mc_cpu_notifier = {
24824+static struct notifier_block mc_cpu_notifier = {
24825 .notifier_call = mc_cpu_callback,
24826 };
24827
24828diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
24829index 5fb2ceb..3ae90bb 100644
24830--- a/arch/x86/kernel/microcode_intel.c
24831+++ b/arch/x86/kernel/microcode_intel.c
24832@@ -293,13 +293,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
24833
24834 static int get_ucode_user(void *to, const void *from, size_t n)
24835 {
24836- return copy_from_user(to, from, n);
24837+ return copy_from_user(to, (const void __force_user *)from, n);
24838 }
24839
24840 static enum ucode_state
24841 request_microcode_user(int cpu, const void __user *buf, size_t size)
24842 {
24843- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
24844+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
24845 }
24846
24847 static void microcode_fini_cpu(int cpu)
24848diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
24849index 216a4d7..228255a 100644
24850--- a/arch/x86/kernel/module.c
24851+++ b/arch/x86/kernel/module.c
24852@@ -43,15 +43,60 @@ do { \
24853 } while (0)
24854 #endif
24855
24856-void *module_alloc(unsigned long size)
24857+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
24858 {
24859- if (PAGE_ALIGN(size) > MODULES_LEN)
24860+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
24861 return NULL;
24862 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
24863- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
24864+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
24865 -1, __builtin_return_address(0));
24866 }
24867
24868+void *module_alloc(unsigned long size)
24869+{
24870+
24871+#ifdef CONFIG_PAX_KERNEXEC
24872+ return __module_alloc(size, PAGE_KERNEL);
24873+#else
24874+ return __module_alloc(size, PAGE_KERNEL_EXEC);
24875+#endif
24876+
24877+}
24878+
24879+#ifdef CONFIG_PAX_KERNEXEC
24880+#ifdef CONFIG_X86_32
24881+void *module_alloc_exec(unsigned long size)
24882+{
24883+ struct vm_struct *area;
24884+
24885+ if (size == 0)
24886+ return NULL;
24887+
24888+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
24889+ return area ? area->addr : NULL;
24890+}
24891+EXPORT_SYMBOL(module_alloc_exec);
24892+
24893+void module_free_exec(struct module *mod, void *module_region)
24894+{
24895+ vunmap(module_region);
24896+}
24897+EXPORT_SYMBOL(module_free_exec);
24898+#else
24899+void module_free_exec(struct module *mod, void *module_region)
24900+{
24901+ module_free(mod, module_region);
24902+}
24903+EXPORT_SYMBOL(module_free_exec);
24904+
24905+void *module_alloc_exec(unsigned long size)
24906+{
24907+ return __module_alloc(size, PAGE_KERNEL_RX);
24908+}
24909+EXPORT_SYMBOL(module_alloc_exec);
24910+#endif
24911+#endif
24912+
24913 #ifdef CONFIG_X86_32
24914 int apply_relocate(Elf32_Shdr *sechdrs,
24915 const char *strtab,
24916@@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
24917 unsigned int i;
24918 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
24919 Elf32_Sym *sym;
24920- uint32_t *location;
24921+ uint32_t *plocation, location;
24922
24923 DEBUGP("Applying relocate section %u to %u\n",
24924 relsec, sechdrs[relsec].sh_info);
24925 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
24926 /* This is where to make the change */
24927- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
24928- + rel[i].r_offset;
24929+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
24930+ location = (uint32_t)plocation;
24931+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
24932+ plocation = ktla_ktva((void *)plocation);
24933 /* This is the symbol it is referring to. Note that all
24934 undefined symbols have been resolved. */
24935 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
24936@@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
24937 switch (ELF32_R_TYPE(rel[i].r_info)) {
24938 case R_386_32:
24939 /* We add the value into the location given */
24940- *location += sym->st_value;
24941+ pax_open_kernel();
24942+ *plocation += sym->st_value;
24943+ pax_close_kernel();
24944 break;
24945 case R_386_PC32:
24946 /* Add the value, subtract its position */
24947- *location += sym->st_value - (uint32_t)location;
24948+ pax_open_kernel();
24949+ *plocation += sym->st_value - location;
24950+ pax_close_kernel();
24951 break;
24952 default:
24953 pr_err("%s: Unknown relocation: %u\n",
24954@@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
24955 case R_X86_64_NONE:
24956 break;
24957 case R_X86_64_64:
24958+ pax_open_kernel();
24959 *(u64 *)loc = val;
24960+ pax_close_kernel();
24961 break;
24962 case R_X86_64_32:
24963+ pax_open_kernel();
24964 *(u32 *)loc = val;
24965+ pax_close_kernel();
24966 if (val != *(u32 *)loc)
24967 goto overflow;
24968 break;
24969 case R_X86_64_32S:
24970+ pax_open_kernel();
24971 *(s32 *)loc = val;
24972+ pax_close_kernel();
24973 if ((s64)val != *(s32 *)loc)
24974 goto overflow;
24975 break;
24976 case R_X86_64_PC32:
24977 val -= (u64)loc;
24978+ pax_open_kernel();
24979 *(u32 *)loc = val;
24980+ pax_close_kernel();
24981+
24982 #if 0
24983 if ((s64)val != *(s32 *)loc)
24984 goto overflow;
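module.c now funnels every allocation through __module_alloc(size, prot), which is what lets KERNEXEC enforce W^X on modules: plain module_alloc() returns non-executable PAGE_KERNEL memory for module data, while module_alloc_exec() hands out a separate executable-but-not-writable region for code. A condensed sketch of the same split using the three-argument __vmalloc() of this kernel era (the wrapper names are illustrative; the prot macros are the real ones):

    #include <linux/gfp.h>
    #include <linux/vmalloc.h>

    /* Sketch: no module mapping is ever writable and executable at once. */
    static void *sketch_module_alloc_data(unsigned long size)
    {
        return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);    /* RW + NX */
    }

    static void *sketch_module_alloc_code(unsigned long size)
    {
        return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL_RX); /* RX, read-only */
    }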
24985diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
24986index 88458fa..349f7a4 100644
24987--- a/arch/x86/kernel/msr.c
24988+++ b/arch/x86/kernel/msr.c
24989@@ -233,7 +233,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
24990 return notifier_from_errno(err);
24991 }
24992
24993-static struct notifier_block __refdata msr_class_cpu_notifier = {
24994+static struct notifier_block msr_class_cpu_notifier = {
24995 .notifier_call = msr_class_cpu_callback,
24996 };
24997
24998diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
24999index 6fcb49c..5b3f4ff 100644
25000--- a/arch/x86/kernel/nmi.c
25001+++ b/arch/x86/kernel/nmi.c
25002@@ -138,7 +138,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
25003 return handled;
25004 }
25005
25006-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
25007+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
25008 {
25009 struct nmi_desc *desc = nmi_to_desc(type);
25010 unsigned long flags;
25011@@ -162,9 +162,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
25012 * event confuses some handlers (kdump uses this flag)
25013 */
25014 if (action->flags & NMI_FLAG_FIRST)
25015- list_add_rcu(&action->list, &desc->head);
25016+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
25017 else
25018- list_add_tail_rcu(&action->list, &desc->head);
25019+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
25020
25021 spin_unlock_irqrestore(&desc->lock, flags);
25022 return 0;
25023@@ -187,7 +187,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
25024 if (!strcmp(n->name, name)) {
25025 WARN(in_nmi(),
25026 "Trying to free NMI (%s) from NMI context!\n", n->name);
25027- list_del_rcu(&n->list);
25028+ pax_list_del_rcu((struct list_head *)&n->list);
25029 break;
25030 }
25031 }
25032@@ -512,6 +512,17 @@ static inline void nmi_nesting_postprocess(void)
25033 dotraplinkage notrace __kprobes void
25034 do_nmi(struct pt_regs *regs, long error_code)
25035 {
25036+
25037+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25038+ if (!user_mode(regs)) {
25039+ unsigned long cs = regs->cs & 0xFFFF;
25040+ unsigned long ip = ktva_ktla(regs->ip);
25041+
25042+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
25043+ regs->ip = ip;
25044+ }
25045+#endif
25046+
25047 nmi_nesting_preprocess(regs);
25048
25049 nmi_enter();
25050diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
25051index 6d9582e..f746287 100644
25052--- a/arch/x86/kernel/nmi_selftest.c
25053+++ b/arch/x86/kernel/nmi_selftest.c
25054@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
25055 {
25056 /* trap all the unknown NMIs we may generate */
25057 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
25058- __initdata);
25059+ __initconst);
25060 }
25061
25062 static void __init cleanup_nmi_testsuite(void)
25063@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
25064 unsigned long timeout;
25065
25066 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
25067- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
25068+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
25069 nmi_fail = FAILURE;
25070 return;
25071 }
25072diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
25073index bbb6c73..24a58ef 100644
25074--- a/arch/x86/kernel/paravirt-spinlocks.c
25075+++ b/arch/x86/kernel/paravirt-spinlocks.c
25076@@ -8,7 +8,7 @@
25077
25078 #include <asm/paravirt.h>
25079
25080-struct pv_lock_ops pv_lock_ops = {
25081+struct pv_lock_ops pv_lock_ops __read_only = {
25082 #ifdef CONFIG_SMP
25083 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
25084 .unlock_kick = paravirt_nop,
25085diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
25086index 1b10af8..0b58cbc 100644
25087--- a/arch/x86/kernel/paravirt.c
25088+++ b/arch/x86/kernel/paravirt.c
25089@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
25090 {
25091 return x;
25092 }
25093+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25094+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
25095+#endif
25096
25097 void __init default_banner(void)
25098 {
25099@@ -142,15 +145,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
25100 if (opfunc == NULL)
25101 /* If there's no function, patch it with a ud2a (BUG) */
25102 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
25103- else if (opfunc == _paravirt_nop)
25104+ else if (opfunc == (void *)_paravirt_nop)
25105 /* If the operation is a nop, then nop the callsite */
25106 ret = paravirt_patch_nop();
25107
25108 /* identity functions just return their single argument */
25109- else if (opfunc == _paravirt_ident_32)
25110+ else if (opfunc == (void *)_paravirt_ident_32)
25111 ret = paravirt_patch_ident_32(insnbuf, len);
25112- else if (opfunc == _paravirt_ident_64)
25113+ else if (opfunc == (void *)_paravirt_ident_64)
25114 ret = paravirt_patch_ident_64(insnbuf, len);
25115+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25116+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
25117+ ret = paravirt_patch_ident_64(insnbuf, len);
25118+#endif
25119
25120 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
25121 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
25122@@ -175,7 +182,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
25123 if (insn_len > len || start == NULL)
25124 insn_len = len;
25125 else
25126- memcpy(insnbuf, start, insn_len);
25127+ memcpy(insnbuf, ktla_ktva(start), insn_len);
25128
25129 return insn_len;
25130 }
25131@@ -299,7 +306,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
25132 return this_cpu_read(paravirt_lazy_mode);
25133 }
25134
25135-struct pv_info pv_info = {
25136+struct pv_info pv_info __read_only = {
25137 .name = "bare hardware",
25138 .paravirt_enabled = 0,
25139 .kernel_rpl = 0,
25140@@ -310,16 +317,16 @@ struct pv_info pv_info = {
25141 #endif
25142 };
25143
25144-struct pv_init_ops pv_init_ops = {
25145+struct pv_init_ops pv_init_ops __read_only = {
25146 .patch = native_patch,
25147 };
25148
25149-struct pv_time_ops pv_time_ops = {
25150+struct pv_time_ops pv_time_ops __read_only = {
25151 .sched_clock = native_sched_clock,
25152 .steal_clock = native_steal_clock,
25153 };
25154
25155-__visible struct pv_irq_ops pv_irq_ops = {
25156+__visible struct pv_irq_ops pv_irq_ops __read_only = {
25157 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
25158 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
25159 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
25160@@ -331,7 +338,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
25161 #endif
25162 };
25163
25164-__visible struct pv_cpu_ops pv_cpu_ops = {
25165+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
25166 .cpuid = native_cpuid,
25167 .get_debugreg = native_get_debugreg,
25168 .set_debugreg = native_set_debugreg,
25169@@ -389,21 +396,26 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
25170 .end_context_switch = paravirt_nop,
25171 };
25172
25173-struct pv_apic_ops pv_apic_ops = {
25174+struct pv_apic_ops pv_apic_ops __read_only = {
25175 #ifdef CONFIG_X86_LOCAL_APIC
25176 .startup_ipi_hook = paravirt_nop,
25177 #endif
25178 };
25179
25180-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
25181+#ifdef CONFIG_X86_32
25182+#ifdef CONFIG_X86_PAE
25183+/* 64-bit pagetable entries */
25184+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
25185+#else
25186 /* 32-bit pagetable entries */
25187 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
25188+#endif
25189 #else
25190 /* 64-bit pagetable entries */
25191 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
25192 #endif
25193
25194-struct pv_mmu_ops pv_mmu_ops = {
25195+struct pv_mmu_ops pv_mmu_ops __read_only = {
25196
25197 .read_cr2 = native_read_cr2,
25198 .write_cr2 = native_write_cr2,
25199@@ -453,6 +465,7 @@ struct pv_mmu_ops pv_mmu_ops = {
25200 .make_pud = PTE_IDENT,
25201
25202 .set_pgd = native_set_pgd,
25203+ .set_pgd_batched = native_set_pgd_batched,
25204 #endif
25205 #endif /* PAGETABLE_LEVELS >= 3 */
25206
25207@@ -473,6 +486,12 @@ struct pv_mmu_ops pv_mmu_ops = {
25208 },
25209
25210 .set_fixmap = native_set_fixmap,
25211+
25212+#ifdef CONFIG_PAX_KERNEXEC
25213+ .pax_open_kernel = native_pax_open_kernel,
25214+ .pax_close_kernel = native_pax_close_kernel,
25215+#endif
25216+
25217 };
25218
25219 EXPORT_SYMBOL_GPL(pv_time_ops);
25220diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
25221index 299d493..2ccb0ee 100644
25222--- a/arch/x86/kernel/pci-calgary_64.c
25223+++ b/arch/x86/kernel/pci-calgary_64.c
25224@@ -1339,7 +1339,7 @@ static void __init get_tce_space_from_tar(void)
25225 tce_space = be64_to_cpu(readq(target));
25226 tce_space = tce_space & TAR_SW_BITS;
25227
25228- tce_space = tce_space & (~specified_table_size);
25229+ tce_space = tce_space & (~(unsigned long)specified_table_size);
25230 info->tce_space = (u64 *)__va(tce_space);
25231 }
25232 }
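The Calgary change fixes a classic integer-width bug: when specified_table_size is a 32-bit quantity, ~specified_table_size is computed in 32 bits and only then zero-extended, so the AND silently clears the upper 32 bits of the 64-bit tce_space. Widening before the complement preserves them, as this user-space example shows:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long tce_space = 0x123456789ab00000ULL;
        unsigned int specified_table_size = 0x0007ffffU;

        /* ~u32 zero-extends to ...00000000fff80000: upper bits are lost */
        printf("%#llx\n", tce_space & ~specified_table_size);                     /* 0x9ab00000 */
        /* widen first, then complement: upper bits survive */
        printf("%#llx\n", tce_space & ~(unsigned long long)specified_table_size); /* 0x123456789ab00000 */
        return 0;
    }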
25233diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
25234index 35ccf75..7a15747 100644
25235--- a/arch/x86/kernel/pci-iommu_table.c
25236+++ b/arch/x86/kernel/pci-iommu_table.c
25237@@ -2,7 +2,7 @@
25238 #include <asm/iommu_table.h>
25239 #include <linux/string.h>
25240 #include <linux/kallsyms.h>
25241-
25242+#include <linux/sched.h>
25243
25244 #define DEBUG 1
25245
25246diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
25247index 6c483ba..d10ce2f 100644
25248--- a/arch/x86/kernel/pci-swiotlb.c
25249+++ b/arch/x86/kernel/pci-swiotlb.c
25250@@ -32,7 +32,7 @@ static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
25251 void *vaddr, dma_addr_t dma_addr,
25252 struct dma_attrs *attrs)
25253 {
25254- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
25255+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
25256 }
25257
25258 static struct dma_map_ops swiotlb_dma_ops = {
25259diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
25260index 3fb8d95..254dc51 100644
25261--- a/arch/x86/kernel/process.c
25262+++ b/arch/x86/kernel/process.c
25263@@ -36,7 +36,8 @@
25264 * section. Since TSS's are completely CPU-local, we want them
25265 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
25266 */
25267-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
25268+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
25269+EXPORT_SYMBOL(init_tss);
25270
25271 #ifdef CONFIG_X86_64
25272 static DEFINE_PER_CPU(unsigned char, is_idle);
25273@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
25274 task_xstate_cachep =
25275 kmem_cache_create("task_xstate", xstate_size,
25276 __alignof__(union thread_xstate),
25277- SLAB_PANIC | SLAB_NOTRACK, NULL);
25278+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
25279 }
25280
25281 /*
25282@@ -105,7 +106,7 @@ void exit_thread(void)
25283 unsigned long *bp = t->io_bitmap_ptr;
25284
25285 if (bp) {
25286- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
25287+ struct tss_struct *tss = init_tss + get_cpu();
25288
25289 t->io_bitmap_ptr = NULL;
25290 clear_thread_flag(TIF_IO_BITMAP);
25291@@ -125,6 +126,9 @@ void flush_thread(void)
25292 {
25293 struct task_struct *tsk = current;
25294
25295+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
25296+ loadsegment(gs, 0);
25297+#endif
25298 flush_ptrace_hw_breakpoint(tsk);
25299 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
25300 drop_init_fpu(tsk);
25301@@ -271,7 +275,7 @@ static void __exit_idle(void)
25302 void exit_idle(void)
25303 {
25304 /* idle loop has pid 0 */
25305- if (current->pid)
25306+ if (task_pid_nr(current))
25307 return;
25308 __exit_idle();
25309 }
25310@@ -327,7 +331,7 @@ bool xen_set_default_idle(void)
25311 return ret;
25312 }
25313 #endif
25314-void stop_this_cpu(void *dummy)
25315+__noreturn void stop_this_cpu(void *dummy)
25316 {
25317 local_irq_disable();
25318 /*
25319@@ -456,16 +460,37 @@ static int __init idle_setup(char *str)
25320 }
25321 early_param("idle", idle_setup);
25322
25323-unsigned long arch_align_stack(unsigned long sp)
25324+#ifdef CONFIG_PAX_RANDKSTACK
25325+void pax_randomize_kstack(struct pt_regs *regs)
25326 {
25327- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
25328- sp -= get_random_int() % 8192;
25329- return sp & ~0xf;
25330-}
25331+ struct thread_struct *thread = &current->thread;
25332+ unsigned long time;
25333
25334-unsigned long arch_randomize_brk(struct mm_struct *mm)
25335-{
25336- unsigned long range_end = mm->brk + 0x02000000;
25337- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
25338-}
25339+ if (!randomize_va_space)
25340+ return;
25341+
25342+ if (v8086_mode(regs))
25343+ return;
25344
25345+ rdtscl(time);
25346+
25347+ /* P4 seems to return a 0 LSB, ignore it */
25348+#ifdef CONFIG_MPENTIUM4
25349+ time &= 0x3EUL;
25350+ time <<= 2;
25351+#elif defined(CONFIG_X86_64)
25352+ time &= 0xFUL;
25353+ time <<= 4;
25354+#else
25355+ time &= 0x1FUL;
25356+ time <<= 3;
25357+#endif
25358+
25359+ thread->sp0 ^= time;
25360+ load_sp0(init_tss + smp_processor_id(), thread);
25361+
25362+#ifdef CONFIG_X86_64
25363+ this_cpu_write(kernel_stack, thread->sp0);
25364+#endif
25365+}
25366+#endif
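pax_randomize_kstack() above perturbs thread.sp0 with a few TSC bits while leaving the low alignment bits untouched: on x86_64 four bits are kept and shifted up to bits 4-7, so the XOR applies offsets 0x00-0xf0 in 16-byte steps (the i386 and Pentium 4 branches land on bits 3-7 instead). A worked example for the x86_64 masks:

    #include <stdio.h>

    int main(void)
    {
        unsigned long sp0 = 0xffff880012345000UL; /* example top of stack */
        unsigned long time = 0x9f3c7a5bUL;        /* pretend rdtscl() value */

        /* x86_64 branch of the patch above */
        time &= 0xfUL; /* keep 4 TSC bits     -> 0xb  */
        time <<= 4;    /* step past alignment -> 0xb0 */

        printf("offset=%#lx new sp0=%#lx\n", time, sp0 ^ time);
        return 0;
    }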
25367diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
25368index 884f98f..ec23e04 100644
25369--- a/arch/x86/kernel/process_32.c
25370+++ b/arch/x86/kernel/process_32.c
25371@@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
25372 unsigned long thread_saved_pc(struct task_struct *tsk)
25373 {
25374 return ((unsigned long *)tsk->thread.sp)[3];
25375+//XXX return tsk->thread.eip;
25376 }
25377
25378 void __show_regs(struct pt_regs *regs, int all)
25379@@ -74,19 +75,18 @@ void __show_regs(struct pt_regs *regs, int all)
25380 unsigned long sp;
25381 unsigned short ss, gs;
25382
25383- if (user_mode_vm(regs)) {
25384+ if (user_mode(regs)) {
25385 sp = regs->sp;
25386 ss = regs->ss & 0xffff;
25387- gs = get_user_gs(regs);
25388 } else {
25389 sp = kernel_stack_pointer(regs);
25390 savesegment(ss, ss);
25391- savesegment(gs, gs);
25392 }
25393+ gs = get_user_gs(regs);
25394
25395 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
25396 (u16)regs->cs, regs->ip, regs->flags,
25397- smp_processor_id());
25398+ raw_smp_processor_id());
25399 print_symbol("EIP is at %s\n", regs->ip);
25400
25401 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
25402@@ -133,20 +133,21 @@ void release_thread(struct task_struct *dead_task)
25403 int copy_thread(unsigned long clone_flags, unsigned long sp,
25404 unsigned long arg, struct task_struct *p)
25405 {
25406- struct pt_regs *childregs = task_pt_regs(p);
25407+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
25408 struct task_struct *tsk;
25409 int err;
25410
25411 p->thread.sp = (unsigned long) childregs;
25412 p->thread.sp0 = (unsigned long) (childregs+1);
25413+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
25414
25415 if (unlikely(p->flags & PF_KTHREAD)) {
25416 /* kernel thread */
25417 memset(childregs, 0, sizeof(struct pt_regs));
25418 p->thread.ip = (unsigned long) ret_from_kernel_thread;
25419- task_user_gs(p) = __KERNEL_STACK_CANARY;
25420- childregs->ds = __USER_DS;
25421- childregs->es = __USER_DS;
25422+ savesegment(gs, childregs->gs);
25423+ childregs->ds = __KERNEL_DS;
25424+ childregs->es = __KERNEL_DS;
25425 childregs->fs = __KERNEL_PERCPU;
25426 childregs->bx = sp; /* function */
25427 childregs->bp = arg;
25428@@ -253,7 +254,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25429 struct thread_struct *prev = &prev_p->thread,
25430 *next = &next_p->thread;
25431 int cpu = smp_processor_id();
25432- struct tss_struct *tss = &per_cpu(init_tss, cpu);
25433+ struct tss_struct *tss = init_tss + cpu;
25434 fpu_switch_t fpu;
25435
25436 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
25437@@ -277,6 +278,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25438 */
25439 lazy_save_gs(prev->gs);
25440
25441+#ifdef CONFIG_PAX_MEMORY_UDEREF
25442+ __set_fs(task_thread_info(next_p)->addr_limit);
25443+#endif
25444+
25445 /*
25446 * Load the per-thread Thread-Local Storage descriptor.
25447 */
25448@@ -307,6 +312,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25449 */
25450 arch_end_context_switch(next_p);
25451
25452+ this_cpu_write(current_task, next_p);
25453+ this_cpu_write(current_tinfo, &next_p->tinfo);
25454+
25455 /*
25456 * Restore %gs if needed (which is common)
25457 */
25458@@ -315,8 +323,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25459
25460 switch_fpu_finish(next_p, fpu);
25461
25462- this_cpu_write(current_task, next_p);
25463-
25464 return prev_p;
25465 }
25466
25467@@ -346,4 +352,3 @@ unsigned long get_wchan(struct task_struct *p)
25468 } while (count++ < 16);
25469 return 0;
25470 }
25471-
25472diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
25473index bb1dc51..08dda7f 100644
25474--- a/arch/x86/kernel/process_64.c
25475+++ b/arch/x86/kernel/process_64.c
25476@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
25477 struct pt_regs *childregs;
25478 struct task_struct *me = current;
25479
25480- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
25481+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
25482 childregs = task_pt_regs(p);
25483 p->thread.sp = (unsigned long) childregs;
25484 p->thread.usersp = me->thread.usersp;
25485+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
25486 set_tsk_thread_flag(p, TIF_FORK);
25487 p->fpu_counter = 0;
25488 p->thread.io_bitmap_ptr = NULL;
25489@@ -172,6 +173,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
25490 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
25491 savesegment(es, p->thread.es);
25492 savesegment(ds, p->thread.ds);
25493+ savesegment(ss, p->thread.ss);
25494+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
25495 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
25496
25497 if (unlikely(p->flags & PF_KTHREAD)) {
25498@@ -280,7 +283,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25499 struct thread_struct *prev = &prev_p->thread;
25500 struct thread_struct *next = &next_p->thread;
25501 int cpu = smp_processor_id();
25502- struct tss_struct *tss = &per_cpu(init_tss, cpu);
25503+ struct tss_struct *tss = init_tss + cpu;
25504 unsigned fsindex, gsindex;
25505 fpu_switch_t fpu;
25506
25507@@ -303,6 +306,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25508 if (unlikely(next->ds | prev->ds))
25509 loadsegment(ds, next->ds);
25510
25511+ savesegment(ss, prev->ss);
25512+ if (unlikely(next->ss != prev->ss))
25513+ loadsegment(ss, next->ss);
25514
25515 /* We must save %fs and %gs before load_TLS() because
25516 * %fs and %gs may be cleared by load_TLS().
25517@@ -362,10 +368,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25518 prev->usersp = this_cpu_read(old_rsp);
25519 this_cpu_write(old_rsp, next->usersp);
25520 this_cpu_write(current_task, next_p);
25521+ this_cpu_write(current_tinfo, &next_p->tinfo);
25522
25523- this_cpu_write(kernel_stack,
25524- (unsigned long)task_stack_page(next_p) +
25525- THREAD_SIZE - KERNEL_STACK_OFFSET);
25526+ this_cpu_write(kernel_stack, next->sp0);
25527
25528 /*
25529 * Now maybe reload the debug registers and handle I/O bitmaps
25530@@ -434,12 +439,11 @@ unsigned long get_wchan(struct task_struct *p)
25531 if (!p || p == current || p->state == TASK_RUNNING)
25532 return 0;
25533 stack = (unsigned long)task_stack_page(p);
25534- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
25535+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
25536 return 0;
25537 fp = *(u64 *)(p->thread.sp);
25538 do {
25539- if (fp < (unsigned long)stack ||
25540- fp >= (unsigned long)stack+THREAD_SIZE)
25541+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
25542 return 0;
25543 ip = *(u64 *)(fp+8);
25544 if (!in_sched_functions(ip))
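/*
 * A minimal sketch of the bounded frame-pointer walk that the
 * get_wchan() hunk above tightens: every dereference of fp is checked
 * against the stack range first, leaving room for the 16 reserved
 * bytes and the saved-fp/return-ip pair.  The fake two-frame stack is
 * illustrative; the real code walks another task's kernel stack.
 */
#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 64	/* toy stack size, in bytes */

int main(void)
{
	uint64_t stackmem[THREAD_SIZE / 8] = { 0 };
	uintptr_t stack = (uintptr_t)stackmem;
	uintptr_t limit = stack + THREAD_SIZE - 16 - sizeof(uint64_t);
	uintptr_t fp = stack;

	/* two fake frames laid out as [saved fp][return ip] */
	stackmem[0] = stack + 16;	/* frame 0 chains to frame 1 */
	stackmem[1] = 0x1111;
	stackmem[2] = 0;		/* frame 1 ends the chain */
	stackmem[3] = 0x2222;

	while (fp >= stack && fp <= limit) {
		printf("ip=%#llx\n",
		       (unsigned long long)*(uint64_t *)(fp + 8));
		fp = *(uint64_t *)fp;
		if (!fp)
			break;
	}
	return 0;
}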
25545diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
25546index 7461f50..1334029 100644
25547--- a/arch/x86/kernel/ptrace.c
25548+++ b/arch/x86/kernel/ptrace.c
25549@@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
25550 {
25551 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
25552 unsigned long sp = (unsigned long)&regs->sp;
25553- struct thread_info *tinfo;
25554
25555- if (context == (sp & ~(THREAD_SIZE - 1)))
25556+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
25557 return sp;
25558
25559- tinfo = (struct thread_info *)context;
25560- if (tinfo->previous_esp)
25561- return tinfo->previous_esp;
25562+ sp = *(unsigned long *)context;
25563+ if (sp)
25564+ return sp;
25565
25566 return (unsigned long)regs;
25567 }
25568@@ -588,7 +587,7 @@ static void ptrace_triggered(struct perf_event *bp,
25569 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
25570 {
25571 int i;
25572- int dr7 = 0;
25573+ unsigned long dr7 = 0;
25574 struct arch_hw_breakpoint *info;
25575
25576 for (i = 0; i < HBP_NUM; i++) {
25577@@ -822,7 +821,7 @@ long arch_ptrace(struct task_struct *child, long request,
25578 unsigned long addr, unsigned long data)
25579 {
25580 int ret;
25581- unsigned long __user *datap = (unsigned long __user *)data;
25582+ unsigned long __user *datap = (__force unsigned long __user *)data;
25583
25584 switch (request) {
25585 /* read the word at location addr in the USER area. */
25586@@ -907,14 +906,14 @@ long arch_ptrace(struct task_struct *child, long request,
25587 if ((int) addr < 0)
25588 return -EIO;
25589 ret = do_get_thread_area(child, addr,
25590- (struct user_desc __user *)data);
25591+ (__force struct user_desc __user *) data);
25592 break;
25593
25594 case PTRACE_SET_THREAD_AREA:
25595 if ((int) addr < 0)
25596 return -EIO;
25597 ret = do_set_thread_area(child, addr,
25598- (struct user_desc __user *)data, 0);
25599+ (__force struct user_desc __user *) data, 0);
25600 break;
25601 #endif
25602
25603@@ -1292,7 +1291,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
25604
25605 #ifdef CONFIG_X86_64
25606
25607-static struct user_regset x86_64_regsets[] __read_mostly = {
25608+static user_regset_no_const x86_64_regsets[] __read_only = {
25609 [REGSET_GENERAL] = {
25610 .core_note_type = NT_PRSTATUS,
25611 .n = sizeof(struct user_regs_struct) / sizeof(long),
25612@@ -1333,7 +1332,7 @@ static const struct user_regset_view user_x86_64_view = {
25613 #endif /* CONFIG_X86_64 */
25614
25615 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
25616-static struct user_regset x86_32_regsets[] __read_mostly = {
25617+static user_regset_no_const x86_32_regsets[] __read_only = {
25618 [REGSET_GENERAL] = {
25619 .core_note_type = NT_PRSTATUS,
25620 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
25621@@ -1386,7 +1385,7 @@ static const struct user_regset_view user_x86_32_view = {
25622 */
25623 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
25624
25625-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
25626+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
25627 {
25628 #ifdef CONFIG_X86_64
25629 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
25630@@ -1421,7 +1420,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
25631 memset(info, 0, sizeof(*info));
25632 info->si_signo = SIGTRAP;
25633 info->si_code = si_code;
25634- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
25635+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
25636 }
25637
25638 void user_single_step_siginfo(struct task_struct *tsk,
25639@@ -1450,6 +1449,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
25640 # define IS_IA32 0
25641 #endif
25642
25643+#ifdef CONFIG_GRKERNSEC_SETXID
25644+extern void gr_delayed_cred_worker(void);
25645+#endif
25646+
25647 /*
25648 * We must return the syscall number to actually look up in the table.
25649 * This can be -1L to skip running any syscall at all.
25650@@ -1460,6 +1463,11 @@ long syscall_trace_enter(struct pt_regs *regs)
25651
25652 user_exit();
25653
25654+#ifdef CONFIG_GRKERNSEC_SETXID
25655+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
25656+ gr_delayed_cred_worker();
25657+#endif
25658+
25659 /*
25660 * If we stepped into a sysenter/syscall insn, it trapped in
25661 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
25662@@ -1515,6 +1523,11 @@ void syscall_trace_leave(struct pt_regs *regs)
25663 */
25664 user_exit();
25665
25666+#ifdef CONFIG_GRKERNSEC_SETXID
25667+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
25668+ gr_delayed_cred_worker();
25669+#endif
25670+
25671 audit_syscall_exit(regs);
25672
25673 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
25674diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
25675index a16bae3..1f65f25 100644
25676--- a/arch/x86/kernel/pvclock.c
25677+++ b/arch/x86/kernel/pvclock.c
25678@@ -43,11 +43,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
25679 return pv_tsc_khz;
25680 }
25681
25682-static atomic64_t last_value = ATOMIC64_INIT(0);
25683+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
25684
25685 void pvclock_resume(void)
25686 {
25687- atomic64_set(&last_value, 0);
25688+ atomic64_set_unchecked(&last_value, 0);
25689 }
25690
25691 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
25692@@ -92,11 +92,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
25693 * updating at the same time, and one of them could be slightly behind,
25694 * making the assumption that last_value always go forward fail to hold.
25695 */
25696- last = atomic64_read(&last_value);
25697+ last = atomic64_read_unchecked(&last_value);
25698 do {
25699 if (ret < last)
25700 return last;
25701- last = atomic64_cmpxchg(&last_value, last, ret);
25702+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
25703 } while (unlikely(last != ret));
25704
25705 return ret;
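/*
 * The pvclock hunk above only swaps the atomic64 helpers for their
 * _unchecked variants (the PaX REFCOUNT plugin instruments the checked
 * ones, and last_value is not a reference count); the monotonicity
 * loop itself is unchanged.  A sketch of that loop with C11 atomics
 * standing in for the kernel's atomic64_* API:
 */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t last_value;	/* highest value handed out */

static uint64_t monotonic_read(uint64_t ret)
{
	uint64_t last = atomic_load(&last_value);

	do {
		if (ret < last)		/* behind another CPU: clamp */
			return last;
		/* on failure, 'last' reloads the current maximum */
	} while (!atomic_compare_exchange_weak(&last_value, &last, ret));

	return ret;
}

int main(void)
{
	(void)monotonic_read(100);		  /* publishes 100 */
	return monotonic_read(50) == 100 ? 0 : 1; /* stale read clamped */
}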
25706diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
25707index 618ce26..ec7e21c 100644
25708--- a/arch/x86/kernel/reboot.c
25709+++ b/arch/x86/kernel/reboot.c
25710@@ -68,6 +68,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
25711
25712 void __noreturn machine_real_restart(unsigned int type)
25713 {
25714+
25715+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
25716+ struct desc_struct *gdt;
25717+#endif
25718+
25719 local_irq_disable();
25720
25721 /*
25722@@ -95,7 +100,29 @@ void __noreturn machine_real_restart(unsigned int type)
25723
25724 /* Jump to the identity-mapped low memory code */
25725 #ifdef CONFIG_X86_32
25726- asm volatile("jmpl *%0" : :
25727+
25728+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
25729+ gdt = get_cpu_gdt_table(smp_processor_id());
25730+ pax_open_kernel();
25731+#ifdef CONFIG_PAX_MEMORY_UDEREF
25732+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
25733+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
25734+ loadsegment(ds, __KERNEL_DS);
25735+ loadsegment(es, __KERNEL_DS);
25736+ loadsegment(ss, __KERNEL_DS);
25737+#endif
25738+#ifdef CONFIG_PAX_KERNEXEC
25739+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
25740+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
25741+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
25742+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
25743+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
25744+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
25745+#endif
25746+ pax_close_kernel();
25747+#endif
25748+
25749+ asm volatile("ljmpl *%0" : :
25750 "rm" (real_mode_header->machine_real_restart_asm),
25751 "a" (type));
25752 #else
25753@@ -466,7 +493,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
25754 * try to force a triple fault and then cycle between hitting the keyboard
25755 * controller and doing that
25756 */
25757-static void native_machine_emergency_restart(void)
25758+static void __noreturn native_machine_emergency_restart(void)
25759 {
25760 int i;
25761 int attempt = 0;
25762@@ -575,13 +602,13 @@ void native_machine_shutdown(void)
25763 #endif
25764 }
25765
25766-static void __machine_emergency_restart(int emergency)
25767+static void __noreturn __machine_emergency_restart(int emergency)
25768 {
25769 reboot_emergency = emergency;
25770 machine_ops.emergency_restart();
25771 }
25772
25773-static void native_machine_restart(char *__unused)
25774+static void __noreturn native_machine_restart(char *__unused)
25775 {
25776 pr_notice("machine restart\n");
25777
25778@@ -590,7 +617,7 @@ static void native_machine_restart(char *__unused)
25779 __machine_emergency_restart(0);
25780 }
25781
25782-static void native_machine_halt(void)
25783+static void __noreturn native_machine_halt(void)
25784 {
25785 /* Stop other cpus and apics */
25786 machine_shutdown();
25787@@ -600,7 +627,7 @@ static void native_machine_halt(void)
25788 stop_this_cpu(NULL);
25789 }
25790
25791-static void native_machine_power_off(void)
25792+static void __noreturn native_machine_power_off(void)
25793 {
25794 if (pm_power_off) {
25795 if (!reboot_force)
25796@@ -609,9 +636,10 @@ static void native_machine_power_off(void)
25797 }
25798 /* A fallback in case there is no PM info available */
25799 tboot_shutdown(TB_SHUTDOWN_HALT);
25800+ unreachable();
25801 }
25802
25803-struct machine_ops machine_ops = {
25804+struct machine_ops machine_ops __read_only = {
25805 .power_off = native_machine_power_off,
25806 .shutdown = native_machine_shutdown,
25807 .emergency_restart = native_machine_emergency_restart,
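/*
 * The reboot.c hunk above annotates the halt/restart paths __noreturn
 * and pairs the last opaque call with unreachable().  A small sketch
 * of why: the attribute lets the compiler drop code after such calls,
 * and __builtin_unreachable() (what the kernel's unreachable() expands
 * to on gcc) suppresses the "noreturn function does return" warning
 * when the final callee cannot be proven terminal.  stop_machine()
 * here is an illustrative stand-in.
 */
#include <stdlib.h>

static void stop_machine(void)
{
	exit(0);	/* opaque halt helper */
}

static void __attribute__((noreturn)) machine_halt(void)
{
	stop_machine();
	__builtin_unreachable();	/* promise: never falls through */
}

int main(void)
{
	machine_halt();
	/* any code here would be flagged and dropped as unreachable */
}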
25808diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
25809index c8e41e9..64049ef 100644
25810--- a/arch/x86/kernel/reboot_fixups_32.c
25811+++ b/arch/x86/kernel/reboot_fixups_32.c
25812@@ -57,7 +57,7 @@ struct device_fixup {
25813 unsigned int vendor;
25814 unsigned int device;
25815 void (*reboot_fixup)(struct pci_dev *);
25816-};
25817+} __do_const;
25818
25819 /*
25820 * PCI ids solely used for fixups_table go here
25821diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
25822index 3fd2c69..16ef367 100644
25823--- a/arch/x86/kernel/relocate_kernel_64.S
25824+++ b/arch/x86/kernel/relocate_kernel_64.S
25825@@ -11,6 +11,7 @@
25826 #include <asm/kexec.h>
25827 #include <asm/processor-flags.h>
25828 #include <asm/pgtable_types.h>
25829+#include <asm/alternative-asm.h>
25830
25831 /*
25832 * Must be relocatable PIC code callable as a C function
25833@@ -96,8 +97,7 @@ relocate_kernel:
25834
25835 /* jump to identity mapped page */
25836 addq $(identity_mapped - relocate_kernel), %r8
25837- pushq %r8
25838- ret
25839+ jmp *%r8
25840
25841 identity_mapped:
25842 /* set return address to 0 if not preserving context */
25843@@ -167,6 +167,7 @@ identity_mapped:
25844 xorl %r14d, %r14d
25845 xorl %r15d, %r15d
25846
25847+ pax_force_retaddr 0, 1
25848 ret
25849
25850 1:
25851diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
25852index f0de629..a4978a8f 100644
25853--- a/arch/x86/kernel/setup.c
25854+++ b/arch/x86/kernel/setup.c
25855@@ -110,6 +110,7 @@
25856 #include <asm/mce.h>
25857 #include <asm/alternative.h>
25858 #include <asm/prom.h>
25859+#include <asm/boot.h>
25860
25861 /*
25862 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
25863@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
25864 #endif
25865
25866
25867-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
25868-__visible unsigned long mmu_cr4_features;
25869+#ifdef CONFIG_X86_64
25870+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
25871+#elif defined(CONFIG_X86_PAE)
25872+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
25873 #else
25874-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
25875+__visible unsigned long mmu_cr4_features __read_only;
25876 #endif
25877
25878+void set_in_cr4(unsigned long mask)
25879+{
25880+ unsigned long cr4 = read_cr4();
25881+
25882+ if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
25883+ return;
25884+
25885+ pax_open_kernel();
25886+ mmu_cr4_features |= mask;
25887+ pax_close_kernel();
25888+
25889+ if (trampoline_cr4_features)
25890+ *trampoline_cr4_features = mmu_cr4_features;
25891+ cr4 |= mask;
25892+ write_cr4(cr4);
25893+}
25894+EXPORT_SYMBOL(set_in_cr4);
25895+
25896+void clear_in_cr4(unsigned long mask)
25897+{
25898+ unsigned long cr4 = read_cr4();
25899+
25900+ if (!(cr4 & mask) && cr4 == mmu_cr4_features)
25901+ return;
25902+
25903+ pax_open_kernel();
25904+ mmu_cr4_features &= ~mask;
25905+ pax_close_kernel();
25906+
25907+ if (trampoline_cr4_features)
25908+ *trampoline_cr4_features = mmu_cr4_features;
25909+ cr4 &= ~mask;
25910+ write_cr4(cr4);
25911+}
25912+EXPORT_SYMBOL(clear_in_cr4);
25913+
25914 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
25915 int bootloader_type, bootloader_version;
25916
25917@@ -768,7 +807,7 @@ static void __init trim_bios_range(void)
25918 * area (640->1Mb) as ram even though it is not.
25919 * take them out.
25920 */
25921- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
25922+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
25923
25924 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
25925 }
25926@@ -776,7 +815,7 @@ static void __init trim_bios_range(void)
25927 /* called before trim_bios_range() to spare extra sanitize */
25928 static void __init e820_add_kernel_range(void)
25929 {
25930- u64 start = __pa_symbol(_text);
25931+ u64 start = __pa_symbol(ktla_ktva(_text));
25932 u64 size = __pa_symbol(_end) - start;
25933
25934 /*
25935@@ -838,8 +877,12 @@ static void __init trim_low_memory_range(void)
25936
25937 void __init setup_arch(char **cmdline_p)
25938 {
25939+#ifdef CONFIG_X86_32
25940+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
25941+#else
25942 memblock_reserve(__pa_symbol(_text),
25943 (unsigned long)__bss_stop - (unsigned long)_text);
25944+#endif
25945
25946 early_reserve_initrd();
25947
25948@@ -931,14 +974,14 @@ void __init setup_arch(char **cmdline_p)
25949
25950 if (!boot_params.hdr.root_flags)
25951 root_mountflags &= ~MS_RDONLY;
25952- init_mm.start_code = (unsigned long) _text;
25953- init_mm.end_code = (unsigned long) _etext;
25954+ init_mm.start_code = ktla_ktva((unsigned long) _text);
25955+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
25956 init_mm.end_data = (unsigned long) _edata;
25957 init_mm.brk = _brk_end;
25958
25959- code_resource.start = __pa_symbol(_text);
25960- code_resource.end = __pa_symbol(_etext)-1;
25961- data_resource.start = __pa_symbol(_etext);
25962+ code_resource.start = __pa_symbol(ktla_ktva(_text));
25963+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
25964+ data_resource.start = __pa_symbol(_sdata);
25965 data_resource.end = __pa_symbol(_edata)-1;
25966 bss_resource.start = __pa_symbol(__bss_start);
25967 bss_resource.end = __pa_symbol(__bss_stop)-1;
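/*
 * A sketch of the shadowed-register pattern behind set_in_cr4() /
 * clear_in_cr4() above: keep a cached copy of the register, skip the
 * hardware write when nothing changes, and mirror updates to a
 * secondary consumer.  write_hw_reg() and trampoline_copy stand in
 * for write_cr4() and *trampoline_cr4_features; the kernel version
 * additionally re-reads %cr4 and write-protects the shadow via
 * pax_open_kernel()/pax_close_kernel().
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t shadow;			/* mmu_cr4_features analogue */
static uint64_t trampoline_copy;	/* trampoline copy analogue */

static void write_hw_reg(uint64_t v)
{
	printf("hw write: %#llx\n", (unsigned long long)v);
}

static void set_bits(uint64_t mask)
{
	if ((shadow & mask) == mask)
		return;				/* already set: no write */
	shadow |= mask;
	trampoline_copy = shadow;		/* keep the copy in sync */
	write_hw_reg(shadow);
}

static void clear_bits(uint64_t mask)
{
	if (!(shadow & mask))
		return;
	shadow &= ~mask;
	trampoline_copy = shadow;
	write_hw_reg(shadow);
}

int main(void)
{
	set_bits(1UL << 5);	/* first set: one hardware write */
	set_bits(1UL << 5);	/* redundant: skipped */
	clear_bits(1UL << 5);
	printf("shadow copy: %#llx\n",
	       (unsigned long long)trampoline_copy);
	return 0;
}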
25968diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
25969index 5cdff03..80fa283 100644
25970--- a/arch/x86/kernel/setup_percpu.c
25971+++ b/arch/x86/kernel/setup_percpu.c
25972@@ -21,19 +21,17 @@
25973 #include <asm/cpu.h>
25974 #include <asm/stackprotector.h>
25975
25976-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
25977+#ifdef CONFIG_SMP
25978+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
25979 EXPORT_PER_CPU_SYMBOL(cpu_number);
25980+#endif
25981
25982-#ifdef CONFIG_X86_64
25983 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
25984-#else
25985-#define BOOT_PERCPU_OFFSET 0
25986-#endif
25987
25988 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
25989 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
25990
25991-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
25992+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
25993 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
25994 };
25995 EXPORT_SYMBOL(__per_cpu_offset);
25996@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
25997 {
25998 #ifdef CONFIG_NEED_MULTIPLE_NODES
25999 pg_data_t *last = NULL;
26000- unsigned int cpu;
26001+ int cpu;
26002
26003 for_each_possible_cpu(cpu) {
26004 int node = early_cpu_to_node(cpu);
26005@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
26006 {
26007 #ifdef CONFIG_X86_32
26008 struct desc_struct gdt;
26009+ unsigned long base = per_cpu_offset(cpu);
26010
26011- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
26012- 0x2 | DESCTYPE_S, 0x8);
26013- gdt.s = 1;
26014+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
26015+ 0x83 | DESCTYPE_S, 0xC);
26016 write_gdt_entry(get_cpu_gdt_table(cpu),
26017 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
26018 #endif
26019@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
26020 /* alrighty, percpu areas up and running */
26021 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
26022 for_each_possible_cpu(cpu) {
26023+#ifdef CONFIG_CC_STACKPROTECTOR
26024+#ifdef CONFIG_X86_32
26025+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
26026+#endif
26027+#endif
26028 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
26029 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
26030 per_cpu(cpu_number, cpu) = cpu;
26031@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
26032 */
26033 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
26034 #endif
26035+#ifdef CONFIG_CC_STACKPROTECTOR
26036+#ifdef CONFIG_X86_32
26037+ if (!cpu)
26038+ per_cpu(stack_canary.canary, cpu) = canary;
26039+#endif
26040+#endif
26041 /*
26042 * Up to this point, the boot CPU has been using .init.data
26043 * area. Reload any changed state for the boot CPU.
26044diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
26045index 9e5de68..16c53cb 100644
26046--- a/arch/x86/kernel/signal.c
26047+++ b/arch/x86/kernel/signal.c
26048@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
26049 * Align the stack pointer according to the i386 ABI,
26050 * i.e. so that on function entry ((sp + 4) & 15) == 0.
26051 */
26052- sp = ((sp + 4) & -16ul) - 4;
26053+ sp = ((sp - 12) & -16ul) - 4;
26054 #else /* !CONFIG_X86_32 */
26055 sp = round_down(sp, 16) - 8;
26056 #endif
26057@@ -298,9 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
26058 }
26059
26060 if (current->mm->context.vdso)
26061- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
26062+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
26063 else
26064- restorer = &frame->retcode;
26065+ restorer = (void __user *)&frame->retcode;
26066 if (ksig->ka.sa.sa_flags & SA_RESTORER)
26067 restorer = ksig->ka.sa.sa_restorer;
26068
26069@@ -314,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
26070 * reasons and because gdb uses it as a signature to notice
26071 * signal handler stack frames.
26072 */
26073- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
26074+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
26075
26076 if (err)
26077 return -EFAULT;
26078@@ -361,7 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
26079 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
26080
26081 /* Set up to return from userspace. */
26082- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
26083+ if (current->mm->context.vdso)
26084+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
26085+ else
26086+ restorer = (void __user *)&frame->retcode;
26087 if (ksig->ka.sa.sa_flags & SA_RESTORER)
26088 restorer = ksig->ka.sa.sa_restorer;
26089 put_user_ex(restorer, &frame->pretcode);
26090@@ -373,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
26091 * reasons and because gdb uses it as a signature to notice
26092 * signal handler stack frames.
26093 */
26094- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
26095+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
26096 } put_user_catch(err);
26097
26098 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
26099@@ -609,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
26100 {
26101 int usig = signr_convert(ksig->sig);
26102 sigset_t *set = sigmask_to_save();
26103- compat_sigset_t *cset = (compat_sigset_t *) set;
26104+ sigset_t sigcopy;
26105+ compat_sigset_t *cset;
26106+
26107+ sigcopy = *set;
26108+
26109+ cset = (compat_sigset_t *) &sigcopy;
26110
26111 /* Set up the stack frame */
26112 if (is_ia32_frame()) {
26113@@ -620,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
26114 } else if (is_x32_frame()) {
26115 return x32_setup_rt_frame(ksig, cset, regs);
26116 } else {
26117- return __setup_rt_frame(ksig->sig, ksig, set, regs);
26118+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
26119 }
26120 }
26121
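/*
 * A quick check of the i386 sigframe alignment change above: both
 * forms keep the ABI invariant ((sp + 4) & 15) == 0 at handler entry,
 * but the old (sp + 4) rounding can land exactly on the current sp,
 * while the new (sp - 12) form always steps at least 16 bytes below
 * it, so the frame never reuses live stack.
 */
#include <stdio.h>

int main(void)
{
	for (unsigned long sp = 0x1000; sp < 0x1010; sp++) {
		unsigned long old_sp = ((sp + 4) & -16ul) - 4;
		unsigned long new_sp = ((sp - 12) & -16ul) - 4;

		printf("sp=%#lx old=%#lx new=%#lx aligned=%d gap=%lu\n",
		       sp, old_sp, new_sp,
		       ((new_sp + 4) & 15) == 0, sp - new_sp);
	}
	return 0;
}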
26122diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
26123index 7c3a5a6..f0a8961 100644
26124--- a/arch/x86/kernel/smp.c
26125+++ b/arch/x86/kernel/smp.c
26126@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
26127
26128 __setup("nonmi_ipi", nonmi_ipi_setup);
26129
26130-struct smp_ops smp_ops = {
26131+struct smp_ops smp_ops __read_only = {
26132 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
26133 .smp_prepare_cpus = native_smp_prepare_cpus,
26134 .smp_cpus_done = native_smp_cpus_done,
26135diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
26136index 6cacab6..750636a 100644
26137--- a/arch/x86/kernel/smpboot.c
26138+++ b/arch/x86/kernel/smpboot.c
26139@@ -251,14 +251,18 @@ static void notrace start_secondary(void *unused)
26140
26141 enable_start_cpu0 = 0;
26142
26143-#ifdef CONFIG_X86_32
26144- /* switch away from the initial page table */
26145- load_cr3(swapper_pg_dir);
26146- __flush_tlb_all();
26147-#endif
26148-
26149 /* otherwise gcc will move up smp_processor_id before the cpu_init */
26150 barrier();
26151+
26152+ /* switch away from the initial page table */
26153+#ifdef CONFIG_PAX_PER_CPU_PGD
26154+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
26155+ __flush_tlb_all();
26156+#elif defined(CONFIG_X86_32)
26157+ load_cr3(swapper_pg_dir);
26158+ __flush_tlb_all();
26159+#endif
26160+
26161 /*
26162 * Check TSC synchronization with the BP:
26163 */
26164@@ -749,6 +753,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
26165 idle->thread.sp = (unsigned long) (((struct pt_regs *)
26166 (THREAD_SIZE + task_stack_page(idle))) - 1);
26167 per_cpu(current_task, cpu) = idle;
26168+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
26169
26170 #ifdef CONFIG_X86_32
26171 /* Stack for startup_32 can be just as for start_secondary onwards */
26172@@ -756,11 +761,13 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
26173 #else
26174 clear_tsk_thread_flag(idle, TIF_FORK);
26175 initial_gs = per_cpu_offset(cpu);
26176- per_cpu(kernel_stack, cpu) =
26177- (unsigned long)task_stack_page(idle) -
26178- KERNEL_STACK_OFFSET + THREAD_SIZE;
26179+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
26180 #endif
26181+
26182+ pax_open_kernel();
26183 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
26184+ pax_close_kernel();
26185+
26186 initial_code = (unsigned long)start_secondary;
26187 stack_start = idle->thread.sp;
26188
26189@@ -909,6 +916,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
26190 /* the FPU context is blank, nobody can own it */
26191 __cpu_disable_lazy_restore(cpu);
26192
26193+#ifdef CONFIG_PAX_PER_CPU_PGD
26194+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
26195+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26196+ KERNEL_PGD_PTRS);
26197+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
26198+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26199+ KERNEL_PGD_PTRS);
26200+#endif
26201+
26202 err = do_boot_cpu(apicid, cpu, tidle);
26203 if (err) {
26204 pr_debug("do_boot_cpu failed %d\n", err);
26205diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
26206index 9b4d51d..5d28b58 100644
26207--- a/arch/x86/kernel/step.c
26208+++ b/arch/x86/kernel/step.c
26209@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
26210 struct desc_struct *desc;
26211 unsigned long base;
26212
26213- seg &= ~7UL;
26214+ seg >>= 3;
26215
26216 mutex_lock(&child->mm->context.lock);
26217- if (unlikely((seg >> 3) >= child->mm->context.size))
26218+ if (unlikely(seg >= child->mm->context.size))
26219 addr = -1L; /* bogus selector, access would fault */
26220 else {
26221 desc = child->mm->context.ldt + seg;
26222@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
26223 addr += base;
26224 }
26225 mutex_unlock(&child->mm->context.lock);
26226- }
26227+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
26228+ addr = ktla_ktva(addr);
26229
26230 return addr;
26231 }
26232@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
26233 unsigned char opcode[15];
26234 unsigned long addr = convert_ip_to_linear(child, regs);
26235
26236+ if (addr == -EINVAL)
26237+ return 0;
26238+
26239 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
26240 for (i = 0; i < copied; i++) {
26241 switch (opcode[i]) {
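/*
 * A sketch of the selector arithmetic in the step.c hunk above: an
 * x86 segment selector packs index(15:3) | TI(2) | RPL(1:0).  The old
 * code masked the low three bits and shifted later; shifting once up
 * front makes 'seg' a plain descriptor-table index everywhere after.
 * The selector value below is just an example.
 */
#include <stdio.h>

int main(void)
{
	unsigned long seg = 0x73;		/* example selector */

	unsigned long index = seg >> 3;		/* descriptor index */
	unsigned long ti    = (seg >> 2) & 1;	/* 0 = GDT, 1 = LDT */
	unsigned long rpl   = seg & 3;		/* requested priv level */

	printf("index=%lu ti=%lu rpl=%lu\n", index, ti, rpl);
	return 0;
}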
26242diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
26243new file mode 100644
26244index 0000000..5877189
26245--- /dev/null
26246+++ b/arch/x86/kernel/sys_i386_32.c
26247@@ -0,0 +1,189 @@
26248+/*
26249+ * This file contains various random system calls that
26250+ * have a non-standard calling sequence on the Linux/i386
26251+ * platform.
26252+ */
26253+
26254+#include <linux/errno.h>
26255+#include <linux/sched.h>
26256+#include <linux/mm.h>
26257+#include <linux/fs.h>
26258+#include <linux/smp.h>
26259+#include <linux/sem.h>
26260+#include <linux/msg.h>
26261+#include <linux/shm.h>
26262+#include <linux/stat.h>
26263+#include <linux/syscalls.h>
26264+#include <linux/mman.h>
26265+#include <linux/file.h>
26266+#include <linux/utsname.h>
26267+#include <linux/ipc.h>
26268+#include <linux/elf.h>
26269+
26270+#include <linux/uaccess.h>
26271+#include <linux/unistd.h>
26272+
26273+#include <asm/syscalls.h>
26274+
26275+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
26276+{
26277+ unsigned long pax_task_size = TASK_SIZE;
26278+
26279+#ifdef CONFIG_PAX_SEGMEXEC
26280+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
26281+ pax_task_size = SEGMEXEC_TASK_SIZE;
26282+#endif
26283+
26284+ if (flags & MAP_FIXED)
26285+ if (len > pax_task_size || addr > pax_task_size - len)
26286+ return -EINVAL;
26287+
26288+ return 0;
26289+}
26290+
26291+/*
26292+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
26293+ */
26294+static unsigned long get_align_mask(void)
26295+{
26296+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
26297+ return 0;
26298+
26299+ if (!(current->flags & PF_RANDOMIZE))
26300+ return 0;
26301+
26302+ return va_align.mask;
26303+}
26304+
26305+unsigned long
26306+arch_get_unmapped_area(struct file *filp, unsigned long addr,
26307+ unsigned long len, unsigned long pgoff, unsigned long flags)
26308+{
26309+ struct mm_struct *mm = current->mm;
26310+ struct vm_area_struct *vma;
26311+ unsigned long pax_task_size = TASK_SIZE;
26312+ struct vm_unmapped_area_info info;
26313+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
26314+
26315+#ifdef CONFIG_PAX_SEGMEXEC
26316+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26317+ pax_task_size = SEGMEXEC_TASK_SIZE;
26318+#endif
26319+
26320+ pax_task_size -= PAGE_SIZE;
26321+
26322+ if (len > pax_task_size)
26323+ return -ENOMEM;
26324+
26325+ if (flags & MAP_FIXED)
26326+ return addr;
26327+
26328+#ifdef CONFIG_PAX_RANDMMAP
26329+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26330+#endif
26331+
26332+ if (addr) {
26333+ addr = PAGE_ALIGN(addr);
26334+ if (pax_task_size - len >= addr) {
26335+ vma = find_vma(mm, addr);
26336+ if (check_heap_stack_gap(vma, addr, len, offset))
26337+ return addr;
26338+ }
26339+ }
26340+
26341+ info.flags = 0;
26342+ info.length = len;
26343+ info.align_mask = filp ? get_align_mask() : 0;
26344+ info.align_offset = pgoff << PAGE_SHIFT;
26345+ info.threadstack_offset = offset;
26346+
26347+#ifdef CONFIG_PAX_PAGEEXEC
26348+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
26349+ info.low_limit = 0x00110000UL;
26350+ info.high_limit = mm->start_code;
26351+
26352+#ifdef CONFIG_PAX_RANDMMAP
26353+ if (mm->pax_flags & MF_PAX_RANDMMAP)
26354+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
26355+#endif
26356+
26357+ if (info.low_limit < info.high_limit) {
26358+ addr = vm_unmapped_area(&info);
26359+ if (!IS_ERR_VALUE(addr))
26360+ return addr;
26361+ }
26362+ } else
26363+#endif
26364+
26365+ info.low_limit = mm->mmap_base;
26366+ info.high_limit = pax_task_size;
26367+
26368+ return vm_unmapped_area(&info);
26369+}
26370+
26371+unsigned long
26372+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
26373+ const unsigned long len, const unsigned long pgoff,
26374+ const unsigned long flags)
26375+{
26376+ struct vm_area_struct *vma;
26377+ struct mm_struct *mm = current->mm;
26378+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
26379+ struct vm_unmapped_area_info info;
26380+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
26381+
26382+#ifdef CONFIG_PAX_SEGMEXEC
26383+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26384+ pax_task_size = SEGMEXEC_TASK_SIZE;
26385+#endif
26386+
26387+ pax_task_size -= PAGE_SIZE;
26388+
26389+ /* requested length too big for entire address space */
26390+ if (len > pax_task_size)
26391+ return -ENOMEM;
26392+
26393+ if (flags & MAP_FIXED)
26394+ return addr;
26395+
26396+#ifdef CONFIG_PAX_PAGEEXEC
26397+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
26398+ goto bottomup;
26399+#endif
26400+
26401+#ifdef CONFIG_PAX_RANDMMAP
26402+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26403+#endif
26404+
26405+ /* requesting a specific address */
26406+ if (addr) {
26407+ addr = PAGE_ALIGN(addr);
26408+ if (pax_task_size - len >= addr) {
26409+ vma = find_vma(mm, addr);
26410+ if (check_heap_stack_gap(vma, addr, len, offset))
26411+ return addr;
26412+ }
26413+ }
26414+
26415+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
26416+ info.length = len;
26417+ info.low_limit = PAGE_SIZE;
26418+ info.high_limit = mm->mmap_base;
26419+ info.align_mask = filp ? get_align_mask() : 0;
26420+ info.align_offset = pgoff << PAGE_SHIFT;
26421+ info.threadstack_offset = offset;
26422+
26423+ addr = vm_unmapped_area(&info);
26424+ if (!(addr & ~PAGE_MASK))
26425+ return addr;
26426+ VM_BUG_ON(addr != -ENOMEM);
26427+
26428+bottomup:
26429+ /*
26430+ * A failed mmap() very likely causes application failure,
26431+ * so fall back to the bottom-up function here. This scenario
26432+ * can happen with large stack limits and large mmap()
26433+ * allocations.
26434+ */
26435+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
26436+}
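/*
 * A toy-model sketch of the check_heap_stack_gap() idea used in the
 * new sys_i386_32.c above: a candidate mapping [addr, addr+len) is
 * accepted only if it also ends a configurable gap below the next
 * VMA, rather than merely not overlapping it.  The struct, array and
 * gap size are illustrative; the real helper also folds in the
 * per-thread stack offset.
 */
#include <stdbool.h>
#include <stdio.h>

struct vma {
	unsigned long start, end;
};

/* first mapping ending above addr, or NULL -- a find_vma() analogue */
static const struct vma *find_vma(const struct vma *v, int n,
				  unsigned long addr)
{
	for (int i = 0; i < n; i++)
		if (addr < v[i].end)
			return &v[i];
	return NULL;
}

static bool gap_ok(const struct vma *next, unsigned long addr,
		   unsigned long len, unsigned long gap)
{
	if (!next)
		return true;
	return addr + len + gap <= next->start;
}

int main(void)
{
	const struct vma maps[] = { { 0x40000, 0x50000 } };
	unsigned long addr = 0x3e000, len = 0x1000, gap = 0x2000;
	const struct vma *next = find_vma(maps, 1, addr);

	/* fits without overlap, but fails once the gap is required */
	printf("bare fit: %d, with gap: %d\n",
	       !next || addr + len <= next->start,
	       gap_ok(next, addr, len, gap));
	return 0;
}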
26437diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
26438index 30277e2..5664a29 100644
26439--- a/arch/x86/kernel/sys_x86_64.c
26440+++ b/arch/x86/kernel/sys_x86_64.c
26441@@ -81,8 +81,8 @@ out:
26442 return error;
26443 }
26444
26445-static void find_start_end(unsigned long flags, unsigned long *begin,
26446- unsigned long *end)
26447+static void find_start_end(struct mm_struct *mm, unsigned long flags,
26448+ unsigned long *begin, unsigned long *end)
26449 {
26450 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
26451 unsigned long new_begin;
26452@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
26453 *begin = new_begin;
26454 }
26455 } else {
26456- *begin = current->mm->mmap_legacy_base;
26457+ *begin = mm->mmap_legacy_base;
26458 *end = TASK_SIZE;
26459 }
26460 }
26461@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
26462 struct vm_area_struct *vma;
26463 struct vm_unmapped_area_info info;
26464 unsigned long begin, end;
26465+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
26466
26467 if (flags & MAP_FIXED)
26468 return addr;
26469
26470- find_start_end(flags, &begin, &end);
26471+ find_start_end(mm, flags, &begin, &end);
26472
26473 if (len > end)
26474 return -ENOMEM;
26475
26476+#ifdef CONFIG_PAX_RANDMMAP
26477+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26478+#endif
26479+
26480 if (addr) {
26481 addr = PAGE_ALIGN(addr);
26482 vma = find_vma(mm, addr);
26483- if (end - len >= addr &&
26484- (!vma || addr + len <= vma->vm_start))
26485+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
26486 return addr;
26487 }
26488
26489@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
26490 info.high_limit = end;
26491 info.align_mask = filp ? get_align_mask() : 0;
26492 info.align_offset = pgoff << PAGE_SHIFT;
26493+ info.threadstack_offset = offset;
26494 return vm_unmapped_area(&info);
26495 }
26496
26497@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
26498 struct mm_struct *mm = current->mm;
26499 unsigned long addr = addr0;
26500 struct vm_unmapped_area_info info;
26501+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
26502
26503 /* requested length too big for entire address space */
26504 if (len > TASK_SIZE)
26505@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
26506 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
26507 goto bottomup;
26508
26509+#ifdef CONFIG_PAX_RANDMMAP
26510+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26511+#endif
26512+
26513 /* requesting a specific address */
26514 if (addr) {
26515 addr = PAGE_ALIGN(addr);
26516 vma = find_vma(mm, addr);
26517- if (TASK_SIZE - len >= addr &&
26518- (!vma || addr + len <= vma->vm_start))
26519+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
26520 return addr;
26521 }
26522
26523@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
26524 info.high_limit = mm->mmap_base;
26525 info.align_mask = filp ? get_align_mask() : 0;
26526 info.align_offset = pgoff << PAGE_SHIFT;
26527+ info.threadstack_offset = offset;
26528 addr = vm_unmapped_area(&info);
26529 if (!(addr & ~PAGE_MASK))
26530 return addr;
26531diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
26532index 91a4496..1730bff 100644
26533--- a/arch/x86/kernel/tboot.c
26534+++ b/arch/x86/kernel/tboot.c
26535@@ -221,7 +221,7 @@ static int tboot_setup_sleep(void)
26536
26537 void tboot_shutdown(u32 shutdown_type)
26538 {
26539- void (*shutdown)(void);
26540+ void (* __noreturn shutdown)(void);
26541
26542 if (!tboot_enabled())
26543 return;
26544@@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type)
26545
26546 switch_to_tboot_pt();
26547
26548- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
26549+ shutdown = (void *)tboot->shutdown_entry;
26550 shutdown();
26551
26552 /* should not reach here */
26553@@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
26554 return -ENODEV;
26555 }
26556
26557-static atomic_t ap_wfs_count;
26558+static atomic_unchecked_t ap_wfs_count;
26559
26560 static int tboot_wait_for_aps(int num_aps)
26561 {
26562@@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
26563 {
26564 switch (action) {
26565 case CPU_DYING:
26566- atomic_inc(&ap_wfs_count);
26567+ atomic_inc_unchecked(&ap_wfs_count);
26568 if (num_online_cpus() == 1)
26569- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
26570+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
26571 return NOTIFY_BAD;
26572 break;
26573 }
26574@@ -422,7 +422,7 @@ static __init int tboot_late_init(void)
26575
26576 tboot_create_trampoline();
26577
26578- atomic_set(&ap_wfs_count, 0);
26579+ atomic_set_unchecked(&ap_wfs_count, 0);
26580 register_hotcpu_notifier(&tboot_cpu_notifier);
26581
26582 #ifdef CONFIG_DEBUG_FS
26583diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
26584index 24d3c91..d06b473 100644
26585--- a/arch/x86/kernel/time.c
26586+++ b/arch/x86/kernel/time.c
26587@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
26588 {
26589 unsigned long pc = instruction_pointer(regs);
26590
26591- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
26592+ if (!user_mode(regs) && in_lock_functions(pc)) {
26593 #ifdef CONFIG_FRAME_POINTER
26594- return *(unsigned long *)(regs->bp + sizeof(long));
26595+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
26596 #else
26597 unsigned long *sp =
26598 (unsigned long *)kernel_stack_pointer(regs);
26599@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
26600 * or above a saved flags. Eflags has bits 22-31 zero,
26601 * kernel addresses don't.
26602 */
26603+
26604+#ifdef CONFIG_PAX_KERNEXEC
26605+ return ktla_ktva(sp[0]);
26606+#else
26607 if (sp[0] >> 22)
26608 return sp[0];
26609 if (sp[1] >> 22)
26610 return sp[1];
26611 #endif
26612+
26613+#endif
26614 }
26615 return pc;
26616 }
26617diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
26618index f7fec09..9991981 100644
26619--- a/arch/x86/kernel/tls.c
26620+++ b/arch/x86/kernel/tls.c
26621@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
26622 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
26623 return -EINVAL;
26624
26625+#ifdef CONFIG_PAX_SEGMEXEC
26626+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
26627+ return -EINVAL;
26628+#endif
26629+
26630 set_tls_desc(p, idx, &info, 1);
26631
26632 return 0;
26633@@ -200,7 +205,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
26634
26635 if (kbuf)
26636 info = kbuf;
26637- else if (__copy_from_user(infobuf, ubuf, count))
26638+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
26639 return -EFAULT;
26640 else
26641 info = infobuf;
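/*
 * A sketch of the bounds check the tls.c hunk above adds: validate a
 * caller-controlled length against the destination before copying,
 * rather than trusting __copy_from_user()'s callers to size 'count'.
 * memcpy() and the harness stand in for the user-copy primitive.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

struct user_desc { unsigned int a, b, c, d; };

static int fill_from_caller(struct user_desc *dst, size_t dst_size,
			    const void *src, size_t count)
{
	if (count > dst_size)		/* would overrun the buffer */
		return -EFAULT;
	memcpy(dst, src, count);
	return 0;
}

int main(void)
{
	struct user_desc buf[4];
	unsigned char junk[256] = { 0 };

	printf("exact: %d\n",
	       fill_from_caller(buf, sizeof(buf), junk, sizeof(buf)));
	printf("oversized: %d\n",
	       fill_from_caller(buf, sizeof(buf), junk, sizeof(junk)));
	return 0;
}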
26642diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
26643index 1c113db..287b42e 100644
26644--- a/arch/x86/kernel/tracepoint.c
26645+++ b/arch/x86/kernel/tracepoint.c
26646@@ -9,11 +9,11 @@
26647 #include <linux/atomic.h>
26648
26649 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
26650-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
26651+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
26652 (unsigned long) trace_idt_table };
26653
26654 /* No need to be aligned, but done to keep all IDTs defined the same way. */
26655-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
26656+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
26657
26658 static int trace_irq_vector_refcount;
26659 static DEFINE_MUTEX(irq_vector_mutex);
26660diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
26661index 8c8093b..c93f581 100644
26662--- a/arch/x86/kernel/traps.c
26663+++ b/arch/x86/kernel/traps.c
26664@@ -66,7 +66,7 @@
26665 #include <asm/proto.h>
26666
26667 /* No need to be aligned, but done to keep all IDTs defined the same way. */
26668-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
26669+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
26670 #else
26671 #include <asm/processor-flags.h>
26672 #include <asm/setup.h>
26673@@ -75,7 +75,7 @@ asmlinkage int system_call(void);
26674 #endif
26675
26676 /* Must be page-aligned because the real IDT is used in a fixmap. */
26677-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
26678+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
26679
26680 DECLARE_BITMAP(used_vectors, NR_VECTORS);
26681 EXPORT_SYMBOL_GPL(used_vectors);
26682@@ -107,11 +107,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
26683 }
26684
26685 static int __kprobes
26686-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
26687+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
26688 struct pt_regs *regs, long error_code)
26689 {
26690 #ifdef CONFIG_X86_32
26691- if (regs->flags & X86_VM_MASK) {
26692+ if (v8086_mode(regs)) {
26693 /*
26694 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
26695 * On nmi (interrupt 2), do_trap should not be called.
26696@@ -124,12 +124,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
26697 return -1;
26698 }
26699 #endif
26700- if (!user_mode(regs)) {
26701+ if (!user_mode_novm(regs)) {
26702 if (!fixup_exception(regs)) {
26703 tsk->thread.error_code = error_code;
26704 tsk->thread.trap_nr = trapnr;
26705+
26706+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26707+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
26708+ str = "PAX: suspicious stack segment fault";
26709+#endif
26710+
26711 die(str, regs, error_code);
26712 }
26713+
26714+#ifdef CONFIG_PAX_REFCOUNT
26715+ if (trapnr == 4)
26716+ pax_report_refcount_overflow(regs);
26717+#endif
26718+
26719 return 0;
26720 }
26721
26722@@ -137,7 +149,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
26723 }
26724
26725 static void __kprobes
26726-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
26727+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
26728 long error_code, siginfo_t *info)
26729 {
26730 struct task_struct *tsk = current;
26731@@ -161,7 +173,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
26732 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
26733 printk_ratelimit()) {
26734 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
26735- tsk->comm, tsk->pid, str,
26736+ tsk->comm, task_pid_nr(tsk), str,
26737 regs->ip, regs->sp, error_code);
26738 print_vma_addr(" in ", regs->ip);
26739 pr_cont("\n");
26740@@ -277,7 +289,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
26741 conditional_sti(regs);
26742
26743 #ifdef CONFIG_X86_32
26744- if (regs->flags & X86_VM_MASK) {
26745+ if (v8086_mode(regs)) {
26746 local_irq_enable();
26747 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
26748 goto exit;
26749@@ -285,18 +297,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
26750 #endif
26751
26752 tsk = current;
26753- if (!user_mode(regs)) {
26754+ if (!user_mode_novm(regs)) {
26755 if (fixup_exception(regs))
26756 goto exit;
26757
26758 tsk->thread.error_code = error_code;
26759 tsk->thread.trap_nr = X86_TRAP_GP;
26760 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
26761- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
26762+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
26763+
26764+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26765+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
26766+ die("PAX: suspicious general protection fault", regs, error_code);
26767+ else
26768+#endif
26769+
26770 die("general protection fault", regs, error_code);
26771+ }
26772 goto exit;
26773 }
26774
26775+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
26776+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
26777+ struct mm_struct *mm = tsk->mm;
26778+ unsigned long limit;
26779+
26780+ down_write(&mm->mmap_sem);
26781+ limit = mm->context.user_cs_limit;
26782+ if (limit < TASK_SIZE) {
26783+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
26784+ up_write(&mm->mmap_sem);
26785+ return;
26786+ }
26787+ up_write(&mm->mmap_sem);
26788+ }
26789+#endif
26790+
26791 tsk->thread.error_code = error_code;
26792 tsk->thread.trap_nr = X86_TRAP_GP;
26793
26794@@ -457,7 +493,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
26795 /* It's safe to allow irq's after DR6 has been saved */
26796 preempt_conditional_sti(regs);
26797
26798- if (regs->flags & X86_VM_MASK) {
26799+ if (v8086_mode(regs)) {
26800 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
26801 X86_TRAP_DB);
26802 preempt_conditional_cli(regs);
26803@@ -472,7 +508,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
26804 * We already checked v86 mode above, so we can check for kernel mode
26805 * by just checking the CPL of CS.
26806 */
26807- if ((dr6 & DR_STEP) && !user_mode(regs)) {
26808+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
26809 tsk->thread.debugreg6 &= ~DR_STEP;
26810 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
26811 regs->flags &= ~X86_EFLAGS_TF;
26812@@ -504,7 +540,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
26813 return;
26814 conditional_sti(regs);
26815
26816- if (!user_mode_vm(regs))
26817+ if (!user_mode(regs))
26818 {
26819 if (!fixup_exception(regs)) {
26820 task->thread.error_code = error_code;
26821diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
26822index 2ed8459..7cf329f 100644
26823--- a/arch/x86/kernel/uprobes.c
26824+++ b/arch/x86/kernel/uprobes.c
26825@@ -629,7 +629,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
26826 int ret = NOTIFY_DONE;
26827
26828 /* We are only interested in userspace traps */
26829- if (regs && !user_mode_vm(regs))
26830+ if (regs && !user_mode(regs))
26831 return NOTIFY_DONE;
26832
26833 switch (val) {
26834@@ -719,7 +719,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
26835
26836 if (ncopied != rasize) {
26837 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
26838- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
26839+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
26840
26841 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
26842 }
26843diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
26844index b9242ba..50c5edd 100644
26845--- a/arch/x86/kernel/verify_cpu.S
26846+++ b/arch/x86/kernel/verify_cpu.S
26847@@ -20,6 +20,7 @@
26848 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
26849 * arch/x86/kernel/trampoline_64.S: secondary processor verification
26850 * arch/x86/kernel/head_32.S: processor startup
26851+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
26852 *
26853 * verify_cpu, returns the status of longmode and SSE in register %eax.
26854 * 0: Success 1: Failure
26855diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
26856index e8edcf5..27f9344 100644
26857--- a/arch/x86/kernel/vm86_32.c
26858+++ b/arch/x86/kernel/vm86_32.c
26859@@ -44,6 +44,7 @@
26860 #include <linux/ptrace.h>
26861 #include <linux/audit.h>
26862 #include <linux/stddef.h>
26863+#include <linux/grsecurity.h>
26864
26865 #include <asm/uaccess.h>
26866 #include <asm/io.h>
26867@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
26868 do_exit(SIGSEGV);
26869 }
26870
26871- tss = &per_cpu(init_tss, get_cpu());
26872+ tss = init_tss + get_cpu();
26873 current->thread.sp0 = current->thread.saved_sp0;
26874 current->thread.sysenter_cs = __KERNEL_CS;
26875 load_sp0(tss, &current->thread);
26876@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
26877
26878 if (tsk->thread.saved_sp0)
26879 return -EPERM;
26880+
26881+#ifdef CONFIG_GRKERNSEC_VM86
26882+ if (!capable(CAP_SYS_RAWIO)) {
26883+ gr_handle_vm86();
26884+ return -EPERM;
26885+ }
26886+#endif
26887+
26888 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
26889 offsetof(struct kernel_vm86_struct, vm86plus) -
26890 sizeof(info.regs));
26891@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
26892 int tmp;
26893 struct vm86plus_struct __user *v86;
26894
26895+#ifdef CONFIG_GRKERNSEC_VM86
26896+ if (!capable(CAP_SYS_RAWIO)) {
26897+ gr_handle_vm86();
26898+ return -EPERM;
26899+ }
26900+#endif
26901+
26902 tsk = current;
26903 switch (cmd) {
26904 case VM86_REQUEST_IRQ:
26905@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
26906 tsk->thread.saved_fs = info->regs32->fs;
26907 tsk->thread.saved_gs = get_user_gs(info->regs32);
26908
26909- tss = &per_cpu(init_tss, get_cpu());
26910+ tss = init_tss + get_cpu();
26911 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
26912 if (cpu_has_sep)
26913 tsk->thread.sysenter_cs = 0;
26914@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
26915 goto cannot_handle;
26916 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
26917 goto cannot_handle;
26918- intr_ptr = (unsigned long __user *) (i << 2);
26919+ intr_ptr = (__force unsigned long __user *) (i << 2);
26920 if (get_user(segoffs, intr_ptr))
26921 goto cannot_handle;
26922 if ((segoffs >> 16) == BIOSSEG)
26923diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
26924index 10c4f30..65408b9 100644
26925--- a/arch/x86/kernel/vmlinux.lds.S
26926+++ b/arch/x86/kernel/vmlinux.lds.S
26927@@ -26,6 +26,13 @@
26928 #include <asm/page_types.h>
26929 #include <asm/cache.h>
26930 #include <asm/boot.h>
26931+#include <asm/segment.h>
26932+
26933+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26934+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
26935+#else
26936+#define __KERNEL_TEXT_OFFSET 0
26937+#endif
26938
26939 #undef i386 /* in case the preprocessor is a 32bit one */
26940
26941@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
26942
26943 PHDRS {
26944 text PT_LOAD FLAGS(5); /* R_E */
26945+#ifdef CONFIG_X86_32
26946+ module PT_LOAD FLAGS(5); /* R_E */
26947+#endif
26948+#ifdef CONFIG_XEN
26949+ rodata PT_LOAD FLAGS(5); /* R_E */
26950+#else
26951+ rodata PT_LOAD FLAGS(4); /* R__ */
26952+#endif
26953 data PT_LOAD FLAGS(6); /* RW_ */
26954-#ifdef CONFIG_X86_64
26955+ init.begin PT_LOAD FLAGS(6); /* RW_ */
26956 #ifdef CONFIG_SMP
26957 percpu PT_LOAD FLAGS(6); /* RW_ */
26958 #endif
26959+ text.init PT_LOAD FLAGS(5); /* R_E */
26960+ text.exit PT_LOAD FLAGS(5); /* R_E */
26961 init PT_LOAD FLAGS(7); /* RWE */
26962-#endif
26963 note PT_NOTE FLAGS(0); /* ___ */
26964 }
26965
26966 SECTIONS
26967 {
26968 #ifdef CONFIG_X86_32
26969- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
26970- phys_startup_32 = startup_32 - LOAD_OFFSET;
26971+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
26972 #else
26973- . = __START_KERNEL;
26974- phys_startup_64 = startup_64 - LOAD_OFFSET;
26975+ . = __START_KERNEL;
26976 #endif
26977
26978 /* Text and read-only data */
26979- .text : AT(ADDR(.text) - LOAD_OFFSET) {
26980- _text = .;
26981+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
26982 /* bootstrapping code */
26983+#ifdef CONFIG_X86_32
26984+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
26985+#else
26986+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
26987+#endif
26988+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
26989+ _text = .;
26990 HEAD_TEXT
26991 . = ALIGN(8);
26992 _stext = .;
26993@@ -104,13 +124,47 @@ SECTIONS
26994 IRQENTRY_TEXT
26995 *(.fixup)
26996 *(.gnu.warning)
26997- /* End of text section */
26998- _etext = .;
26999 } :text = 0x9090
27000
27001- NOTES :text :note
27002+ . += __KERNEL_TEXT_OFFSET;
27003
27004- EXCEPTION_TABLE(16) :text = 0x9090
27005+#ifdef CONFIG_X86_32
27006+ . = ALIGN(PAGE_SIZE);
27007+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
27008+
27009+#ifdef CONFIG_PAX_KERNEXEC
27010+ MODULES_EXEC_VADDR = .;
27011+ BYTE(0)
27012+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
27013+ . = ALIGN(HPAGE_SIZE) - 1;
27014+ MODULES_EXEC_END = .;
27015+#endif
27016+
27017+ } :module
27018+#endif
27019+
27020+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
27021+ /* End of text section */
27022+ BYTE(0)
27023+ _etext = . - __KERNEL_TEXT_OFFSET;
27024+ }
27025+
27026+#ifdef CONFIG_X86_32
27027+ . = ALIGN(PAGE_SIZE);
27028+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
27029+ . = ALIGN(PAGE_SIZE);
27030+ *(.empty_zero_page)
27031+ *(.initial_pg_fixmap)
27032+ *(.initial_pg_pmd)
27033+ *(.initial_page_table)
27034+ *(.swapper_pg_dir)
27035+ } :rodata
27036+#endif
27037+
27038+ . = ALIGN(PAGE_SIZE);
27039+ NOTES :rodata :note
27040+
27041+ EXCEPTION_TABLE(16) :rodata
27042
27043 #if defined(CONFIG_DEBUG_RODATA)
27044 /* .text should occupy whole number of pages */
27045@@ -122,16 +176,20 @@ SECTIONS
27046
27047 /* Data */
27048 .data : AT(ADDR(.data) - LOAD_OFFSET) {
27049+
27050+#ifdef CONFIG_PAX_KERNEXEC
27051+ . = ALIGN(HPAGE_SIZE);
27052+#else
27053+ . = ALIGN(PAGE_SIZE);
27054+#endif
27055+
27056 /* Start of data section */
27057 _sdata = .;
27058
27059 /* init_task */
27060 INIT_TASK_DATA(THREAD_SIZE)
27061
27062-#ifdef CONFIG_X86_32
27063- /* 32 bit has nosave before _edata */
27064 NOSAVE_DATA
27065-#endif
27066
27067 PAGE_ALIGNED_DATA(PAGE_SIZE)
27068
27069@@ -172,12 +230,19 @@ SECTIONS
27070 #endif /* CONFIG_X86_64 */
27071
27072 /* Init code and data - will be freed after init */
27073- . = ALIGN(PAGE_SIZE);
27074 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
27075+ BYTE(0)
27076+
27077+#ifdef CONFIG_PAX_KERNEXEC
27078+ . = ALIGN(HPAGE_SIZE);
27079+#else
27080+ . = ALIGN(PAGE_SIZE);
27081+#endif
27082+
27083 __init_begin = .; /* paired with __init_end */
27084- }
27085+ } :init.begin
27086
27087-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
27088+#ifdef CONFIG_SMP
27089 /*
27090 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
27091 * output PHDR, so the next output section - .init.text - should
27092@@ -186,12 +251,27 @@ SECTIONS
27093 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
27094 #endif
27095
27096- INIT_TEXT_SECTION(PAGE_SIZE)
27097-#ifdef CONFIG_X86_64
27098- :init
27099-#endif
27100+ . = ALIGN(PAGE_SIZE);
27101+ init_begin = .;
27102+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
27103+ VMLINUX_SYMBOL(_sinittext) = .;
27104+ INIT_TEXT
27105+ VMLINUX_SYMBOL(_einittext) = .;
27106+ . = ALIGN(PAGE_SIZE);
27107+ } :text.init
27108
27109- INIT_DATA_SECTION(16)
27110+ /*
27111+	 * .exit.text is discarded at runtime, not link time, to deal with
27112+ * references from .altinstructions and .eh_frame
27113+ */
27114+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
27115+ EXIT_TEXT
27116+ . = ALIGN(16);
27117+ } :text.exit
27118+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
27119+
27120+ . = ALIGN(PAGE_SIZE);
27121+ INIT_DATA_SECTION(16) :init
27122
27123 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
27124 __x86_cpu_dev_start = .;
27125@@ -253,19 +333,12 @@ SECTIONS
27126 }
27127
27128 . = ALIGN(8);
27129- /*
27130- * .exit.text is discard at runtime, not link time, to deal with
27131- * references from .altinstructions and .eh_frame
27132- */
27133- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
27134- EXIT_TEXT
27135- }
27136
27137 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
27138 EXIT_DATA
27139 }
27140
27141-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
27142+#ifndef CONFIG_SMP
27143 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
27144 #endif
27145
27146@@ -284,16 +357,10 @@ SECTIONS
27147 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
27148 __smp_locks = .;
27149 *(.smp_locks)
27150- . = ALIGN(PAGE_SIZE);
27151 __smp_locks_end = .;
27152+ . = ALIGN(PAGE_SIZE);
27153 }
27154
27155-#ifdef CONFIG_X86_64
27156- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
27157- NOSAVE_DATA
27158- }
27159-#endif
27160-
27161 /* BSS */
27162 . = ALIGN(PAGE_SIZE);
27163 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
27164@@ -309,6 +376,7 @@ SECTIONS
27165 __brk_base = .;
27166 . += 64 * 1024; /* 64k alignment slop space */
27167 *(.brk_reservation) /* areas brk users have reserved */
27168+ . = ALIGN(HPAGE_SIZE);
27169 __brk_limit = .;
27170 }
27171
27172@@ -335,13 +403,12 @@ SECTIONS
27173 * for the boot processor.
27174 */
27175 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
27176-INIT_PER_CPU(gdt_page);
27177 INIT_PER_CPU(irq_stack_union);
27178
27179 /*
27180 * Build-time check on the image size:
27181 */
27182-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
27183+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
27184 "kernel image bigger than KERNEL_IMAGE_SIZE");
27185
27186 #ifdef CONFIG_SMP
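The PHDRS rework above puts kernel text, module space, read-only data, per-CPU data and init code into separate program headers so that, outside the Xen case, read-only data no longer shares an executable segment with text. As a quick way to see the effect, here is a small standalone C sketch (not part of the patch) that scans an ELF image's program headers for PT_LOAD segments that are both writable and executable; error handling is kept minimal:

/* wx_check.c - report PT_LOAD segments mapped both writable and executable.
 * Build: cc -o wx_check wx_check.c ; run: ./wx_check vmlinux
 * Assumes a 64-bit ELF in the native byte order. */
#include <elf.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	if (argc != 2) {
		fprintf(stderr, "usage: %s <elf>\n", argv[0]);
		return 1;
	}
	FILE *f = fopen(argv[1], "rb");
	if (!f) { perror("fopen"); return 1; }
	Elf64_Ehdr eh;
	if (fread(&eh, sizeof(eh), 1, f) != 1) { perror("fread"); return 1; }
	for (int i = 0; i < eh.e_phnum; i++) {
		Elf64_Phdr ph;
		fseek(f, (long)(eh.e_phoff + (Elf64_Off)i * eh.e_phentsize), SEEK_SET);
		if (fread(&ph, sizeof(ph), 1, f) != 1)
			break;
		/* a W+X load segment defeats the separation the PHDRS above enforce */
		if (ph.p_type == PT_LOAD && (ph.p_flags & PF_W) && (ph.p_flags & PF_X))
			printf("segment %d: W+X at 0x%llx\n", i,
			       (unsigned long long)ph.p_vaddr);
	}
	fclose(f);
	return 0;
}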
27187diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
27188index 1f96f93..d5c8f7a 100644
27189--- a/arch/x86/kernel/vsyscall_64.c
27190+++ b/arch/x86/kernel/vsyscall_64.c
27191@@ -56,15 +56,13 @@
27192 DEFINE_VVAR(int, vgetcpu_mode);
27193 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
27194
27195-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
27196+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
27197
27198 static int __init vsyscall_setup(char *str)
27199 {
27200 if (str) {
27201 if (!strcmp("emulate", str))
27202 vsyscall_mode = EMULATE;
27203- else if (!strcmp("native", str))
27204- vsyscall_mode = NATIVE;
27205 else if (!strcmp("none", str))
27206 vsyscall_mode = NONE;
27207 else
27208@@ -323,8 +321,7 @@ do_ret:
27209 return true;
27210
27211 sigsegv:
27212- force_sig(SIGSEGV, current);
27213- return true;
27214+ do_group_exit(SIGKILL);
27215 }
27216
27217 /*
27218@@ -377,10 +374,7 @@ void __init map_vsyscall(void)
27219 extern char __vvar_page;
27220 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
27221
27222- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
27223- vsyscall_mode == NATIVE
27224- ? PAGE_KERNEL_VSYSCALL
27225- : PAGE_KERNEL_VVAR);
27226+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
27227 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
27228 (unsigned long)VSYSCALL_START);
27229
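With the native mode gone, the fixed-address vsyscall page is only ever mapped non-executable (PAGE_KERNEL_VVAR) and every call into it traps and is emulated, while the error path now kills the task instead of raising SIGSEGV. A small userspace probe, assuming the conventional x86-64 fixed address of the legacy time() entry (0xffffffffff600400), shows the difference between the remaining modes:

/* vsyscall_probe.c - call the legacy time() vsyscall at its fixed address.
 * Under vsyscall=emulate this traps into the kernel and still returns the
 * time; under vsyscall=none the process dies instead. x86-64 only. */
#include <stdio.h>
#include <time.h>

#define VSYS_TIME 0xffffffffff600400UL	/* conventional fixed address of time() */

int main(void)
{
	time_t (*vtime)(time_t *) = (time_t (*)(time_t *))VSYS_TIME;
	printf("vsyscall time() = %ld\n", (long)vtime(NULL));
	return 0;
}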
27230diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
27231index b014d94..e775258 100644
27232--- a/arch/x86/kernel/x8664_ksyms_64.c
27233+++ b/arch/x86/kernel/x8664_ksyms_64.c
27234@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
27235 EXPORT_SYMBOL(copy_user_generic_unrolled);
27236 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
27237 EXPORT_SYMBOL(__copy_user_nocache);
27238-EXPORT_SYMBOL(_copy_from_user);
27239-EXPORT_SYMBOL(_copy_to_user);
27240
27241 EXPORT_SYMBOL(copy_page);
27242 EXPORT_SYMBOL(clear_page);
27243@@ -66,3 +64,7 @@ EXPORT_SYMBOL(empty_zero_page);
27244 #ifndef CONFIG_PARAVIRT
27245 EXPORT_SYMBOL(native_load_gs_index);
27246 #endif
27247+
27248+#ifdef CONFIG_PAX_PER_CPU_PGD
27249+EXPORT_SYMBOL(cpu_pgd);
27250+#endif
27251diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
27252index 8ce0072..431a0e7 100644
27253--- a/arch/x86/kernel/x86_init.c
27254+++ b/arch/x86/kernel/x86_init.c
27255@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
27256 static void default_nmi_init(void) { };
27257 static int default_i8042_detect(void) { return 1; };
27258
27259-struct x86_platform_ops x86_platform = {
27260+struct x86_platform_ops x86_platform __read_only = {
27261 .calibrate_tsc = native_calibrate_tsc,
27262 .get_wallclock = mach_get_cmos_time,
27263 .set_wallclock = mach_set_rtc_mmss,
27264@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
27265 EXPORT_SYMBOL_GPL(x86_platform);
27266
27267 #if defined(CONFIG_PCI_MSI)
27268-struct x86_msi_ops x86_msi = {
27269+struct x86_msi_ops x86_msi __read_only = {
27270 .setup_msi_irqs = native_setup_msi_irqs,
27271 .compose_msi_msg = native_compose_msi_msg,
27272 .teardown_msi_irq = native_teardown_msi_irq,
27273@@ -140,7 +140,7 @@ void arch_restore_msi_irqs(struct pci_dev *dev, int irq)
27274 }
27275 #endif
27276
27277-struct x86_io_apic_ops x86_io_apic_ops = {
27278+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
27279 .init = native_io_apic_init_mappings,
27280 .read = native_io_apic_read,
27281 .write = native_io_apic_write,
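Marking x86_platform, x86_msi and x86_io_apic_ops __read_only moves these ops tables into memory that is write-protected after boot, so any later repatching has to lift the protection around the store, exactly as later hunks in this patch do with pax_open_kernel()/pax_close_kernel(). A kernel-style sketch of that pattern (disable_msi is a hypothetical replacement handler, not from the patch):

/* Sketch: patching a field of an ops structure that lives in a
 * read-only section. The write is bracketed the way the vmx.c hunks
 * below bracket their kvm_x86_ops updates. */
static int disable_msi(struct pci_dev *dev, int nvec, int type)
{
	return -ENOSYS;		/* MSI unavailable on this platform */
}

static void __init platform_quirk(void)
{
	pax_open_kernel();	/* temporarily allow writes to rodata */
	*(void **)&x86_msi.setup_msi_irqs = disable_msi;
	pax_close_kernel();	/* restore write protection */
}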
27282diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
27283index 422fd82..c3687ca 100644
27284--- a/arch/x86/kernel/xsave.c
27285+++ b/arch/x86/kernel/xsave.c
27286@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
27287 {
27288 int err;
27289
27290+ buf = (struct xsave_struct __user *)____m(buf);
27291 if (use_xsave())
27292 err = xsave_user(buf);
27293 else if (use_fxsr())
27294@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
27295 */
27296 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
27297 {
27298+ buf = (void __user *)____m(buf);
27299 if (use_xsave()) {
27300 if ((unsigned long)buf % 64 || fx_only) {
27301 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
27302diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
27303index b110fe6..d9c19f2 100644
27304--- a/arch/x86/kvm/cpuid.c
27305+++ b/arch/x86/kvm/cpuid.c
27306@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
27307 struct kvm_cpuid2 *cpuid,
27308 struct kvm_cpuid_entry2 __user *entries)
27309 {
27310- int r;
27311+ int r, i;
27312
27313 r = -E2BIG;
27314 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
27315 goto out;
27316 r = -EFAULT;
27317- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
27318- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
27319+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
27320 goto out;
27321+ for (i = 0; i < cpuid->nent; ++i) {
27322+ struct kvm_cpuid_entry2 cpuid_entry;
27323+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
27324+ goto out;
27325+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
27326+ }
27327 vcpu->arch.cpuid_nent = cpuid->nent;
27328 kvm_apic_set_version(vcpu);
27329 kvm_x86_ops->cpuid_update(vcpu);
27330@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
27331 struct kvm_cpuid2 *cpuid,
27332 struct kvm_cpuid_entry2 __user *entries)
27333 {
27334- int r;
27335+ int r, i;
27336
27337 r = -E2BIG;
27338 if (cpuid->nent < vcpu->arch.cpuid_nent)
27339 goto out;
27340 r = -EFAULT;
27341- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
27342- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
27343+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
27344 goto out;
27345+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
27346+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
27347+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
27348+ goto out;
27349+ }
27350 return 0;
27351
27352 out:
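The rewritten ioctl paths replace one large copy over the whole array with an access_ok() range check followed by a per-entry bounce copy through a stack variable, so each __copy_from_user()/__copy_to_user() moves a fixed sizeof(entry) regardless of the user-supplied count. A kernel-style sketch of the import side of the idiom, with hypothetical names standing in for the KVM cpuid structures:

/* Sketch of the bounded-copy idiom; struct entry, MAX_ENTRIES and
 * `table` stand in for the kvm_cpuid_entry2 machinery above. */
struct entry { u32 data[10]; };
#define MAX_ENTRIES 80

static struct entry table[MAX_ENTRIES];

static long import_entries(const struct entry __user *src, u32 nent)
{
	u32 i;

	if (nent > MAX_ENTRIES)
		return -E2BIG;
	/* one range check up front... */
	if (!access_ok(VERIFY_READ, src, nent * sizeof(struct entry)))
		return -EFAULT;
	for (i = 0; i < nent; i++) {
		struct entry tmp;	/* ...then a fixed-size bounce copy per entry */

		if (__copy_from_user(&tmp, src + i, sizeof(tmp)))
			return -EFAULT;
		table[i] = tmp;
	}
	return 0;
}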
27353diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
27354index dec48bf..f4d21f7 100644
27355--- a/arch/x86/kvm/lapic.c
27356+++ b/arch/x86/kvm/lapic.c
27357@@ -55,7 +55,7 @@
27358 #define APIC_BUS_CYCLE_NS 1
27359
27360 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
27361-#define apic_debug(fmt, arg...)
27362+#define apic_debug(fmt, arg...) do {} while (0)
27363
27364 #define APIC_LVT_NUM 6
27365 /* 14 is the version for Xeon and Pentium 8.4.8*/
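Turning the compiled-out apic_debug() stub into do {} while (0) keeps it grammatically identical to the printk-backed variant: it demands the trailing semicolon and forms exactly one statement, where a bare empty expansion leaves `if (cond) apic_debug(...);` with an empty body that trips -Wempty-body. For example:

#define apic_debug_old(fmt, arg...)			/* expands to nothing */
#define apic_debug_new(fmt, arg...)	do {} while (0)

void demo(int cond)
{
	if (cond)
		apic_debug_old("hi\n");	/* body is a bare `;`, -Wempty-body warns */

	if (cond)
		apic_debug_new("hi\n");	/* a real (empty) statement, no warning */
}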
27366diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
27367index ad75d77..a679d32 100644
27368--- a/arch/x86/kvm/paging_tmpl.h
27369+++ b/arch/x86/kvm/paging_tmpl.h
27370@@ -331,7 +331,7 @@ retry_walk:
27371 if (unlikely(kvm_is_error_hva(host_addr)))
27372 goto error;
27373
27374- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
27375+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
27376 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
27377 goto error;
27378 walker->ptep_user[walker->level - 1] = ptep_user;
27379diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
27380index c0bc803..6837a50 100644
27381--- a/arch/x86/kvm/svm.c
27382+++ b/arch/x86/kvm/svm.c
27383@@ -3501,7 +3501,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
27384 int cpu = raw_smp_processor_id();
27385
27386 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
27387+
27388+ pax_open_kernel();
27389 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
27390+ pax_close_kernel();
27391+
27392 load_TR_desc();
27393 }
27394
27395@@ -3902,6 +3906,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
27396 #endif
27397 #endif
27398
27399+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
27400+ __set_fs(current_thread_info()->addr_limit);
27401+#endif
27402+
27403 reload_tss(vcpu);
27404
27405 local_irq_disable();
27406diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
27407index 2b2fce1..da76be4 100644
27408--- a/arch/x86/kvm/vmx.c
27409+++ b/arch/x86/kvm/vmx.c
27410@@ -1316,12 +1316,12 @@ static void vmcs_write64(unsigned long field, u64 value)
27411 #endif
27412 }
27413
27414-static void vmcs_clear_bits(unsigned long field, u32 mask)
27415+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
27416 {
27417 vmcs_writel(field, vmcs_readl(field) & ~mask);
27418 }
27419
27420-static void vmcs_set_bits(unsigned long field, u32 mask)
27421+static void vmcs_set_bits(unsigned long field, unsigned long mask)
27422 {
27423 vmcs_writel(field, vmcs_readl(field) | mask);
27424 }
27425@@ -1522,7 +1522,11 @@ static void reload_tss(void)
27426 struct desc_struct *descs;
27427
27428 descs = (void *)gdt->address;
27429+
27430+ pax_open_kernel();
27431 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
27432+ pax_close_kernel();
27433+
27434 load_TR_desc();
27435 }
27436
27437@@ -1746,6 +1750,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
27438 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
27439 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
27440
27441+#ifdef CONFIG_PAX_PER_CPU_PGD
27442+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
27443+#endif
27444+
27445 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
27446 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
27447 vmx->loaded_vmcs->cpu = cpu;
27448@@ -2037,7 +2045,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
27449 * reads and returns guest's timestamp counter "register"
27450 * guest_tsc = host_tsc + tsc_offset -- 21.3
27451 */
27452-static u64 guest_read_tsc(void)
27453+static u64 __intentional_overflow(-1) guest_read_tsc(void)
27454 {
27455 u64 host_tsc, tsc_offset;
27456
27457@@ -2982,8 +2990,11 @@ static __init int hardware_setup(void)
27458 if (!cpu_has_vmx_flexpriority())
27459 flexpriority_enabled = 0;
27460
27461- if (!cpu_has_vmx_tpr_shadow())
27462- kvm_x86_ops->update_cr8_intercept = NULL;
27463+ if (!cpu_has_vmx_tpr_shadow()) {
27464+ pax_open_kernel();
27465+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
27466+ pax_close_kernel();
27467+ }
27468
27469 if (enable_ept && !cpu_has_vmx_ept_2m_page())
27470 kvm_disable_largepages();
27471@@ -2994,13 +3005,15 @@ static __init int hardware_setup(void)
27472 if (!cpu_has_vmx_apicv())
27473 enable_apicv = 0;
27474
27475+ pax_open_kernel();
27476 if (enable_apicv)
27477- kvm_x86_ops->update_cr8_intercept = NULL;
27478+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
27479 else {
27480- kvm_x86_ops->hwapic_irr_update = NULL;
27481- kvm_x86_ops->deliver_posted_interrupt = NULL;
27482- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
27483+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
27484+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
27485+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
27486 }
27487+ pax_close_kernel();
27488
27489 if (nested)
27490 nested_vmx_setup_ctls_msrs();
27491@@ -4127,7 +4140,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
27492
27493 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
27494 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
27495+
27496+#ifndef CONFIG_PAX_PER_CPU_PGD
27497 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
27498+#endif
27499
27500 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
27501 #ifdef CONFIG_X86_64
27502@@ -4149,7 +4165,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
27503 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
27504 vmx->host_idt_base = dt.address;
27505
27506- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
27507+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
27508
27509 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
27510 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
27511@@ -7191,6 +7207,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
27512 "jmp 2f \n\t"
27513 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
27514 "2: "
27515+
27516+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27517+ "ljmp %[cs],$3f\n\t"
27518+ "3: "
27519+#endif
27520+
27521 /* Save guest registers, load host registers, keep flags */
27522 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
27523 "pop %0 \n\t"
27524@@ -7243,6 +7265,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
27525 #endif
27526 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
27527 [wordsize]"i"(sizeof(ulong))
27528+
27529+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27530+ ,[cs]"i"(__KERNEL_CS)
27531+#endif
27532+
27533 : "cc", "memory"
27534 #ifdef CONFIG_X86_64
27535 , "rax", "rbx", "rdi", "rsi"
27536@@ -7256,7 +7283,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
27537 if (debugctlmsr)
27538 update_debugctlmsr(debugctlmsr);
27539
27540-#ifndef CONFIG_X86_64
27541+#ifdef CONFIG_X86_32
27542 /*
27543 * The sysexit path does not restore ds/es, so we must set them to
27544 * a reasonable value ourselves.
27545@@ -7265,8 +7292,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
27546 	 * may be executed in interrupt context, which saves and restores segments
27547 * around it, nullifying its effect.
27548 */
27549- loadsegment(ds, __USER_DS);
27550- loadsegment(es, __USER_DS);
27551+ loadsegment(ds, __KERNEL_DS);
27552+ loadsegment(es, __KERNEL_DS);
27553+ loadsegment(ss, __KERNEL_DS);
27554+
27555+#ifdef CONFIG_PAX_KERNEXEC
27556+ loadsegment(fs, __KERNEL_PERCPU);
27557+#endif
27558+
27559+#ifdef CONFIG_PAX_MEMORY_UDEREF
27560+ __set_fs(current_thread_info()->addr_limit);
27561+#endif
27562+
27563 #endif
27564
27565 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
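The __intentional_overflow(-1) marker tells the size-overflow instrumentation that the addition inside guest_read_tsc() may wrap: guest TSC values are defined modulo 2^64, so host_tsc + tsc_offset overflowing is correct behaviour, not a bug to report. In plain C terms:

#include <stdint.h>
#include <stdio.h>

/* Guest TSC is host TSC plus a (possibly "negative") offset, computed
 * modulo 2^64; unsigned wraparound is the intended behaviour. */
static uint64_t guest_tsc(uint64_t host_tsc, uint64_t tsc_offset)
{
	return host_tsc + tsc_offset;	/* wraps mod 2^64 by definition */
}

int main(void)
{
	/* an offset of -1000 is stored as 2^64 - 1000; the sum wraps back */
	uint64_t off = (uint64_t)-1000;
	printf("%llu\n", (unsigned long long)guest_tsc(5000, off)); /* 4000 */
	return 0;
}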
27566diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
27567index eb9b9c9..0f30b12 100644
27568--- a/arch/x86/kvm/x86.c
27569+++ b/arch/x86/kvm/x86.c
27570@@ -1779,8 +1779,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
27571 {
27572 struct kvm *kvm = vcpu->kvm;
27573 int lm = is_long_mode(vcpu);
27574- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
27575- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
27576+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
27577+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
27578 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
27579 : kvm->arch.xen_hvm_config.blob_size_32;
27580 u32 page_num = data & ~PAGE_MASK;
27581@@ -2663,6 +2663,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
27582 if (n < msr_list.nmsrs)
27583 goto out;
27584 r = -EFAULT;
27585+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
27586+ goto out;
27587 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
27588 num_msrs_to_save * sizeof(u32)))
27589 goto out;
27590@@ -5461,7 +5463,7 @@ static struct notifier_block pvclock_gtod_notifier = {
27591 };
27592 #endif
27593
27594-int kvm_arch_init(void *opaque)
27595+int kvm_arch_init(const void *opaque)
27596 {
27597 int r;
27598 struct kvm_x86_ops *ops = opaque;
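The added check in kvm_arch_dev_ioctl() makes sure num_msrs_to_save can never exceed the msrs_to_save array before its contents are copied out, so a corrupted or racing count cannot read past the array. Reduced to its essentials, the guard is an ordinary bounds check, sketched here kernel-style with stand-in names:

/* Sketch: never copy more elements than the source array holds.
 * `table`/`count` stand in for msrs_to_save/num_msrs_to_save. */
static u32 table[64];
static unsigned int count;	/* filled in elsewhere at init time */

static long export_table(u32 __user *dst, unsigned int requested)
{
	if (requested < count)
		return -E2BIG;		/* caller's buffer too small */
	if (count > ARRAY_SIZE(table))
		return -EFAULT;		/* never read past the array itself */
	if (copy_to_user(dst, table, count * sizeof(u32)))
		return -EFAULT;
	return 0;
}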
27599diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
27600index bdf8532..f63c587 100644
27601--- a/arch/x86/lguest/boot.c
27602+++ b/arch/x86/lguest/boot.c
27603@@ -1206,9 +1206,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
27604 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
27605 * Launcher to reboot us.
27606 */
27607-static void lguest_restart(char *reason)
27608+static __noreturn void lguest_restart(char *reason)
27609 {
27610 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
27611+ BUG();
27612 }
27613
27614 /*G:050
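Annotating lguest_restart() as __noreturn matches the contract of the restart hook, and the BUG() after the hypercall backs the annotation at runtime, since the compiler may now omit any return path. The same pattern in standalone C, with abort() playing the role of BUG() and the reboot hypercall reduced to a stub:

#include <stdio.h>
#include <stdlib.h>

static void request_reboot(const char *reason)
{
	/* stands in for hcall(LHCALL_SHUTDOWN, ...) */
	fprintf(stderr, "rebooting: %s\n", reason);
}

__attribute__((noreturn)) static void restart(const char *reason)
{
	request_reboot(reason);	/* expected never to come back... */
	abort();		/* ...and we trap here if it somehow does */
}

int main(void)
{
	restart("demo");
}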
27615diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
27616index 00933d5..3a64af9 100644
27617--- a/arch/x86/lib/atomic64_386_32.S
27618+++ b/arch/x86/lib/atomic64_386_32.S
27619@@ -48,6 +48,10 @@ BEGIN(read)
27620 movl (v), %eax
27621 movl 4(v), %edx
27622 RET_ENDP
27623+BEGIN(read_unchecked)
27624+ movl (v), %eax
27625+ movl 4(v), %edx
27626+RET_ENDP
27627 #undef v
27628
27629 #define v %esi
27630@@ -55,6 +59,10 @@ BEGIN(set)
27631 movl %ebx, (v)
27632 movl %ecx, 4(v)
27633 RET_ENDP
27634+BEGIN(set_unchecked)
27635+ movl %ebx, (v)
27636+ movl %ecx, 4(v)
27637+RET_ENDP
27638 #undef v
27639
27640 #define v %esi
27641@@ -70,6 +78,20 @@ RET_ENDP
27642 BEGIN(add)
27643 addl %eax, (v)
27644 adcl %edx, 4(v)
27645+
27646+#ifdef CONFIG_PAX_REFCOUNT
27647+ jno 0f
27648+ subl %eax, (v)
27649+ sbbl %edx, 4(v)
27650+ int $4
27651+0:
27652+ _ASM_EXTABLE(0b, 0b)
27653+#endif
27654+
27655+RET_ENDP
27656+BEGIN(add_unchecked)
27657+ addl %eax, (v)
27658+ adcl %edx, 4(v)
27659 RET_ENDP
27660 #undef v
27661
27662@@ -77,6 +99,24 @@ RET_ENDP
27663 BEGIN(add_return)
27664 addl (v), %eax
27665 adcl 4(v), %edx
27666+
27667+#ifdef CONFIG_PAX_REFCOUNT
27668+ into
27669+1234:
27670+ _ASM_EXTABLE(1234b, 2f)
27671+#endif
27672+
27673+ movl %eax, (v)
27674+ movl %edx, 4(v)
27675+
27676+#ifdef CONFIG_PAX_REFCOUNT
27677+2:
27678+#endif
27679+
27680+RET_ENDP
27681+BEGIN(add_return_unchecked)
27682+ addl (v), %eax
27683+ adcl 4(v), %edx
27684 movl %eax, (v)
27685 movl %edx, 4(v)
27686 RET_ENDP
27687@@ -86,6 +126,20 @@ RET_ENDP
27688 BEGIN(sub)
27689 subl %eax, (v)
27690 sbbl %edx, 4(v)
27691+
27692+#ifdef CONFIG_PAX_REFCOUNT
27693+ jno 0f
27694+ addl %eax, (v)
27695+ adcl %edx, 4(v)
27696+ int $4
27697+0:
27698+ _ASM_EXTABLE(0b, 0b)
27699+#endif
27700+
27701+RET_ENDP
27702+BEGIN(sub_unchecked)
27703+ subl %eax, (v)
27704+ sbbl %edx, 4(v)
27705 RET_ENDP
27706 #undef v
27707
27708@@ -96,6 +150,27 @@ BEGIN(sub_return)
27709 sbbl $0, %edx
27710 addl (v), %eax
27711 adcl 4(v), %edx
27712+
27713+#ifdef CONFIG_PAX_REFCOUNT
27714+ into
27715+1234:
27716+ _ASM_EXTABLE(1234b, 2f)
27717+#endif
27718+
27719+ movl %eax, (v)
27720+ movl %edx, 4(v)
27721+
27722+#ifdef CONFIG_PAX_REFCOUNT
27723+2:
27724+#endif
27725+
27726+RET_ENDP
27727+BEGIN(sub_return_unchecked)
27728+ negl %edx
27729+ negl %eax
27730+ sbbl $0, %edx
27731+ addl (v), %eax
27732+ adcl 4(v), %edx
27733 movl %eax, (v)
27734 movl %edx, 4(v)
27735 RET_ENDP
27736@@ -105,6 +180,20 @@ RET_ENDP
27737 BEGIN(inc)
27738 addl $1, (v)
27739 adcl $0, 4(v)
27740+
27741+#ifdef CONFIG_PAX_REFCOUNT
27742+ jno 0f
27743+ subl $1, (v)
27744+ sbbl $0, 4(v)
27745+ int $4
27746+0:
27747+ _ASM_EXTABLE(0b, 0b)
27748+#endif
27749+
27750+RET_ENDP
27751+BEGIN(inc_unchecked)
27752+ addl $1, (v)
27753+ adcl $0, 4(v)
27754 RET_ENDP
27755 #undef v
27756
27757@@ -114,6 +203,26 @@ BEGIN(inc_return)
27758 movl 4(v), %edx
27759 addl $1, %eax
27760 adcl $0, %edx
27761+
27762+#ifdef CONFIG_PAX_REFCOUNT
27763+ into
27764+1234:
27765+ _ASM_EXTABLE(1234b, 2f)
27766+#endif
27767+
27768+ movl %eax, (v)
27769+ movl %edx, 4(v)
27770+
27771+#ifdef CONFIG_PAX_REFCOUNT
27772+2:
27773+#endif
27774+
27775+RET_ENDP
27776+BEGIN(inc_return_unchecked)
27777+ movl (v), %eax
27778+ movl 4(v), %edx
27779+ addl $1, %eax
27780+ adcl $0, %edx
27781 movl %eax, (v)
27782 movl %edx, 4(v)
27783 RET_ENDP
27784@@ -123,6 +232,20 @@ RET_ENDP
27785 BEGIN(dec)
27786 subl $1, (v)
27787 sbbl $0, 4(v)
27788+
27789+#ifdef CONFIG_PAX_REFCOUNT
27790+ jno 0f
27791+ addl $1, (v)
27792+ adcl $0, 4(v)
27793+ int $4
27794+0:
27795+ _ASM_EXTABLE(0b, 0b)
27796+#endif
27797+
27798+RET_ENDP
27799+BEGIN(dec_unchecked)
27800+ subl $1, (v)
27801+ sbbl $0, 4(v)
27802 RET_ENDP
27803 #undef v
27804
27805@@ -132,6 +255,26 @@ BEGIN(dec_return)
27806 movl 4(v), %edx
27807 subl $1, %eax
27808 sbbl $0, %edx
27809+
27810+#ifdef CONFIG_PAX_REFCOUNT
27811+ into
27812+1234:
27813+ _ASM_EXTABLE(1234b, 2f)
27814+#endif
27815+
27816+ movl %eax, (v)
27817+ movl %edx, 4(v)
27818+
27819+#ifdef CONFIG_PAX_REFCOUNT
27820+2:
27821+#endif
27822+
27823+RET_ENDP
27824+BEGIN(dec_return_unchecked)
27825+ movl (v), %eax
27826+ movl 4(v), %edx
27827+ subl $1, %eax
27828+ sbbl $0, %edx
27829 movl %eax, (v)
27830 movl %edx, 4(v)
27831 RET_ENDP
27832@@ -143,6 +286,13 @@ BEGIN(add_unless)
27833 adcl %edx, %edi
27834 addl (v), %eax
27835 adcl 4(v), %edx
27836+
27837+#ifdef CONFIG_PAX_REFCOUNT
27838+ into
27839+1234:
27840+ _ASM_EXTABLE(1234b, 2f)
27841+#endif
27842+
27843 cmpl %eax, %ecx
27844 je 3f
27845 1:
27846@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
27847 1:
27848 addl $1, %eax
27849 adcl $0, %edx
27850+
27851+#ifdef CONFIG_PAX_REFCOUNT
27852+ into
27853+1234:
27854+ _ASM_EXTABLE(1234b, 2f)
27855+#endif
27856+
27857 movl %eax, (v)
27858 movl %edx, 4(v)
27859 movl $1, %eax
27860@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
27861 movl 4(v), %edx
27862 subl $1, %eax
27863 sbbl $0, %edx
27864+
27865+#ifdef CONFIG_PAX_REFCOUNT
27866+ into
27867+1234:
27868+ _ASM_EXTABLE(1234b, 1f)
27869+#endif
27870+
27871 js 1f
27872 movl %eax, (v)
27873 movl %edx, 4(v)
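The CONFIG_PAX_REFCOUNT blocks give each 64-bit atomic operation a checked form: after the add/sub, a signed-overflow test (jno, or into raising trap 4 with an _ASM_EXTABLE fixup) either undoes the operation or skips the write-back, while the new *_unchecked variants keep the original wrapping semantics for counters that may legitimately overflow. The checked/unchecked split corresponds roughly to this single-threaded C sketch:

#include <stdint.h>
#include <stdlib.h>

/* Sketch of the semantics only; the real code does this atomically
 * and reports through `int $4` rather than abort(). */
static void atomic64_add_checked(int64_t delta, int64_t *v)
{
	int64_t result;

	if (__builtin_add_overflow(*v, delta, &result))
		abort();	/* signed overflow: trap instead of wrapping */
	*v = result;
}

static void atomic64_add_unchecked(int64_t delta, int64_t *v)
{
	/* wraps modulo 2^64; done via unsigned math to stay well defined */
	*v = (int64_t)((uint64_t)*v + (uint64_t)delta);
}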
27874diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
27875index f5cc9eb..51fa319 100644
27876--- a/arch/x86/lib/atomic64_cx8_32.S
27877+++ b/arch/x86/lib/atomic64_cx8_32.S
27878@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
27879 CFI_STARTPROC
27880
27881 read64 %ecx
27882+ pax_force_retaddr
27883 ret
27884 CFI_ENDPROC
27885 ENDPROC(atomic64_read_cx8)
27886
27887+ENTRY(atomic64_read_unchecked_cx8)
27888+ CFI_STARTPROC
27889+
27890+ read64 %ecx
27891+ pax_force_retaddr
27892+ ret
27893+ CFI_ENDPROC
27894+ENDPROC(atomic64_read_unchecked_cx8)
27895+
27896 ENTRY(atomic64_set_cx8)
27897 CFI_STARTPROC
27898
27899@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
27900 cmpxchg8b (%esi)
27901 jne 1b
27902
27903+ pax_force_retaddr
27904 ret
27905 CFI_ENDPROC
27906 ENDPROC(atomic64_set_cx8)
27907
27908+ENTRY(atomic64_set_unchecked_cx8)
27909+ CFI_STARTPROC
27910+
27911+1:
27912+/* we don't need LOCK_PREFIX since aligned 64-bit writes
27913+ * are atomic on 586 and newer */
27914+ cmpxchg8b (%esi)
27915+ jne 1b
27916+
27917+ pax_force_retaddr
27918+ ret
27919+ CFI_ENDPROC
27920+ENDPROC(atomic64_set_unchecked_cx8)
27921+
27922 ENTRY(atomic64_xchg_cx8)
27923 CFI_STARTPROC
27924
27925@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
27926 cmpxchg8b (%esi)
27927 jne 1b
27928
27929+ pax_force_retaddr
27930 ret
27931 CFI_ENDPROC
27932 ENDPROC(atomic64_xchg_cx8)
27933
27934-.macro addsub_return func ins insc
27935-ENTRY(atomic64_\func\()_return_cx8)
27936+.macro addsub_return func ins insc unchecked=""
27937+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
27938 CFI_STARTPROC
27939 SAVE ebp
27940 SAVE ebx
27941@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
27942 movl %edx, %ecx
27943 \ins\()l %esi, %ebx
27944 \insc\()l %edi, %ecx
27945+
27946+.ifb \unchecked
27947+#ifdef CONFIG_PAX_REFCOUNT
27948+ into
27949+2:
27950+ _ASM_EXTABLE(2b, 3f)
27951+#endif
27952+.endif
27953+
27954 LOCK_PREFIX
27955 cmpxchg8b (%ebp)
27956 jne 1b
27957-
27958-10:
27959 movl %ebx, %eax
27960 movl %ecx, %edx
27961+
27962+.ifb \unchecked
27963+#ifdef CONFIG_PAX_REFCOUNT
27964+3:
27965+#endif
27966+.endif
27967+
27968 RESTORE edi
27969 RESTORE esi
27970 RESTORE ebx
27971 RESTORE ebp
27972+ pax_force_retaddr
27973 ret
27974 CFI_ENDPROC
27975-ENDPROC(atomic64_\func\()_return_cx8)
27976+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
27977 .endm
27978
27979 addsub_return add add adc
27980 addsub_return sub sub sbb
27981+addsub_return add add adc _unchecked
27982+addsub_return sub sub sbb _unchecked
27983
27984-.macro incdec_return func ins insc
27985-ENTRY(atomic64_\func\()_return_cx8)
27986+.macro incdec_return func ins insc unchecked=""
27987+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
27988 CFI_STARTPROC
27989 SAVE ebx
27990
27991@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
27992 movl %edx, %ecx
27993 \ins\()l $1, %ebx
27994 \insc\()l $0, %ecx
27995+
27996+.ifb \unchecked
27997+#ifdef CONFIG_PAX_REFCOUNT
27998+ into
27999+2:
28000+ _ASM_EXTABLE(2b, 3f)
28001+#endif
28002+.endif
28003+
28004 LOCK_PREFIX
28005 cmpxchg8b (%esi)
28006 jne 1b
28007
28008-10:
28009 movl %ebx, %eax
28010 movl %ecx, %edx
28011+
28012+.ifb \unchecked
28013+#ifdef CONFIG_PAX_REFCOUNT
28014+3:
28015+#endif
28016+.endif
28017+
28018 RESTORE ebx
28019+ pax_force_retaddr
28020 ret
28021 CFI_ENDPROC
28022-ENDPROC(atomic64_\func\()_return_cx8)
28023+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
28024 .endm
28025
28026 incdec_return inc add adc
28027 incdec_return dec sub sbb
28028+incdec_return inc add adc _unchecked
28029+incdec_return dec sub sbb _unchecked
28030
28031 ENTRY(atomic64_dec_if_positive_cx8)
28032 CFI_STARTPROC
28033@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
28034 movl %edx, %ecx
28035 subl $1, %ebx
28036 sbb $0, %ecx
28037+
28038+#ifdef CONFIG_PAX_REFCOUNT
28039+ into
28040+1234:
28041+ _ASM_EXTABLE(1234b, 2f)
28042+#endif
28043+
28044 js 2f
28045 LOCK_PREFIX
28046 cmpxchg8b (%esi)
28047@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
28048 movl %ebx, %eax
28049 movl %ecx, %edx
28050 RESTORE ebx
28051+ pax_force_retaddr
28052 ret
28053 CFI_ENDPROC
28054 ENDPROC(atomic64_dec_if_positive_cx8)
28055@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
28056 movl %edx, %ecx
28057 addl %ebp, %ebx
28058 adcl %edi, %ecx
28059+
28060+#ifdef CONFIG_PAX_REFCOUNT
28061+ into
28062+1234:
28063+ _ASM_EXTABLE(1234b, 3f)
28064+#endif
28065+
28066 LOCK_PREFIX
28067 cmpxchg8b (%esi)
28068 jne 1b
28069@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
28070 CFI_ADJUST_CFA_OFFSET -8
28071 RESTORE ebx
28072 RESTORE ebp
28073+ pax_force_retaddr
28074 ret
28075 4:
28076 cmpl %edx, 4(%esp)
28077@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
28078 xorl %ecx, %ecx
28079 addl $1, %ebx
28080 adcl %edx, %ecx
28081+
28082+#ifdef CONFIG_PAX_REFCOUNT
28083+ into
28084+1234:
28085+ _ASM_EXTABLE(1234b, 3f)
28086+#endif
28087+
28088 LOCK_PREFIX
28089 cmpxchg8b (%esi)
28090 jne 1b
28091@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
28092 movl $1, %eax
28093 3:
28094 RESTORE ebx
28095+ pax_force_retaddr
28096 ret
28097 CFI_ENDPROC
28098 ENDPROC(atomic64_inc_not_zero_cx8)
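Each cx8 routine is a classic compare-and-swap retry loop: load the old 64-bit value, compute the new value in ebx:ecx, and repeat cmpxchg8b until no other CPU has changed the memory in between (the jne 1b). The same structure in portable C11 atomics:

#include <stdatomic.h>
#include <stdint.h>

/* Sketch: atomic 64-bit add-return built from a compare-exchange loop,
 * mirroring the cmpxchg8b retry structure above. */
static uint64_t atomic64_add_return(uint64_t delta, _Atomic uint64_t *v)
{
	uint64_t old = atomic_load(v);
	uint64_t new;

	do {
		new = old + delta;
		/* on failure, `old` is refreshed with the current value
		 * and we recompute: exactly the `jne 1b` retry */
	} while (!atomic_compare_exchange_weak(v, &old, new));

	return new;
}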
28099diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
28100index e78b8ee..7e173a8 100644
28101--- a/arch/x86/lib/checksum_32.S
28102+++ b/arch/x86/lib/checksum_32.S
28103@@ -29,7 +29,8 @@
28104 #include <asm/dwarf2.h>
28105 #include <asm/errno.h>
28106 #include <asm/asm.h>
28107-
28108+#include <asm/segment.h>
28109+
28110 /*
28111 * computes a partial checksum, e.g. for TCP/UDP fragments
28112 */
28113@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
28114
28115 #define ARGBASE 16
28116 #define FP 12
28117-
28118-ENTRY(csum_partial_copy_generic)
28119+
28120+ENTRY(csum_partial_copy_generic_to_user)
28121 CFI_STARTPROC
28122+
28123+#ifdef CONFIG_PAX_MEMORY_UDEREF
28124+ pushl_cfi %gs
28125+ popl_cfi %es
28126+ jmp csum_partial_copy_generic
28127+#endif
28128+
28129+ENTRY(csum_partial_copy_generic_from_user)
28130+
28131+#ifdef CONFIG_PAX_MEMORY_UDEREF
28132+ pushl_cfi %gs
28133+ popl_cfi %ds
28134+#endif
28135+
28136+ENTRY(csum_partial_copy_generic)
28137 subl $4,%esp
28138 CFI_ADJUST_CFA_OFFSET 4
28139 pushl_cfi %edi
28140@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
28141 jmp 4f
28142 SRC(1: movw (%esi), %bx )
28143 addl $2, %esi
28144-DST( movw %bx, (%edi) )
28145+DST( movw %bx, %es:(%edi) )
28146 addl $2, %edi
28147 addw %bx, %ax
28148 adcl $0, %eax
28149@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
28150 SRC(1: movl (%esi), %ebx )
28151 SRC( movl 4(%esi), %edx )
28152 adcl %ebx, %eax
28153-DST( movl %ebx, (%edi) )
28154+DST( movl %ebx, %es:(%edi) )
28155 adcl %edx, %eax
28156-DST( movl %edx, 4(%edi) )
28157+DST( movl %edx, %es:4(%edi) )
28158
28159 SRC( movl 8(%esi), %ebx )
28160 SRC( movl 12(%esi), %edx )
28161 adcl %ebx, %eax
28162-DST( movl %ebx, 8(%edi) )
28163+DST( movl %ebx, %es:8(%edi) )
28164 adcl %edx, %eax
28165-DST( movl %edx, 12(%edi) )
28166+DST( movl %edx, %es:12(%edi) )
28167
28168 SRC( movl 16(%esi), %ebx )
28169 SRC( movl 20(%esi), %edx )
28170 adcl %ebx, %eax
28171-DST( movl %ebx, 16(%edi) )
28172+DST( movl %ebx, %es:16(%edi) )
28173 adcl %edx, %eax
28174-DST( movl %edx, 20(%edi) )
28175+DST( movl %edx, %es:20(%edi) )
28176
28177 SRC( movl 24(%esi), %ebx )
28178 SRC( movl 28(%esi), %edx )
28179 adcl %ebx, %eax
28180-DST( movl %ebx, 24(%edi) )
28181+DST( movl %ebx, %es:24(%edi) )
28182 adcl %edx, %eax
28183-DST( movl %edx, 28(%edi) )
28184+DST( movl %edx, %es:28(%edi) )
28185
28186 lea 32(%esi), %esi
28187 lea 32(%edi), %edi
28188@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
28189 shrl $2, %edx # This clears CF
28190 SRC(3: movl (%esi), %ebx )
28191 adcl %ebx, %eax
28192-DST( movl %ebx, (%edi) )
28193+DST( movl %ebx, %es:(%edi) )
28194 lea 4(%esi), %esi
28195 lea 4(%edi), %edi
28196 dec %edx
28197@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
28198 jb 5f
28199 SRC( movw (%esi), %cx )
28200 leal 2(%esi), %esi
28201-DST( movw %cx, (%edi) )
28202+DST( movw %cx, %es:(%edi) )
28203 leal 2(%edi), %edi
28204 je 6f
28205 shll $16,%ecx
28206 SRC(5: movb (%esi), %cl )
28207-DST( movb %cl, (%edi) )
28208+DST( movb %cl, %es:(%edi) )
28209 6: addl %ecx, %eax
28210 adcl $0, %eax
28211 7:
28212@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
28213
28214 6001:
28215 movl ARGBASE+20(%esp), %ebx # src_err_ptr
28216- movl $-EFAULT, (%ebx)
28217+ movl $-EFAULT, %ss:(%ebx)
28218
28219 # zero the complete destination - computing the rest
28220 # is too much work
28221@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
28222
28223 6002:
28224 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
28225- movl $-EFAULT,(%ebx)
28226+ movl $-EFAULT,%ss:(%ebx)
28227 jmp 5000b
28228
28229 .previous
28230
28231+ pushl_cfi %ss
28232+ popl_cfi %ds
28233+ pushl_cfi %ss
28234+ popl_cfi %es
28235 popl_cfi %ebx
28236 CFI_RESTORE ebx
28237 popl_cfi %esi
28238@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
28239 popl_cfi %ecx # equivalent to addl $4,%esp
28240 ret
28241 CFI_ENDPROC
28242-ENDPROC(csum_partial_copy_generic)
28243+ENDPROC(csum_partial_copy_generic_to_user)
28244
28245 #else
28246
28247 /* Version for PentiumII/PPro */
28248
28249 #define ROUND1(x) \
28250+ nop; nop; nop; \
28251 SRC(movl x(%esi), %ebx ) ; \
28252 addl %ebx, %eax ; \
28253- DST(movl %ebx, x(%edi) ) ;
28254+ DST(movl %ebx, %es:x(%edi)) ;
28255
28256 #define ROUND(x) \
28257+ nop; nop; nop; \
28258 SRC(movl x(%esi), %ebx ) ; \
28259 adcl %ebx, %eax ; \
28260- DST(movl %ebx, x(%edi) ) ;
28261+ DST(movl %ebx, %es:x(%edi)) ;
28262
28263 #define ARGBASE 12
28264-
28265-ENTRY(csum_partial_copy_generic)
28266+
28267+ENTRY(csum_partial_copy_generic_to_user)
28268 CFI_STARTPROC
28269+
28270+#ifdef CONFIG_PAX_MEMORY_UDEREF
28271+ pushl_cfi %gs
28272+ popl_cfi %es
28273+ jmp csum_partial_copy_generic
28274+#endif
28275+
28276+ENTRY(csum_partial_copy_generic_from_user)
28277+
28278+#ifdef CONFIG_PAX_MEMORY_UDEREF
28279+ pushl_cfi %gs
28280+ popl_cfi %ds
28281+#endif
28282+
28283+ENTRY(csum_partial_copy_generic)
28284 pushl_cfi %ebx
28285 CFI_REL_OFFSET ebx, 0
28286 pushl_cfi %edi
28287@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
28288 subl %ebx, %edi
28289 lea -1(%esi),%edx
28290 andl $-32,%edx
28291- lea 3f(%ebx,%ebx), %ebx
28292+ lea 3f(%ebx,%ebx,2), %ebx
28293 testl %esi, %esi
28294 jmp *%ebx
28295 1: addl $64,%esi
28296@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
28297 jb 5f
28298 SRC( movw (%esi), %dx )
28299 leal 2(%esi), %esi
28300-DST( movw %dx, (%edi) )
28301+DST( movw %dx, %es:(%edi) )
28302 leal 2(%edi), %edi
28303 je 6f
28304 shll $16,%edx
28305 5:
28306 SRC( movb (%esi), %dl )
28307-DST( movb %dl, (%edi) )
28308+DST( movb %dl, %es:(%edi) )
28309 6: addl %edx, %eax
28310 adcl $0, %eax
28311 7:
28312 .section .fixup, "ax"
28313 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
28314- movl $-EFAULT, (%ebx)
28315+ movl $-EFAULT, %ss:(%ebx)
28316 # zero the complete destination (computing the rest is too much work)
28317 movl ARGBASE+8(%esp),%edi # dst
28318 movl ARGBASE+12(%esp),%ecx # len
28319@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
28320 rep; stosb
28321 jmp 7b
28322 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
28323- movl $-EFAULT, (%ebx)
28324+ movl $-EFAULT, %ss:(%ebx)
28325 jmp 7b
28326 .previous
28327
28328+#ifdef CONFIG_PAX_MEMORY_UDEREF
28329+ pushl_cfi %ss
28330+ popl_cfi %ds
28331+ pushl_cfi %ss
28332+ popl_cfi %es
28333+#endif
28334+
28335 popl_cfi %esi
28336 CFI_RESTORE esi
28337 popl_cfi %edi
28338@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
28339 CFI_RESTORE ebx
28340 ret
28341 CFI_ENDPROC
28342-ENDPROC(csum_partial_copy_generic)
28343+ENDPROC(csum_partial_copy_generic_to_user)
28344
28345 #undef ROUND
28346 #undef ROUND1
28347diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
28348index f2145cf..cea889d 100644
28349--- a/arch/x86/lib/clear_page_64.S
28350+++ b/arch/x86/lib/clear_page_64.S
28351@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
28352 movl $4096/8,%ecx
28353 xorl %eax,%eax
28354 rep stosq
28355+ pax_force_retaddr
28356 ret
28357 CFI_ENDPROC
28358 ENDPROC(clear_page_c)
28359@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
28360 movl $4096,%ecx
28361 xorl %eax,%eax
28362 rep stosb
28363+ pax_force_retaddr
28364 ret
28365 CFI_ENDPROC
28366 ENDPROC(clear_page_c_e)
28367@@ -43,6 +45,7 @@ ENTRY(clear_page)
28368 leaq 64(%rdi),%rdi
28369 jnz .Lloop
28370 nop
28371+ pax_force_retaddr
28372 ret
28373 CFI_ENDPROC
28374 .Lclear_page_end:
28375@@ -58,7 +61,7 @@ ENDPROC(clear_page)
28376
28377 #include <asm/cpufeature.h>
28378
28379- .section .altinstr_replacement,"ax"
28380+ .section .altinstr_replacement,"a"
28381 1: .byte 0xeb /* jmp <disp8> */
28382 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
28383 2: .byte 0xeb /* jmp <disp8> */
28384diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
28385index 1e572c5..2a162cd 100644
28386--- a/arch/x86/lib/cmpxchg16b_emu.S
28387+++ b/arch/x86/lib/cmpxchg16b_emu.S
28388@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
28389
28390 popf
28391 mov $1, %al
28392+ pax_force_retaddr
28393 ret
28394
28395 not_same:
28396 popf
28397 xor %al,%al
28398+ pax_force_retaddr
28399 ret
28400
28401 CFI_ENDPROC
28402diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
28403index 176cca6..e0d658e 100644
28404--- a/arch/x86/lib/copy_page_64.S
28405+++ b/arch/x86/lib/copy_page_64.S
28406@@ -9,6 +9,7 @@ copy_page_rep:
28407 CFI_STARTPROC
28408 movl $4096/8, %ecx
28409 rep movsq
28410+ pax_force_retaddr
28411 ret
28412 CFI_ENDPROC
28413 ENDPROC(copy_page_rep)
28414@@ -24,8 +25,8 @@ ENTRY(copy_page)
28415 CFI_ADJUST_CFA_OFFSET 2*8
28416 movq %rbx, (%rsp)
28417 CFI_REL_OFFSET rbx, 0
28418- movq %r12, 1*8(%rsp)
28419- CFI_REL_OFFSET r12, 1*8
28420+ movq %r13, 1*8(%rsp)
28421+ CFI_REL_OFFSET r13, 1*8
28422
28423 movl $(4096/64)-5, %ecx
28424 .p2align 4
28425@@ -38,7 +39,7 @@ ENTRY(copy_page)
28426 movq 0x8*4(%rsi), %r9
28427 movq 0x8*5(%rsi), %r10
28428 movq 0x8*6(%rsi), %r11
28429- movq 0x8*7(%rsi), %r12
28430+ movq 0x8*7(%rsi), %r13
28431
28432 prefetcht0 5*64(%rsi)
28433
28434@@ -49,7 +50,7 @@ ENTRY(copy_page)
28435 movq %r9, 0x8*4(%rdi)
28436 movq %r10, 0x8*5(%rdi)
28437 movq %r11, 0x8*6(%rdi)
28438- movq %r12, 0x8*7(%rdi)
28439+ movq %r13, 0x8*7(%rdi)
28440
28441 leaq 64 (%rsi), %rsi
28442 leaq 64 (%rdi), %rdi
28443@@ -68,7 +69,7 @@ ENTRY(copy_page)
28444 movq 0x8*4(%rsi), %r9
28445 movq 0x8*5(%rsi), %r10
28446 movq 0x8*6(%rsi), %r11
28447- movq 0x8*7(%rsi), %r12
28448+ movq 0x8*7(%rsi), %r13
28449
28450 movq %rax, 0x8*0(%rdi)
28451 movq %rbx, 0x8*1(%rdi)
28452@@ -77,7 +78,7 @@ ENTRY(copy_page)
28453 movq %r9, 0x8*4(%rdi)
28454 movq %r10, 0x8*5(%rdi)
28455 movq %r11, 0x8*6(%rdi)
28456- movq %r12, 0x8*7(%rdi)
28457+ movq %r13, 0x8*7(%rdi)
28458
28459 leaq 64(%rdi), %rdi
28460 leaq 64(%rsi), %rsi
28461@@ -85,10 +86,11 @@ ENTRY(copy_page)
28462
28463 movq (%rsp), %rbx
28464 CFI_RESTORE rbx
28465- movq 1*8(%rsp), %r12
28466- CFI_RESTORE r12
28467+ movq 1*8(%rsp), %r13
28468+ CFI_RESTORE r13
28469 addq $2*8, %rsp
28470 CFI_ADJUST_CFA_OFFSET -2*8
28471+ pax_force_retaddr
28472 ret
28473 .Lcopy_page_end:
28474 CFI_ENDPROC
28475@@ -99,7 +101,7 @@ ENDPROC(copy_page)
28476
28477 #include <asm/cpufeature.h>
28478
28479- .section .altinstr_replacement,"ax"
28480+ .section .altinstr_replacement,"a"
28481 1: .byte 0xeb /* jmp <disp8> */
28482 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
28483 2:
28484diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
28485index a30ca15..407412b 100644
28486--- a/arch/x86/lib/copy_user_64.S
28487+++ b/arch/x86/lib/copy_user_64.S
28488@@ -18,31 +18,7 @@
28489 #include <asm/alternative-asm.h>
28490 #include <asm/asm.h>
28491 #include <asm/smap.h>
28492-
28493-/*
28494- * By placing feature2 after feature1 in altinstructions section, we logically
28495- * implement:
28496- * If CPU has feature2, jmp to alt2 is used
28497- * else if CPU has feature1, jmp to alt1 is used
28498- * else jmp to orig is used.
28499- */
28500- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
28501-0:
28502- .byte 0xe9 /* 32bit jump */
28503- .long \orig-1f /* by default jump to orig */
28504-1:
28505- .section .altinstr_replacement,"ax"
28506-2: .byte 0xe9 /* near jump with 32bit immediate */
28507- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
28508-3: .byte 0xe9 /* near jump with 32bit immediate */
28509- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
28510- .previous
28511-
28512- .section .altinstructions,"a"
28513- altinstruction_entry 0b,2b,\feature1,5,5
28514- altinstruction_entry 0b,3b,\feature2,5,5
28515- .previous
28516- .endm
28517+#include <asm/pgtable.h>
28518
28519 .macro ALIGN_DESTINATION
28520 #ifdef FIX_ALIGNMENT
28521@@ -70,52 +46,6 @@
28522 #endif
28523 .endm
28524
28525-/* Standard copy_to_user with segment limit checking */
28526-ENTRY(_copy_to_user)
28527- CFI_STARTPROC
28528- GET_THREAD_INFO(%rax)
28529- movq %rdi,%rcx
28530- addq %rdx,%rcx
28531- jc bad_to_user
28532- cmpq TI_addr_limit(%rax),%rcx
28533- ja bad_to_user
28534- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
28535- copy_user_generic_unrolled,copy_user_generic_string, \
28536- copy_user_enhanced_fast_string
28537- CFI_ENDPROC
28538-ENDPROC(_copy_to_user)
28539-
28540-/* Standard copy_from_user with segment limit checking */
28541-ENTRY(_copy_from_user)
28542- CFI_STARTPROC
28543- GET_THREAD_INFO(%rax)
28544- movq %rsi,%rcx
28545- addq %rdx,%rcx
28546- jc bad_from_user
28547- cmpq TI_addr_limit(%rax),%rcx
28548- ja bad_from_user
28549- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
28550- copy_user_generic_unrolled,copy_user_generic_string, \
28551- copy_user_enhanced_fast_string
28552- CFI_ENDPROC
28553-ENDPROC(_copy_from_user)
28554-
28555- .section .fixup,"ax"
28556- /* must zero dest */
28557-ENTRY(bad_from_user)
28558-bad_from_user:
28559- CFI_STARTPROC
28560- movl %edx,%ecx
28561- xorl %eax,%eax
28562- rep
28563- stosb
28564-bad_to_user:
28565- movl %edx,%eax
28566- ret
28567- CFI_ENDPROC
28568-ENDPROC(bad_from_user)
28569- .previous
28570-
28571 /*
28572 * copy_user_generic_unrolled - memory copy with exception handling.
28573 * This version is for CPUs like P4 that don't have efficient micro
28574@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
28575 */
28576 ENTRY(copy_user_generic_unrolled)
28577 CFI_STARTPROC
28578+ ASM_PAX_OPEN_USERLAND
28579 ASM_STAC
28580 cmpl $8,%edx
28581 	jb 20f	/* less than 8 bytes, go to byte copy loop */
28582@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
28583 jnz 21b
28584 23: xor %eax,%eax
28585 ASM_CLAC
28586+ ASM_PAX_CLOSE_USERLAND
28587+ pax_force_retaddr
28588 ret
28589
28590 .section .fixup,"ax"
28591@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
28592 */
28593 ENTRY(copy_user_generic_string)
28594 CFI_STARTPROC
28595+ ASM_PAX_OPEN_USERLAND
28596 ASM_STAC
28597 andl %edx,%edx
28598 jz 4f
28599@@ -251,6 +185,8 @@ ENTRY(copy_user_generic_string)
28600 movsb
28601 4: xorl %eax,%eax
28602 ASM_CLAC
28603+ ASM_PAX_CLOSE_USERLAND
28604+ pax_force_retaddr
28605 ret
28606
28607 .section .fixup,"ax"
28608@@ -278,6 +214,7 @@ ENDPROC(copy_user_generic_string)
28609 */
28610 ENTRY(copy_user_enhanced_fast_string)
28611 CFI_STARTPROC
28612+ ASM_PAX_OPEN_USERLAND
28613 ASM_STAC
28614 andl %edx,%edx
28615 jz 2f
28616@@ -286,6 +223,8 @@ ENTRY(copy_user_enhanced_fast_string)
28617 movsb
28618 2: xorl %eax,%eax
28619 ASM_CLAC
28620+ ASM_PAX_CLOSE_USERLAND
28621+ pax_force_retaddr
28622 ret
28623
28624 .section .fixup,"ax"
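The deleted ALTERNATIVE_JUMP macro encoded a feature priority: jump to the ERMS fast-string copy when the CPU has it, else to the rep-good string copy, else fall through to the unrolled version. Expressed as ordinary runtime dispatch instead of boot-time code patching, the selection logic looks like this (the cpu_has_* probes and the three variants are placeholders):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

typedef void *(*copy_fn)(void *dst, const void *src, size_t len);

/* The real variants differ in instruction selection; memcpy stands in. */
static void *copy_unrolled(void *d, const void *s, size_t n) { return memcpy(d, s, n); }
static void *copy_string(void *d, const void *s, size_t n)   { return memcpy(d, s, n); }
static void *copy_erms(void *d, const void *s, size_t n)     { return memcpy(d, s, n); }

/* Placeholder probes; the kernel reads the CPUID feature bits. */
static bool cpu_has_erms(void)     { return false; }
static bool cpu_has_rep_good(void) { return true; }

static copy_fn select_copy(void)
{
	if (cpu_has_erms())		/* feature2 wins if present */
		return copy_erms;
	if (cpu_has_rep_good())		/* else feature1 */
		return copy_string;
	return copy_unrolled;		/* else the generic version */
}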
28625diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
28626index 6a4f43c..c70fb52 100644
28627--- a/arch/x86/lib/copy_user_nocache_64.S
28628+++ b/arch/x86/lib/copy_user_nocache_64.S
28629@@ -8,6 +8,7 @@
28630
28631 #include <linux/linkage.h>
28632 #include <asm/dwarf2.h>
28633+#include <asm/alternative-asm.h>
28634
28635 #define FIX_ALIGNMENT 1
28636
28637@@ -16,6 +17,7 @@
28638 #include <asm/thread_info.h>
28639 #include <asm/asm.h>
28640 #include <asm/smap.h>
28641+#include <asm/pgtable.h>
28642
28643 .macro ALIGN_DESTINATION
28644 #ifdef FIX_ALIGNMENT
28645@@ -49,6 +51,16 @@
28646 */
28647 ENTRY(__copy_user_nocache)
28648 CFI_STARTPROC
28649+
28650+#ifdef CONFIG_PAX_MEMORY_UDEREF
28651+ mov pax_user_shadow_base,%rcx
28652+ cmp %rcx,%rsi
28653+ jae 1f
28654+ add %rcx,%rsi
28655+1:
28656+#endif
28657+
28658+ ASM_PAX_OPEN_USERLAND
28659 ASM_STAC
28660 cmpl $8,%edx
28661 	jb 20f	/* less than 8 bytes, go to byte copy loop */
28662@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
28663 jnz 21b
28664 23: xorl %eax,%eax
28665 ASM_CLAC
28666+ ASM_PAX_CLOSE_USERLAND
28667 sfence
28668+ pax_force_retaddr
28669 ret
28670
28671 .section .fixup,"ax"
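The UDEREF prologue added to __copy_user_nocache() rebases a userland source pointer into the kernel's shadow mapping of userland when it lies below pax_user_shadow_base; addresses at or above the base are assumed to be rebased already and left alone. The cmp/add pair is just this arithmetic:

#include <stdint.h>

/* Sketch of the two-instruction rebase (cmp/add) in C. The shadow
 * base value is whatever the kernel chose at boot; 0 disables it. */
static uintptr_t rebase_user_ptr(uintptr_t addr, uintptr_t shadow_base)
{
	if (addr < shadow_base)		/* plain userland address... */
		addr += shadow_base;	/* ...moved into the shadow area */
	return addr;			/* already rebased: unchanged */
}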
28672diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
28673index 2419d5f..fe52d0e 100644
28674--- a/arch/x86/lib/csum-copy_64.S
28675+++ b/arch/x86/lib/csum-copy_64.S
28676@@ -9,6 +9,7 @@
28677 #include <asm/dwarf2.h>
28678 #include <asm/errno.h>
28679 #include <asm/asm.h>
28680+#include <asm/alternative-asm.h>
28681
28682 /*
28683 * Checksum copy with exception handling.
28684@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
28685 CFI_ADJUST_CFA_OFFSET 7*8
28686 movq %rbx, 2*8(%rsp)
28687 CFI_REL_OFFSET rbx, 2*8
28688- movq %r12, 3*8(%rsp)
28689- CFI_REL_OFFSET r12, 3*8
28690+ movq %r15, 3*8(%rsp)
28691+ CFI_REL_OFFSET r15, 3*8
28692 movq %r14, 4*8(%rsp)
28693 CFI_REL_OFFSET r14, 4*8
28694 movq %r13, 5*8(%rsp)
28695@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
28696 movl %edx, %ecx
28697
28698 xorl %r9d, %r9d
28699- movq %rcx, %r12
28700+ movq %rcx, %r15
28701
28702- shrq $6, %r12
28703+ shrq $6, %r15
28704 jz .Lhandle_tail /* < 64 */
28705
28706 clc
28707
28708 /* main loop. clear in 64 byte blocks */
28709 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
28710- /* r11: temp3, rdx: temp4, r12 loopcnt */
28711+ /* r11: temp3, rdx: temp4, r15 loopcnt */
28712 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
28713 .p2align 4
28714 .Lloop:
28715@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
28716 adcq %r14, %rax
28717 adcq %r13, %rax
28718
28719- decl %r12d
28720+ decl %r15d
28721
28722 dest
28723 movq %rbx, (%rsi)
28724@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
28725 .Lende:
28726 movq 2*8(%rsp), %rbx
28727 CFI_RESTORE rbx
28728- movq 3*8(%rsp), %r12
28729- CFI_RESTORE r12
28730+ movq 3*8(%rsp), %r15
28731+ CFI_RESTORE r15
28732 movq 4*8(%rsp), %r14
28733 CFI_RESTORE r14
28734 movq 5*8(%rsp), %r13
28735@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
28736 CFI_RESTORE rbp
28737 addq $7*8, %rsp
28738 CFI_ADJUST_CFA_OFFSET -7*8
28739+ pax_force_retaddr
28740 ret
28741 CFI_RESTORE_STATE
28742
28743diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
28744index 7609e0e..b449b98 100644
28745--- a/arch/x86/lib/csum-wrappers_64.c
28746+++ b/arch/x86/lib/csum-wrappers_64.c
28747@@ -53,10 +53,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
28748 len -= 2;
28749 }
28750 }
28751+ pax_open_userland();
28752 stac();
28753- isum = csum_partial_copy_generic((__force const void *)src,
28754+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
28755 dst, len, isum, errp, NULL);
28756 clac();
28757+ pax_close_userland();
28758 if (unlikely(*errp))
28759 goto out_err;
28760
28761@@ -110,10 +112,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
28762 }
28763
28764 *errp = 0;
28765+ pax_open_userland();
28766 stac();
28767- ret = csum_partial_copy_generic(src, (void __force *)dst,
28768+ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
28769 len, isum, NULL, errp);
28770 clac();
28771+ pax_close_userland();
28772 return ret;
28773 }
28774 EXPORT_SYMBOL(csum_partial_copy_to_user);
28775diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
28776index a451235..1daa956 100644
28777--- a/arch/x86/lib/getuser.S
28778+++ b/arch/x86/lib/getuser.S
28779@@ -33,17 +33,40 @@
28780 #include <asm/thread_info.h>
28781 #include <asm/asm.h>
28782 #include <asm/smap.h>
28783+#include <asm/segment.h>
28784+#include <asm/pgtable.h>
28785+#include <asm/alternative-asm.h>
28786+
28787+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28788+#define __copyuser_seg gs;
28789+#else
28790+#define __copyuser_seg
28791+#endif
28792
28793 .text
28794 ENTRY(__get_user_1)
28795 CFI_STARTPROC
28796+
28797+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28798 GET_THREAD_INFO(%_ASM_DX)
28799 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
28800 jae bad_get_user
28801 ASM_STAC
28802-1: movzbl (%_ASM_AX),%edx
28803+
28804+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28805+ mov pax_user_shadow_base,%_ASM_DX
28806+ cmp %_ASM_DX,%_ASM_AX
28807+ jae 1234f
28808+ add %_ASM_DX,%_ASM_AX
28809+1234:
28810+#endif
28811+
28812+#endif
28813+
28814+1: __copyuser_seg movzbl (%_ASM_AX),%edx
28815 xor %eax,%eax
28816 ASM_CLAC
28817+ pax_force_retaddr
28818 ret
28819 CFI_ENDPROC
28820 ENDPROC(__get_user_1)
28821@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
28822 ENTRY(__get_user_2)
28823 CFI_STARTPROC
28824 add $1,%_ASM_AX
28825+
28826+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28827 jc bad_get_user
28828 GET_THREAD_INFO(%_ASM_DX)
28829 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
28830 jae bad_get_user
28831 ASM_STAC
28832-2: movzwl -1(%_ASM_AX),%edx
28833+
28834+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28835+ mov pax_user_shadow_base,%_ASM_DX
28836+ cmp %_ASM_DX,%_ASM_AX
28837+ jae 1234f
28838+ add %_ASM_DX,%_ASM_AX
28839+1234:
28840+#endif
28841+
28842+#endif
28843+
28844+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
28845 xor %eax,%eax
28846 ASM_CLAC
28847+ pax_force_retaddr
28848 ret
28849 CFI_ENDPROC
28850 ENDPROC(__get_user_2)
28851@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
28852 ENTRY(__get_user_4)
28853 CFI_STARTPROC
28854 add $3,%_ASM_AX
28855+
28856+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28857 jc bad_get_user
28858 GET_THREAD_INFO(%_ASM_DX)
28859 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
28860 jae bad_get_user
28861 ASM_STAC
28862-3: movl -3(%_ASM_AX),%edx
28863+
28864+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28865+ mov pax_user_shadow_base,%_ASM_DX
28866+ cmp %_ASM_DX,%_ASM_AX
28867+ jae 1234f
28868+ add %_ASM_DX,%_ASM_AX
28869+1234:
28870+#endif
28871+
28872+#endif
28873+
28874+3: __copyuser_seg movl -3(%_ASM_AX),%edx
28875 xor %eax,%eax
28876 ASM_CLAC
28877+ pax_force_retaddr
28878 ret
28879 CFI_ENDPROC
28880 ENDPROC(__get_user_4)
28881@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
28882 GET_THREAD_INFO(%_ASM_DX)
28883 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
28884 jae bad_get_user
28885+
28886+#ifdef CONFIG_PAX_MEMORY_UDEREF
28887+ mov pax_user_shadow_base,%_ASM_DX
28888+ cmp %_ASM_DX,%_ASM_AX
28889+ jae 1234f
28890+ add %_ASM_DX,%_ASM_AX
28891+1234:
28892+#endif
28893+
28894 ASM_STAC
28895 4: movq -7(%_ASM_AX),%rdx
28896 xor %eax,%eax
28897 ASM_CLAC
28898+ pax_force_retaddr
28899 ret
28900 #else
28901 add $7,%_ASM_AX
28902@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
28903 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
28904 jae bad_get_user_8
28905 ASM_STAC
28906-4: movl -7(%_ASM_AX),%edx
28907-5: movl -3(%_ASM_AX),%ecx
28908+4: __copyuser_seg movl -7(%_ASM_AX),%edx
28909+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
28910 xor %eax,%eax
28911 ASM_CLAC
28912+ pax_force_retaddr
28913 ret
28914 #endif
28915 CFI_ENDPROC
28916@@ -113,6 +175,7 @@ bad_get_user:
28917 xor %edx,%edx
28918 mov $(-EFAULT),%_ASM_AX
28919 ASM_CLAC
28920+ pax_force_retaddr
28921 ret
28922 CFI_ENDPROC
28923 END(bad_get_user)
28924@@ -124,6 +187,7 @@ bad_get_user_8:
28925 xor %ecx,%ecx
28926 mov $(-EFAULT),%_ASM_AX
28927 ASM_CLAC
28928+ pax_force_retaddr
28929 ret
28930 CFI_ENDPROC
28931 END(bad_get_user_8)
28932diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
28933index 54fcffe..7be149e 100644
28934--- a/arch/x86/lib/insn.c
28935+++ b/arch/x86/lib/insn.c
28936@@ -20,8 +20,10 @@
28937
28938 #ifdef __KERNEL__
28939 #include <linux/string.h>
28940+#include <asm/pgtable_types.h>
28941 #else
28942 #include <string.h>
28943+#define ktla_ktva(addr) addr
28944 #endif
28945 #include <asm/inat.h>
28946 #include <asm/insn.h>
28947@@ -53,8 +55,8 @@
28948 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
28949 {
28950 memset(insn, 0, sizeof(*insn));
28951- insn->kaddr = kaddr;
28952- insn->next_byte = kaddr;
28953+ insn->kaddr = ktla_ktva(kaddr);
28954+ insn->next_byte = ktla_ktva(kaddr);
28955 insn->x86_64 = x86_64 ? 1 : 0;
28956 insn->opnd_bytes = 4;
28957 if (x86_64)
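insn_init() now passes the decode address through ktla_ktva() so that, on KERNEXEC 32-bit kernels, a kernel text linear address is translated to the virtual alias the instruction bytes are actually readable at; userspace builds of the decoder define the macro away as the identity, as the #else branch shows. Conceptually it is a fixed shift, sketched here with an illustrative offset:

#include <stdint.h>

/* TEXT_OFFSET is illustrative only; the real displacement comes from
 * the KERNEXEC layout set up in vmlinux.lds.S earlier in this patch.
 * With an offset of 0 this degenerates to the userspace identity. */
#define TEXT_OFFSET 0UL

static const void *ktla_ktva_sketch(const void *addr)
{
	return (const uint8_t *)addr + TEXT_OFFSET;
}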
28958diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
28959index 05a95e7..326f2fa 100644
28960--- a/arch/x86/lib/iomap_copy_64.S
28961+++ b/arch/x86/lib/iomap_copy_64.S
28962@@ -17,6 +17,7 @@
28963
28964 #include <linux/linkage.h>
28965 #include <asm/dwarf2.h>
28966+#include <asm/alternative-asm.h>
28967
28968 /*
28969 * override generic version in lib/iomap_copy.c
28970@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
28971 CFI_STARTPROC
28972 movl %edx,%ecx
28973 rep movsd
28974+ pax_force_retaddr
28975 ret
28976 CFI_ENDPROC
28977 ENDPROC(__iowrite32_copy)
28978diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
28979index 56313a3..0db417e 100644
28980--- a/arch/x86/lib/memcpy_64.S
28981+++ b/arch/x86/lib/memcpy_64.S
28982@@ -24,7 +24,7 @@
28983 * This gets patched over the unrolled variant (below) via the
28984 * alternative instructions framework:
28985 */
28986- .section .altinstr_replacement, "ax", @progbits
28987+ .section .altinstr_replacement, "a", @progbits
28988 .Lmemcpy_c:
28989 movq %rdi, %rax
28990 movq %rdx, %rcx
28991@@ -33,6 +33,7 @@
28992 rep movsq
28993 movl %edx, %ecx
28994 rep movsb
28995+ pax_force_retaddr
28996 ret
28997 .Lmemcpy_e:
28998 .previous
28999@@ -44,11 +45,12 @@
29000 * This gets patched over the unrolled variant (below) via the
29001 * alternative instructions framework:
29002 */
29003- .section .altinstr_replacement, "ax", @progbits
29004+ .section .altinstr_replacement, "a", @progbits
29005 .Lmemcpy_c_e:
29006 movq %rdi, %rax
29007 movq %rdx, %rcx
29008 rep movsb
29009+ pax_force_retaddr
29010 ret
29011 .Lmemcpy_e_e:
29012 .previous
29013@@ -136,6 +138,7 @@ ENTRY(memcpy)
29014 movq %r9, 1*8(%rdi)
29015 movq %r10, -2*8(%rdi, %rdx)
29016 movq %r11, -1*8(%rdi, %rdx)
29017+ pax_force_retaddr
29018 retq
29019 .p2align 4
29020 .Lless_16bytes:
29021@@ -148,6 +151,7 @@ ENTRY(memcpy)
29022 movq -1*8(%rsi, %rdx), %r9
29023 movq %r8, 0*8(%rdi)
29024 movq %r9, -1*8(%rdi, %rdx)
29025+ pax_force_retaddr
29026 retq
29027 .p2align 4
29028 .Lless_8bytes:
29029@@ -161,6 +165,7 @@ ENTRY(memcpy)
29030 movl -4(%rsi, %rdx), %r8d
29031 movl %ecx, (%rdi)
29032 movl %r8d, -4(%rdi, %rdx)
29033+ pax_force_retaddr
29034 retq
29035 .p2align 4
29036 .Lless_3bytes:
29037@@ -179,6 +184,7 @@ ENTRY(memcpy)
29038 movb %cl, (%rdi)
29039
29040 .Lend:
29041+ pax_force_retaddr
29042 retq
29043 CFI_ENDPROC
29044 ENDPROC(memcpy)
29045diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
29046index 65268a6..dd1de11 100644
29047--- a/arch/x86/lib/memmove_64.S
29048+++ b/arch/x86/lib/memmove_64.S
29049@@ -202,14 +202,16 @@ ENTRY(memmove)
29050 movb (%rsi), %r11b
29051 movb %r11b, (%rdi)
29052 13:
29053+ pax_force_retaddr
29054 retq
29055 CFI_ENDPROC
29056
29057- .section .altinstr_replacement,"ax"
29058+ .section .altinstr_replacement,"a"
29059 .Lmemmove_begin_forward_efs:
29060 /* Forward moving data. */
29061 movq %rdx, %rcx
29062 rep movsb
29063+ pax_force_retaddr
29064 retq
29065 .Lmemmove_end_forward_efs:
29066 .previous
29067diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
29068index 2dcb380..2eb79fe 100644
29069--- a/arch/x86/lib/memset_64.S
29070+++ b/arch/x86/lib/memset_64.S
29071@@ -16,7 +16,7 @@
29072 *
29073 * rax original destination
29074 */
29075- .section .altinstr_replacement, "ax", @progbits
29076+ .section .altinstr_replacement, "a", @progbits
29077 .Lmemset_c:
29078 movq %rdi,%r9
29079 movq %rdx,%rcx
29080@@ -30,6 +30,7 @@
29081 movl %edx,%ecx
29082 rep stosb
29083 movq %r9,%rax
29084+ pax_force_retaddr
29085 ret
29086 .Lmemset_e:
29087 .previous
29088@@ -45,13 +46,14 @@
29089 *
29090 * rax original destination
29091 */
29092- .section .altinstr_replacement, "ax", @progbits
29093+ .section .altinstr_replacement, "a", @progbits
29094 .Lmemset_c_e:
29095 movq %rdi,%r9
29096 movb %sil,%al
29097 movq %rdx,%rcx
29098 rep stosb
29099 movq %r9,%rax
29100+ pax_force_retaddr
29101 ret
29102 .Lmemset_e_e:
29103 .previous
29104@@ -118,6 +120,7 @@ ENTRY(__memset)
29105
29106 .Lende:
29107 movq %r10,%rax
29108+ pax_force_retaddr
29109 ret
29110
29111 CFI_RESTORE_STATE
29112diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
29113index c9f2d9b..e7fd2c0 100644
29114--- a/arch/x86/lib/mmx_32.c
29115+++ b/arch/x86/lib/mmx_32.c
29116@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
29117 {
29118 void *p;
29119 int i;
29120+ unsigned long cr0;
29121
29122 if (unlikely(in_interrupt()))
29123 return __memcpy(to, from, len);
29124@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
29125 kernel_fpu_begin();
29126
29127 __asm__ __volatile__ (
29128- "1: prefetch (%0)\n" /* This set is 28 bytes */
29129- " prefetch 64(%0)\n"
29130- " prefetch 128(%0)\n"
29131- " prefetch 192(%0)\n"
29132- " prefetch 256(%0)\n"
29133+ "1: prefetch (%1)\n" /* This set is 28 bytes */
29134+ " prefetch 64(%1)\n"
29135+ " prefetch 128(%1)\n"
29136+ " prefetch 192(%1)\n"
29137+ " prefetch 256(%1)\n"
29138 "2: \n"
29139 ".section .fixup, \"ax\"\n"
29140- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29141+ "3: \n"
29142+
29143+#ifdef CONFIG_PAX_KERNEXEC
29144+ " movl %%cr0, %0\n"
29145+ " movl %0, %%eax\n"
29146+ " andl $0xFFFEFFFF, %%eax\n"
29147+ " movl %%eax, %%cr0\n"
29148+#endif
29149+
29150+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29151+
29152+#ifdef CONFIG_PAX_KERNEXEC
29153+ " movl %0, %%cr0\n"
29154+#endif
29155+
29156 " jmp 2b\n"
29157 ".previous\n"
29158 _ASM_EXTABLE(1b, 3b)
29159- : : "r" (from));
29160+ : "=&r" (cr0) : "r" (from) : "ax");
29161
29162 for ( ; i > 5; i--) {
29163 __asm__ __volatile__ (
29164- "1: prefetch 320(%0)\n"
29165- "2: movq (%0), %%mm0\n"
29166- " movq 8(%0), %%mm1\n"
29167- " movq 16(%0), %%mm2\n"
29168- " movq 24(%0), %%mm3\n"
29169- " movq %%mm0, (%1)\n"
29170- " movq %%mm1, 8(%1)\n"
29171- " movq %%mm2, 16(%1)\n"
29172- " movq %%mm3, 24(%1)\n"
29173- " movq 32(%0), %%mm0\n"
29174- " movq 40(%0), %%mm1\n"
29175- " movq 48(%0), %%mm2\n"
29176- " movq 56(%0), %%mm3\n"
29177- " movq %%mm0, 32(%1)\n"
29178- " movq %%mm1, 40(%1)\n"
29179- " movq %%mm2, 48(%1)\n"
29180- " movq %%mm3, 56(%1)\n"
29181+ "1: prefetch 320(%1)\n"
29182+ "2: movq (%1), %%mm0\n"
29183+ " movq 8(%1), %%mm1\n"
29184+ " movq 16(%1), %%mm2\n"
29185+ " movq 24(%1), %%mm3\n"
29186+ " movq %%mm0, (%2)\n"
29187+ " movq %%mm1, 8(%2)\n"
29188+ " movq %%mm2, 16(%2)\n"
29189+ " movq %%mm3, 24(%2)\n"
29190+ " movq 32(%1), %%mm0\n"
29191+ " movq 40(%1), %%mm1\n"
29192+ " movq 48(%1), %%mm2\n"
29193+ " movq 56(%1), %%mm3\n"
29194+ " movq %%mm0, 32(%2)\n"
29195+ " movq %%mm1, 40(%2)\n"
29196+ " movq %%mm2, 48(%2)\n"
29197+ " movq %%mm3, 56(%2)\n"
29198 ".section .fixup, \"ax\"\n"
29199- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
29200+ "3:\n"
29201+
29202+#ifdef CONFIG_PAX_KERNEXEC
29203+ " movl %%cr0, %0\n"
29204+ " movl %0, %%eax\n"
29205+ " andl $0xFFFEFFFF, %%eax\n"
29206+ " movl %%eax, %%cr0\n"
29207+#endif
29208+
29209+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
29210+
29211+#ifdef CONFIG_PAX_KERNEXEC
29212+ " movl %0, %%cr0\n"
29213+#endif
29214+
29215 " jmp 2b\n"
29216 ".previous\n"
29217 _ASM_EXTABLE(1b, 3b)
29218- : : "r" (from), "r" (to) : "memory");
29219+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
29220
29221 from += 64;
29222 to += 64;
29223@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
29224 static void fast_copy_page(void *to, void *from)
29225 {
29226 int i;
29227+ unsigned long cr0;
29228
29229 kernel_fpu_begin();
29230
29231@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
29232 * but that is for later. -AV
29233 */
29234 __asm__ __volatile__(
29235- "1: prefetch (%0)\n"
29236- " prefetch 64(%0)\n"
29237- " prefetch 128(%0)\n"
29238- " prefetch 192(%0)\n"
29239- " prefetch 256(%0)\n"
29240+ "1: prefetch (%1)\n"
29241+ " prefetch 64(%1)\n"
29242+ " prefetch 128(%1)\n"
29243+ " prefetch 192(%1)\n"
29244+ " prefetch 256(%1)\n"
29245 "2: \n"
29246 ".section .fixup, \"ax\"\n"
29247- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29248+ "3: \n"
29249+
29250+#ifdef CONFIG_PAX_KERNEXEC
29251+ " movl %%cr0, %0\n"
29252+ " movl %0, %%eax\n"
29253+ " andl $0xFFFEFFFF, %%eax\n"
29254+ " movl %%eax, %%cr0\n"
29255+#endif
29256+
29257+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29258+
29259+#ifdef CONFIG_PAX_KERNEXEC
29260+ " movl %0, %%cr0\n"
29261+#endif
29262+
29263 " jmp 2b\n"
29264 ".previous\n"
29265- _ASM_EXTABLE(1b, 3b) : : "r" (from));
29266+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
29267
29268 for (i = 0; i < (4096-320)/64; i++) {
29269 __asm__ __volatile__ (
29270- "1: prefetch 320(%0)\n"
29271- "2: movq (%0), %%mm0\n"
29272- " movntq %%mm0, (%1)\n"
29273- " movq 8(%0), %%mm1\n"
29274- " movntq %%mm1, 8(%1)\n"
29275- " movq 16(%0), %%mm2\n"
29276- " movntq %%mm2, 16(%1)\n"
29277- " movq 24(%0), %%mm3\n"
29278- " movntq %%mm3, 24(%1)\n"
29279- " movq 32(%0), %%mm4\n"
29280- " movntq %%mm4, 32(%1)\n"
29281- " movq 40(%0), %%mm5\n"
29282- " movntq %%mm5, 40(%1)\n"
29283- " movq 48(%0), %%mm6\n"
29284- " movntq %%mm6, 48(%1)\n"
29285- " movq 56(%0), %%mm7\n"
29286- " movntq %%mm7, 56(%1)\n"
29287+ "1: prefetch 320(%1)\n"
29288+ "2: movq (%1), %%mm0\n"
29289+ " movntq %%mm0, (%2)\n"
29290+ " movq 8(%1), %%mm1\n"
29291+ " movntq %%mm1, 8(%2)\n"
29292+ " movq 16(%1), %%mm2\n"
29293+ " movntq %%mm2, 16(%2)\n"
29294+ " movq 24(%1), %%mm3\n"
29295+ " movntq %%mm3, 24(%2)\n"
29296+ " movq 32(%1), %%mm4\n"
29297+ " movntq %%mm4, 32(%2)\n"
29298+ " movq 40(%1), %%mm5\n"
29299+ " movntq %%mm5, 40(%2)\n"
29300+ " movq 48(%1), %%mm6\n"
29301+ " movntq %%mm6, 48(%2)\n"
29302+ " movq 56(%1), %%mm7\n"
29303+ " movntq %%mm7, 56(%2)\n"
29304 ".section .fixup, \"ax\"\n"
29305- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
29306+ "3:\n"
29307+
29308+#ifdef CONFIG_PAX_KERNEXEC
29309+ " movl %%cr0, %0\n"
29310+ " movl %0, %%eax\n"
29311+ " andl $0xFFFEFFFF, %%eax\n"
29312+ " movl %%eax, %%cr0\n"
29313+#endif
29314+
29315+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
29316+
29317+#ifdef CONFIG_PAX_KERNEXEC
29318+ " movl %0, %%cr0\n"
29319+#endif
29320+
29321 " jmp 2b\n"
29322 ".previous\n"
29323- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
29324+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
29325
29326 from += 64;
29327 to += 64;
29328@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
29329 static void fast_copy_page(void *to, void *from)
29330 {
29331 int i;
29332+ unsigned long cr0;
29333
29334 kernel_fpu_begin();
29335
29336 __asm__ __volatile__ (
29337- "1: prefetch (%0)\n"
29338- " prefetch 64(%0)\n"
29339- " prefetch 128(%0)\n"
29340- " prefetch 192(%0)\n"
29341- " prefetch 256(%0)\n"
29342+ "1: prefetch (%1)\n"
29343+ " prefetch 64(%1)\n"
29344+ " prefetch 128(%1)\n"
29345+ " prefetch 192(%1)\n"
29346+ " prefetch 256(%1)\n"
29347 "2: \n"
29348 ".section .fixup, \"ax\"\n"
29349- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29350+ "3: \n"
29351+
29352+#ifdef CONFIG_PAX_KERNEXEC
29353+ " movl %%cr0, %0\n"
29354+ " movl %0, %%eax\n"
29355+ " andl $0xFFFEFFFF, %%eax\n"
29356+ " movl %%eax, %%cr0\n"
29357+#endif
29358+
29359+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29360+
29361+#ifdef CONFIG_PAX_KERNEXEC
29362+ " movl %0, %%cr0\n"
29363+#endif
29364+
29365 " jmp 2b\n"
29366 ".previous\n"
29367- _ASM_EXTABLE(1b, 3b) : : "r" (from));
29368+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
29369
29370 for (i = 0; i < 4096/64; i++) {
29371 __asm__ __volatile__ (
29372- "1: prefetch 320(%0)\n"
29373- "2: movq (%0), %%mm0\n"
29374- " movq 8(%0), %%mm1\n"
29375- " movq 16(%0), %%mm2\n"
29376- " movq 24(%0), %%mm3\n"
29377- " movq %%mm0, (%1)\n"
29378- " movq %%mm1, 8(%1)\n"
29379- " movq %%mm2, 16(%1)\n"
29380- " movq %%mm3, 24(%1)\n"
29381- " movq 32(%0), %%mm0\n"
29382- " movq 40(%0), %%mm1\n"
29383- " movq 48(%0), %%mm2\n"
29384- " movq 56(%0), %%mm3\n"
29385- " movq %%mm0, 32(%1)\n"
29386- " movq %%mm1, 40(%1)\n"
29387- " movq %%mm2, 48(%1)\n"
29388- " movq %%mm3, 56(%1)\n"
29389+ "1: prefetch 320(%1)\n"
29390+ "2: movq (%1), %%mm0\n"
29391+ " movq 8(%1), %%mm1\n"
29392+ " movq 16(%1), %%mm2\n"
29393+ " movq 24(%1), %%mm3\n"
29394+ " movq %%mm0, (%2)\n"
29395+ " movq %%mm1, 8(%2)\n"
29396+ " movq %%mm2, 16(%2)\n"
29397+ " movq %%mm3, 24(%2)\n"
29398+ " movq 32(%1), %%mm0\n"
29399+ " movq 40(%1), %%mm1\n"
29400+ " movq 48(%1), %%mm2\n"
29401+ " movq 56(%1), %%mm3\n"
29402+ " movq %%mm0, 32(%2)\n"
29403+ " movq %%mm1, 40(%2)\n"
29404+ " movq %%mm2, 48(%2)\n"
29405+ " movq %%mm3, 56(%2)\n"
29406 ".section .fixup, \"ax\"\n"
29407- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
29408+ "3:\n"
29409+
29410+#ifdef CONFIG_PAX_KERNEXEC
29411+ " movl %%cr0, %0\n"
29412+ " movl %0, %%eax\n"
29413+ " andl $0xFFFEFFFF, %%eax\n"
29414+ " movl %%eax, %%cr0\n"
29415+#endif
29416+
29417+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
29418+
29419+#ifdef CONFIG_PAX_KERNEXEC
29420+ " movl %0, %%cr0\n"
29421+#endif
29422+
29423 " jmp 2b\n"
29424 ".previous\n"
29425 _ASM_EXTABLE(1b, 3b)
29426- : : "r" (from), "r" (to) : "memory");
29427+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
29428
29429 from += 64;
29430 to += 64;
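
The fixup paths here rewrite a kernel-text instruction (they patch a short jmp over the faulting prefetch), which write-protected kernel text forbids under KERNEXEC; hence the new cr0 dance. The 0xFFFEFFFF mask clears CR0.WP (bit 16) for the duration of the write, and the operand renumbering (%0 becomes the cr0 temporary) plus the "ax" clobber follow from the added scratch register. The mask arithmetic, worked through with an example CR0 value:

    #include <stdio.h>

    #define X86_CR0_WP (1UL << 16)

    int main(void)
    {
        unsigned long cr0  = 0x8005003bUL;        /* example CR0 contents */
        unsigned long open = cr0 & 0xFFFEFFFFUL;  /* == cr0 & ~X86_CR0_WP */

        printf("WP cleared:  %#lx\n", open);
        printf("WP restored: %#lx\n", open | (cr0 & X86_CR0_WP));
        return 0;
    }
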
29431diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
29432index f6d13ee..d789440 100644
29433--- a/arch/x86/lib/msr-reg.S
29434+++ b/arch/x86/lib/msr-reg.S
29435@@ -3,6 +3,7 @@
29436 #include <asm/dwarf2.h>
29437 #include <asm/asm.h>
29438 #include <asm/msr.h>
29439+#include <asm/alternative-asm.h>
29440
29441 #ifdef CONFIG_X86_64
29442 /*
29443@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
29444 movl %edi, 28(%r10)
29445 popq_cfi %rbp
29446 popq_cfi %rbx
29447+ pax_force_retaddr
29448 ret
29449 3:
29450 CFI_RESTORE_STATE
29451diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
29452index fc6ba17..d4d989d 100644
29453--- a/arch/x86/lib/putuser.S
29454+++ b/arch/x86/lib/putuser.S
29455@@ -16,7 +16,9 @@
29456 #include <asm/errno.h>
29457 #include <asm/asm.h>
29458 #include <asm/smap.h>
29459-
29460+#include <asm/segment.h>
29461+#include <asm/pgtable.h>
29462+#include <asm/alternative-asm.h>
29463
29464 /*
29465 * __put_user_X
29466@@ -30,57 +32,125 @@
29467 * as they get called from within inline assembly.
29468 */
29469
29470-#define ENTER CFI_STARTPROC ; \
29471- GET_THREAD_INFO(%_ASM_BX)
29472-#define EXIT ASM_CLAC ; \
29473- ret ; \
29474+#define ENTER CFI_STARTPROC
29475+#define EXIT ASM_CLAC ; \
29476+ pax_force_retaddr ; \
29477+ ret ; \
29478 CFI_ENDPROC
29479
29480+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29481+#define _DEST %_ASM_CX,%_ASM_BX
29482+#else
29483+#define _DEST %_ASM_CX
29484+#endif
29485+
29486+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
29487+#define __copyuser_seg gs;
29488+#else
29489+#define __copyuser_seg
29490+#endif
29491+
29492 .text
29493 ENTRY(__put_user_1)
29494 ENTER
29495+
29496+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
29497+ GET_THREAD_INFO(%_ASM_BX)
29498 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
29499 jae bad_put_user
29500 ASM_STAC
29501-1: movb %al,(%_ASM_CX)
29502+
29503+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29504+ mov pax_user_shadow_base,%_ASM_BX
29505+ cmp %_ASM_BX,%_ASM_CX
29506+ jb 1234f
29507+ xor %ebx,%ebx
29508+1234:
29509+#endif
29510+
29511+#endif
29512+
29513+1: __copyuser_seg movb %al,(_DEST)
29514 xor %eax,%eax
29515 EXIT
29516 ENDPROC(__put_user_1)
29517
29518 ENTRY(__put_user_2)
29519 ENTER
29520+
29521+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
29522+ GET_THREAD_INFO(%_ASM_BX)
29523 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
29524 sub $1,%_ASM_BX
29525 cmp %_ASM_BX,%_ASM_CX
29526 jae bad_put_user
29527 ASM_STAC
29528-2: movw %ax,(%_ASM_CX)
29529+
29530+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29531+ mov pax_user_shadow_base,%_ASM_BX
29532+ cmp %_ASM_BX,%_ASM_CX
29533+ jb 1234f
29534+ xor %ebx,%ebx
29535+1234:
29536+#endif
29537+
29538+#endif
29539+
29540+2: __copyuser_seg movw %ax,(_DEST)
29541 xor %eax,%eax
29542 EXIT
29543 ENDPROC(__put_user_2)
29544
29545 ENTRY(__put_user_4)
29546 ENTER
29547+
29548+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
29549+ GET_THREAD_INFO(%_ASM_BX)
29550 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
29551 sub $3,%_ASM_BX
29552 cmp %_ASM_BX,%_ASM_CX
29553 jae bad_put_user
29554 ASM_STAC
29555-3: movl %eax,(%_ASM_CX)
29556+
29557+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29558+ mov pax_user_shadow_base,%_ASM_BX
29559+ cmp %_ASM_BX,%_ASM_CX
29560+ jb 1234f
29561+ xor %ebx,%ebx
29562+1234:
29563+#endif
29564+
29565+#endif
29566+
29567+3: __copyuser_seg movl %eax,(_DEST)
29568 xor %eax,%eax
29569 EXIT
29570 ENDPROC(__put_user_4)
29571
29572 ENTRY(__put_user_8)
29573 ENTER
29574+
29575+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
29576+ GET_THREAD_INFO(%_ASM_BX)
29577 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
29578 sub $7,%_ASM_BX
29579 cmp %_ASM_BX,%_ASM_CX
29580 jae bad_put_user
29581 ASM_STAC
29582-4: mov %_ASM_AX,(%_ASM_CX)
29583+
29584+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29585+ mov pax_user_shadow_base,%_ASM_BX
29586+ cmp %_ASM_BX,%_ASM_CX
29587+ jb 1234f
29588+ xor %ebx,%ebx
29589+1234:
29590+#endif
29591+
29592+#endif
29593+
29594+4: __copyuser_seg mov %_ASM_AX,(_DEST)
29595 #ifdef CONFIG_X86_32
29596-5: movl %edx,4(%_ASM_CX)
29597+5: __copyuser_seg movl %edx,4(_DEST)
29598 #endif
29599 xor %eax,%eax
29600 EXIT
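
The __put_user_* bodies grow two UDEREF mechanisms: on i386, user stores go through the %gs segment (__copyuser_seg), and on amd64 a pointer below pax_user_shadow_base is rebased into the kernel-side shadow mapping of userland via the (%_ASM_CX,%_ASM_BX) addressing, while higher pointers pass through with a zeroed index register. A C model of that cmp/jb/xor rebasing; the real pax_user_shadow_base is a kernel variable, the value below is invented:

    #include <stdint.h>
    #include <stdio.h>

    static const uint64_t shadow_base_example = 0xffff880000000000ULL;

    static uint64_t uderef_rebase(uint64_t uaddr)
    {
        /* matches the mov/cmp/jb/xor sequence plus (%rcx,%rbx) addressing */
        uint64_t bias = (uaddr < shadow_base_example) ? shadow_base_example : 0;
        return uaddr + bias;
    }

    int main(void)
    {
        printf("%#llx\n", (unsigned long long)uderef_rebase(0x400000ULL));
        printf("%#llx\n",
               (unsigned long long)uderef_rebase(0xffff880000400000ULL));
        return 0;
    }
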
29601diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
29602index 1cad221..de671ee 100644
29603--- a/arch/x86/lib/rwlock.S
29604+++ b/arch/x86/lib/rwlock.S
29605@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
29606 FRAME
29607 0: LOCK_PREFIX
29608 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
29609+
29610+#ifdef CONFIG_PAX_REFCOUNT
29611+ jno 1234f
29612+ LOCK_PREFIX
29613+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
29614+ int $4
29615+1234:
29616+ _ASM_EXTABLE(1234b, 1234b)
29617+#endif
29618+
29619 1: rep; nop
29620 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
29621 jne 1b
29622 LOCK_PREFIX
29623 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
29624+
29625+#ifdef CONFIG_PAX_REFCOUNT
29626+ jno 1234f
29627+ LOCK_PREFIX
29628+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
29629+ int $4
29630+1234:
29631+ _ASM_EXTABLE(1234b, 1234b)
29632+#endif
29633+
29634 jnz 0b
29635 ENDFRAME
29636+ pax_force_retaddr
29637 ret
29638 CFI_ENDPROC
29639 END(__write_lock_failed)
29640@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
29641 FRAME
29642 0: LOCK_PREFIX
29643 READ_LOCK_SIZE(inc) (%__lock_ptr)
29644+
29645+#ifdef CONFIG_PAX_REFCOUNT
29646+ jno 1234f
29647+ LOCK_PREFIX
29648+ READ_LOCK_SIZE(dec) (%__lock_ptr)
29649+ int $4
29650+1234:
29651+ _ASM_EXTABLE(1234b, 1234b)
29652+#endif
29653+
29654 1: rep; nop
29655 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
29656 js 1b
29657 LOCK_PREFIX
29658 READ_LOCK_SIZE(dec) (%__lock_ptr)
29659+
29660+#ifdef CONFIG_PAX_REFCOUNT
29661+ jno 1234f
29662+ LOCK_PREFIX
29663+ READ_LOCK_SIZE(inc) (%__lock_ptr)
29664+ int $4
29665+1234:
29666+ _ASM_EXTABLE(1234b, 1234b)
29667+#endif
29668+
29669 js 0b
29670 ENDFRAME
29671+ pax_force_retaddr
29672 ret
29673 CFI_ENDPROC
29674 END(__read_lock_failed)
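
The PAX_REFCOUNT blocks wrap each locked add/inc: if the operation set the overflow flag, jno falls through, the operation is undone with the inverse locked op, and int $4 raises the x86 overflow exception, whose handler is reached through the 1234b extable entry pointing at itself. The same logic in plain C, test-first here because C signed overflow is undefined, whereas the asm can do the op and consult OF afterwards:

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void refcount_inc_checked(int *count)
    {
        if (*count == INT_MAX) {   /* the case where "jno" is not taken */
            fprintf(stderr, "refcount overflow detected\n");
            abort();               /* stand-in for "int $4" -> #OF handler */
        }
        (*count)++;
    }

    int main(void)
    {
        int rc = INT_MAX - 1;
        refcount_inc_checked(&rc); /* fine: reaches INT_MAX */
        refcount_inc_checked(&rc); /* trips the check and aborts */
        return 0;
    }
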
29675diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
29676index 5dff5f0..cadebf4 100644
29677--- a/arch/x86/lib/rwsem.S
29678+++ b/arch/x86/lib/rwsem.S
29679@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
29680 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
29681 CFI_RESTORE __ASM_REG(dx)
29682 restore_common_regs
29683+ pax_force_retaddr
29684 ret
29685 CFI_ENDPROC
29686 ENDPROC(call_rwsem_down_read_failed)
29687@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
29688 movq %rax,%rdi
29689 call rwsem_down_write_failed
29690 restore_common_regs
29691+ pax_force_retaddr
29692 ret
29693 CFI_ENDPROC
29694 ENDPROC(call_rwsem_down_write_failed)
29695@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
29696 movq %rax,%rdi
29697 call rwsem_wake
29698 restore_common_regs
29699-1: ret
29700+1: pax_force_retaddr
29701+ ret
29702 CFI_ENDPROC
29703 ENDPROC(call_rwsem_wake)
29704
29705@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
29706 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
29707 CFI_RESTORE __ASM_REG(dx)
29708 restore_common_regs
29709+ pax_force_retaddr
29710 ret
29711 CFI_ENDPROC
29712 ENDPROC(call_rwsem_downgrade_wake)
29713diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
29714index a63efd6..8149fbe 100644
29715--- a/arch/x86/lib/thunk_64.S
29716+++ b/arch/x86/lib/thunk_64.S
29717@@ -8,6 +8,7 @@
29718 #include <linux/linkage.h>
29719 #include <asm/dwarf2.h>
29720 #include <asm/calling.h>
29721+#include <asm/alternative-asm.h>
29722
29723 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
29724 .macro THUNK name, func, put_ret_addr_in_rdi=0
29725@@ -15,11 +16,11 @@
29726 \name:
29727 CFI_STARTPROC
29728
29729- /* this one pushes 9 elems, the next one would be %rIP */
29730- SAVE_ARGS
29731+ /* this one pushes 15+1 elems, the next one would be %rIP */
29732+ SAVE_ARGS 8
29733
29734 .if \put_ret_addr_in_rdi
29735- movq_cfi_restore 9*8, rdi
29736+ movq_cfi_restore RIP, rdi
29737 .endif
29738
29739 call \func
29740@@ -38,8 +39,9 @@
29741
29742 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
29743 CFI_STARTPROC
29744- SAVE_ARGS
29745+ SAVE_ARGS 8
29746 restore:
29747- RESTORE_ARGS
29748+ RESTORE_ARGS 1,8
29749+ pax_force_retaddr
29750 ret
29751 CFI_ENDPROC
29752diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
29753index 3eb18ac..4b22130 100644
29754--- a/arch/x86/lib/usercopy_32.c
29755+++ b/arch/x86/lib/usercopy_32.c
29756@@ -42,11 +42,13 @@ do { \
29757 int __d0; \
29758 might_fault(); \
29759 __asm__ __volatile__( \
29760+ __COPYUSER_SET_ES \
29761 ASM_STAC "\n" \
29762 "0: rep; stosl\n" \
29763 " movl %2,%0\n" \
29764 "1: rep; stosb\n" \
29765 "2: " ASM_CLAC "\n" \
29766+ __COPYUSER_RESTORE_ES \
29767 ".section .fixup,\"ax\"\n" \
29768 "3: lea 0(%2,%0,4),%0\n" \
29769 " jmp 2b\n" \
29770@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
29771
29772 #ifdef CONFIG_X86_INTEL_USERCOPY
29773 static unsigned long
29774-__copy_user_intel(void __user *to, const void *from, unsigned long size)
29775+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
29776 {
29777 int d0, d1;
29778 __asm__ __volatile__(
29779@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
29780 " .align 2,0x90\n"
29781 "3: movl 0(%4), %%eax\n"
29782 "4: movl 4(%4), %%edx\n"
29783- "5: movl %%eax, 0(%3)\n"
29784- "6: movl %%edx, 4(%3)\n"
29785+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
29786+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
29787 "7: movl 8(%4), %%eax\n"
29788 "8: movl 12(%4),%%edx\n"
29789- "9: movl %%eax, 8(%3)\n"
29790- "10: movl %%edx, 12(%3)\n"
29791+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
29792+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
29793 "11: movl 16(%4), %%eax\n"
29794 "12: movl 20(%4), %%edx\n"
29795- "13: movl %%eax, 16(%3)\n"
29796- "14: movl %%edx, 20(%3)\n"
29797+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
29798+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
29799 "15: movl 24(%4), %%eax\n"
29800 "16: movl 28(%4), %%edx\n"
29801- "17: movl %%eax, 24(%3)\n"
29802- "18: movl %%edx, 28(%3)\n"
29803+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
29804+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
29805 "19: movl 32(%4), %%eax\n"
29806 "20: movl 36(%4), %%edx\n"
29807- "21: movl %%eax, 32(%3)\n"
29808- "22: movl %%edx, 36(%3)\n"
29809+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
29810+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
29811 "23: movl 40(%4), %%eax\n"
29812 "24: movl 44(%4), %%edx\n"
29813- "25: movl %%eax, 40(%3)\n"
29814- "26: movl %%edx, 44(%3)\n"
29815+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
29816+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
29817 "27: movl 48(%4), %%eax\n"
29818 "28: movl 52(%4), %%edx\n"
29819- "29: movl %%eax, 48(%3)\n"
29820- "30: movl %%edx, 52(%3)\n"
29821+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
29822+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
29823 "31: movl 56(%4), %%eax\n"
29824 "32: movl 60(%4), %%edx\n"
29825- "33: movl %%eax, 56(%3)\n"
29826- "34: movl %%edx, 60(%3)\n"
29827+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
29828+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
29829 " addl $-64, %0\n"
29830 " addl $64, %4\n"
29831 " addl $64, %3\n"
29832@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
29833 " shrl $2, %0\n"
29834 " andl $3, %%eax\n"
29835 " cld\n"
29836+ __COPYUSER_SET_ES
29837 "99: rep; movsl\n"
29838 "36: movl %%eax, %0\n"
29839 "37: rep; movsb\n"
29840 "100:\n"
29841+ __COPYUSER_RESTORE_ES
29842+ ".section .fixup,\"ax\"\n"
29843+ "101: lea 0(%%eax,%0,4),%0\n"
29844+ " jmp 100b\n"
29845+ ".previous\n"
29846+ _ASM_EXTABLE(1b,100b)
29847+ _ASM_EXTABLE(2b,100b)
29848+ _ASM_EXTABLE(3b,100b)
29849+ _ASM_EXTABLE(4b,100b)
29850+ _ASM_EXTABLE(5b,100b)
29851+ _ASM_EXTABLE(6b,100b)
29852+ _ASM_EXTABLE(7b,100b)
29853+ _ASM_EXTABLE(8b,100b)
29854+ _ASM_EXTABLE(9b,100b)
29855+ _ASM_EXTABLE(10b,100b)
29856+ _ASM_EXTABLE(11b,100b)
29857+ _ASM_EXTABLE(12b,100b)
29858+ _ASM_EXTABLE(13b,100b)
29859+ _ASM_EXTABLE(14b,100b)
29860+ _ASM_EXTABLE(15b,100b)
29861+ _ASM_EXTABLE(16b,100b)
29862+ _ASM_EXTABLE(17b,100b)
29863+ _ASM_EXTABLE(18b,100b)
29864+ _ASM_EXTABLE(19b,100b)
29865+ _ASM_EXTABLE(20b,100b)
29866+ _ASM_EXTABLE(21b,100b)
29867+ _ASM_EXTABLE(22b,100b)
29868+ _ASM_EXTABLE(23b,100b)
29869+ _ASM_EXTABLE(24b,100b)
29870+ _ASM_EXTABLE(25b,100b)
29871+ _ASM_EXTABLE(26b,100b)
29872+ _ASM_EXTABLE(27b,100b)
29873+ _ASM_EXTABLE(28b,100b)
29874+ _ASM_EXTABLE(29b,100b)
29875+ _ASM_EXTABLE(30b,100b)
29876+ _ASM_EXTABLE(31b,100b)
29877+ _ASM_EXTABLE(32b,100b)
29878+ _ASM_EXTABLE(33b,100b)
29879+ _ASM_EXTABLE(34b,100b)
29880+ _ASM_EXTABLE(35b,100b)
29881+ _ASM_EXTABLE(36b,100b)
29882+ _ASM_EXTABLE(37b,100b)
29883+ _ASM_EXTABLE(99b,101b)
29884+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
29885+ : "1"(to), "2"(from), "0"(size)
29886+ : "eax", "edx", "memory");
29887+ return size;
29888+}
29889+
29890+static unsigned long
29891+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
29892+{
29893+ int d0, d1;
29894+ __asm__ __volatile__(
29895+ " .align 2,0x90\n"
29896+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
29897+ " cmpl $67, %0\n"
29898+ " jbe 3f\n"
29899+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
29900+ " .align 2,0x90\n"
29901+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
29902+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
29903+ "5: movl %%eax, 0(%3)\n"
29904+ "6: movl %%edx, 4(%3)\n"
29905+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
29906+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
29907+ "9: movl %%eax, 8(%3)\n"
29908+ "10: movl %%edx, 12(%3)\n"
29909+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
29910+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
29911+ "13: movl %%eax, 16(%3)\n"
29912+ "14: movl %%edx, 20(%3)\n"
29913+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
29914+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
29915+ "17: movl %%eax, 24(%3)\n"
29916+ "18: movl %%edx, 28(%3)\n"
29917+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
29918+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
29919+ "21: movl %%eax, 32(%3)\n"
29920+ "22: movl %%edx, 36(%3)\n"
29921+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
29922+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
29923+ "25: movl %%eax, 40(%3)\n"
29924+ "26: movl %%edx, 44(%3)\n"
29925+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
29926+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
29927+ "29: movl %%eax, 48(%3)\n"
29928+ "30: movl %%edx, 52(%3)\n"
29929+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
29930+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
29931+ "33: movl %%eax, 56(%3)\n"
29932+ "34: movl %%edx, 60(%3)\n"
29933+ " addl $-64, %0\n"
29934+ " addl $64, %4\n"
29935+ " addl $64, %3\n"
29936+ " cmpl $63, %0\n"
29937+ " ja 1b\n"
29938+ "35: movl %0, %%eax\n"
29939+ " shrl $2, %0\n"
29940+ " andl $3, %%eax\n"
29941+ " cld\n"
29942+ "99: rep; "__copyuser_seg" movsl\n"
29943+ "36: movl %%eax, %0\n"
29944+ "37: rep; "__copyuser_seg" movsb\n"
29945+ "100:\n"
29946 ".section .fixup,\"ax\"\n"
29947 "101: lea 0(%%eax,%0,4),%0\n"
29948 " jmp 100b\n"
29949@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
29950 int d0, d1;
29951 __asm__ __volatile__(
29952 " .align 2,0x90\n"
29953- "0: movl 32(%4), %%eax\n"
29954+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
29955 " cmpl $67, %0\n"
29956 " jbe 2f\n"
29957- "1: movl 64(%4), %%eax\n"
29958+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
29959 " .align 2,0x90\n"
29960- "2: movl 0(%4), %%eax\n"
29961- "21: movl 4(%4), %%edx\n"
29962+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
29963+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
29964 " movl %%eax, 0(%3)\n"
29965 " movl %%edx, 4(%3)\n"
29966- "3: movl 8(%4), %%eax\n"
29967- "31: movl 12(%4),%%edx\n"
29968+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
29969+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
29970 " movl %%eax, 8(%3)\n"
29971 " movl %%edx, 12(%3)\n"
29972- "4: movl 16(%4), %%eax\n"
29973- "41: movl 20(%4), %%edx\n"
29974+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
29975+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
29976 " movl %%eax, 16(%3)\n"
29977 " movl %%edx, 20(%3)\n"
29978- "10: movl 24(%4), %%eax\n"
29979- "51: movl 28(%4), %%edx\n"
29980+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
29981+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
29982 " movl %%eax, 24(%3)\n"
29983 " movl %%edx, 28(%3)\n"
29984- "11: movl 32(%4), %%eax\n"
29985- "61: movl 36(%4), %%edx\n"
29986+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
29987+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
29988 " movl %%eax, 32(%3)\n"
29989 " movl %%edx, 36(%3)\n"
29990- "12: movl 40(%4), %%eax\n"
29991- "71: movl 44(%4), %%edx\n"
29992+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
29993+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
29994 " movl %%eax, 40(%3)\n"
29995 " movl %%edx, 44(%3)\n"
29996- "13: movl 48(%4), %%eax\n"
29997- "81: movl 52(%4), %%edx\n"
29998+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
29999+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
30000 " movl %%eax, 48(%3)\n"
30001 " movl %%edx, 52(%3)\n"
30002- "14: movl 56(%4), %%eax\n"
30003- "91: movl 60(%4), %%edx\n"
30004+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
30005+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
30006 " movl %%eax, 56(%3)\n"
30007 " movl %%edx, 60(%3)\n"
30008 " addl $-64, %0\n"
30009@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
30010 " shrl $2, %0\n"
30011 " andl $3, %%eax\n"
30012 " cld\n"
30013- "6: rep; movsl\n"
30014+ "6: rep; "__copyuser_seg" movsl\n"
30015 " movl %%eax,%0\n"
30016- "7: rep; movsb\n"
30017+ "7: rep; "__copyuser_seg" movsb\n"
30018 "8:\n"
30019 ".section .fixup,\"ax\"\n"
30020 "9: lea 0(%%eax,%0,4),%0\n"
30021@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
30022
30023 __asm__ __volatile__(
30024 " .align 2,0x90\n"
30025- "0: movl 32(%4), %%eax\n"
30026+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
30027 " cmpl $67, %0\n"
30028 " jbe 2f\n"
30029- "1: movl 64(%4), %%eax\n"
30030+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
30031 " .align 2,0x90\n"
30032- "2: movl 0(%4), %%eax\n"
30033- "21: movl 4(%4), %%edx\n"
30034+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
30035+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
30036 " movnti %%eax, 0(%3)\n"
30037 " movnti %%edx, 4(%3)\n"
30038- "3: movl 8(%4), %%eax\n"
30039- "31: movl 12(%4),%%edx\n"
30040+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
30041+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
30042 " movnti %%eax, 8(%3)\n"
30043 " movnti %%edx, 12(%3)\n"
30044- "4: movl 16(%4), %%eax\n"
30045- "41: movl 20(%4), %%edx\n"
30046+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
30047+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
30048 " movnti %%eax, 16(%3)\n"
30049 " movnti %%edx, 20(%3)\n"
30050- "10: movl 24(%4), %%eax\n"
30051- "51: movl 28(%4), %%edx\n"
30052+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
30053+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
30054 " movnti %%eax, 24(%3)\n"
30055 " movnti %%edx, 28(%3)\n"
30056- "11: movl 32(%4), %%eax\n"
30057- "61: movl 36(%4), %%edx\n"
30058+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
30059+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
30060 " movnti %%eax, 32(%3)\n"
30061 " movnti %%edx, 36(%3)\n"
30062- "12: movl 40(%4), %%eax\n"
30063- "71: movl 44(%4), %%edx\n"
30064+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
30065+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
30066 " movnti %%eax, 40(%3)\n"
30067 " movnti %%edx, 44(%3)\n"
30068- "13: movl 48(%4), %%eax\n"
30069- "81: movl 52(%4), %%edx\n"
30070+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
30071+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
30072 " movnti %%eax, 48(%3)\n"
30073 " movnti %%edx, 52(%3)\n"
30074- "14: movl 56(%4), %%eax\n"
30075- "91: movl 60(%4), %%edx\n"
30076+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
30077+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
30078 " movnti %%eax, 56(%3)\n"
30079 " movnti %%edx, 60(%3)\n"
30080 " addl $-64, %0\n"
30081@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
30082 " shrl $2, %0\n"
30083 " andl $3, %%eax\n"
30084 " cld\n"
30085- "6: rep; movsl\n"
30086+ "6: rep; "__copyuser_seg" movsl\n"
30087 " movl %%eax,%0\n"
30088- "7: rep; movsb\n"
30089+ "7: rep; "__copyuser_seg" movsb\n"
30090 "8:\n"
30091 ".section .fixup,\"ax\"\n"
30092 "9: lea 0(%%eax,%0,4),%0\n"
30093@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
30094
30095 __asm__ __volatile__(
30096 " .align 2,0x90\n"
30097- "0: movl 32(%4), %%eax\n"
30098+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
30099 " cmpl $67, %0\n"
30100 " jbe 2f\n"
30101- "1: movl 64(%4), %%eax\n"
30102+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
30103 " .align 2,0x90\n"
30104- "2: movl 0(%4), %%eax\n"
30105- "21: movl 4(%4), %%edx\n"
30106+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
30107+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
30108 " movnti %%eax, 0(%3)\n"
30109 " movnti %%edx, 4(%3)\n"
30110- "3: movl 8(%4), %%eax\n"
30111- "31: movl 12(%4),%%edx\n"
30112+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
30113+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
30114 " movnti %%eax, 8(%3)\n"
30115 " movnti %%edx, 12(%3)\n"
30116- "4: movl 16(%4), %%eax\n"
30117- "41: movl 20(%4), %%edx\n"
30118+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
30119+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
30120 " movnti %%eax, 16(%3)\n"
30121 " movnti %%edx, 20(%3)\n"
30122- "10: movl 24(%4), %%eax\n"
30123- "51: movl 28(%4), %%edx\n"
30124+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
30125+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
30126 " movnti %%eax, 24(%3)\n"
30127 " movnti %%edx, 28(%3)\n"
30128- "11: movl 32(%4), %%eax\n"
30129- "61: movl 36(%4), %%edx\n"
30130+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
30131+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
30132 " movnti %%eax, 32(%3)\n"
30133 " movnti %%edx, 36(%3)\n"
30134- "12: movl 40(%4), %%eax\n"
30135- "71: movl 44(%4), %%edx\n"
30136+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
30137+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
30138 " movnti %%eax, 40(%3)\n"
30139 " movnti %%edx, 44(%3)\n"
30140- "13: movl 48(%4), %%eax\n"
30141- "81: movl 52(%4), %%edx\n"
30142+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
30143+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
30144 " movnti %%eax, 48(%3)\n"
30145 " movnti %%edx, 52(%3)\n"
30146- "14: movl 56(%4), %%eax\n"
30147- "91: movl 60(%4), %%edx\n"
30148+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
30149+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
30150 " movnti %%eax, 56(%3)\n"
30151 " movnti %%edx, 60(%3)\n"
30152 " addl $-64, %0\n"
30153@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
30154 " shrl $2, %0\n"
30155 " andl $3, %%eax\n"
30156 " cld\n"
30157- "6: rep; movsl\n"
30158+ "6: rep; "__copyuser_seg" movsl\n"
30159 " movl %%eax,%0\n"
30160- "7: rep; movsb\n"
30161+ "7: rep; "__copyuser_seg" movsb\n"
30162 "8:\n"
30163 ".section .fixup,\"ax\"\n"
30164 "9: lea 0(%%eax,%0,4),%0\n"
30165@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
30166 */
30167 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
30168 unsigned long size);
30169-unsigned long __copy_user_intel(void __user *to, const void *from,
30170+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
30171+ unsigned long size);
30172+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
30173 unsigned long size);
30174 unsigned long __copy_user_zeroing_intel_nocache(void *to,
30175 const void __user *from, unsigned long size);
30176 #endif /* CONFIG_X86_INTEL_USERCOPY */
30177
30178 /* Generic arbitrary sized copy. */
30179-#define __copy_user(to, from, size) \
30180+#define __copy_user(to, from, size, prefix, set, restore) \
30181 do { \
30182 int __d0, __d1, __d2; \
30183 __asm__ __volatile__( \
30184+ set \
30185 " cmp $7,%0\n" \
30186 " jbe 1f\n" \
30187 " movl %1,%0\n" \
30188 " negl %0\n" \
30189 " andl $7,%0\n" \
30190 " subl %0,%3\n" \
30191- "4: rep; movsb\n" \
30192+ "4: rep; "prefix"movsb\n" \
30193 " movl %3,%0\n" \
30194 " shrl $2,%0\n" \
30195 " andl $3,%3\n" \
30196 " .align 2,0x90\n" \
30197- "0: rep; movsl\n" \
30198+ "0: rep; "prefix"movsl\n" \
30199 " movl %3,%0\n" \
30200- "1: rep; movsb\n" \
30201+ "1: rep; "prefix"movsb\n" \
30202 "2:\n" \
30203+ restore \
30204 ".section .fixup,\"ax\"\n" \
30205 "5: addl %3,%0\n" \
30206 " jmp 2b\n" \
30207@@ -538,14 +650,14 @@ do { \
30208 " negl %0\n" \
30209 " andl $7,%0\n" \
30210 " subl %0,%3\n" \
30211- "4: rep; movsb\n" \
30212+ "4: rep; "__copyuser_seg"movsb\n" \
30213 " movl %3,%0\n" \
30214 " shrl $2,%0\n" \
30215 " andl $3,%3\n" \
30216 " .align 2,0x90\n" \
30217- "0: rep; movsl\n" \
30218+ "0: rep; "__copyuser_seg"movsl\n" \
30219 " movl %3,%0\n" \
30220- "1: rep; movsb\n" \
30221+ "1: rep; "__copyuser_seg"movsb\n" \
30222 "2:\n" \
30223 ".section .fixup,\"ax\"\n" \
30224 "5: addl %3,%0\n" \
30225@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
30226 {
30227 stac();
30228 if (movsl_is_ok(to, from, n))
30229- __copy_user(to, from, n);
30230+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
30231 else
30232- n = __copy_user_intel(to, from, n);
30233+ n = __generic_copy_to_user_intel(to, from, n);
30234 clac();
30235 return n;
30236 }
30237@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
30238 {
30239 stac();
30240 if (movsl_is_ok(to, from, n))
30241- __copy_user(to, from, n);
30242+ __copy_user(to, from, n, __copyuser_seg, "", "");
30243 else
30244- n = __copy_user_intel((void __user *)to,
30245- (const void *)from, n);
30246+ n = __generic_copy_from_user_intel(to, from, n);
30247 clac();
30248 return n;
30249 }
30250@@ -632,60 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
30251 if (n > 64 && cpu_has_xmm2)
30252 n = __copy_user_intel_nocache(to, from, n);
30253 else
30254- __copy_user(to, from, n);
30255+ __copy_user(to, from, n, __copyuser_seg, "", "");
30256 #else
30257- __copy_user(to, from, n);
30258+ __copy_user(to, from, n, __copyuser_seg, "", "");
30259 #endif
30260 clac();
30261 return n;
30262 }
30263 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
30264
30265-/**
30266- * copy_to_user: - Copy a block of data into user space.
30267- * @to: Destination address, in user space.
30268- * @from: Source address, in kernel space.
30269- * @n: Number of bytes to copy.
30270- *
30271- * Context: User context only. This function may sleep.
30272- *
30273- * Copy data from kernel space to user space.
30274- *
30275- * Returns number of bytes that could not be copied.
30276- * On success, this will be zero.
30277- */
30278-unsigned long
30279-copy_to_user(void __user *to, const void *from, unsigned long n)
30280+#ifdef CONFIG_PAX_MEMORY_UDEREF
30281+void __set_fs(mm_segment_t x)
30282 {
30283- if (access_ok(VERIFY_WRITE, to, n))
30284- n = __copy_to_user(to, from, n);
30285- return n;
30286+ switch (x.seg) {
30287+ case 0:
30288+ loadsegment(gs, 0);
30289+ break;
30290+ case TASK_SIZE_MAX:
30291+ loadsegment(gs, __USER_DS);
30292+ break;
30293+ case -1UL:
30294+ loadsegment(gs, __KERNEL_DS);
30295+ break;
30296+ default:
30297+ BUG();
30298+ }
30299 }
30300-EXPORT_SYMBOL(copy_to_user);
30301+EXPORT_SYMBOL(__set_fs);
30302
30303-/**
30304- * copy_from_user: - Copy a block of data from user space.
30305- * @to: Destination address, in kernel space.
30306- * @from: Source address, in user space.
30307- * @n: Number of bytes to copy.
30308- *
30309- * Context: User context only. This function may sleep.
30310- *
30311- * Copy data from user space to kernel space.
30312- *
30313- * Returns number of bytes that could not be copied.
30314- * On success, this will be zero.
30315- *
30316- * If some data could not be copied, this function will pad the copied
30317- * data to the requested size using zero bytes.
30318- */
30319-unsigned long
30320-_copy_from_user(void *to, const void __user *from, unsigned long n)
30321+void set_fs(mm_segment_t x)
30322 {
30323- if (access_ok(VERIFY_READ, from, n))
30324- n = __copy_from_user(to, from, n);
30325- else
30326- memset(to, 0, n);
30327- return n;
30328+ current_thread_info()->addr_limit = x;
30329+ __set_fs(x);
30330 }
30331-EXPORT_SYMBOL(_copy_from_user);
30332+EXPORT_SYMBOL(set_fs);
30333+#endif
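
Besides threading __copyuser_seg through every user access, this hunk drops the generic copy_to_user/_copy_from_user definitions (they move out of this file) and adds UDEREF's set_fs(): on i386 the address limit is now enforced by reloading %gs with a segment whose reach matches, so KERNEL_DS versus USER_DS becomes a segment choice rather than a mere addr_limit compare. A sketch of that dispatch; the selector names and the TASK_SIZE_MAX value below are symbolic stand-ins, not the real constants from asm/segment.h:

    #include <stdio.h>

    enum seg { SEG_NULL, SEG_USER_DS, SEG_KERNEL_DS };

    /* mirrors the shape of the patch's __set_fs() switch */
    static enum seg seg_for_limit(unsigned long limit)
    {
        switch (limit) {
        case 0:                  return SEG_NULL;      /* no userland access */
        case 0xC0000000UL:       return SEG_USER_DS;   /* TASK_SIZE_MAX (example) */
        case (unsigned long)-1:  return SEG_KERNEL_DS; /* KERNEL_DS */
        default:                 return SEG_NULL;      /* BUG() in the patch */
        }
    }

    int main(void)
    {
        printf("%d\n", seg_for_limit((unsigned long)-1));
        return 0;
    }
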
30334diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
30335index c905e89..01ab928 100644
30336--- a/arch/x86/lib/usercopy_64.c
30337+++ b/arch/x86/lib/usercopy_64.c
30338@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
30339 might_fault();
30340 /* no memory constraint because it doesn't change any memory gcc knows
30341 about */
30342+ pax_open_userland();
30343 stac();
30344 asm volatile(
30345 " testq %[size8],%[size8]\n"
30346@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
30347 _ASM_EXTABLE(0b,3b)
30348 _ASM_EXTABLE(1b,2b)
30349 : [size8] "=&c"(size), [dst] "=&D" (__d0)
30350- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
30351+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
30352 [zero] "r" (0UL), [eight] "r" (8UL));
30353 clac();
30354+ pax_close_userland();
30355 return size;
30356 }
30357 EXPORT_SYMBOL(__clear_user);
30358@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
30359 }
30360 EXPORT_SYMBOL(clear_user);
30361
30362-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
30363+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
30364 {
30365- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
30366- return copy_user_generic((__force void *)to, (__force void *)from, len);
30367- }
30368- return len;
30369+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
30370+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
30371+ return len;
30372 }
30373 EXPORT_SYMBOL(copy_in_user);
30374
30375@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
30376 * it is not necessary to optimize tail handling.
30377 */
30378 __visible unsigned long
30379-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
30380+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
30381 {
30382 char c;
30383 unsigned zero_len;
30384
30385+ clac();
30386+ pax_close_userland();
30387 for (; len; --len, to++) {
30388 if (__get_user_nocheck(c, from++, sizeof(char)))
30389 break;
30390@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
30391 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
30392 if (__put_user_nocheck(c, to++, sizeof(char)))
30393 break;
30394- clac();
30395 return len;
30396 }
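
pax_open_userland()/pax_close_userland() bracket the user access in __clear_user() the way stac()/clac() bracket it for SMAP, and copy_user_handle_tail() now closes the window it inherits from the faulting caller before falling back to the checked per-byte accessors, which open and close their own. The bracketing discipline, modeled in miniature:

    #include <assert.h>
    #include <string.h>

    static int userland_open;   /* stand-in for the per-CPU PGD/CR3 state */

    static void open_userland(void)  { userland_open = 1; }
    static void close_userland(void) { userland_open = 0; }

    static void clear_user_model(char *uaddr, size_t n)
    {
        open_userland();
        assert(userland_open);  /* user memory reachable only in the window */
        memset(uaddr, 0, n);
        close_userland();
    }

    int main(void)
    {
        char buf[16];
        clear_user_model(buf, sizeof(buf));
        return userland_open;   /* 0: window closed again */
    }
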
30397diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
30398index 23d8e5f..9ccc13a 100644
30399--- a/arch/x86/mm/Makefile
30400+++ b/arch/x86/mm/Makefile
30401@@ -28,3 +28,7 @@ obj-$(CONFIG_ACPI_NUMA) += srat.o
30402 obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
30403
30404 obj-$(CONFIG_MEMTEST) += memtest.o
30405+
30406+quote:="
30407+obj-$(CONFIG_X86_64) += uderef_64.o
30408+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
30409diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
30410index 903ec1e..c4166b2 100644
30411--- a/arch/x86/mm/extable.c
30412+++ b/arch/x86/mm/extable.c
30413@@ -6,12 +6,24 @@
30414 static inline unsigned long
30415 ex_insn_addr(const struct exception_table_entry *x)
30416 {
30417- return (unsigned long)&x->insn + x->insn;
30418+ unsigned long reloc = 0;
30419+
30420+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30421+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
30422+#endif
30423+
30424+ return (unsigned long)&x->insn + x->insn + reloc;
30425 }
30426 static inline unsigned long
30427 ex_fixup_addr(const struct exception_table_entry *x)
30428 {
30429- return (unsigned long)&x->fixup + x->fixup;
30430+ unsigned long reloc = 0;
30431+
30432+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30433+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
30434+#endif
30435+
30436+ return (unsigned long)&x->fixup + x->fixup + reloc;
30437 }
30438
30439 int fixup_exception(struct pt_regs *regs)
30440@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
30441 unsigned long new_ip;
30442
30443 #ifdef CONFIG_PNPBIOS
30444- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
30445+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
30446 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
30447 extern u32 pnp_bios_is_utter_crap;
30448 pnp_bios_is_utter_crap = 1;
30449@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
30450 i += 4;
30451 p->fixup -= i;
30452 i += 4;
30453+
30454+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30455+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
30456+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
30457+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
30458+#endif
30459+
30460 }
30461 }
30462
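
Exception-table entries are self-relative (address = &field + field); when i386 KERNEXEC shifts the kernel by ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR, that delta has to be folded into the resolved addresses, which ex_insn_addr()/ex_fixup_addr() now do, with sort_extable() compensating on the stored offsets. A minimal model of the resolution; the delta value is invented for the example:

    #include <stdint.h>
    #include <stdio.h>

    struct ex_entry { int32_t insn; int32_t fixup; };

    /* self-relative offset plus a constant kernel relocation delta */
    static uintptr_t resolve(const int32_t *field, intptr_t reloc)
    {
        return (uintptr_t)field + *field + reloc;
    }

    int main(void)
    {
        struct ex_entry e = { .insn = 0x100, .fixup = 0x140 };
        intptr_t reloc = 0x1000000;  /* ____LOAD_PHYSICAL_ADDR delta, example */

        printf("insn  at %#lx\n", (unsigned long)resolve(&e.insn, reloc));
        printf("fixup at %#lx\n", (unsigned long)resolve(&e.fixup, reloc));
        return 0;
    }
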
30463diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
30464index 3aaeffc..42ea9fb 100644
30465--- a/arch/x86/mm/fault.c
30466+++ b/arch/x86/mm/fault.c
30467@@ -14,11 +14,18 @@
30468 #include <linux/hugetlb.h> /* hstate_index_to_shift */
30469 #include <linux/prefetch.h> /* prefetchw */
30470 #include <linux/context_tracking.h> /* exception_enter(), ... */
30471+#include <linux/unistd.h>
30472+#include <linux/compiler.h>
30473
30474 #include <asm/traps.h> /* dotraplinkage, ... */
30475 #include <asm/pgalloc.h> /* pgd_*(), ... */
30476 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
30477 #include <asm/fixmap.h> /* VSYSCALL_START */
30478+#include <asm/tlbflush.h>
30479+
30480+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30481+#include <asm/stacktrace.h>
30482+#endif
30483
30484 /*
30485 * Page fault error code bits:
30486@@ -56,7 +63,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
30487 int ret = 0;
30488
30489 /* kprobe_running() needs smp_processor_id() */
30490- if (kprobes_built_in() && !user_mode_vm(regs)) {
30491+ if (kprobes_built_in() && !user_mode(regs)) {
30492 preempt_disable();
30493 if (kprobe_running() && kprobe_fault_handler(regs, 14))
30494 ret = 1;
30495@@ -117,7 +124,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
30496 return !instr_lo || (instr_lo>>1) == 1;
30497 case 0x00:
30498 /* Prefetch instruction is 0x0F0D or 0x0F18 */
30499- if (probe_kernel_address(instr, opcode))
30500+ if (user_mode(regs)) {
30501+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
30502+ return 0;
30503+ } else if (probe_kernel_address(instr, opcode))
30504 return 0;
30505
30506 *prefetch = (instr_lo == 0xF) &&
30507@@ -151,7 +161,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
30508 while (instr < max_instr) {
30509 unsigned char opcode;
30510
30511- if (probe_kernel_address(instr, opcode))
30512+ if (user_mode(regs)) {
30513+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
30514+ break;
30515+ } else if (probe_kernel_address(instr, opcode))
30516 break;
30517
30518 instr++;
30519@@ -182,6 +195,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
30520 force_sig_info(si_signo, &info, tsk);
30521 }
30522
30523+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
30524+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
30525+#endif
30526+
30527+#ifdef CONFIG_PAX_EMUTRAMP
30528+static int pax_handle_fetch_fault(struct pt_regs *regs);
30529+#endif
30530+
30531+#ifdef CONFIG_PAX_PAGEEXEC
30532+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
30533+{
30534+ pgd_t *pgd;
30535+ pud_t *pud;
30536+ pmd_t *pmd;
30537+
30538+ pgd = pgd_offset(mm, address);
30539+ if (!pgd_present(*pgd))
30540+ return NULL;
30541+ pud = pud_offset(pgd, address);
30542+ if (!pud_present(*pud))
30543+ return NULL;
30544+ pmd = pmd_offset(pud, address);
30545+ if (!pmd_present(*pmd))
30546+ return NULL;
30547+ return pmd;
30548+}
30549+#endif
30550+
30551 DEFINE_SPINLOCK(pgd_lock);
30552 LIST_HEAD(pgd_list);
30553
30554@@ -232,10 +273,27 @@ void vmalloc_sync_all(void)
30555 for (address = VMALLOC_START & PMD_MASK;
30556 address >= TASK_SIZE && address < FIXADDR_TOP;
30557 address += PMD_SIZE) {
30558+
30559+#ifdef CONFIG_PAX_PER_CPU_PGD
30560+ unsigned long cpu;
30561+#else
30562 struct page *page;
30563+#endif
30564
30565 spin_lock(&pgd_lock);
30566+
30567+#ifdef CONFIG_PAX_PER_CPU_PGD
30568+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
30569+ pgd_t *pgd = get_cpu_pgd(cpu, user);
30570+ pmd_t *ret;
30571+
30572+ ret = vmalloc_sync_one(pgd, address);
30573+ if (!ret)
30574+ break;
30575+ pgd = get_cpu_pgd(cpu, kernel);
30576+#else
30577 list_for_each_entry(page, &pgd_list, lru) {
30578+ pgd_t *pgd;
30579 spinlock_t *pgt_lock;
30580 pmd_t *ret;
30581
30582@@ -243,8 +301,14 @@ void vmalloc_sync_all(void)
30583 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
30584
30585 spin_lock(pgt_lock);
30586- ret = vmalloc_sync_one(page_address(page), address);
30587+ pgd = page_address(page);
30588+#endif
30589+
30590+ ret = vmalloc_sync_one(pgd, address);
30591+
30592+#ifndef CONFIG_PAX_PER_CPU_PGD
30593 spin_unlock(pgt_lock);
30594+#endif
30595
30596 if (!ret)
30597 break;
30598@@ -278,6 +342,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
30599 * an interrupt in the middle of a task switch..
30600 */
30601 pgd_paddr = read_cr3();
30602+
30603+#ifdef CONFIG_PAX_PER_CPU_PGD
30604+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
30605+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
30606+#endif
30607+
30608 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
30609 if (!pmd_k)
30610 return -1;
30611@@ -373,11 +443,25 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
30612 * happen within a race in page table update. In the later
30613 * case just flush:
30614 */
30615- pgd = pgd_offset(current->active_mm, address);
30616+
30617 pgd_ref = pgd_offset_k(address);
30618 if (pgd_none(*pgd_ref))
30619 return -1;
30620
30621+#ifdef CONFIG_PAX_PER_CPU_PGD
30622+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
30623+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
30624+ if (pgd_none(*pgd)) {
30625+ set_pgd(pgd, *pgd_ref);
30626+ arch_flush_lazy_mmu_mode();
30627+ } else {
30628+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
30629+ }
30630+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
30631+#else
30632+ pgd = pgd_offset(current->active_mm, address);
30633+#endif
30634+
30635 if (pgd_none(*pgd)) {
30636 set_pgd(pgd, *pgd_ref);
30637 arch_flush_lazy_mmu_mode();
30638@@ -543,7 +627,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
30639 static int is_errata100(struct pt_regs *regs, unsigned long address)
30640 {
30641 #ifdef CONFIG_X86_64
30642- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
30643+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
30644 return 1;
30645 #endif
30646 return 0;
30647@@ -570,7 +654,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
30648 }
30649
30650 static const char nx_warning[] = KERN_CRIT
30651-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
30652+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
30653
30654 static void
30655 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
30656@@ -579,15 +663,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
30657 if (!oops_may_print())
30658 return;
30659
30660- if (error_code & PF_INSTR) {
30661+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
30662 unsigned int level;
30663
30664 pte_t *pte = lookup_address(address, &level);
30665
30666 if (pte && pte_present(*pte) && !pte_exec(*pte))
30667- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
30668+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
30669 }
30670
30671+#ifdef CONFIG_PAX_KERNEXEC
30672+ if (init_mm.start_code <= address && address < init_mm.end_code) {
30673+ if (current->signal->curr_ip)
30674+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
30675+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
30676+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
30677+ else
30678+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
30679+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
30680+ }
30681+#endif
30682+
30683 printk(KERN_ALERT "BUG: unable to handle kernel ");
30684 if (address < PAGE_SIZE)
30685 printk(KERN_CONT "NULL pointer dereference");
30686@@ -750,6 +846,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
30687 return;
30688 }
30689 #endif
30690+
30691+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
30692+ if (pax_is_fetch_fault(regs, error_code, address)) {
30693+
30694+#ifdef CONFIG_PAX_EMUTRAMP
30695+ switch (pax_handle_fetch_fault(regs)) {
30696+ case 2:
30697+ return;
30698+ }
30699+#endif
30700+
30701+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
30702+ do_group_exit(SIGKILL);
30703+ }
30704+#endif
30705+
30706 /* Kernel addresses are always protection faults: */
30707 if (address >= TASK_SIZE)
30708 error_code |= PF_PROT;
30709@@ -835,7 +947,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
30710 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
30711 printk(KERN_ERR
30712 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
30713- tsk->comm, tsk->pid, address);
30714+ tsk->comm, task_pid_nr(tsk), address);
30715 code = BUS_MCEERR_AR;
30716 }
30717 #endif
30718@@ -889,6 +1001,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
30719 return 1;
30720 }
30721
30722+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
30723+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
30724+{
30725+ pte_t *pte;
30726+ pmd_t *pmd;
30727+ spinlock_t *ptl;
30728+ unsigned char pte_mask;
30729+
30730+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
30731+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
30732+ return 0;
30733+
30734+ /* PaX: it's our fault, let's handle it if we can */
30735+
30736+ /* PaX: take a look at read faults before acquiring any locks */
30737+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
30738+ /* instruction fetch attempt from a protected page in user mode */
30739+ up_read(&mm->mmap_sem);
30740+
30741+#ifdef CONFIG_PAX_EMUTRAMP
30742+ switch (pax_handle_fetch_fault(regs)) {
30743+ case 2:
30744+ return 1;
30745+ }
30746+#endif
30747+
30748+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
30749+ do_group_exit(SIGKILL);
30750+ }
30751+
30752+ pmd = pax_get_pmd(mm, address);
30753+ if (unlikely(!pmd))
30754+ return 0;
30755+
30756+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
30757+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
30758+ pte_unmap_unlock(pte, ptl);
30759+ return 0;
30760+ }
30761+
30762+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
30763+ /* write attempt to a protected page in user mode */
30764+ pte_unmap_unlock(pte, ptl);
30765+ return 0;
30766+ }
30767+
30768+#ifdef CONFIG_SMP
30769+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
30770+#else
30771+ if (likely(address > get_limit(regs->cs)))
30772+#endif
30773+ {
30774+ set_pte(pte, pte_mkread(*pte));
30775+ __flush_tlb_one(address);
30776+ pte_unmap_unlock(pte, ptl);
30777+ up_read(&mm->mmap_sem);
30778+ return 1;
30779+ }
30780+
30781+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
30782+
30783+ /*
30784+ * PaX: fill DTLB with user rights and retry
30785+ */
30786+ __asm__ __volatile__ (
30787+ "orb %2,(%1)\n"
30788+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
30789+/*
30790+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
30791+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
30792+ * page fault when examined during a TLB load attempt. this is true not only
30793+ * for PTEs holding a non-present entry but also present entries that will
30794+ * raise a page fault (such as those set up by PaX, or the copy-on-write
30795+ * mechanism). in effect it means that we do *not* need to flush the TLBs
30796+ * for our target pages since their PTEs are simply not in the TLBs at all.
30797+
30798+ * the best thing in omitting it is that we gain around 15-20% speed in the
30799+ * fast path of the page fault handler and can get rid of tracing since we
30800+ * can no longer flush unintended entries.
30801+ */
30802+ "invlpg (%0)\n"
30803+#endif
30804+ __copyuser_seg"testb $0,(%0)\n"
30805+ "xorb %3,(%1)\n"
30806+ :
30807+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
30808+ : "memory", "cc");
30809+ pte_unmap_unlock(pte, ptl);
30810+ up_read(&mm->mmap_sem);
30811+ return 1;
30812+}
30813+#endif
30814+
30815 /*
30816 * Handle a spurious fault caused by a stale TLB entry.
30817 *
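
The inline asm closing pax_handle_pageexec_fault() above is the heart of PAGEEXEC on pre-NX hardware: it grants the PTE user rights, touches the page through the user segment so the data TLB caches those rights, then strips them again, leaving a split ITLB/DTLB view in which data reads succeed while instruction fetches keep faulting. A toy model of why the stale entry survives, assuming, as the comment argues, that the TLB snapshots rights at load time and the revocation is deliberately not flushed:

    #include <stdbool.h>
    #include <stdio.h>

    struct pte  { bool user; };
    struct tlbe { bool valid; bool user; };

    /* model: a TLB load snapshots the PTE's rights at access time */
    static void tlb_fill(struct tlbe *t, const struct pte *p)
    {
        t->valid = true;
        t->user  = p->user;
    }

    int main(void)
    {
        struct pte  pte = { .user = false };
        struct tlbe tlb = { 0 };

        pte.user = true;       /* "orb %2,(%1)": grant user rights */
        tlb_fill(&tlb, &pte);  /* "testb $0,(%0)": touch -> DTLB load */
        pte.user = false;      /* "xorb %3,(%1)": revoke in the PTE */

        /* the stale DTLB entry still carries user rights for this page */
        printf("PTE user=%d, TLB user=%d\n", pte.user, tlb.user);
        return 0;
    }
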
30818@@ -955,6 +1160,9 @@ int show_unhandled_signals = 1;
30819 static inline int
30820 access_error(unsigned long error_code, struct vm_area_struct *vma)
30821 {
30822+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
30823+ return 1;
30824+
30825 if (error_code & PF_WRITE) {
30826 /* write, present and write, not present: */
30827 if (unlikely(!(vma->vm_flags & VM_WRITE)))
30828@@ -983,7 +1191,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
30829 if (error_code & PF_USER)
30830 return false;
30831
30832- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
30833+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
30834 return false;
30835
30836 return true;
30837@@ -1010,6 +1218,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
30838 /* Get the faulting address: */
30839 address = read_cr2();
30840
30841+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
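+	/*
+	 * with UDEREF, kernel code reaches userland only through its shadow
+	 * mapping at pax_user_shadow_base; a kernel-mode fault underneath twice
+	 * that base therefore either comes from a known uaccess site (exception
+	 * table hit, fault address rebased below) or is a direct userland
+	 * dereference worth reporting
+	 */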
30842+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
30843+ if (!search_exception_tables(regs->ip)) {
30844+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
30845+ bad_area_nosemaphore(regs, error_code, address);
30846+ return;
30847+ }
30848+ if (address < pax_user_shadow_base) {
30849+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
30850+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
30851+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
30852+ } else
30853+ address -= pax_user_shadow_base;
30854+ }
30855+#endif
30856+
30857 /*
30858 * Detect and handle instructions that would cause a page fault for
30859 * both a tracked kernel page and a userspace page.
30860@@ -1069,7 +1293,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
30861 * User-mode registers count as a user access even for any
30862 * potential system fault or CPU buglet:
30863 */
30864- if (user_mode_vm(regs)) {
30865+ if (user_mode(regs)) {
30866 local_irq_enable();
30867 error_code |= PF_USER;
30868 flags |= FAULT_FLAG_USER;
30869@@ -1135,6 +1359,11 @@ retry:
30870 might_sleep();
30871 }
30872
30873+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
30874+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
30875+ return;
30876+#endif
30877+
30878 vma = find_vma(mm, address);
30879 if (unlikely(!vma)) {
30880 bad_area(regs, error_code, address);
30881@@ -1146,18 +1375,24 @@ retry:
30882 bad_area(regs, error_code, address);
30883 return;
30884 }
30885- if (error_code & PF_USER) {
30886- /*
30887- * Accessing the stack below %sp is always a bug.
30888- * The large cushion allows instructions like enter
30889- * and pusha to work. ("enter $65535, $31" pushes
30890- * 32 pointers and then decrements %sp by 65535.)
30891- */
30892- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
30893- bad_area(regs, error_code, address);
30894- return;
30895- }
30896+ /*
30897+ * Accessing the stack below %sp is always a bug.
30898+ * The large cushion allows instructions like enter
30899+ * and pusha to work. ("enter $65535, $31" pushes
30900+ * 32 pointers and then decrements %sp by 65535.)
30901+ */
30902+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
30903+ bad_area(regs, error_code, address);
30904+ return;
30905 }
30906+
30907+#ifdef CONFIG_PAX_SEGMEXEC
30908+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
30909+ bad_area(regs, error_code, address);
30910+ return;
30911+ }
30912+#endif
30913+
30914 if (unlikely(expand_stack(vma, address))) {
30915 bad_area(regs, error_code, address);
30916 return;
30917@@ -1231,3 +1466,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
30918 __do_page_fault(regs, error_code);
30919 exception_exit(prev_state);
30920 }
30921+
30922+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
30923+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
30924+{
30925+ struct mm_struct *mm = current->mm;
30926+ unsigned long ip = regs->ip;
30927+
30928+ if (v8086_mode(regs))
30929+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
30930+
30931+#ifdef CONFIG_PAX_PAGEEXEC
30932+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
30933+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
30934+ return true;
30935+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
30936+ return true;
30937+ return false;
30938+ }
30939+#endif
30940+
30941+#ifdef CONFIG_PAX_SEGMEXEC
30942+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
30943+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
30944+ return true;
30945+ return false;
30946+ }
30947+#endif
30948+
30949+ return false;
30950+}
30951+#endif
30952+
30953+#ifdef CONFIG_PAX_EMUTRAMP
30954+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
30955+{
30956+ int err;
30957+
30958+ do { /* PaX: libffi trampoline emulation */
30959+ unsigned char mov, jmp;
30960+ unsigned int addr1, addr2;
30961+
30962+#ifdef CONFIG_X86_64
30963+ if ((regs->ip + 9) >> 32)
30964+ break;
30965+#endif
30966+
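+		/*
+		 * byte layout being matched (offsets from regs->ip):
+		 *   +0: b8 <addr1>   mov $addr1, %eax
+		 *   +5: e9 <addr2>   jmp rel32 (target = ip + 10 + addr2)
+		 */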
30967+ err = get_user(mov, (unsigned char __user *)regs->ip);
30968+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
30969+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
30970+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
30971+
30972+ if (err)
30973+ break;
30974+
30975+ if (mov == 0xB8 && jmp == 0xE9) {
30976+ regs->ax = addr1;
30977+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
30978+ return 2;
30979+ }
30980+ } while (0);
30981+
30982+ do { /* PaX: gcc trampoline emulation #1 */
30983+ unsigned char mov1, mov2;
30984+ unsigned short jmp;
30985+ unsigned int addr1, addr2;
30986+
30987+#ifdef CONFIG_X86_64
30988+ if ((regs->ip + 11) >> 32)
30989+ break;
30990+#endif
30991+
30992+ err = get_user(mov1, (unsigned char __user *)regs->ip);
30993+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
30994+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
30995+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
30996+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
30997+
30998+ if (err)
30999+ break;
31000+
31001+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
31002+ regs->cx = addr1;
31003+ regs->ax = addr2;
31004+ regs->ip = addr2;
31005+ return 2;
31006+ }
31007+ } while (0);
31008+
31009+ do { /* PaX: gcc trampoline emulation #2 */
31010+ unsigned char mov, jmp;
31011+ unsigned int addr1, addr2;
31012+
31013+#ifdef CONFIG_X86_64
31014+ if ((regs->ip + 9) >> 32)
31015+ break;
31016+#endif
31017+
31018+ err = get_user(mov, (unsigned char __user *)regs->ip);
31019+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
31020+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
31021+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
31022+
31023+ if (err)
31024+ break;
31025+
31026+ if (mov == 0xB9 && jmp == 0xE9) {
31027+ regs->cx = addr1;
31028+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
31029+ return 2;
31030+ }
31031+ } while (0);
31032+
31033+ return 1; /* PaX in action */
31034+}
31035+
31036+#ifdef CONFIG_X86_64
31037+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
31038+{
31039+ int err;
31040+
31041+ do { /* PaX: libffi trampoline emulation */
31042+ unsigned short mov1, mov2, jmp1;
31043+ unsigned char stcclc, jmp2;
31044+ unsigned long addr1, addr2;
31045+
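+		/*
+		 * byte layout being matched (offsets from regs->ip):
+		 *   +0:  49 bb <addr1>   movabs $addr1, %r11
+		 *   +10: 49 ba <addr2>   movabs $addr2, %r10
+		 *   +20: f8 or f9        clc or stc
+		 *   +21: 49 ff e3        jmp *%r11
+		 */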
31046+ err = get_user(mov1, (unsigned short __user *)regs->ip);
31047+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
31048+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
31049+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
31050+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
31051+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
31052+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
31053+
31054+ if (err)
31055+ break;
31056+
31057+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
31058+ regs->r11 = addr1;
31059+ regs->r10 = addr2;
31060+ if (stcclc == 0xF8)
31061+ regs->flags &= ~X86_EFLAGS_CF;
31062+ else
31063+ regs->flags |= X86_EFLAGS_CF;
31064+ regs->ip = addr1;
31065+ return 2;
31066+ }
31067+ } while (0);
31068+
31069+ do { /* PaX: gcc trampoline emulation #1 */
31070+ unsigned short mov1, mov2, jmp1;
31071+ unsigned char jmp2;
31072+ unsigned int addr1;
31073+ unsigned long addr2;
31074+
31075+ err = get_user(mov1, (unsigned short __user *)regs->ip);
31076+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
31077+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
31078+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
31079+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
31080+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
31081+
31082+ if (err)
31083+ break;
31084+
31085+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
31086+ regs->r11 = addr1;
31087+ regs->r10 = addr2;
31088+ regs->ip = addr1;
31089+ return 2;
31090+ }
31091+ } while (0);
31092+
31093+ do { /* PaX: gcc trampoline emulation #2 */
31094+ unsigned short mov1, mov2, jmp1;
31095+ unsigned char jmp2;
31096+ unsigned long addr1, addr2;
31097+
31098+ err = get_user(mov1, (unsigned short __user *)regs->ip);
31099+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
31100+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
31101+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
31102+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
31103+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
31104+
31105+ if (err)
31106+ break;
31107+
31108+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
31109+ regs->r11 = addr1;
31110+ regs->r10 = addr2;
31111+ regs->ip = addr1;
31112+ return 2;
31113+ }
31114+ } while (0);
31115+
31116+ return 1; /* PaX in action */
31117+}
31118+#endif
31119+
31120+/*
31121+ * PaX: decide what to do with offenders (regs->ip = fault address)
31122+ *
31123+ * returns 1 when task should be killed
31124+ * 2 when a gcc or libffi trampoline was detected and emulated
31125+ */
31126+static int pax_handle_fetch_fault(struct pt_regs *regs)
31127+{
31128+ if (v8086_mode(regs))
31129+ return 1;
31130+
31131+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
31132+ return 1;
31133+
31134+#ifdef CONFIG_X86_32
31135+ return pax_handle_fetch_fault_32(regs);
31136+#else
31137+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
31138+ return pax_handle_fetch_fault_32(regs);
31139+ else
31140+ return pax_handle_fetch_fault_64(regs);
31141+#endif
31142+}
31143+#endif
31144+
31145+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31146+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
31147+{
31148+ long i;
31149+
31150+ printk(KERN_ERR "PAX: bytes at PC: ");
31151+ for (i = 0; i < 20; i++) {
31152+ unsigned char c;
31153+ if (get_user(c, (unsigned char __force_user *)pc+i))
31154+ printk(KERN_CONT "?? ");
31155+ else
31156+ printk(KERN_CONT "%02x ", c);
31157+ }
31158+ printk("\n");
31159+
31160+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
31161+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
31162+ unsigned long c;
31163+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
31164+#ifdef CONFIG_X86_32
31165+ printk(KERN_CONT "???????? ");
31166+#else
31167+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
31168+ printk(KERN_CONT "???????? ???????? ");
31169+ else
31170+ printk(KERN_CONT "???????????????? ");
31171+#endif
31172+ } else {
31173+#ifdef CONFIG_X86_64
31174+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
31175+ printk(KERN_CONT "%08x ", (unsigned int)c);
31176+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
31177+ } else
31178+#endif
31179+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
31180+ }
31181+ }
31182+ printk("\n");
31183+}
31184+#endif
31185+
31186+/**
31187+ * probe_kernel_write(): safely attempt to write to a location
31188+ * @dst: address to write to
31189+ * @src: pointer to the data that shall be written
31190+ * @size: size of the data chunk
31191+ *
31192+ * Safely write to address @dst from the buffer at @src. If a kernel fault
31193+ * happens, handle that and return -EFAULT.
31194+ */
31195+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
31196+{
31197+ long ret;
31198+ mm_segment_t old_fs = get_fs();
31199+
31200+ set_fs(KERNEL_DS);
31201+ pagefault_disable();
31202+ pax_open_kernel();
31203+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
31204+ pax_close_kernel();
31205+ pagefault_enable();
31206+ set_fs(old_fs);
31207+
31208+ return ret ? -EFAULT : 0;
31209+}
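+
+/*
+ * hypothetical usage (sketch only, 'ro_flag' is not a real kernel symbol):
+ * updating a __read_only variable from debugging code, where a plain
+ * assignment would fault once KERNEXEC has write-protected the page:
+ *
+ *	static int ro_flag __read_only;
+ *	int one = 1;
+ *	if (probe_kernel_write(&ro_flag, &one, sizeof(one)))
+ *		pr_err("ro_flag update failed\n");
+ */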
31210diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
31211index dd74e46..0970b01 100644
31212--- a/arch/x86/mm/gup.c
31213+++ b/arch/x86/mm/gup.c
31214@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
31215 addr = start;
31216 len = (unsigned long) nr_pages << PAGE_SHIFT;
31217 end = start + len;
31218- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
31219+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
31220 (void __user *)start, len)))
31221 return 0;
31222
31223@@ -331,6 +331,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
31224 goto slow_irqon;
31225 #endif
31226
31227+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
31228+ (void __user *)start, len)))
31229+ return 0;
31230+
31231 /*
31232 * XXX: batch / limit 'nr', to avoid large irq off latency
31233 * needs some instrumenting to determine the common sizes used by
31234diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
31235index 4500142..53a363c 100644
31236--- a/arch/x86/mm/highmem_32.c
31237+++ b/arch/x86/mm/highmem_32.c
31238@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
31239 idx = type + KM_TYPE_NR*smp_processor_id();
31240 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
31241 BUG_ON(!pte_none(*(kmap_pte-idx)));
31242+
31243+ pax_open_kernel();
31244 set_pte(kmap_pte-idx, mk_pte(page, prot));
31245+ pax_close_kernel();
31246+
31247 arch_flush_lazy_mmu_mode();
31248
31249 return (void *)vaddr;
31250diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
31251index 9d980d8..6bbfacb 100644
31252--- a/arch/x86/mm/hugetlbpage.c
31253+++ b/arch/x86/mm/hugetlbpage.c
31254@@ -92,23 +92,30 @@ int pmd_huge_support(void)
31255 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
31256 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
31257 unsigned long addr, unsigned long len,
31258- unsigned long pgoff, unsigned long flags)
31259+ unsigned long pgoff, unsigned long flags, unsigned long offset)
31260 {
31261 struct hstate *h = hstate_file(file);
31262 struct vm_unmapped_area_info info;
31263-
31264+
31265 info.flags = 0;
31266 info.length = len;
31267 info.low_limit = TASK_UNMAPPED_BASE;
31268+
31269+#ifdef CONFIG_PAX_RANDMMAP
31270+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
31271+ info.low_limit += current->mm->delta_mmap;
31272+#endif
31273+
31274 info.high_limit = TASK_SIZE;
31275 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
31276 info.align_offset = 0;
31277+ info.threadstack_offset = offset;
31278 return vm_unmapped_area(&info);
31279 }
31280
31281 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
31282 unsigned long addr0, unsigned long len,
31283- unsigned long pgoff, unsigned long flags)
31284+ unsigned long pgoff, unsigned long flags, unsigned long offset)
31285 {
31286 struct hstate *h = hstate_file(file);
31287 struct vm_unmapped_area_info info;
31288@@ -120,6 +127,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
31289 info.high_limit = current->mm->mmap_base;
31290 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
31291 info.align_offset = 0;
31292+ info.threadstack_offset = offset;
31293 addr = vm_unmapped_area(&info);
31294
31295 /*
31296@@ -132,6 +140,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
31297 VM_BUG_ON(addr != -ENOMEM);
31298 info.flags = 0;
31299 info.low_limit = TASK_UNMAPPED_BASE;
31300+
31301+#ifdef CONFIG_PAX_RANDMMAP
31302+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
31303+ info.low_limit += current->mm->delta_mmap;
31304+#endif
31305+
31306 info.high_limit = TASK_SIZE;
31307 addr = vm_unmapped_area(&info);
31308 }
31309@@ -146,10 +160,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
31310 struct hstate *h = hstate_file(file);
31311 struct mm_struct *mm = current->mm;
31312 struct vm_area_struct *vma;
31313+ unsigned long pax_task_size = TASK_SIZE;
31314+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
31315
31316 if (len & ~huge_page_mask(h))
31317 return -EINVAL;
31318- if (len > TASK_SIZE)
31319+
31320+#ifdef CONFIG_PAX_SEGMEXEC
31321+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
31322+ pax_task_size = SEGMEXEC_TASK_SIZE;
31323+#endif
31324+
31325+ pax_task_size -= PAGE_SIZE;
31326+
31327+ if (len > pax_task_size)
31328 return -ENOMEM;
31329
31330 if (flags & MAP_FIXED) {
31331@@ -158,19 +182,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
31332 return addr;
31333 }
31334
31335+#ifdef CONFIG_PAX_RANDMMAP
31336+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
31337+#endif
31338+
31339 if (addr) {
31340 addr = ALIGN(addr, huge_page_size(h));
31341 vma = find_vma(mm, addr);
31342- if (TASK_SIZE - len >= addr &&
31343- (!vma || addr + len <= vma->vm_start))
31344+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
31345 return addr;
31346 }
31347 if (mm->get_unmapped_area == arch_get_unmapped_area)
31348 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
31349- pgoff, flags);
31350+ pgoff, flags, offset);
31351 else
31352 return hugetlb_get_unmapped_area_topdown(file, addr, len,
31353- pgoff, flags);
31354+ pgoff, flags, offset);
31355 }
31356
31357 #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
31358diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
31359index 04664cd..dae6e5d 100644
31360--- a/arch/x86/mm/init.c
31361+++ b/arch/x86/mm/init.c
31362@@ -4,6 +4,7 @@
31363 #include <linux/swap.h>
31364 #include <linux/memblock.h>
31365 #include <linux/bootmem.h> /* for max_low_pfn */
31366+#include <linux/tboot.h>
31367
31368 #include <asm/cacheflush.h>
31369 #include <asm/e820.h>
31370@@ -17,6 +18,8 @@
31371 #include <asm/proto.h>
31372 #include <asm/dma.h> /* for MAX_DMA_PFN */
31373 #include <asm/microcode.h>
31374+#include <asm/desc.h>
31375+#include <asm/bios_ebda.h>
31376
31377 #include "mm_internal.h"
31378
31379@@ -465,7 +468,18 @@ void __init init_mem_mapping(void)
31380 early_ioremap_page_table_range_init();
31381 #endif
31382
31383+#ifdef CONFIG_PAX_PER_CPU_PGD
31384+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
31385+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
31386+ KERNEL_PGD_PTRS);
31387+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
31388+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
31389+ KERNEL_PGD_PTRS);
31390+ load_cr3(get_cpu_pgd(0, kernel));
31391+#else
31392 load_cr3(swapper_pg_dir);
31393+#endif
31394+
31395 __flush_tlb_all();
31396
31397 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
31398@@ -481,10 +495,40 @@ void __init init_mem_mapping(void)
31399 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
31400 * mmio resources as well as potential bios/acpi data regions.
31401 */
31402+
31403+#ifdef CONFIG_GRKERNSEC_KMEM
31404+static unsigned int ebda_start __read_only;
31405+static unsigned int ebda_end __read_only;
31406+#endif
31407+
31408 int devmem_is_allowed(unsigned long pagenr)
31409 {
31410- if (pagenr < 256)
31411+#ifdef CONFIG_GRKERNSEC_KMEM
31412+ /* allow BDA */
31413+ if (!pagenr)
31414 return 1;
31415+ /* allow EBDA */
31416+ if (pagenr >= ebda_start && pagenr < ebda_end)
31417+ return 1;
31418+ /* if tboot is in use, allow access to its hardcoded serial log range */
31419+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
31420+ return 1;
31421+#else
31422+ if (!pagenr)
31423+ return 1;
31424+#ifdef CONFIG_VM86
31425+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
31426+ return 1;
31427+#endif
31428+#endif
31429+
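+	/* the ISA hole (0xa0000-0xfffff: VGA memory and BIOS ROMs) stays mappable in both configurations */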
31430+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
31431+ return 1;
31432+#ifdef CONFIG_GRKERNSEC_KMEM
31433+ /* throw out everything else below 1MB */
31434+ if (pagenr <= 256)
31435+ return 0;
31436+#endif
31437 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
31438 return 0;
31439 if (!page_is_ram(pagenr))
31440@@ -530,8 +574,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
31441 #endif
31442 }
31443
31444+#ifdef CONFIG_GRKERNSEC_KMEM
31445+static inline void gr_init_ebda(void)
31446+{
31447+ unsigned int ebda_addr;
31448+ unsigned int ebda_size = 0;
31449+
31450+ ebda_addr = get_bios_ebda();
31451+ if (ebda_addr) {
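+		/* by BIOS convention the first byte of the EBDA holds its size in KiB */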
31452+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
31453+ ebda_size <<= 10;
31454+ }
31455+ if (ebda_addr && ebda_size) {
31456+ ebda_start = ebda_addr >> PAGE_SHIFT;
31457+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
31458+ } else {
31459+ ebda_start = 0x9f000 >> PAGE_SHIFT;
31460+ ebda_end = 0xa0000 >> PAGE_SHIFT;
31461+ }
31462+}
31463+#else
31464+static inline void gr_init_ebda(void) { }
31465+#endif
31466+
31467 void free_initmem(void)
31468 {
31469+#ifdef CONFIG_PAX_KERNEXEC
31470+#ifdef CONFIG_X86_32
31471+ /* PaX: limit KERNEL_CS to actual size */
31472+ unsigned long addr, limit;
31473+ struct desc_struct d;
31474+ int cpu;
31475+#else
31476+ pgd_t *pgd;
31477+ pud_t *pud;
31478+ pmd_t *pmd;
31479+ unsigned long addr, end;
31480+#endif
31481+#endif
31482+
31483+ gr_init_ebda();
31484+
31485+#ifdef CONFIG_PAX_KERNEXEC
31486+#ifdef CONFIG_X86_32
31487+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
31488+ limit = (limit - 1UL) >> PAGE_SHIFT;
31489+
31490+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
31491+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
31492+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
31493+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
31494+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
31495+ }
31496+
31497+ /* PaX: make KERNEL_CS read-only */
31498+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
31499+ if (!paravirt_enabled())
31500+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
31501+/*
31502+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
31503+ pgd = pgd_offset_k(addr);
31504+ pud = pud_offset(pgd, addr);
31505+ pmd = pmd_offset(pud, addr);
31506+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
31507+ }
31508+*/
31509+#ifdef CONFIG_X86_PAE
31510+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
31511+/*
31512+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
31513+ pgd = pgd_offset_k(addr);
31514+ pud = pud_offset(pgd, addr);
31515+ pmd = pmd_offset(pud, addr);
31516+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
31517+ }
31518+*/
31519+#endif
31520+
31521+#ifdef CONFIG_MODULES
31522+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
31523+#endif
31524+
31525+#else
31526+ /* PaX: make kernel code/rodata read-only, rest non-executable */
31527+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
31528+ pgd = pgd_offset_k(addr);
31529+ pud = pud_offset(pgd, addr);
31530+ pmd = pmd_offset(pud, addr);
31531+ if (!pmd_present(*pmd))
31532+ continue;
31533+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
31534+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
31535+ else
31536+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
31537+ }
31538+
31539+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
31540+ end = addr + KERNEL_IMAGE_SIZE;
31541+ for (; addr < end; addr += PMD_SIZE) {
31542+ pgd = pgd_offset_k(addr);
31543+ pud = pud_offset(pgd, addr);
31544+ pmd = pmd_offset(pud, addr);
31545+ if (!pmd_present(*pmd))
31546+ continue;
31547+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
31548+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
31549+ }
31550+#endif
31551+
31552+ flush_tlb_all();
31553+#endif
31554+
31555 free_init_pages("unused kernel",
31556 (unsigned long)(&__init_begin),
31557 (unsigned long)(&__init_end));
31558diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
31559index 4287f1f..3b99c71 100644
31560--- a/arch/x86/mm/init_32.c
31561+++ b/arch/x86/mm/init_32.c
31562@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
31563 bool __read_mostly __vmalloc_start_set = false;
31564
31565 /*
31566- * Creates a middle page table and puts a pointer to it in the
31567- * given global directory entry. This only returns the gd entry
31568- * in non-PAE compilation mode, since the middle layer is folded.
31569- */
31570-static pmd_t * __init one_md_table_init(pgd_t *pgd)
31571-{
31572- pud_t *pud;
31573- pmd_t *pmd_table;
31574-
31575-#ifdef CONFIG_X86_PAE
31576- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
31577- pmd_table = (pmd_t *)alloc_low_page();
31578- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
31579- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
31580- pud = pud_offset(pgd, 0);
31581- BUG_ON(pmd_table != pmd_offset(pud, 0));
31582-
31583- return pmd_table;
31584- }
31585-#endif
31586- pud = pud_offset(pgd, 0);
31587- pmd_table = pmd_offset(pud, 0);
31588-
31589- return pmd_table;
31590-}
31591-
31592-/*
31593 * Create a page table and place a pointer to it in a middle page
31594 * directory entry:
31595 */
31596@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
31597 pte_t *page_table = (pte_t *)alloc_low_page();
31598
31599 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
31600+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31601+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
31602+#else
31603 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
31604+#endif
31605 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
31606 }
31607
31608 return pte_offset_kernel(pmd, 0);
31609 }
31610
31611+static pmd_t * __init one_md_table_init(pgd_t *pgd)
31612+{
31613+ pud_t *pud;
31614+ pmd_t *pmd_table;
31615+
31616+ pud = pud_offset(pgd, 0);
31617+ pmd_table = pmd_offset(pud, 0);
31618+
31619+ return pmd_table;
31620+}
31621+
31622 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
31623 {
31624 int pgd_idx = pgd_index(vaddr);
31625@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
31626 int pgd_idx, pmd_idx;
31627 unsigned long vaddr;
31628 pgd_t *pgd;
31629+ pud_t *pud;
31630 pmd_t *pmd;
31631 pte_t *pte = NULL;
31632 unsigned long count = page_table_range_init_count(start, end);
31633@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
31634 pgd = pgd_base + pgd_idx;
31635
31636 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
31637- pmd = one_md_table_init(pgd);
31638- pmd = pmd + pmd_index(vaddr);
31639+ pud = pud_offset(pgd, vaddr);
31640+ pmd = pmd_offset(pud, vaddr);
31641+
31642+#ifdef CONFIG_X86_PAE
31643+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
31644+#endif
31645+
31646 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
31647 pmd++, pmd_idx++) {
31648 pte = page_table_kmap_check(one_page_table_init(pmd),
31649@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
31650 }
31651 }
31652
31653-static inline int is_kernel_text(unsigned long addr)
31654+static inline int is_kernel_text(unsigned long start, unsigned long end)
31655 {
31656- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
31657- return 1;
31658- return 0;
31659+ if ((start > ktla_ktva((unsigned long)_etext) ||
31660+ end <= ktla_ktva((unsigned long)_stext)) &&
31661+ (start > ktla_ktva((unsigned long)_einittext) ||
31662+ end <= ktla_ktva((unsigned long)_sinittext)) &&
31663+
31664+#ifdef CONFIG_ACPI_SLEEP
31665+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
31666+#endif
31667+
31668+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
31669+ return 0;
31670+ return 1;
31671 }
31672
31673 /*
31674@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
31675 unsigned long last_map_addr = end;
31676 unsigned long start_pfn, end_pfn;
31677 pgd_t *pgd_base = swapper_pg_dir;
31678- int pgd_idx, pmd_idx, pte_ofs;
31679+ unsigned int pgd_idx, pmd_idx, pte_ofs;
31680 unsigned long pfn;
31681 pgd_t *pgd;
31682+ pud_t *pud;
31683 pmd_t *pmd;
31684 pte_t *pte;
31685 unsigned pages_2m, pages_4k;
31686@@ -291,8 +295,13 @@ repeat:
31687 pfn = start_pfn;
31688 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
31689 pgd = pgd_base + pgd_idx;
31690- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
31691- pmd = one_md_table_init(pgd);
31692+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
31693+ pud = pud_offset(pgd, 0);
31694+ pmd = pmd_offset(pud, 0);
31695+
31696+#ifdef CONFIG_X86_PAE
31697+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
31698+#endif
31699
31700 if (pfn >= end_pfn)
31701 continue;
31702@@ -304,14 +313,13 @@ repeat:
31703 #endif
31704 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
31705 pmd++, pmd_idx++) {
31706- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
31707+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
31708
31709 /*
31710 * Map with big pages if possible, otherwise
31711 * create normal page tables:
31712 */
31713 if (use_pse) {
31714- unsigned int addr2;
31715 pgprot_t prot = PAGE_KERNEL_LARGE;
31716 /*
31717 * first pass will use the same initial
31718@@ -322,11 +330,7 @@ repeat:
31719 _PAGE_PSE);
31720
31721 pfn &= PMD_MASK >> PAGE_SHIFT;
31722- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
31723- PAGE_OFFSET + PAGE_SIZE-1;
31724-
31725- if (is_kernel_text(addr) ||
31726- is_kernel_text(addr2))
31727+ if (is_kernel_text(address, address + PMD_SIZE))
31728 prot = PAGE_KERNEL_LARGE_EXEC;
31729
31730 pages_2m++;
31731@@ -343,7 +347,7 @@ repeat:
31732 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
31733 pte += pte_ofs;
31734 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
31735- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
31736+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
31737 pgprot_t prot = PAGE_KERNEL;
31738 /*
31739 * first pass will use the same initial
31740@@ -351,7 +355,7 @@ repeat:
31741 */
31742 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
31743
31744- if (is_kernel_text(addr))
31745+ if (is_kernel_text(address, address + PAGE_SIZE))
31746 prot = PAGE_KERNEL_EXEC;
31747
31748 pages_4k++;
31749@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
31750
31751 pud = pud_offset(pgd, va);
31752 pmd = pmd_offset(pud, va);
31753- if (!pmd_present(*pmd))
31754+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
31755 break;
31756
31757 /* should not be large page here */
31758@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
31759
31760 static void __init pagetable_init(void)
31761 {
31762- pgd_t *pgd_base = swapper_pg_dir;
31763-
31764- permanent_kmaps_init(pgd_base);
31765+ permanent_kmaps_init(swapper_pg_dir);
31766 }
31767
31768-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
31769+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
31770 EXPORT_SYMBOL_GPL(__supported_pte_mask);
31771
31772 /* user-defined highmem size */
31773@@ -787,10 +789,10 @@ void __init mem_init(void)
31774 ((unsigned long)&__init_end -
31775 (unsigned long)&__init_begin) >> 10,
31776
31777- (unsigned long)&_etext, (unsigned long)&_edata,
31778- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
31779+ (unsigned long)&_sdata, (unsigned long)&_edata,
31780+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
31781
31782- (unsigned long)&_text, (unsigned long)&_etext,
31783+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
31784 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
31785
31786 /*
31787@@ -880,6 +882,7 @@ void set_kernel_text_rw(void)
31788 if (!kernel_set_to_readonly)
31789 return;
31790
31791+ start = ktla_ktva(start);
31792 pr_debug("Set kernel text: %lx - %lx for read write\n",
31793 start, start+size);
31794
31795@@ -894,6 +897,7 @@ void set_kernel_text_ro(void)
31796 if (!kernel_set_to_readonly)
31797 return;
31798
31799+ start = ktla_ktva(start);
31800 pr_debug("Set kernel text: %lx - %lx for read only\n",
31801 start, start+size);
31802
31803@@ -922,6 +926,7 @@ void mark_rodata_ro(void)
31804 unsigned long start = PFN_ALIGN(_text);
31805 unsigned long size = PFN_ALIGN(_etext) - start;
31806
31807+ start = ktla_ktva(start);
31808 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
31809 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
31810 size >> 10);
31811diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
31812index 104d56a..62ba13f1 100644
31813--- a/arch/x86/mm/init_64.c
31814+++ b/arch/x86/mm/init_64.c
31815@@ -151,7 +151,7 @@ early_param("gbpages", parse_direct_gbpages_on);
31816 * around without checking the pgd every time.
31817 */
31818
31819-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
31820+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
31821 EXPORT_SYMBOL_GPL(__supported_pte_mask);
31822
31823 int force_personality32;
31824@@ -184,12 +184,29 @@ void sync_global_pgds(unsigned long start, unsigned long end)
31825
31826 for (address = start; address <= end; address += PGDIR_SIZE) {
31827 const pgd_t *pgd_ref = pgd_offset_k(address);
31828+
31829+#ifdef CONFIG_PAX_PER_CPU_PGD
31830+ unsigned long cpu;
31831+#else
31832 struct page *page;
31833+#endif
31834
31835 if (pgd_none(*pgd_ref))
31836 continue;
31837
31838 spin_lock(&pgd_lock);
31839+
31840+#ifdef CONFIG_PAX_PER_CPU_PGD
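+		/*
+		 * with per-cpu PGDs there is no pgd_list to walk; instead both
+		 * the user and the kernel copy of every cpu's PGD must be kept
+		 * in sync
+		 */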
31841+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
31842+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
31843+
31844+ if (pgd_none(*pgd))
31845+ set_pgd(pgd, *pgd_ref);
31846+ else
31847+ BUG_ON(pgd_page_vaddr(*pgd)
31848+ != pgd_page_vaddr(*pgd_ref));
31849+ pgd = pgd_offset_cpu(cpu, kernel, address);
31850+#else
31851 list_for_each_entry(page, &pgd_list, lru) {
31852 pgd_t *pgd;
31853 spinlock_t *pgt_lock;
31854@@ -198,6 +215,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
31855 /* the pgt_lock only for Xen */
31856 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
31857 spin_lock(pgt_lock);
31858+#endif
31859
31860 if (pgd_none(*pgd))
31861 set_pgd(pgd, *pgd_ref);
31862@@ -205,7 +223,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
31863 BUG_ON(pgd_page_vaddr(*pgd)
31864 != pgd_page_vaddr(*pgd_ref));
31865
31866+#ifndef CONFIG_PAX_PER_CPU_PGD
31867 spin_unlock(pgt_lock);
31868+#endif
31869+
31870 }
31871 spin_unlock(&pgd_lock);
31872 }
31873@@ -238,7 +259,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
31874 {
31875 if (pgd_none(*pgd)) {
31876 pud_t *pud = (pud_t *)spp_getpage();
31877- pgd_populate(&init_mm, pgd, pud);
31878+ pgd_populate_kernel(&init_mm, pgd, pud);
31879 if (pud != pud_offset(pgd, 0))
31880 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
31881 pud, pud_offset(pgd, 0));
31882@@ -250,7 +271,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
31883 {
31884 if (pud_none(*pud)) {
31885 pmd_t *pmd = (pmd_t *) spp_getpage();
31886- pud_populate(&init_mm, pud, pmd);
31887+ pud_populate_kernel(&init_mm, pud, pmd);
31888 if (pmd != pmd_offset(pud, 0))
31889 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
31890 pmd, pmd_offset(pud, 0));
31891@@ -279,7 +300,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
31892 pmd = fill_pmd(pud, vaddr);
31893 pte = fill_pte(pmd, vaddr);
31894
31895+ pax_open_kernel();
31896 set_pte(pte, new_pte);
31897+ pax_close_kernel();
31898
31899 /*
31900 * It's enough to flush this one mapping.
31901@@ -338,14 +361,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
31902 pgd = pgd_offset_k((unsigned long)__va(phys));
31903 if (pgd_none(*pgd)) {
31904 pud = (pud_t *) spp_getpage();
31905- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
31906- _PAGE_USER));
31907+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
31908 }
31909 pud = pud_offset(pgd, (unsigned long)__va(phys));
31910 if (pud_none(*pud)) {
31911 pmd = (pmd_t *) spp_getpage();
31912- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
31913- _PAGE_USER));
31914+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
31915 }
31916 pmd = pmd_offset(pud, phys);
31917 BUG_ON(!pmd_none(*pmd));
31918@@ -586,7 +607,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
31919 prot);
31920
31921 spin_lock(&init_mm.page_table_lock);
31922- pud_populate(&init_mm, pud, pmd);
31923+ pud_populate_kernel(&init_mm, pud, pmd);
31924 spin_unlock(&init_mm.page_table_lock);
31925 }
31926 __flush_tlb_all();
31927@@ -627,7 +648,7 @@ kernel_physical_mapping_init(unsigned long start,
31928 page_size_mask);
31929
31930 spin_lock(&init_mm.page_table_lock);
31931- pgd_populate(&init_mm, pgd, pud);
31932+ pgd_populate_kernel(&init_mm, pgd, pud);
31933 spin_unlock(&init_mm.page_table_lock);
31934 pgd_changed = true;
31935 }
31936@@ -1188,8 +1209,8 @@ int kern_addr_valid(unsigned long addr)
31937 static struct vm_area_struct gate_vma = {
31938 .vm_start = VSYSCALL_START,
31939 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
31940- .vm_page_prot = PAGE_READONLY_EXEC,
31941- .vm_flags = VM_READ | VM_EXEC
31942+ .vm_page_prot = PAGE_READONLY,
31943+ .vm_flags = VM_READ
31944 };
31945
31946 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
31947@@ -1223,7 +1244,7 @@ int in_gate_area_no_mm(unsigned long addr)
31948
31949 const char *arch_vma_name(struct vm_area_struct *vma)
31950 {
31951- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
31952+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
31953 return "[vdso]";
31954 if (vma == &gate_vma)
31955 return "[vsyscall]";
31956diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
31957index 7b179b4..6bd17777 100644
31958--- a/arch/x86/mm/iomap_32.c
31959+++ b/arch/x86/mm/iomap_32.c
31960@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
31961 type = kmap_atomic_idx_push();
31962 idx = type + KM_TYPE_NR * smp_processor_id();
31963 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
31964+
31965+ pax_open_kernel();
31966 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
31967+ pax_close_kernel();
31968+
31969 arch_flush_lazy_mmu_mode();
31970
31971 return (void *)vaddr;
31972diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
31973index 799580c..72f9fe0 100644
31974--- a/arch/x86/mm/ioremap.c
31975+++ b/arch/x86/mm/ioremap.c
31976@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
31977 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
31978 int is_ram = page_is_ram(pfn);
31979
31980- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
31981+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
31982 return NULL;
31983 WARN_ON_ONCE(is_ram);
31984 }
31985@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
31986 *
31987 * Caller must ensure there is only one unmapping for the same pointer.
31988 */
31989-void iounmap(volatile void __iomem *addr)
31990+void iounmap(const volatile void __iomem *addr)
31991 {
31992 struct vm_struct *p, *o;
31993
31994@@ -310,6 +310,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
31995
31996 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
31997 if (page_is_ram(start >> PAGE_SHIFT))
31998+#ifdef CONFIG_HIGHMEM
31999+ if ((start >> PAGE_SHIFT) < max_low_pfn)
32000+#endif
32001 return __va(phys);
32002
32003 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
32004@@ -322,6 +325,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
32005 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
32006 {
32007 if (page_is_ram(phys >> PAGE_SHIFT))
32008+#ifdef CONFIG_HIGHMEM
32009+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
32010+#endif
32011 return;
32012
32013 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
32014@@ -339,7 +345,7 @@ static int __init early_ioremap_debug_setup(char *str)
32015 early_param("early_ioremap_debug", early_ioremap_debug_setup);
32016
32017 static __initdata int after_paging_init;
32018-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
32019+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
32020
32021 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
32022 {
32023@@ -376,8 +382,7 @@ void __init early_ioremap_init(void)
32024 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
32025
32026 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
32027- memset(bm_pte, 0, sizeof(bm_pte));
32028- pmd_populate_kernel(&init_mm, pmd, bm_pte);
32029+ pmd_populate_user(&init_mm, pmd, bm_pte);
32030
32031 /*
32032 * The boot-ioremap range spans multiple pmds, for which
32033diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
32034index d87dd6d..bf3fa66 100644
32035--- a/arch/x86/mm/kmemcheck/kmemcheck.c
32036+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
32037@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
32038 * memory (e.g. tracked pages)? For now, we need this to avoid
32039 * invoking kmemcheck for PnP BIOS calls.
32040 */
32041- if (regs->flags & X86_VM_MASK)
32042+ if (v8086_mode(regs))
32043 return false;
32044- if (regs->cs != __KERNEL_CS)
32045+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
32046 return false;
32047
32048 pte = kmemcheck_pte_lookup(address);
32049diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
32050index 25e7e13..1964579 100644
32051--- a/arch/x86/mm/mmap.c
32052+++ b/arch/x86/mm/mmap.c
32053@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
32054 * Leave an at least ~128 MB hole with possible stack randomization.
32055 */
32056 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
32057-#define MAX_GAP (TASK_SIZE/6*5)
32058+#define MAX_GAP (pax_task_size/6*5)
32059
32060 static int mmap_is_legacy(void)
32061 {
32062@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
32063 return rnd << PAGE_SHIFT;
32064 }
32065
32066-static unsigned long mmap_base(void)
32067+static unsigned long mmap_base(struct mm_struct *mm)
32068 {
32069 unsigned long gap = rlimit(RLIMIT_STACK);
32070+ unsigned long pax_task_size = TASK_SIZE;
32071+
32072+#ifdef CONFIG_PAX_SEGMEXEC
32073+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32074+ pax_task_size = SEGMEXEC_TASK_SIZE;
32075+#endif
32076
32077 if (gap < MIN_GAP)
32078 gap = MIN_GAP;
32079 else if (gap > MAX_GAP)
32080 gap = MAX_GAP;
32081
32082- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
32083+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
32084 }
32085
32086 /*
32087 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
32088 * does, but not when emulating X86_32
32089 */
32090-static unsigned long mmap_legacy_base(void)
32091+static unsigned long mmap_legacy_base(struct mm_struct *mm)
32092 {
32093- if (mmap_is_ia32())
32094+ if (mmap_is_ia32()) {
32095+
32096+#ifdef CONFIG_PAX_SEGMEXEC
32097+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32098+ return SEGMEXEC_TASK_UNMAPPED_BASE;
32099+ else
32100+#endif
32101+
32102 return TASK_UNMAPPED_BASE;
32103- else
32104+ } else
32105 return TASK_UNMAPPED_BASE + mmap_rnd();
32106 }
32107
32108@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
32109 */
32110 void arch_pick_mmap_layout(struct mm_struct *mm)
32111 {
32112- mm->mmap_legacy_base = mmap_legacy_base();
32113- mm->mmap_base = mmap_base();
32114+ mm->mmap_legacy_base = mmap_legacy_base(mm);
32115+ mm->mmap_base = mmap_base(mm);
32116+
32117+#ifdef CONFIG_PAX_RANDMMAP
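+	/*
+	 * raise the bottom-up base by delta_mmap and pull the top-down base
+	 * down by delta_mmap + delta_stack so both layouts get randomized
+	 */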
32118+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
32119+ mm->mmap_legacy_base += mm->delta_mmap;
32120+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
32121+ }
32122+#endif
32123
32124 if (mmap_is_legacy()) {
32125 mm->mmap_base = mm->mmap_legacy_base;
32126diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
32127index 0057a7a..95c7edd 100644
32128--- a/arch/x86/mm/mmio-mod.c
32129+++ b/arch/x86/mm/mmio-mod.c
32130@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
32131 break;
32132 default:
32133 {
32134- unsigned char *ip = (unsigned char *)instptr;
32135+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
32136 my_trace->opcode = MMIO_UNKNOWN_OP;
32137 my_trace->width = 0;
32138 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
32139@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
32140 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
32141 void __iomem *addr)
32142 {
32143- static atomic_t next_id;
32144+ static atomic_unchecked_t next_id;
32145 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
32146 /* These are page-unaligned. */
32147 struct mmiotrace_map map = {
32148@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
32149 .private = trace
32150 },
32151 .phys = offset,
32152- .id = atomic_inc_return(&next_id)
32153+ .id = atomic_inc_return_unchecked(&next_id)
32154 };
32155 map.map_id = trace->id;
32156
32157@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
32158 ioremap_trace_core(offset, size, addr);
32159 }
32160
32161-static void iounmap_trace_core(volatile void __iomem *addr)
32162+static void iounmap_trace_core(const volatile void __iomem *addr)
32163 {
32164 struct mmiotrace_map map = {
32165 .phys = 0,
32166@@ -328,7 +328,7 @@ not_enabled:
32167 }
32168 }
32169
32170-void mmiotrace_iounmap(volatile void __iomem *addr)
32171+void mmiotrace_iounmap(const volatile void __iomem *addr)
32172 {
32173 might_sleep();
32174 if (is_enabled()) /* recheck and proper locking in *_core() */
32175diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
32176index 8bf93ba..dbcd670 100644
32177--- a/arch/x86/mm/numa.c
32178+++ b/arch/x86/mm/numa.c
32179@@ -474,7 +474,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
32180 return true;
32181 }
32182
32183-static int __init numa_register_memblks(struct numa_meminfo *mi)
32184+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
32185 {
32186 unsigned long uninitialized_var(pfn_align);
32187 int i, nid;
32188diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
32189index d0b1773..4c3327c 100644
32190--- a/arch/x86/mm/pageattr-test.c
32191+++ b/arch/x86/mm/pageattr-test.c
32192@@ -36,7 +36,7 @@ enum {
32193
32194 static int pte_testbit(pte_t pte)
32195 {
32196- return pte_flags(pte) & _PAGE_UNUSED1;
32197+ return pte_flags(pte) & _PAGE_CPA_TEST;
32198 }
32199
32200 struct split_state {
32201diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
32202index bb32480..75f2f5e 100644
32203--- a/arch/x86/mm/pageattr.c
32204+++ b/arch/x86/mm/pageattr.c
32205@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
32206 */
32207 #ifdef CONFIG_PCI_BIOS
32208 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
32209- pgprot_val(forbidden) |= _PAGE_NX;
32210+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
32211 #endif
32212
32213 /*
32214@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
32215 * Does not cover __inittext since that is gone later on. On
32216 * 64bit we do not enforce !NX on the low mapping
32217 */
32218- if (within(address, (unsigned long)_text, (unsigned long)_etext))
32219- pgprot_val(forbidden) |= _PAGE_NX;
32220+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
32221+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
32222
32223+#ifdef CONFIG_DEBUG_RODATA
32224 /*
32225 * The .rodata section needs to be read-only. Using the pfn
32226 * catches all aliases.
32227@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
32228 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
32229 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
32230 pgprot_val(forbidden) |= _PAGE_RW;
32231+#endif
32232
32233 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
32234 /*
32235@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
32236 }
32237 #endif
32238
32239+#ifdef CONFIG_PAX_KERNEXEC
32240+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
32241+ pgprot_val(forbidden) |= _PAGE_RW;
32242+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
32243+ }
32244+#endif
32245+
32246 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
32247
32248 return prot;
32249@@ -400,23 +409,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
32250 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
32251 {
32252 /* change init_mm */
32253+ pax_open_kernel();
32254 set_pte_atomic(kpte, pte);
32255+
32256 #ifdef CONFIG_X86_32
32257 if (!SHARED_KERNEL_PMD) {
32258+
32259+#ifdef CONFIG_PAX_PER_CPU_PGD
32260+ unsigned long cpu;
32261+#else
32262 struct page *page;
32263+#endif
32264
32265+#ifdef CONFIG_PAX_PER_CPU_PGD
32266+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
32267+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
32268+#else
32269 list_for_each_entry(page, &pgd_list, lru) {
32270- pgd_t *pgd;
32271+ pgd_t *pgd = (pgd_t *)page_address(page);
32272+#endif
32273+
32274 pud_t *pud;
32275 pmd_t *pmd;
32276
32277- pgd = (pgd_t *)page_address(page) + pgd_index(address);
32278+ pgd += pgd_index(address);
32279 pud = pud_offset(pgd, address);
32280 pmd = pmd_offset(pud, address);
32281 set_pte_atomic((pte_t *)pmd, pte);
32282 }
32283 }
32284 #endif
32285+ pax_close_kernel();
32286 }
32287
32288 static int
32289diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
32290index 6574388..87e9bef 100644
32291--- a/arch/x86/mm/pat.c
32292+++ b/arch/x86/mm/pat.c
32293@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
32294
32295 if (!entry) {
32296 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
32297- current->comm, current->pid, start, end - 1);
32298+ current->comm, task_pid_nr(current), start, end - 1);
32299 return -EINVAL;
32300 }
32301
32302@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
32303
32304 while (cursor < to) {
32305 if (!devmem_is_allowed(pfn)) {
32306- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
32307- current->comm, from, to - 1);
32308+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
32309+ current->comm, from, to - 1, cursor);
32310 return 0;
32311 }
32312 cursor += PAGE_SIZE;
32313@@ -577,7 +577,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
32314 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
32315 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
32316 "for [mem %#010Lx-%#010Lx]\n",
32317- current->comm, current->pid,
32318+ current->comm, task_pid_nr(current),
32319 cattr_name(flags),
32320 base, (unsigned long long)(base + size-1));
32321 return -EINVAL;
32322@@ -612,7 +612,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
32323 flags = lookup_memtype(paddr);
32324 if (want_flags != flags) {
32325 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
32326- current->comm, current->pid,
32327+ current->comm, task_pid_nr(current),
32328 cattr_name(want_flags),
32329 (unsigned long long)paddr,
32330 (unsigned long long)(paddr + size - 1),
32331@@ -634,7 +634,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
32332 free_memtype(paddr, paddr + size);
32333 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
32334 " for [mem %#010Lx-%#010Lx], got %s\n",
32335- current->comm, current->pid,
32336+ current->comm, task_pid_nr(current),
32337 cattr_name(want_flags),
32338 (unsigned long long)paddr,
32339 (unsigned long long)(paddr + size - 1),
32340diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
32341index 415f6c4..d319983 100644
32342--- a/arch/x86/mm/pat_rbtree.c
32343+++ b/arch/x86/mm/pat_rbtree.c
32344@@ -160,7 +160,7 @@ success:
32345
32346 failure:
32347 printk(KERN_INFO "%s:%d conflicting memory types "
32348- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
32349+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
32350 end, cattr_name(found_type), cattr_name(match->type));
32351 return -EBUSY;
32352 }
32353diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
32354index 9f0614d..92ae64a 100644
32355--- a/arch/x86/mm/pf_in.c
32356+++ b/arch/x86/mm/pf_in.c
32357@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
32358 int i;
32359 enum reason_type rv = OTHERS;
32360
32361- p = (unsigned char *)ins_addr;
32362+ p = (unsigned char *)ktla_ktva(ins_addr);
32363 p += skip_prefix(p, &prf);
32364 p += get_opcode(p, &opcode);
32365
32366@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
32367 struct prefix_bits prf;
32368 int i;
32369
32370- p = (unsigned char *)ins_addr;
32371+ p = (unsigned char *)ktla_ktva(ins_addr);
32372 p += skip_prefix(p, &prf);
32373 p += get_opcode(p, &opcode);
32374
32375@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
32376 struct prefix_bits prf;
32377 int i;
32378
32379- p = (unsigned char *)ins_addr;
32380+ p = (unsigned char *)ktla_ktva(ins_addr);
32381 p += skip_prefix(p, &prf);
32382 p += get_opcode(p, &opcode);
32383
32384@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
32385 struct prefix_bits prf;
32386 int i;
32387
32388- p = (unsigned char *)ins_addr;
32389+ p = (unsigned char *)ktla_ktva(ins_addr);
32390 p += skip_prefix(p, &prf);
32391 p += get_opcode(p, &opcode);
32392 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
32393@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
32394 struct prefix_bits prf;
32395 int i;
32396
32397- p = (unsigned char *)ins_addr;
32398+ p = (unsigned char *)ktla_ktva(ins_addr);
32399 p += skip_prefix(p, &prf);
32400 p += get_opcode(p, &opcode);
32401 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
32402diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
32403index dfa537a..fd45c64 100644
32404--- a/arch/x86/mm/pgtable.c
32405+++ b/arch/x86/mm/pgtable.c
32406@@ -91,10 +91,67 @@ static inline void pgd_list_del(pgd_t *pgd)
32407 list_del(&page->lru);
32408 }
32409
32410-#define UNSHARED_PTRS_PER_PGD \
32411- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
32412+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32413+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
32414
32415+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
32416+{
32417+ unsigned int count = USER_PGD_PTRS;
32418
32419+ if (!pax_user_shadow_base)
32420+ return;
32421+
32422+ while (count--)
32423+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
32424+}
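+
+/*
+ * the shadow view of userland lives at pax_user_shadow_base and is mapped
+ * supervisor-only and non-executable: uaccess helpers go through it, while
+ * the raw userland range is left non-present for the kernel (see
+ * clone_pgd_mask above), so a stray kernel dereference of a user pointer
+ * faults instead of silently following it
+ */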
32425+#endif
32426+
32427+#ifdef CONFIG_PAX_PER_CPU_PGD
32428+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
32429+{
32430+ unsigned int count = USER_PGD_PTRS;
32431+
32432+ while (count--) {
32433+ pgd_t pgd;
32434+
32435+#ifdef CONFIG_X86_64
32436+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
32437+#else
32438+ pgd = *src++;
32439+#endif
32440+
32441+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32442+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
32443+#endif
32444+
32445+ *dst++ = pgd;
32446+ }
32447+
32448+}
32449+#endif
32450+
32451+#ifdef CONFIG_X86_64
32452+#define pxd_t pud_t
32453+#define pyd_t pgd_t
32454+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
32455+#define pxd_free(mm, pud) pud_free((mm), (pud))
32456+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
32457+#define pyd_offset(mm, address) pgd_offset((mm), (address))
32458+#define PYD_SIZE PGDIR_SIZE
32459+#else
32460+#define pxd_t pmd_t
32461+#define pyd_t pud_t
32462+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
32463+#define pxd_free(mm, pud) pmd_free((mm), (pud))
32464+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
32465+#define pyd_offset(mm, address) pud_offset((mm), (address))
32466+#define PYD_SIZE PUD_SIZE
32467+#endif
32468+
32469+#ifdef CONFIG_PAX_PER_CPU_PGD
32470+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
32471+static inline void pgd_dtor(pgd_t *pgd) {}
32472+#else
32473 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
32474 {
32475 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
32476@@ -135,6 +192,7 @@ static void pgd_dtor(pgd_t *pgd)
32477 pgd_list_del(pgd);
32478 spin_unlock(&pgd_lock);
32479 }
32480+#endif
32481
32482 /*
32483 * List of all pgd's needed for non-PAE so it can invalidate entries
32484@@ -147,7 +205,7 @@ static void pgd_dtor(pgd_t *pgd)
32485 * -- nyc
32486 */
32487
32488-#ifdef CONFIG_X86_PAE
32489+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
32490 /*
32491 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
32492 * updating the top-level pagetable entries to guarantee the
32493@@ -159,7 +217,7 @@ static void pgd_dtor(pgd_t *pgd)
32494 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
32495 * and initialize the kernel pmds here.
32496 */
32497-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
32498+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
32499
32500 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
32501 {
32502@@ -177,36 +235,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
32503 */
32504 flush_tlb_mm(mm);
32505 }
32506+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
32507+#define PREALLOCATED_PXDS USER_PGD_PTRS
32508 #else /* !CONFIG_X86_PAE */
32509
32510 /* No need to prepopulate any pagetable entries in non-PAE modes. */
32511-#define PREALLOCATED_PMDS 0
32512+#define PREALLOCATED_PXDS 0
32513
32514 #endif /* CONFIG_X86_PAE */
32515
32516-static void free_pmds(pmd_t *pmds[])
32517+static void free_pxds(pxd_t *pxds[])
32518 {
32519 int i;
32520
32521- for(i = 0; i < PREALLOCATED_PMDS; i++)
32522- if (pmds[i])
32523- free_page((unsigned long)pmds[i]);
32524+ for(i = 0; i < PREALLOCATED_PXDS; i++)
32525+ if (pxds[i])
32526+ free_page((unsigned long)pxds[i]);
32527 }
32528
32529-static int preallocate_pmds(pmd_t *pmds[])
32530+static int preallocate_pxds(pxd_t *pxds[])
32531 {
32532 int i;
32533 bool failed = false;
32534
32535- for(i = 0; i < PREALLOCATED_PMDS; i++) {
32536- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
32537- if (pmd == NULL)
32538+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
32539+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
32540+ if (pxd == NULL)
32541 failed = true;
32542- pmds[i] = pmd;
32543+ pxds[i] = pxd;
32544 }
32545
32546 if (failed) {
32547- free_pmds(pmds);
32548+ free_pxds(pxds);
32549 return -ENOMEM;
32550 }
32551
32552@@ -219,49 +279,52 @@ static int preallocate_pmds(pmd_t *pmds[])
32553 * preallocate which never got a corresponding vma will need to be
32554 * freed manually.
32555 */
32556-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
32557+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
32558 {
32559 int i;
32560
32561- for(i = 0; i < PREALLOCATED_PMDS; i++) {
32562+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
32563 pgd_t pgd = pgdp[i];
32564
32565 if (pgd_val(pgd) != 0) {
32566- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
32567+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
32568
32569- pgdp[i] = native_make_pgd(0);
32570+ set_pgd(pgdp + i, native_make_pgd(0));
32571
32572- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
32573- pmd_free(mm, pmd);
32574+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
32575+ pxd_free(mm, pxd);
32576 }
32577 }
32578 }
32579
32580-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
32581+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
32582 {
32583- pud_t *pud;
32584+ pyd_t *pyd;
32585 int i;
32586
32587- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
32588+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
32589 return;
32590
32591- pud = pud_offset(pgd, 0);
32592-
32593- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
32594- pmd_t *pmd = pmds[i];
32595+#ifdef CONFIG_X86_64
32596+ pyd = pyd_offset(mm, 0L);
32597+#else
32598+ pyd = pyd_offset(pgd, 0L);
32599+#endif
32600
32601+ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
32602+ pxd_t *pxd = pxds[i];
32603 if (i >= KERNEL_PGD_BOUNDARY)
32604- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
32605- sizeof(pmd_t) * PTRS_PER_PMD);
32606+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
32607+ sizeof(pxd_t) * PTRS_PER_PMD);
32608
32609- pud_populate(mm, pud, pmd);
32610+ pyd_populate(mm, pyd, pxd);
32611 }
32612 }
32613
32614 pgd_t *pgd_alloc(struct mm_struct *mm)
32615 {
32616 pgd_t *pgd;
32617- pmd_t *pmds[PREALLOCATED_PMDS];
32618+ pxd_t *pxds[PREALLOCATED_PXDS];
32619
32620 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
32621
32622@@ -270,11 +333,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
32623
32624 mm->pgd = pgd;
32625
32626- if (preallocate_pmds(pmds) != 0)
32627+ if (preallocate_pxds(pxds) != 0)
32628 goto out_free_pgd;
32629
32630 if (paravirt_pgd_alloc(mm) != 0)
32631- goto out_free_pmds;
32632+ goto out_free_pxds;
32633
32634 /*
32635 * Make sure that pre-populating the pmds is atomic with
32636@@ -284,14 +347,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
32637 spin_lock(&pgd_lock);
32638
32639 pgd_ctor(mm, pgd);
32640- pgd_prepopulate_pmd(mm, pgd, pmds);
32641+ pgd_prepopulate_pxd(mm, pgd, pxds);
32642
32643 spin_unlock(&pgd_lock);
32644
32645 return pgd;
32646
32647-out_free_pmds:
32648- free_pmds(pmds);
32649+out_free_pxds:
32650+ free_pxds(pxds);
32651 out_free_pgd:
32652 free_page((unsigned long)pgd);
32653 out:
32654@@ -300,7 +363,7 @@ out:
32655
32656 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
32657 {
32658- pgd_mop_up_pmds(mm, pgd);
32659+ pgd_mop_up_pxds(mm, pgd);
32660 pgd_dtor(pgd);
32661 paravirt_pgd_free(mm, pgd);
32662 free_page((unsigned long)pgd);
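[Note on the pgtable.c rewrite: it folds the 32-bit PAE and 64-bit preallocation paths into one body by aliasing types and helpers. pxd_t is the level actually preallocated (pmd_t on 32-bit, pud_t on 64-bit with PER_CPU_PGD) and pyd_t is the level it gets linked into (pud_t resp. pgd_t). Written out by hand, the 64-bit expansion of free_pxds() is simply the following; a sketch with the aliases substituted, where nr stands in for PREALLOCATED_PXDS:

	static void free_puds_sketch(pud_t *puds[], unsigned int nr)
	{
		unsigned int i;

		for (i = 0; i < nr; i++)
			if (puds[i])
				free_page((unsigned long)puds[i]);
	}
]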
32663diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
32664index a69bcb8..19068ab 100644
32665--- a/arch/x86/mm/pgtable_32.c
32666+++ b/arch/x86/mm/pgtable_32.c
32667@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
32668 return;
32669 }
32670 pte = pte_offset_kernel(pmd, vaddr);
32671+
32672+ pax_open_kernel();
32673 if (pte_val(pteval))
32674 set_pte_at(&init_mm, vaddr, pte, pteval);
32675 else
32676 pte_clear(&init_mm, vaddr, pte);
32677+ pax_close_kernel();
32678
32679 /*
32680 * It's enough to flush this one mapping.
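[Note on the pgtable_32.c hunk: set_pte_vaddr() is the first of many sites in this patch where a write to a KERNEXEC-protected structure is bracketed by pax_open_kernel()/pax_close_kernel(). On i386 the cheap way to implement that bracket is to drop CR0.WP around the write; a hedged sketch of that flavour (the real helpers take no argument and also handle preemption and paravirt):

	static inline unsigned long pax_open_kernel_sketch(void)
	{
		unsigned long cr0 = read_cr0();

		write_cr0(cr0 & ~X86_CR0_WP);	/* supervisor may now write RO pages */
		return cr0;
	}

	static inline void pax_close_kernel_sketch(unsigned long cr0)
	{
		write_cr0(cr0 | X86_CR0_WP);	/* re-arm write protection */
	}
]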
32681diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
32682index e666cbb..61788c45 100644
32683--- a/arch/x86/mm/physaddr.c
32684+++ b/arch/x86/mm/physaddr.c
32685@@ -10,7 +10,7 @@
32686 #ifdef CONFIG_X86_64
32687
32688 #ifdef CONFIG_DEBUG_VIRTUAL
32689-unsigned long __phys_addr(unsigned long x)
32690+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
32691 {
32692 unsigned long y = x - __START_KERNEL_map;
32693
32694@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
32695 #else
32696
32697 #ifdef CONFIG_DEBUG_VIRTUAL
32698-unsigned long __phys_addr(unsigned long x)
32699+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
32700 {
32701 unsigned long phys_addr = x - PAGE_OFFSET;
32702 /* VMALLOC_* aren't constants */
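[Note on the physaddr.c hunks: __intentional_overflow(-1) addresses the size_overflow gcc plugin that the rest of this patch set enables. The virt-to-phys subtraction is allowed to wrap, and the marker tells the plugin not to instrument it. Shape of the pattern as a sketch; the attribute spelling belongs to the plugin, not stock gcc, and expands to nothing when the plugin is absent:

	unsigned long __intentional_overflow(-1) phys_addr_sketch(unsigned long x)
	{
		return x - PAGE_OFFSET;	/* may wrap below PAGE_OFFSET; deliberate */
	}
]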
32703diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
32704index 90555bf..f5f1828 100644
32705--- a/arch/x86/mm/setup_nx.c
32706+++ b/arch/x86/mm/setup_nx.c
32707@@ -5,8 +5,10 @@
32708 #include <asm/pgtable.h>
32709 #include <asm/proto.h>
32710
32711+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
32712 static int disable_nx;
32713
32714+#ifndef CONFIG_PAX_PAGEEXEC
32715 /*
32716 * noexec = on|off
32717 *
32718@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
32719 return 0;
32720 }
32721 early_param("noexec", noexec_setup);
32722+#endif
32723+
32724+#endif
32725
32726 void x86_configure_nx(void)
32727 {
32728+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
32729 if (cpu_has_nx && !disable_nx)
32730 __supported_pte_mask |= _PAGE_NX;
32731 else
32732+#endif
32733 __supported_pte_mask &= ~_PAGE_NX;
32734 }
32735
32736diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
32737index ae699b3..f1b2ad2 100644
32738--- a/arch/x86/mm/tlb.c
32739+++ b/arch/x86/mm/tlb.c
32740@@ -48,7 +48,11 @@ void leave_mm(int cpu)
32741 BUG();
32742 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
32743 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
32744+
32745+#ifndef CONFIG_PAX_PER_CPU_PGD
32746 load_cr3(swapper_pg_dir);
32747+#endif
32748+
32749 }
32750 }
32751 EXPORT_SYMBOL_GPL(leave_mm);
32752diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
32753new file mode 100644
32754index 0000000..dace51c
32755--- /dev/null
32756+++ b/arch/x86/mm/uderef_64.c
32757@@ -0,0 +1,37 @@
32758+#include <linux/mm.h>
32759+#include <asm/pgtable.h>
32760+#include <asm/uaccess.h>
32761+
32762+#ifdef CONFIG_PAX_MEMORY_UDEREF
32763+/* PaX: due to the special call convention these functions must
32764+ * - remain leaf functions under all configurations,
32765+ * - never be called directly, only dereferenced from the wrappers.
32766+ */
32767+void __pax_open_userland(void)
32768+{
32769+ unsigned int cpu;
32770+
32771+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
32772+ return;
32773+
32774+ cpu = raw_get_cpu();
32775+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
32776+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
32777+ raw_put_cpu_no_resched();
32778+}
32779+EXPORT_SYMBOL(__pax_open_userland);
32780+
32781+void __pax_close_userland(void)
32782+{
32783+ unsigned int cpu;
32784+
32785+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
32786+ return;
32787+
32788+ cpu = raw_get_cpu();
32789+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
32790+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
32791+ raw_put_cpu_no_resched();
32792+}
32793+EXPORT_SYMBOL(__pax_close_userland);
32794+#endif
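[Note on the new uderef_64.c: kernel and userland run on distinct per-CPU PGDs tagged with distinct PCIDs, and "opening" userland is a single CR3 write that switches both PGD and PCID without a TLB flush. A sketch of the CR3 encoding the BUG_ON()s and write_cr3() calls above rely on; the values mirror the patch's intent, not necessarily the exact kernel constants:

	#define PCID_KERNEL_SKETCH	0x0UL
	#define PCID_USER_SKETCH	0x1UL
	#define PCID_NOFLUSH_SKETCH	(1UL << 63)	/* keep this PCID's TLB entries */

	static inline unsigned long make_cr3_sketch(unsigned long pgd_pa,
						    unsigned long pcid)
	{
		/* pgd_pa is page-aligned, so the low 12 bits are free for the PCID */
		return pgd_pa | pcid | PCID_NOFLUSH_SKETCH;
	}
]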
32795diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
32796index 877b9a1..a8ecf42 100644
32797--- a/arch/x86/net/bpf_jit.S
32798+++ b/arch/x86/net/bpf_jit.S
32799@@ -9,6 +9,7 @@
32800 */
32801 #include <linux/linkage.h>
32802 #include <asm/dwarf2.h>
32803+#include <asm/alternative-asm.h>
32804
32805 /*
32806 * Calling convention :
32807@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
32808 jle bpf_slow_path_word
32809 mov (SKBDATA,%rsi),%eax
32810 bswap %eax /* ntohl() */
32811+ pax_force_retaddr
32812 ret
32813
32814 sk_load_half:
32815@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
32816 jle bpf_slow_path_half
32817 movzwl (SKBDATA,%rsi),%eax
32818 rol $8,%ax # ntohs()
32819+ pax_force_retaddr
32820 ret
32821
32822 sk_load_byte:
32823@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
32824 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
32825 jle bpf_slow_path_byte
32826 movzbl (SKBDATA,%rsi),%eax
32827+ pax_force_retaddr
32828 ret
32829
32830 /**
32831@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
32832 movzbl (SKBDATA,%rsi),%ebx
32833 and $15,%bl
32834 shl $2,%bl
32835+ pax_force_retaddr
32836 ret
32837
32838 /* rsi contains offset and can be scratched */
32839@@ -109,6 +114,7 @@ bpf_slow_path_word:
32840 js bpf_error
32841 mov -12(%rbp),%eax
32842 bswap %eax
32843+ pax_force_retaddr
32844 ret
32845
32846 bpf_slow_path_half:
32847@@ -117,12 +123,14 @@ bpf_slow_path_half:
32848 mov -12(%rbp),%ax
32849 rol $8,%ax
32850 movzwl %ax,%eax
32851+ pax_force_retaddr
32852 ret
32853
32854 bpf_slow_path_byte:
32855 bpf_slow_path_common(1)
32856 js bpf_error
32857 movzbl -12(%rbp),%eax
32858+ pax_force_retaddr
32859 ret
32860
32861 bpf_slow_path_byte_msh:
32862@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
32863 and $15,%al
32864 shl $2,%al
32865 xchg %eax,%ebx
32866+ pax_force_retaddr
32867 ret
32868
32869 #define sk_negative_common(SIZE) \
32870@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
32871 sk_negative_common(4)
32872 mov (%rax), %eax
32873 bswap %eax
32874+ pax_force_retaddr
32875 ret
32876
32877 bpf_slow_path_half_neg:
32878@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
32879 mov (%rax),%ax
32880 rol $8,%ax
32881 movzwl %ax,%eax
32882+ pax_force_retaddr
32883 ret
32884
32885 bpf_slow_path_byte_neg:
32886@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
32887 .globl sk_load_byte_negative_offset
32888 sk_negative_common(1)
32889 movzbl (%rax), %eax
32890+ pax_force_retaddr
32891 ret
32892
32893 bpf_slow_path_byte_msh_neg:
32894@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
32895 and $15,%al
32896 shl $2,%al
32897 xchg %eax,%ebx
32898+ pax_force_retaddr
32899 ret
32900
32901 bpf_error:
32902@@ -197,4 +210,5 @@ bpf_error:
32903 xor %eax,%eax
32904 mov -8(%rbp),%rbx
32905 leaveq
32906+ pax_force_retaddr
32907 ret
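[Note on the bpf_jit.S hunks: every ret in the JIT helpers gains a pax_force_retaddr. Under KERNEXEC's return-address masking this expands to a single instruction that sets the top bit of the on-stack return address just before returning, so a corrupted return target pointing into userland becomes non-canonical and faults instead of executing. In C-flavoured pseudocode; a sketch, the real thing is one asm instruction such as a bts against (%rsp):

	static inline void force_retaddr_sketch(unsigned long *ret_slot)
	{
		*ret_slot |= 1UL << 63;	/* kernel addresses already have bit 63 set;
					 * user addresses become non-canonical */
	}
]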
32908diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
32909index 26328e8..5f96c25 100644
32910--- a/arch/x86/net/bpf_jit_comp.c
32911+++ b/arch/x86/net/bpf_jit_comp.c
32912@@ -50,13 +50,90 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
32913 return ptr + len;
32914 }
32915
32916+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
32917+#define MAX_INSTR_CODE_SIZE 96
32918+#else
32919+#define MAX_INSTR_CODE_SIZE 64
32920+#endif
32921+
32922 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
32923
32924 #define EMIT1(b1) EMIT(b1, 1)
32925 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
32926 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
32927 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
32928+
32929+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
32930+/* original constant will appear in ecx */
32931+#define DILUTE_CONST_SEQUENCE(_off, _key) \
32932+do { \
32933+ /* mov ecx, randkey */ \
32934+ EMIT1(0xb9); \
32935+ EMIT(_key, 4); \
32936+ /* xor ecx, randkey ^ off */ \
32937+ EMIT2(0x81, 0xf1); \
32938+ EMIT((_key) ^ (_off), 4); \
32939+} while (0)
32940+
32941+#define EMIT1_off32(b1, _off) \
32942+do { \
32943+ switch (b1) { \
32944+ case 0x05: /* add eax, imm32 */ \
32945+ case 0x2d: /* sub eax, imm32 */ \
32946+ case 0x25: /* and eax, imm32 */ \
32947+ case 0x0d: /* or eax, imm32 */ \
32948+ case 0xb8: /* mov eax, imm32 */ \
32949+ case 0x35: /* xor eax, imm32 */ \
32950+ case 0x3d: /* cmp eax, imm32 */ \
32951+ case 0xa9: /* test eax, imm32 */ \
32952+ DILUTE_CONST_SEQUENCE(_off, randkey); \
32953+ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
32954+ break; \
32955+ case 0xbb: /* mov ebx, imm32 */ \
32956+ DILUTE_CONST_SEQUENCE(_off, randkey); \
32957+ /* mov ebx, ecx */ \
32958+ EMIT2(0x89, 0xcb); \
32959+ break; \
32960+ case 0xbe: /* mov esi, imm32 */ \
32961+ DILUTE_CONST_SEQUENCE(_off, randkey); \
32962+ /* mov esi, ecx */ \
32963+ EMIT2(0x89, 0xce); \
32964+ break; \
32965+ case 0xe8: /* call rel imm32, always to known funcs */ \
32966+ EMIT1(b1); \
32967+ EMIT(_off, 4); \
32968+ break; \
32969+ case 0xe9: /* jmp rel imm32 */ \
32970+ EMIT1(b1); \
32971+ EMIT(_off, 4); \
32972+ /* prevent fall-through, we're not called if off = 0 */ \
32973+ EMIT(0xcccccccc, 4); \
32974+ EMIT(0xcccccccc, 4); \
32975+ break; \
32976+ default: \
32977+ BUILD_BUG(); \
32978+ } \
32979+} while (0)
32980+
32981+#define EMIT2_off32(b1, b2, _off) \
32982+do { \
32983+ if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
32984+ EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
32985+ EMIT(randkey, 4); \
32986+ EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
32987+ EMIT((_off) - randkey, 4); \
32988+ } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
32989+ DILUTE_CONST_SEQUENCE(_off, randkey); \
32990+ /* imul eax, ecx */ \
32991+ EMIT3(0x0f, 0xaf, 0xc1); \
32992+ } else { \
32993+ BUILD_BUG(); \
32994+ } \
32995+} while (0)
32996+#else
32997 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
32998+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
32999+#endif
33000
33001 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
33002 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
33003@@ -91,6 +168,24 @@ do { \
33004 #define X86_JBE 0x76
33005 #define X86_JA 0x77
33006
33007+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33008+#define APPEND_FLOW_VERIFY() \
33009+do { \
33010+ /* mov ecx, randkey */ \
33011+ EMIT1(0xb9); \
33012+ EMIT(randkey, 4); \
33013+ /* cmp ecx, randkey */ \
33014+ EMIT2(0x81, 0xf9); \
33015+ EMIT(randkey, 4); \
33016+ /* jz after 8 int 3s */ \
33017+ EMIT2(0x74, 0x08); \
33018+ EMIT(0xcccccccc, 4); \
33019+ EMIT(0xcccccccc, 4); \
33020+} while (0)
33021+#else
33022+#define APPEND_FLOW_VERIFY() do { } while (0)
33023+#endif
33024+
33025 #define EMIT_COND_JMP(op, offset) \
33026 do { \
33027 if (is_near(offset)) \
33028@@ -98,6 +193,7 @@ do { \
33029 else { \
33030 EMIT2(0x0f, op + 0x10); \
33031 EMIT(offset, 4); /* jxx .+off32 */ \
33032+ APPEND_FLOW_VERIFY(); \
33033 } \
33034 } while (0)
33035
33036@@ -145,55 +241,54 @@ static int pkt_type_offset(void)
33037 return -1;
33038 }
33039
33040-struct bpf_binary_header {
33041- unsigned int pages;
33042- /* Note : for security reasons, bpf code will follow a randomly
33043- * sized amount of int3 instructions
33044- */
33045- u8 image[];
33046-};
33047-
33048-static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
33049+/* Note : for security reasons, bpf code will follow a randomly
33050+ * sized amount of int3 instructions
33051+ */
33052+static u8 *bpf_alloc_binary(unsigned int proglen,
33053 u8 **image_ptr)
33054 {
33055 unsigned int sz, hole;
33056- struct bpf_binary_header *header;
33057+ u8 *header;
33058
33059 /* Most of BPF filters are really small,
33060 * but if some of them fill a page, allow at least
33061 * 128 extra bytes to insert a random section of int3
33062 */
33063- sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE);
33064- header = module_alloc(sz);
33065+ sz = round_up(proglen + 128, PAGE_SIZE);
33066+ header = module_alloc_exec(sz);
33067 if (!header)
33068 return NULL;
33069
33070+ pax_open_kernel();
33071 memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
33072+ pax_close_kernel();
33073
33074- header->pages = sz / PAGE_SIZE;
33075- hole = sz - (proglen + sizeof(*header));
33076+ hole = PAGE_SIZE - (proglen & ~PAGE_MASK);
33077
33078 /* insert a random number of int3 instructions before BPF code */
33079- *image_ptr = &header->image[prandom_u32() % hole];
33080+ *image_ptr = &header[prandom_u32() % hole];
33081 return header;
33082 }
33083
33084 void bpf_jit_compile(struct sk_filter *fp)
33085 {
33086- u8 temp[64];
33087+ u8 temp[MAX_INSTR_CODE_SIZE];
33088 u8 *prog;
33089 unsigned int proglen, oldproglen = 0;
33090 int ilen, i;
33091 int t_offset, f_offset;
33092 u8 t_op, f_op, seen = 0, pass;
33093 u8 *image = NULL;
33094- struct bpf_binary_header *header = NULL;
33095+ u8 *header = NULL;
33096 u8 *func;
33097 int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
33098 unsigned int cleanup_addr; /* epilogue code offset */
33099 unsigned int *addrs;
33100 const struct sock_filter *filter = fp->insns;
33101 int flen = fp->len;
33102+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33103+ unsigned int randkey;
33104+#endif
33105
33106 if (!bpf_jit_enable)
33107 return;
33108@@ -202,11 +297,15 @@ void bpf_jit_compile(struct sk_filter *fp)
33109 if (addrs == NULL)
33110 return;
33111
33112+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33113+ randkey = get_random_int();
33114+#endif
33115+
33116 /* Before first pass, make a rough estimation of addrs[]
33117- * each bpf instruction is translated to less than 64 bytes
33118+ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
33119 */
33120 for (proglen = 0, i = 0; i < flen; i++) {
33121- proglen += 64;
33122+ proglen += MAX_INSTR_CODE_SIZE;
33123 addrs[i] = proglen;
33124 }
33125 cleanup_addr = proglen; /* epilogue address */
33126@@ -317,10 +416,8 @@ void bpf_jit_compile(struct sk_filter *fp)
33127 case BPF_S_ALU_MUL_K: /* A *= K */
33128 if (is_imm8(K))
33129 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
33130- else {
33131- EMIT2(0x69, 0xc0); /* imul imm32,%eax */
33132- EMIT(K, 4);
33133- }
33134+ else
33135+ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
33136 break;
33137 case BPF_S_ALU_DIV_X: /* A /= X; */
33138 seen |= SEEN_XREG;
33139@@ -360,13 +457,23 @@ void bpf_jit_compile(struct sk_filter *fp)
33140 break;
33141 case BPF_S_ALU_MOD_K: /* A %= K; */
33142 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
33143+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33144+ DILUTE_CONST_SEQUENCE(K, randkey);
33145+#else
33146 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
33147+#endif
33148 EMIT2(0xf7, 0xf1); /* div %ecx */
33149 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
33150 break;
33151 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
33152+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33153+ DILUTE_CONST_SEQUENCE(K, randkey);
33154+ // imul rax, rcx
33155+ EMIT4(0x48, 0x0f, 0xaf, 0xc1);
33156+#else
33157 EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
33158 EMIT(K, 4);
33159+#endif
33160 EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
33161 break;
33162 case BPF_S_ALU_AND_X:
33163@@ -637,8 +744,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
33164 if (is_imm8(K)) {
33165 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
33166 } else {
33167- EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
33168- EMIT(K, 4);
33169+ EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
33170 }
33171 } else {
33172 EMIT2(0x89,0xde); /* mov %ebx,%esi */
33173@@ -728,10 +834,12 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
33174 if (unlikely(proglen + ilen > oldproglen)) {
33175 pr_err("bpb_jit_compile fatal error\n");
33176 kfree(addrs);
33177- module_free(NULL, header);
33178+ module_free_exec(NULL, image);
33179 return;
33180 }
33181+ pax_open_kernel();
33182 memcpy(image + proglen, temp, ilen);
33183+ pax_close_kernel();
33184 }
33185 proglen += ilen;
33186 addrs[i] = proglen;
33187@@ -764,7 +872,6 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
33188
33189 if (image) {
33190 bpf_flush_icache(header, image + proglen);
33191- set_memory_ro((unsigned long)header, header->pages);
33192 fp->bpf_func = (void *)image;
33193 }
33194 out:
33195@@ -776,10 +883,9 @@ static void bpf_jit_free_deferred(struct work_struct *work)
33196 {
33197 struct sk_filter *fp = container_of(work, struct sk_filter, work);
33198 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
33199- struct bpf_binary_header *header = (void *)addr;
33200
33201- set_memory_rw(addr, header->pages);
33202- module_free(NULL, header);
33203+ set_memory_rw(addr, 1);
33204+ module_free_exec(NULL, (void *)addr);
33205 kfree(fp);
33206 }
33207
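[Note on the GRKERNSEC_JIT_HARDEN machinery above: it is constant blinding. Attacker-supplied 32-bit immediates never appear verbatim in the JITed image; the emitter stores a random key and key^imm and recombines them in %ecx at run time, so a misaligned jump into the filter cannot land on attacker-chosen instruction bytes. A self-contained userspace demo of the recombination DILUTE_CONST_SEQUENCE performs, with randkey standing in for get_random_int():

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t imm = 0xdeadbeef;	/* the constant the filter asked for */
		uint32_t randkey = 0x12345678;	/* stand-in for get_random_int() */

		uint32_t ecx = randkey;		/* mov ecx, randkey       */
		ecx ^= randkey ^ imm;		/* xor ecx, randkey ^ imm */

		printf("recovered %#x\n", ecx);	/* prints 0xdeadbeef */
		return ecx != imm;
	}
]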
33208diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
33209index d6aa6e8..266395a 100644
33210--- a/arch/x86/oprofile/backtrace.c
33211+++ b/arch/x86/oprofile/backtrace.c
33212@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
33213 struct stack_frame_ia32 *fp;
33214 unsigned long bytes;
33215
33216- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
33217+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
33218 if (bytes != sizeof(bufhead))
33219 return NULL;
33220
33221- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
33222+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
33223
33224 oprofile_add_trace(bufhead[0].return_address);
33225
33226@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
33227 struct stack_frame bufhead[2];
33228 unsigned long bytes;
33229
33230- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
33231+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
33232 if (bytes != sizeof(bufhead))
33233 return NULL;
33234
33235@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
33236 {
33237 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
33238
33239- if (!user_mode_vm(regs)) {
33240+ if (!user_mode(regs)) {
33241 unsigned long stack = kernel_stack_pointer(regs);
33242 if (depth)
33243 dump_trace(NULL, regs, (unsigned long *)stack, 0,
33244diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
33245index 6890d84..1dad1f1 100644
33246--- a/arch/x86/oprofile/nmi_int.c
33247+++ b/arch/x86/oprofile/nmi_int.c
33248@@ -23,6 +23,7 @@
33249 #include <asm/nmi.h>
33250 #include <asm/msr.h>
33251 #include <asm/apic.h>
33252+#include <asm/pgtable.h>
33253
33254 #include "op_counter.h"
33255 #include "op_x86_model.h"
33256@@ -774,8 +775,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
33257 if (ret)
33258 return ret;
33259
33260- if (!model->num_virt_counters)
33261- model->num_virt_counters = model->num_counters;
33262+ if (!model->num_virt_counters) {
33263+ pax_open_kernel();
33264+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
33265+ pax_close_kernel();
33266+ }
33267
33268 mux_init(ops);
33269
33270diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
33271index 50d86c0..7985318 100644
33272--- a/arch/x86/oprofile/op_model_amd.c
33273+++ b/arch/x86/oprofile/op_model_amd.c
33274@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
33275 num_counters = AMD64_NUM_COUNTERS;
33276 }
33277
33278- op_amd_spec.num_counters = num_counters;
33279- op_amd_spec.num_controls = num_counters;
33280- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
33281+ pax_open_kernel();
33282+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
33283+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
33284+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
33285+ pax_close_kernel();
33286
33287 return 0;
33288 }
33289diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
33290index d90528e..0127e2b 100644
33291--- a/arch/x86/oprofile/op_model_ppro.c
33292+++ b/arch/x86/oprofile/op_model_ppro.c
33293@@ -19,6 +19,7 @@
33294 #include <asm/msr.h>
33295 #include <asm/apic.h>
33296 #include <asm/nmi.h>
33297+#include <asm/pgtable.h>
33298
33299 #include "op_x86_model.h"
33300 #include "op_counter.h"
33301@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
33302
33303 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
33304
33305- op_arch_perfmon_spec.num_counters = num_counters;
33306- op_arch_perfmon_spec.num_controls = num_counters;
33307+ pax_open_kernel();
33308+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
33309+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
33310+ pax_close_kernel();
33311 }
33312
33313 static int arch_perfmon_init(struct oprofile_operations *ignore)
33314diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
33315index 71e8a67..6a313bb 100644
33316--- a/arch/x86/oprofile/op_x86_model.h
33317+++ b/arch/x86/oprofile/op_x86_model.h
33318@@ -52,7 +52,7 @@ struct op_x86_model_spec {
33319 void (*switch_ctrl)(struct op_x86_model_spec const *model,
33320 struct op_msrs const * const msrs);
33321 #endif
33322-};
33323+} __do_const;
33324
33325 struct op_counter_config;
33326
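[Note on __do_const: tagging op_x86_model_spec with it hands the struct to the constify gcc plugin, which moves such all-function-pointer ops structures into read-only memory. The matching write idiom is visible in the oprofile hunks above: cast the field's constness away and bracket the store with pax_open_kernel()/pax_close_kernel(). As a standalone sketch of that idiom:

	static void set_virt_counters_sketch(const struct op_x86_model_spec *model,
					     unsigned int n)
	{
		pax_open_kernel();
		*(unsigned int *)&model->num_virt_counters = n;	/* deliberate const cast */
		pax_close_kernel();
	}
]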
33327diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
33328index 372e9b8..e775a6c 100644
33329--- a/arch/x86/pci/irq.c
33330+++ b/arch/x86/pci/irq.c
33331@@ -50,7 +50,7 @@ struct irq_router {
33332 struct irq_router_handler {
33333 u16 vendor;
33334 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
33335-};
33336+} __do_const;
33337
33338 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
33339 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
33340@@ -794,7 +794,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
33341 return 0;
33342 }
33343
33344-static __initdata struct irq_router_handler pirq_routers[] = {
33345+static __initconst const struct irq_router_handler pirq_routers[] = {
33346 { PCI_VENDOR_ID_INTEL, intel_router_probe },
33347 { PCI_VENDOR_ID_AL, ali_router_probe },
33348 { PCI_VENDOR_ID_ITE, ite_router_probe },
33349@@ -821,7 +821,7 @@ static struct pci_dev *pirq_router_dev;
33350 static void __init pirq_find_router(struct irq_router *r)
33351 {
33352 struct irq_routing_table *rt = pirq_table;
33353- struct irq_router_handler *h;
33354+ const struct irq_router_handler *h;
33355
33356 #ifdef CONFIG_PCI_BIOS
33357 if (!rt->signature) {
33358@@ -1094,7 +1094,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
33359 return 0;
33360 }
33361
33362-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
33363+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
33364 {
33365 .callback = fix_broken_hp_bios_irq9,
33366 .ident = "HP Pavilion N5400 Series Laptop",
33367diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
33368index 903fded..94b0d88 100644
33369--- a/arch/x86/pci/mrst.c
33370+++ b/arch/x86/pci/mrst.c
33371@@ -241,7 +241,9 @@ int __init pci_mrst_init(void)
33372 pr_info("Intel MID platform detected, using MID PCI ops\n");
33373 pci_mmcfg_late_init();
33374 pcibios_enable_irq = mrst_pci_irq_enable;
33375- pci_root_ops = pci_mrst_ops;
33376+ pax_open_kernel();
33377+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
33378+ pax_close_kernel();
33379 pci_soc_mode = 1;
33380 /* Continue with standard init */
33381 return 1;
33382diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
33383index c77b24a..c979855 100644
33384--- a/arch/x86/pci/pcbios.c
33385+++ b/arch/x86/pci/pcbios.c
33386@@ -79,7 +79,7 @@ union bios32 {
33387 static struct {
33388 unsigned long address;
33389 unsigned short segment;
33390-} bios32_indirect = { 0, __KERNEL_CS };
33391+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
33392
33393 /*
33394 * Returns the entry point for the given service, NULL on error
33395@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
33396 unsigned long length; /* %ecx */
33397 unsigned long entry; /* %edx */
33398 unsigned long flags;
33399+ struct desc_struct d, *gdt;
33400
33401 local_irq_save(flags);
33402- __asm__("lcall *(%%edi); cld"
33403+
33404+ gdt = get_cpu_gdt_table(smp_processor_id());
33405+
33406+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
33407+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
33408+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
33409+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
33410+
33411+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
33412 : "=a" (return_code),
33413 "=b" (address),
33414 "=c" (length),
33415 "=d" (entry)
33416 : "0" (service),
33417 "1" (0),
33418- "D" (&bios32_indirect));
33419+ "D" (&bios32_indirect),
33420+ "r"(__PCIBIOS_DS)
33421+ : "memory");
33422+
33423+ pax_open_kernel();
33424+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
33425+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
33426+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
33427+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
33428+ pax_close_kernel();
33429+
33430 local_irq_restore(flags);
33431
33432 switch (return_code) {
33433- case 0:
33434- return address + entry;
33435- case 0x80: /* Not present */
33436- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
33437- return 0;
33438- default: /* Shouldn't happen */
33439- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
33440- service, return_code);
33441+ case 0: {
33442+ int cpu;
33443+ unsigned char flags;
33444+
33445+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
33446+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
33447+ printk(KERN_WARNING "bios32_service: not valid\n");
33448 return 0;
33449+ }
33450+ address = address + PAGE_OFFSET;
33451+ length += 16UL; /* some BIOSs underreport this... */
33452+ flags = 4;
33453+ if (length >= 64*1024*1024) {
33454+ length >>= PAGE_SHIFT;
33455+ flags |= 8;
33456+ }
33457+
33458+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
33459+ gdt = get_cpu_gdt_table(cpu);
33460+ pack_descriptor(&d, address, length, 0x9b, flags);
33461+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
33462+ pack_descriptor(&d, address, length, 0x93, flags);
33463+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
33464+ }
33465+ return entry;
33466+ }
33467+ case 0x80: /* Not present */
33468+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
33469+ return 0;
33470+ default: /* Shouldn't happen */
33471+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
33472+ service, return_code);
33473+ return 0;
33474 }
33475 }
33476
33477 static struct {
33478 unsigned long address;
33479 unsigned short segment;
33480-} pci_indirect = { 0, __KERNEL_CS };
33481+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
33482
33483-static int pci_bios_present;
33484+static int pci_bios_present __read_only;
33485
33486 static int check_pcibios(void)
33487 {
33488@@ -131,11 +174,13 @@ static int check_pcibios(void)
33489 unsigned long flags, pcibios_entry;
33490
33491 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
33492- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
33493+ pci_indirect.address = pcibios_entry;
33494
33495 local_irq_save(flags);
33496- __asm__(
33497- "lcall *(%%edi); cld\n\t"
33498+ __asm__("movw %w6, %%ds\n\t"
33499+ "lcall *%%ss:(%%edi); cld\n\t"
33500+ "push %%ss\n\t"
33501+ "pop %%ds\n\t"
33502 "jc 1f\n\t"
33503 "xor %%ah, %%ah\n"
33504 "1:"
33505@@ -144,7 +189,8 @@ static int check_pcibios(void)
33506 "=b" (ebx),
33507 "=c" (ecx)
33508 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
33509- "D" (&pci_indirect)
33510+ "D" (&pci_indirect),
33511+ "r" (__PCIBIOS_DS)
33512 : "memory");
33513 local_irq_restore(flags);
33514
33515@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33516
33517 switch (len) {
33518 case 1:
33519- __asm__("lcall *(%%esi); cld\n\t"
33520+ __asm__("movw %w6, %%ds\n\t"
33521+ "lcall *%%ss:(%%esi); cld\n\t"
33522+ "push %%ss\n\t"
33523+ "pop %%ds\n\t"
33524 "jc 1f\n\t"
33525 "xor %%ah, %%ah\n"
33526 "1:"
33527@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33528 : "1" (PCIBIOS_READ_CONFIG_BYTE),
33529 "b" (bx),
33530 "D" ((long)reg),
33531- "S" (&pci_indirect));
33532+ "S" (&pci_indirect),
33533+ "r" (__PCIBIOS_DS));
33534 /*
33535 * Zero-extend the result beyond 8 bits, do not trust the
33536 * BIOS having done it:
33537@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33538 *value &= 0xff;
33539 break;
33540 case 2:
33541- __asm__("lcall *(%%esi); cld\n\t"
33542+ __asm__("movw %w6, %%ds\n\t"
33543+ "lcall *%%ss:(%%esi); cld\n\t"
33544+ "push %%ss\n\t"
33545+ "pop %%ds\n\t"
33546 "jc 1f\n\t"
33547 "xor %%ah, %%ah\n"
33548 "1:"
33549@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33550 : "1" (PCIBIOS_READ_CONFIG_WORD),
33551 "b" (bx),
33552 "D" ((long)reg),
33553- "S" (&pci_indirect));
33554+ "S" (&pci_indirect),
33555+ "r" (__PCIBIOS_DS));
33556 /*
33557 * Zero-extend the result beyond 16 bits, do not trust the
33558 * BIOS having done it:
33559@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33560 *value &= 0xffff;
33561 break;
33562 case 4:
33563- __asm__("lcall *(%%esi); cld\n\t"
33564+ __asm__("movw %w6, %%ds\n\t"
33565+ "lcall *%%ss:(%%esi); cld\n\t"
33566+ "push %%ss\n\t"
33567+ "pop %%ds\n\t"
33568 "jc 1f\n\t"
33569 "xor %%ah, %%ah\n"
33570 "1:"
33571@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33572 : "1" (PCIBIOS_READ_CONFIG_DWORD),
33573 "b" (bx),
33574 "D" ((long)reg),
33575- "S" (&pci_indirect));
33576+ "S" (&pci_indirect),
33577+ "r" (__PCIBIOS_DS));
33578 break;
33579 }
33580
33581@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
33582
33583 switch (len) {
33584 case 1:
33585- __asm__("lcall *(%%esi); cld\n\t"
33586+ __asm__("movw %w6, %%ds\n\t"
33587+ "lcall *%%ss:(%%esi); cld\n\t"
33588+ "push %%ss\n\t"
33589+ "pop %%ds\n\t"
33590 "jc 1f\n\t"
33591 "xor %%ah, %%ah\n"
33592 "1:"
33593@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
33594 "c" (value),
33595 "b" (bx),
33596 "D" ((long)reg),
33597- "S" (&pci_indirect));
33598+ "S" (&pci_indirect),
33599+ "r" (__PCIBIOS_DS));
33600 break;
33601 case 2:
33602- __asm__("lcall *(%%esi); cld\n\t"
33603+ __asm__("movw %w6, %%ds\n\t"
33604+ "lcall *%%ss:(%%esi); cld\n\t"
33605+ "push %%ss\n\t"
33606+ "pop %%ds\n\t"
33607 "jc 1f\n\t"
33608 "xor %%ah, %%ah\n"
33609 "1:"
33610@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
33611 "c" (value),
33612 "b" (bx),
33613 "D" ((long)reg),
33614- "S" (&pci_indirect));
33615+ "S" (&pci_indirect),
33616+ "r" (__PCIBIOS_DS));
33617 break;
33618 case 4:
33619- __asm__("lcall *(%%esi); cld\n\t"
33620+ __asm__("movw %w6, %%ds\n\t"
33621+ "lcall *%%ss:(%%esi); cld\n\t"
33622+ "push %%ss\n\t"
33623+ "pop %%ds\n\t"
33624 "jc 1f\n\t"
33625 "xor %%ah, %%ah\n"
33626 "1:"
33627@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
33628 "c" (value),
33629 "b" (bx),
33630 "D" ((long)reg),
33631- "S" (&pci_indirect));
33632+ "S" (&pci_indirect),
33633+ "r" (__PCIBIOS_DS));
33634 break;
33635 }
33636
33637@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
33638
33639 DBG("PCI: Fetching IRQ routing table... ");
33640 __asm__("push %%es\n\t"
33641+ "movw %w8, %%ds\n\t"
33642 "push %%ds\n\t"
33643 "pop %%es\n\t"
33644- "lcall *(%%esi); cld\n\t"
33645+ "lcall *%%ss:(%%esi); cld\n\t"
33646 "pop %%es\n\t"
33647+ "push %%ss\n\t"
33648+ "pop %%ds\n"
33649 "jc 1f\n\t"
33650 "xor %%ah, %%ah\n"
33651 "1:"
33652@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
33653 "1" (0),
33654 "D" ((long) &opt),
33655 "S" (&pci_indirect),
33656- "m" (opt)
33657+ "m" (opt),
33658+ "r" (__PCIBIOS_DS)
33659 : "memory");
33660 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
33661 if (ret & 0xff00)
33662@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
33663 {
33664 int ret;
33665
33666- __asm__("lcall *(%%esi); cld\n\t"
33667+ __asm__("movw %w5, %%ds\n\t"
33668+ "lcall *%%ss:(%%esi); cld\n\t"
33669+ "push %%ss\n\t"
33670+ "pop %%ds\n"
33671 "jc 1f\n\t"
33672 "xor %%ah, %%ah\n"
33673 "1:"
33674@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
33675 : "0" (PCIBIOS_SET_PCI_HW_INT),
33676 "b" ((dev->bus->number << 8) | dev->devfn),
33677 "c" ((irq << 8) | (pin + 10)),
33678- "S" (&pci_indirect));
33679+ "S" (&pci_indirect),
33680+ "r" (__PCIBIOS_DS));
33681 return !(ret & 0xff00);
33682 }
33683 EXPORT_SYMBOL(pcibios_set_irq_routing);
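[Note on the pcbios.c rewrite: it stops calling the 32-bit BIOS through the flat __KERNEL_CS/__KERNEL_DS and instead installs __PCIBIOS_CS/__PCIBIOS_DS descriptors that cover only the BIOS service range, loading %ds explicitly around every lcall. The one subtle piece of arithmetic is the limit/granularity choice made in bios32_service(); isolated here as a sketch that mirrors the patch's logic:

	static void pcibios_limit_sketch(unsigned long length,
					 unsigned long *limit, unsigned char *flags)
	{
		*limit = length + 16UL;			/* some BIOSes under-report */
		*flags = 0x4;				/* 32-bit, byte granularity */
		if (*limit >= 64UL * 1024 * 1024) {	/* large range: */
			*limit >>= 12;			/* switch to page granularity */
			*flags |= 0x8;			/* (12 == PAGE_SHIFT) */
		}
	}
]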
33684diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
33685index 40e4469..d915bf9 100644
33686--- a/arch/x86/platform/efi/efi_32.c
33687+++ b/arch/x86/platform/efi/efi_32.c
33688@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
33689 {
33690 struct desc_ptr gdt_descr;
33691
33692+#ifdef CONFIG_PAX_KERNEXEC
33693+ struct desc_struct d;
33694+#endif
33695+
33696 local_irq_save(efi_rt_eflags);
33697
33698 load_cr3(initial_page_table);
33699 __flush_tlb_all();
33700
33701+#ifdef CONFIG_PAX_KERNEXEC
33702+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
33703+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
33704+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
33705+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
33706+#endif
33707+
33708 gdt_descr.address = __pa(get_cpu_gdt_table(0));
33709 gdt_descr.size = GDT_SIZE - 1;
33710 load_gdt(&gdt_descr);
33711@@ -58,11 +69,24 @@ void efi_call_phys_epilog(void)
33712 {
33713 struct desc_ptr gdt_descr;
33714
33715+#ifdef CONFIG_PAX_KERNEXEC
33716+ struct desc_struct d;
33717+
33718+ memset(&d, 0, sizeof d);
33719+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
33720+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
33721+#endif
33722+
33723 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
33724 gdt_descr.size = GDT_SIZE - 1;
33725 load_gdt(&gdt_descr);
33726
33727+#ifdef CONFIG_PAX_PER_CPU_PGD
33728+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
33729+#else
33730 load_cr3(swapper_pg_dir);
33731+#endif
33732+
33733 __flush_tlb_all();
33734
33735 local_irq_restore(efi_rt_eflags);
33736diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
33737index 39a0e7f1..872396e 100644
33738--- a/arch/x86/platform/efi/efi_64.c
33739+++ b/arch/x86/platform/efi/efi_64.c
33740@@ -76,6 +76,11 @@ void __init efi_call_phys_prelog(void)
33741 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
33742 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
33743 }
33744+
33745+#ifdef CONFIG_PAX_PER_CPU_PGD
33746+ load_cr3(swapper_pg_dir);
33747+#endif
33748+
33749 __flush_tlb_all();
33750 }
33751
33752@@ -89,6 +94,11 @@ void __init efi_call_phys_epilog(void)
33753 for (pgd = 0; pgd < n_pgds; pgd++)
33754 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
33755 kfree(save_pgd);
33756+
33757+#ifdef CONFIG_PAX_PER_CPU_PGD
33758+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
33759+#endif
33760+
33761 __flush_tlb_all();
33762 local_irq_restore(efi_flags);
33763 early_code_mapping_set_exec(0);
33764diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
33765index fbe66e6..eae5e38 100644
33766--- a/arch/x86/platform/efi/efi_stub_32.S
33767+++ b/arch/x86/platform/efi/efi_stub_32.S
33768@@ -6,7 +6,9 @@
33769 */
33770
33771 #include <linux/linkage.h>
33772+#include <linux/init.h>
33773 #include <asm/page_types.h>
33774+#include <asm/segment.h>
33775
33776 /*
33777 * efi_call_phys(void *, ...) is a function with variable parameters.
33778@@ -20,7 +22,7 @@
33779 * service functions will comply with gcc calling convention, too.
33780 */
33781
33782-.text
33783+__INIT
33784 ENTRY(efi_call_phys)
33785 /*
33786 * 0. The function can only be called in Linux kernel. So CS has been
33787@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
33788 * The mapping of lower virtual memory has been created in prelog and
33789 * epilog.
33790 */
33791- movl $1f, %edx
33792- subl $__PAGE_OFFSET, %edx
33793- jmp *%edx
33794+#ifdef CONFIG_PAX_KERNEXEC
33795+ movl $(__KERNEXEC_EFI_DS), %edx
33796+ mov %edx, %ds
33797+ mov %edx, %es
33798+ mov %edx, %ss
33799+ addl $2f,(1f)
33800+ ljmp *(1f)
33801+
33802+__INITDATA
33803+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
33804+.previous
33805+
33806+2:
33807+ subl $2b,(1b)
33808+#else
33809+ jmp 1f-__PAGE_OFFSET
33810 1:
33811+#endif
33812
33813 /*
33814 * 2. Now on the top of stack is the return
33815@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
33816 * parameter 2, ..., param n. To make things easy, we save the return
33817 * address of efi_call_phys in a global variable.
33818 */
33819- popl %edx
33820- movl %edx, saved_return_addr
33821- /* get the function pointer into ECX*/
33822- popl %ecx
33823- movl %ecx, efi_rt_function_ptr
33824- movl $2f, %edx
33825- subl $__PAGE_OFFSET, %edx
33826- pushl %edx
33827+ popl (saved_return_addr)
33828+ popl (efi_rt_function_ptr)
33829
33830 /*
33831 * 3. Clear PG bit in %CR0.
33832@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
33833 /*
33834 * 5. Call the physical function.
33835 */
33836- jmp *%ecx
33837+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
33838
33839-2:
33840 /*
33841 * 6. After EFI runtime service returns, control will return to
33842 * following instruction. We'd better readjust stack pointer first.
33843@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
33844 movl %cr0, %edx
33845 orl $0x80000000, %edx
33846 movl %edx, %cr0
33847- jmp 1f
33848-1:
33849+
33850 /*
33851 * 8. Now restore the virtual mode from flat mode by
33852 * adding EIP with PAGE_OFFSET.
33853 */
33854- movl $1f, %edx
33855- jmp *%edx
33856+#ifdef CONFIG_PAX_KERNEXEC
33857+ movl $(__KERNEL_DS), %edx
33858+ mov %edx, %ds
33859+ mov %edx, %es
33860+ mov %edx, %ss
33861+ ljmp $(__KERNEL_CS),$1f
33862+#else
33863+ jmp 1f+__PAGE_OFFSET
33864+#endif
33865 1:
33866
33867 /*
33868 * 9. Balance the stack. And because EAX contain the return value,
33869 * we'd better not clobber it.
33870 */
33871- leal efi_rt_function_ptr, %edx
33872- movl (%edx), %ecx
33873- pushl %ecx
33874+ pushl (efi_rt_function_ptr)
33875
33876 /*
33877- * 10. Push the saved return address onto the stack and return.
33878+ * 10. Return to the saved return address.
33879 */
33880- leal saved_return_addr, %edx
33881- movl (%edx), %ecx
33882- pushl %ecx
33883- ret
33884+ jmpl *(saved_return_addr)
33885 ENDPROC(efi_call_phys)
33886 .previous
33887
33888-.data
33889+__INITDATA
33890 saved_return_addr:
33891 .long 0
33892 efi_rt_function_ptr:
33893diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
33894index 4c07cca..2c8427d 100644
33895--- a/arch/x86/platform/efi/efi_stub_64.S
33896+++ b/arch/x86/platform/efi/efi_stub_64.S
33897@@ -7,6 +7,7 @@
33898 */
33899
33900 #include <linux/linkage.h>
33901+#include <asm/alternative-asm.h>
33902
33903 #define SAVE_XMM \
33904 mov %rsp, %rax; \
33905@@ -40,6 +41,7 @@ ENTRY(efi_call0)
33906 call *%rdi
33907 addq $32, %rsp
33908 RESTORE_XMM
33909+ pax_force_retaddr 0, 1
33910 ret
33911 ENDPROC(efi_call0)
33912
33913@@ -50,6 +52,7 @@ ENTRY(efi_call1)
33914 call *%rdi
33915 addq $32, %rsp
33916 RESTORE_XMM
33917+ pax_force_retaddr 0, 1
33918 ret
33919 ENDPROC(efi_call1)
33920
33921@@ -60,6 +63,7 @@ ENTRY(efi_call2)
33922 call *%rdi
33923 addq $32, %rsp
33924 RESTORE_XMM
33925+ pax_force_retaddr 0, 1
33926 ret
33927 ENDPROC(efi_call2)
33928
33929@@ -71,6 +75,7 @@ ENTRY(efi_call3)
33930 call *%rdi
33931 addq $32, %rsp
33932 RESTORE_XMM
33933+ pax_force_retaddr 0, 1
33934 ret
33935 ENDPROC(efi_call3)
33936
33937@@ -83,6 +88,7 @@ ENTRY(efi_call4)
33938 call *%rdi
33939 addq $32, %rsp
33940 RESTORE_XMM
33941+ pax_force_retaddr 0, 1
33942 ret
33943 ENDPROC(efi_call4)
33944
33945@@ -96,6 +102,7 @@ ENTRY(efi_call5)
33946 call *%rdi
33947 addq $48, %rsp
33948 RESTORE_XMM
33949+ pax_force_retaddr 0, 1
33950 ret
33951 ENDPROC(efi_call5)
33952
33953@@ -112,5 +119,6 @@ ENTRY(efi_call6)
33954 call *%rdi
33955 addq $48, %rsp
33956 RESTORE_XMM
33957+ pax_force_retaddr 0, 1
33958 ret
33959 ENDPROC(efi_call6)
33960diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
33961index 3ca5957..7909c18 100644
33962--- a/arch/x86/platform/mrst/mrst.c
33963+++ b/arch/x86/platform/mrst/mrst.c
33964@@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
33965 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
33966 int sfi_mrtc_num;
33967
33968-static void mrst_power_off(void)
33969+static __noreturn void mrst_power_off(void)
33970 {
33971+ BUG();
33972 }
33973
33974-static void mrst_reboot(void)
33975+static __noreturn void mrst_reboot(void)
33976 {
33977 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
33978+ BUG();
33979 }
33980
33981 /* parse all the mtimer info to a static mtimer array */
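[Note on the mrst.c hunks: the power-off and reboot callbacks are wired into function pointers that callers treat as never returning, so the patch makes the contract explicit with __noreturn plus a terminal BUG() in case the SCU command falls through. A userspace illustration of why the terminal statement matters; without it the noreturn promise would be a lie the optimizer acts on:

	#include <stdlib.h>

	static void power_off_sketch(void) __attribute__((noreturn));

	static void power_off_sketch(void)
	{
		/* issue the platform power-off command here ... */
		abort();	/* keeps the noreturn promise if the command fails */
	}

	int main(void)
	{
		power_off_sketch();
		/* unreachable: the compiler may drop anything placed here */
	}
]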
33982diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
33983index d6ee929..3637cb5 100644
33984--- a/arch/x86/platform/olpc/olpc_dt.c
33985+++ b/arch/x86/platform/olpc/olpc_dt.c
33986@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
33987 return res;
33988 }
33989
33990-static struct of_pdt_ops prom_olpc_ops __initdata = {
33991+static struct of_pdt_ops prom_olpc_ops __initconst = {
33992 .nextprop = olpc_dt_nextprop,
33993 .getproplen = olpc_dt_getproplen,
33994 .getproperty = olpc_dt_getproperty,
33995diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
33996index 424f4c9..f2a2988 100644
33997--- a/arch/x86/power/cpu.c
33998+++ b/arch/x86/power/cpu.c
33999@@ -137,11 +137,8 @@ static void do_fpu_end(void)
34000 static void fix_processor_context(void)
34001 {
34002 int cpu = smp_processor_id();
34003- struct tss_struct *t = &per_cpu(init_tss, cpu);
34004-#ifdef CONFIG_X86_64
34005- struct desc_struct *desc = get_cpu_gdt_table(cpu);
34006- tss_desc tss;
34007-#endif
34008+ struct tss_struct *t = init_tss + cpu;
34009+
34010 set_tss_desc(cpu, t); /*
34011 * This just modifies memory; should not be
34012 * necessary. But... This is necessary, because
34013@@ -150,10 +147,6 @@ static void fix_processor_context(void)
34014 */
34015
34016 #ifdef CONFIG_X86_64
34017- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
34018- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
34019- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
34020-
34021 syscall_init(); /* This sets MSR_*STAR and related */
34022 #endif
34023 load_TR_desc(); /* This does ltr */
34024diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
34025index a44f457..9140171 100644
34026--- a/arch/x86/realmode/init.c
34027+++ b/arch/x86/realmode/init.c
34028@@ -70,7 +70,13 @@ void __init setup_real_mode(void)
34029 __va(real_mode_header->trampoline_header);
34030
34031 #ifdef CONFIG_X86_32
34032- trampoline_header->start = __pa_symbol(startup_32_smp);
34033+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
34034+
34035+#ifdef CONFIG_PAX_KERNEXEC
34036+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
34037+#endif
34038+
34039+ trampoline_header->boot_cs = __BOOT_CS;
34040 trampoline_header->gdt_limit = __BOOT_DS + 7;
34041 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
34042 #else
34043@@ -86,7 +92,7 @@ void __init setup_real_mode(void)
34044 *trampoline_cr4_features = read_cr4();
34045
34046 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
34047- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
34048+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
34049 trampoline_pgd[511] = init_level4_pgt[511].pgd;
34050 #endif
34051 }
34052diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
34053index 9cac825..4890b25 100644
34054--- a/arch/x86/realmode/rm/Makefile
34055+++ b/arch/x86/realmode/rm/Makefile
34056@@ -79,5 +79,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
34057 $(call cc-option, -fno-unit-at-a-time)) \
34058 $(call cc-option, -fno-stack-protector) \
34059 $(call cc-option, -mpreferred-stack-boundary=2)
34060+ifdef CONSTIFY_PLUGIN
34061+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
34062+endif
34063 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
34064 GCOV_PROFILE := n
34065diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
34066index a28221d..93c40f1 100644
34067--- a/arch/x86/realmode/rm/header.S
34068+++ b/arch/x86/realmode/rm/header.S
34069@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
34070 #endif
34071 /* APM/BIOS reboot */
34072 .long pa_machine_real_restart_asm
34073-#ifdef CONFIG_X86_64
34074+#ifdef CONFIG_X86_32
34075+ .long __KERNEL_CS
34076+#else
34077 .long __KERNEL32_CS
34078 #endif
34079 END(real_mode_header)
34080diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
34081index c1b2791..f9e31c7 100644
34082--- a/arch/x86/realmode/rm/trampoline_32.S
34083+++ b/arch/x86/realmode/rm/trampoline_32.S
34084@@ -25,6 +25,12 @@
34085 #include <asm/page_types.h>
34086 #include "realmode.h"
34087
34088+#ifdef CONFIG_PAX_KERNEXEC
34089+#define ta(X) (X)
34090+#else
34091+#define ta(X) (pa_ ## X)
34092+#endif
34093+
34094 .text
34095 .code16
34096
34097@@ -39,8 +45,6 @@ ENTRY(trampoline_start)
34098
34099 cli # We should be safe anyway
34100
34101- movl tr_start, %eax # where we need to go
34102-
34103 movl $0xA5A5A5A5, trampoline_status
34104 # write marker for master knows we're running
34105
34106@@ -56,7 +60,7 @@ ENTRY(trampoline_start)
34107 movw $1, %dx # protected mode (PE) bit
34108 lmsw %dx # into protected mode
34109
34110- ljmpl $__BOOT_CS, $pa_startup_32
34111+ ljmpl *(trampoline_header)
34112
34113 .section ".text32","ax"
34114 .code32
34115@@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
34116 .balign 8
34117 GLOBAL(trampoline_header)
34118 tr_start: .space 4
34119- tr_gdt_pad: .space 2
34120+ tr_boot_cs: .space 2
34121 tr_gdt: .space 6
34122 END(trampoline_header)
34123
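[Note on the trampoline_32.S change: the hard-coded `ljmpl $__BOOT_CS, $pa_startup_32` becomes an indirect far jump through trampoline_header, whose former gdt padding now carries the boot code segment that setup_real_mode() fills in; this is needed because KERNEXEC shifts the protected-mode entry point. The header's .space layout above, re-expressed as a packed C struct for clarity (a sketch, name included):

	#include <stdint.h>

	struct trampoline_header_sketch {
		uint32_t start;		/* tr_start: protected-mode entry point */
		uint16_t boot_cs;	/* tr_boot_cs: was tr_gdt_pad before the patch */
		uint8_t  gdt[6];	/* tr_gdt: 16-bit limit + 32-bit base */
	} __attribute__((packed));
]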
34124diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
34125index bb360dc..d0fd8f8 100644
34126--- a/arch/x86/realmode/rm/trampoline_64.S
34127+++ b/arch/x86/realmode/rm/trampoline_64.S
34128@@ -94,6 +94,7 @@ ENTRY(startup_32)
34129 movl %edx, %gs
34130
34131 movl pa_tr_cr4, %eax
34132+ andl $~X86_CR4_PCIDE, %eax
34133 movl %eax, %cr4 # Enable PAE mode
34134
34135 # Setup trampoline 4 level pagetables
34136@@ -107,7 +108,7 @@ ENTRY(startup_32)
34137 wrmsr
34138
34139 # Enable paging and in turn activate Long Mode
34140- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
34141+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
34142 movl %eax, %cr0
34143
34144 /*
34145diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
34146index e812034..c747134 100644
34147--- a/arch/x86/tools/Makefile
34148+++ b/arch/x86/tools/Makefile
34149@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
34150
34151 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
34152
34153-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
34154+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
34155 hostprogs-y += relocs
34156 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
34157 relocs: $(obj)/relocs
34158diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
34159index f7bab68..b6d9886 100644
34160--- a/arch/x86/tools/relocs.c
34161+++ b/arch/x86/tools/relocs.c
34162@@ -1,5 +1,7 @@
34163 /* This is included from relocs_32/64.c */
34164
34165+#include "../../../include/generated/autoconf.h"
34166+
34167 #define ElfW(type) _ElfW(ELF_BITS, type)
34168 #define _ElfW(bits, type) __ElfW(bits, type)
34169 #define __ElfW(bits, type) Elf##bits##_##type
34170@@ -11,6 +13,7 @@
34171 #define Elf_Sym ElfW(Sym)
34172
34173 static Elf_Ehdr ehdr;
34174+static Elf_Phdr *phdr;
34175
34176 struct relocs {
34177 uint32_t *offset;
34178@@ -383,9 +386,39 @@ static void read_ehdr(FILE *fp)
34179 }
34180 }
34181
34182+static void read_phdrs(FILE *fp)
34183+{
34184+ unsigned int i;
34185+
34186+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
34187+ if (!phdr) {
34188+ die("Unable to allocate %d program headers\n",
34189+ ehdr.e_phnum);
34190+ }
34191+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
34192+ die("Seek to %d failed: %s\n",
34193+ ehdr.e_phoff, strerror(errno));
34194+ }
34195+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
34196+ die("Cannot read ELF program headers: %s\n",
34197+ strerror(errno));
34198+ }
34199+ for(i = 0; i < ehdr.e_phnum; i++) {
34200+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
34201+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
34202+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
34203+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
34204+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
34205+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
34206+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
34207+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
34208+ }
34209+
34210+}
34211+
34212 static void read_shdrs(FILE *fp)
34213 {
34214- int i;
34215+ unsigned int i;
34216 Elf_Shdr shdr;
34217
34218 secs = calloc(ehdr.e_shnum, sizeof(struct section));
34219@@ -420,7 +453,7 @@ static void read_shdrs(FILE *fp)
34220
34221 static void read_strtabs(FILE *fp)
34222 {
34223- int i;
34224+ unsigned int i;
34225 for (i = 0; i < ehdr.e_shnum; i++) {
34226 struct section *sec = &secs[i];
34227 if (sec->shdr.sh_type != SHT_STRTAB) {
34228@@ -445,7 +478,7 @@ static void read_strtabs(FILE *fp)
34229
34230 static void read_symtabs(FILE *fp)
34231 {
34232- int i,j;
34233+ unsigned int i,j;
34234 for (i = 0; i < ehdr.e_shnum; i++) {
34235 struct section *sec = &secs[i];
34236 if (sec->shdr.sh_type != SHT_SYMTAB) {
34237@@ -476,9 +509,11 @@ static void read_symtabs(FILE *fp)
34238 }
34239
34240
34241-static void read_relocs(FILE *fp)
34242+static void read_relocs(FILE *fp, int use_real_mode)
34243 {
34244- int i,j;
34245+ unsigned int i,j;
34246+ uint32_t base;
34247+
34248 for (i = 0; i < ehdr.e_shnum; i++) {
34249 struct section *sec = &secs[i];
34250 if (sec->shdr.sh_type != SHT_REL_TYPE) {
34251@@ -498,9 +533,22 @@ static void read_relocs(FILE *fp)
34252 die("Cannot read symbol table: %s\n",
34253 strerror(errno));
34254 }
34255+ base = 0;
34256+
34257+#ifdef CONFIG_X86_32
34258+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
34259+ if (phdr[j].p_type != PT_LOAD)
34260+ continue;
34261+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
34262+ continue;
34263+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
34264+ break;
34265+ }
34266+#endif
34267+
34268 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
34269 Elf_Rel *rel = &sec->reltab[j];
34270- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
34271+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
34272 rel->r_info = elf_xword_to_cpu(rel->r_info);
34273 #if (SHT_REL_TYPE == SHT_RELA)
34274 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
34275@@ -512,7 +560,7 @@ static void read_relocs(FILE *fp)
34276
34277 static void print_absolute_symbols(void)
34278 {
34279- int i;
34280+ unsigned int i;
34281 const char *format;
34282
34283 if (ELF_BITS == 64)
34284@@ -525,7 +573,7 @@ static void print_absolute_symbols(void)
34285 for (i = 0; i < ehdr.e_shnum; i++) {
34286 struct section *sec = &secs[i];
34287 char *sym_strtab;
34288- int j;
34289+ unsigned int j;
34290
34291 if (sec->shdr.sh_type != SHT_SYMTAB) {
34292 continue;
34293@@ -552,7 +600,7 @@ static void print_absolute_symbols(void)
34294
34295 static void print_absolute_relocs(void)
34296 {
34297- int i, printed = 0;
34298+ unsigned int i, printed = 0;
34299 const char *format;
34300
34301 if (ELF_BITS == 64)
34302@@ -565,7 +613,7 @@ static void print_absolute_relocs(void)
34303 struct section *sec_applies, *sec_symtab;
34304 char *sym_strtab;
34305 Elf_Sym *sh_symtab;
34306- int j;
34307+ unsigned int j;
34308 if (sec->shdr.sh_type != SHT_REL_TYPE) {
34309 continue;
34310 }
34311@@ -642,13 +690,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
34312 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
34313 Elf_Sym *sym, const char *symname))
34314 {
34315- int i;
34316+ unsigned int i;
34317 /* Walk through the relocations */
34318 for (i = 0; i < ehdr.e_shnum; i++) {
34319 char *sym_strtab;
34320 Elf_Sym *sh_symtab;
34321 struct section *sec_applies, *sec_symtab;
34322- int j;
34323+ unsigned int j;
34324 struct section *sec = &secs[i];
34325
34326 if (sec->shdr.sh_type != SHT_REL_TYPE) {
34327@@ -812,6 +860,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
34328 {
34329 unsigned r_type = ELF32_R_TYPE(rel->r_info);
34330 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
34331+ char *sym_strtab = sec->link->link->strtab;
34332+
34333+ /* Don't relocate actual per-cpu variables; they are absolute indices, not addresses */
34334+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
34335+ return 0;
34336+
34337+#ifdef CONFIG_PAX_KERNEXEC
34338+ /* Don't relocate actual code; it is relocated implicitly by the base address of KERNEL_CS */
34339+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
34340+ return 0;
34341+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
34342+ return 0;
34343+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
34344+ return 0;
34345+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
34346+ return 0;
34347+#endif
34348
34349 switch (r_type) {
34350 case R_386_NONE:
34351@@ -950,7 +1015,7 @@ static int write32_as_text(uint32_t v, FILE *f)
34352
34353 static void emit_relocs(int as_text, int use_real_mode)
34354 {
34355- int i;
34356+ unsigned int i;
34357 int (*write_reloc)(uint32_t, FILE *) = write32;
34358 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
34359 const char *symname);
34360@@ -1026,10 +1091,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
34361 {
34362 regex_init(use_real_mode);
34363 read_ehdr(fp);
34364+ read_phdrs(fp);
34365 read_shdrs(fp);
34366 read_strtabs(fp);
34367 read_symtabs(fp);
34368- read_relocs(fp);
34369+ read_relocs(fp, use_real_mode);
34370 if (ELF_BITS == 64)
34371 percpu_init();
34372 if (show_absolute_syms) {
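
The read_phdrs() helper added above follows the standard recipe for loading ELF program headers in a host-side tool: allocate e_phnum entries, seek to e_phoff, read them, then byte-swap each field. A minimal standalone sketch of the same pattern (userspace, <elf.h>, assuming a native-endian ELF64 input so the elf_*_to_cpu() swappers can be skipped; error handling abbreviated):

    /* Sketch: load ELF64 program headers and show the first PT_LOAD
     * segment's file-offset -> virtual-address mapping -- the same
     * window test the patched read_relocs() uses to pick `base'. */
    #include <elf.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(int argc, char **argv)
    {
        FILE *fp;
        Elf64_Ehdr ehdr;
        Elf64_Phdr *phdr;
        int i;

        if (argc != 2 || !(fp = fopen(argv[1], "rb")))
            return 1;
        if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1)
            return 1;

        phdr = calloc(ehdr.e_phnum, sizeof(*phdr));
        if (!phdr || fseek(fp, ehdr.e_phoff, SEEK_SET) < 0)
            return 1;
        if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum)
            return 1;

        for (i = 0; i < ehdr.e_phnum; i++) {
            if (phdr[i].p_type != PT_LOAD)
                continue;
            /* A section at file offset X in [p_offset, p_offset+p_filesz)
             * is loaded at p_vaddr + (X - p_offset). */
            printf("PT_LOAD: off %#jx -> vaddr %#jx (%ju bytes)\n",
                   (uintmax_t)phdr[i].p_offset,
                   (uintmax_t)phdr[i].p_vaddr,
                   (uintmax_t)phdr[i].p_filesz);
            break;
        }
        fclose(fp);
        free(phdr);
        return 0;
    }
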
34373diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
34374index 80ffa5b..a33bd15 100644
34375--- a/arch/x86/um/tls_32.c
34376+++ b/arch/x86/um/tls_32.c
34377@@ -260,7 +260,7 @@ out:
34378 if (unlikely(task == current &&
34379 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
34380 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
34381- "without flushed TLS.", current->pid);
34382+ "without flushed TLS.", task_pid_nr(current));
34383 }
34384
34385 return 0;
34386diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
34387index fd14be1..e3c79c0 100644
34388--- a/arch/x86/vdso/Makefile
34389+++ b/arch/x86/vdso/Makefile
34390@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
34391 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
34392 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
34393
34394-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
34395+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
34396 GCOV_PROFILE := n
34397
34398 #
34399diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
34400index d6bfb87..876ee18 100644
34401--- a/arch/x86/vdso/vdso32-setup.c
34402+++ b/arch/x86/vdso/vdso32-setup.c
34403@@ -25,6 +25,7 @@
34404 #include <asm/tlbflush.h>
34405 #include <asm/vdso.h>
34406 #include <asm/proto.h>
34407+#include <asm/mman.h>
34408
34409 enum {
34410 VDSO_DISABLED = 0,
34411@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
34412 void enable_sep_cpu(void)
34413 {
34414 int cpu = get_cpu();
34415- struct tss_struct *tss = &per_cpu(init_tss, cpu);
34416+ struct tss_struct *tss = init_tss + cpu;
34417
34418 if (!boot_cpu_has(X86_FEATURE_SEP)) {
34419 put_cpu();
34420@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
34421 gate_vma.vm_start = FIXADDR_USER_START;
34422 gate_vma.vm_end = FIXADDR_USER_END;
34423 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
34424- gate_vma.vm_page_prot = __P101;
34425+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
34426
34427 return 0;
34428 }
34429@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
34430 if (compat)
34431 addr = VDSO_HIGH_BASE;
34432 else {
34433- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
34434+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
34435 if (IS_ERR_VALUE(addr)) {
34436 ret = addr;
34437 goto up_fail;
34438 }
34439 }
34440
34441- current->mm->context.vdso = (void *)addr;
34442+ current->mm->context.vdso = addr;
34443
34444 if (compat_uses_vma || !compat) {
34445 /*
34446@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
34447 }
34448
34449 current_thread_info()->sysenter_return =
34450- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
34451+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
34452
34453 up_fail:
34454 if (ret)
34455- current->mm->context.vdso = NULL;
34456+ current->mm->context.vdso = 0;
34457
34458 up_write(&mm->mmap_sem);
34459
34460@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
34461
34462 const char *arch_vma_name(struct vm_area_struct *vma)
34463 {
34464- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
34465+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
34466 return "[vdso]";
34467+
34468+#ifdef CONFIG_PAX_SEGMEXEC
34469+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
34470+ return "[vdso]";
34471+#endif
34472+
34473 return NULL;
34474 }
34475
34476@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
34477 * Check to see if the corresponding task was created in compat vdso
34478 * mode.
34479 */
34480- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
34481+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
34482 return &gate_vma;
34483 return NULL;
34484 }
34485diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
34486index 431e875..cbb23f3 100644
34487--- a/arch/x86/vdso/vma.c
34488+++ b/arch/x86/vdso/vma.c
34489@@ -16,8 +16,6 @@
34490 #include <asm/vdso.h>
34491 #include <asm/page.h>
34492
34493-unsigned int __read_mostly vdso_enabled = 1;
34494-
34495 extern char vdso_start[], vdso_end[];
34496 extern unsigned short vdso_sync_cpuid;
34497
34498@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
34499 * unaligned here as a result of stack start randomization.
34500 */
34501 addr = PAGE_ALIGN(addr);
34502- addr = align_vdso_addr(addr);
34503
34504 return addr;
34505 }
34506@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
34507 unsigned size)
34508 {
34509 struct mm_struct *mm = current->mm;
34510- unsigned long addr;
34511+ unsigned long addr = 0;
34512 int ret;
34513
34514- if (!vdso_enabled)
34515- return 0;
34516-
34517 down_write(&mm->mmap_sem);
34518+
34519+#ifdef CONFIG_PAX_RANDMMAP
34520+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
34521+#endif
34522+
34523 addr = vdso_addr(mm->start_stack, size);
34524+ addr = align_vdso_addr(addr);
34525 addr = get_unmapped_area(NULL, addr, size, 0, 0);
34526 if (IS_ERR_VALUE(addr)) {
34527 ret = addr;
34528 goto up_fail;
34529 }
34530
34531- current->mm->context.vdso = (void *)addr;
34532+ mm->context.vdso = addr;
34533
34534 ret = install_special_mapping(mm, addr, size,
34535 VM_READ|VM_EXEC|
34536 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
34537 pages);
34538- if (ret) {
34539- current->mm->context.vdso = NULL;
34540- goto up_fail;
34541- }
34542+ if (ret)
34543+ mm->context.vdso = 0;
34544
34545 up_fail:
34546 up_write(&mm->mmap_sem);
34547@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
34548 vdsox32_size);
34549 }
34550 #endif
34551-
34552-static __init int vdso_setup(char *s)
34553-{
34554- vdso_enabled = simple_strtoul(s, NULL, 0);
34555- return 0;
34556-}
34557-__setup("vdso=", vdso_setup);
34558diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
34559index fa6ade7..73da73a5 100644
34560--- a/arch/x86/xen/enlighten.c
34561+++ b/arch/x86/xen/enlighten.c
34562@@ -123,8 +123,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
34563
34564 struct shared_info xen_dummy_shared_info;
34565
34566-void *xen_initial_gdt;
34567-
34568 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
34569 __read_mostly int xen_have_vector_callback;
34570 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
34571@@ -541,8 +539,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
34572 {
34573 unsigned long va = dtr->address;
34574 unsigned int size = dtr->size + 1;
34575- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
34576- unsigned long frames[pages];
34577+ unsigned long frames[65536 / PAGE_SIZE];
34578 int f;
34579
34580 /*
34581@@ -590,8 +587,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
34582 {
34583 unsigned long va = dtr->address;
34584 unsigned int size = dtr->size + 1;
34585- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
34586- unsigned long frames[pages];
34587+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
34588 int f;
34589
34590 /*
34591@@ -599,7 +595,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
34592 * 8-byte entries, or 16 4k pages..
34593 */
34594
34595- BUG_ON(size > 65536);
34596+ BUG_ON(size > GDT_SIZE);
34597 BUG_ON(va & ~PAGE_MASK);
34598
34599 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
34600@@ -988,7 +984,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
34601 return 0;
34602 }
34603
34604-static void set_xen_basic_apic_ops(void)
34605+static void __init set_xen_basic_apic_ops(void)
34606 {
34607 apic->read = xen_apic_read;
34608 apic->write = xen_apic_write;
34609@@ -1293,30 +1289,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
34610 #endif
34611 };
34612
34613-static void xen_reboot(int reason)
34614+static __noreturn void xen_reboot(int reason)
34615 {
34616 struct sched_shutdown r = { .reason = reason };
34617
34618- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
34619- BUG();
34620+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
34621+ BUG();
34622 }
34623
34624-static void xen_restart(char *msg)
34625+static __noreturn void xen_restart(char *msg)
34626 {
34627 xen_reboot(SHUTDOWN_reboot);
34628 }
34629
34630-static void xen_emergency_restart(void)
34631+static __noreturn void xen_emergency_restart(void)
34632 {
34633 xen_reboot(SHUTDOWN_reboot);
34634 }
34635
34636-static void xen_machine_halt(void)
34637+static __noreturn void xen_machine_halt(void)
34638 {
34639 xen_reboot(SHUTDOWN_poweroff);
34640 }
34641
34642-static void xen_machine_power_off(void)
34643+static __noreturn void xen_machine_power_off(void)
34644 {
34645 if (pm_power_off)
34646 pm_power_off();
34647@@ -1467,7 +1463,17 @@ asmlinkage void __init xen_start_kernel(void)
34648 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
34649
34650 /* Work out if we support NX */
34651- x86_configure_nx();
34652+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34653+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
34654+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
34655+ unsigned l, h;
34656+
34657+ __supported_pte_mask |= _PAGE_NX;
34658+ rdmsr(MSR_EFER, l, h);
34659+ l |= EFER_NX;
34660+ wrmsr(MSR_EFER, l, h);
34661+ }
34662+#endif
34663
34664 xen_setup_features();
34665
34666@@ -1498,13 +1504,6 @@ asmlinkage void __init xen_start_kernel(void)
34667
34668 machine_ops = xen_machine_ops;
34669
34670- /*
34671- * The only reliable way to retain the initial address of the
34672- * percpu gdt_page is to remember it here, so we can go and
34673- * mark it RW later, when the initial percpu area is freed.
34674- */
34675- xen_initial_gdt = &per_cpu(gdt_page, 0);
34676-
34677 xen_smp_init();
34678
34679 #ifdef CONFIG_ACPI_NUMA
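
The two xen_load_gdt hunks replace `unsigned long frames[pages]`, a variable-length array whose length derives from a descriptor handed in across the hypervisor interface, with arrays sized by the compile-time maximum (a GDT is at most 65536 bytes), keeping the runtime bound as a BUG_ON(). A minimal sketch of that VLA-removal pattern, with hypothetical names:

    /* Sketch: bound a formerly caller-controlled stack VLA at compile
     * time.  MAX_GDT_BYTES and load_frames() are hypothetical. */
    #include <assert.h>
    #include <stddef.h>

    #define PAGE_SIZE     4096
    #define MAX_GDT_BYTES 65536          /* architectural maximum */

    void load_frames(size_t size)
    {
        /* before: unsigned long frames[(size + PAGE_SIZE - 1) / PAGE_SIZE];
         * -- stack usage decided by the caller's `size' at runtime */
        unsigned long frames[(MAX_GDT_BYTES + PAGE_SIZE - 1) / PAGE_SIZE];

        assert(size <= MAX_GDT_BYTES);   /* the kernel uses BUG_ON() */
        (void)frames;                    /* ... fill one frame per page ... */
    }
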
34680diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
34681index fdc3ba2..23cae00 100644
34682--- a/arch/x86/xen/mmu.c
34683+++ b/arch/x86/xen/mmu.c
34684@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
34685 return val;
34686 }
34687
34688-static pteval_t pte_pfn_to_mfn(pteval_t val)
34689+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
34690 {
34691 if (val & _PAGE_PRESENT) {
34692 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
34693@@ -1894,6 +1894,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
34694 /* L3_k[510] -> level2_kernel_pgt
34695 * L3_i[511] -> level2_fixmap_pgt */
34696 convert_pfn_mfn(level3_kernel_pgt);
34697+ convert_pfn_mfn(level3_vmalloc_start_pgt);
34698+ convert_pfn_mfn(level3_vmalloc_end_pgt);
34699+ convert_pfn_mfn(level3_vmemmap_pgt);
34700
34701 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
34702 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
34703@@ -1923,8 +1926,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
34704 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
34705 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
34706 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
34707+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
34708+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
34709+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
34710 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
34711 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
34712+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
34713 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
34714 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
34715
34716@@ -2108,6 +2115,7 @@ static void __init xen_post_allocator_init(void)
34717 pv_mmu_ops.set_pud = xen_set_pud;
34718 #if PAGETABLE_LEVELS == 4
34719 pv_mmu_ops.set_pgd = xen_set_pgd;
34720+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
34721 #endif
34722
34723 /* This will work as long as patching hasn't happened yet
34724@@ -2186,6 +2194,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
34725 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
34726 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
34727 .set_pgd = xen_set_pgd_hyper,
34728+ .set_pgd_batched = xen_set_pgd_hyper,
34729
34730 .alloc_pud = xen_alloc_pmd_init,
34731 .release_pud = xen_release_pmd_init,
34732diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
34733index 31d0475..51af671 100644
34734--- a/arch/x86/xen/smp.c
34735+++ b/arch/x86/xen/smp.c
34736@@ -274,17 +274,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
34737 native_smp_prepare_boot_cpu();
34738
34739 if (xen_pv_domain()) {
34740- /* We've switched to the "real" per-cpu gdt, so make sure the
34741- old memory can be recycled */
34742- make_lowmem_page_readwrite(xen_initial_gdt);
34743-
34744 #ifdef CONFIG_X86_32
34745 /*
34746 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
34747 * expects __USER_DS
34748 */
34749- loadsegment(ds, __USER_DS);
34750- loadsegment(es, __USER_DS);
34751+ loadsegment(ds, __KERNEL_DS);
34752+ loadsegment(es, __KERNEL_DS);
34753 #endif
34754
34755 xen_filter_cpu_maps();
34756@@ -364,7 +360,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
34757 ctxt->user_regs.ss = __KERNEL_DS;
34758 #ifdef CONFIG_X86_32
34759 ctxt->user_regs.fs = __KERNEL_PERCPU;
34760- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
34761+ savesegment(gs, ctxt->user_regs.gs);
34762 #else
34763 ctxt->gs_base_kernel = per_cpu_offset(cpu);
34764 #endif
34765@@ -374,8 +370,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
34766
34767 {
34768 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
34769- ctxt->user_regs.ds = __USER_DS;
34770- ctxt->user_regs.es = __USER_DS;
34771+ ctxt->user_regs.ds = __KERNEL_DS;
34772+ ctxt->user_regs.es = __KERNEL_DS;
34773
34774 xen_copy_trap_info(ctxt->trap_ctxt);
34775
34776@@ -420,13 +416,12 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
34777 int rc;
34778
34779 per_cpu(current_task, cpu) = idle;
34780+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
34781 #ifdef CONFIG_X86_32
34782 irq_ctx_init(cpu);
34783 #else
34784 clear_tsk_thread_flag(idle, TIF_FORK);
34785- per_cpu(kernel_stack, cpu) =
34786- (unsigned long)task_stack_page(idle) -
34787- KERNEL_STACK_OFFSET + THREAD_SIZE;
34788+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
34789 #endif
34790 xen_setup_runstate_info(cpu);
34791 xen_setup_timer(cpu);
34792@@ -702,7 +697,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
34793
34794 void __init xen_smp_init(void)
34795 {
34796- smp_ops = xen_smp_ops;
34797+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
34798 xen_fill_possible_map();
34799 }
34800
34801diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
34802index 33ca6e4..0ded929 100644
34803--- a/arch/x86/xen/xen-asm_32.S
34804+++ b/arch/x86/xen/xen-asm_32.S
34805@@ -84,14 +84,14 @@ ENTRY(xen_iret)
34806 ESP_OFFSET=4 # bytes pushed onto stack
34807
34808 /*
34809- * Store vcpu_info pointer for easy access. Do it this way to
34810- * avoid having to reload %fs
34811+ * Store vcpu_info pointer for easy access.
34812 */
34813 #ifdef CONFIG_SMP
34814- GET_THREAD_INFO(%eax)
34815- movl %ss:TI_cpu(%eax), %eax
34816- movl %ss:__per_cpu_offset(,%eax,4), %eax
34817- mov %ss:xen_vcpu(%eax), %eax
34818+ push %fs
34819+ mov $(__KERNEL_PERCPU), %eax
34820+ mov %eax, %fs
34821+ mov PER_CPU_VAR(xen_vcpu), %eax
34822+ pop %fs
34823 #else
34824 movl %ss:xen_vcpu, %eax
34825 #endif
34826diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
34827index 7faed58..ba4427c 100644
34828--- a/arch/x86/xen/xen-head.S
34829+++ b/arch/x86/xen/xen-head.S
34830@@ -19,6 +19,17 @@ ENTRY(startup_xen)
34831 #ifdef CONFIG_X86_32
34832 mov %esi,xen_start_info
34833 mov $init_thread_union+THREAD_SIZE,%esp
34834+#ifdef CONFIG_SMP
34835+ movl $cpu_gdt_table,%edi
34836+ movl $__per_cpu_load,%eax
34837+ movw %ax,__KERNEL_PERCPU + 2(%edi)
34838+ rorl $16,%eax
34839+ movb %al,__KERNEL_PERCPU + 4(%edi)
34840+ movb %ah,__KERNEL_PERCPU + 7(%edi)
34841+ movl $__per_cpu_end - 1,%eax
34842+ subl $__per_cpu_start,%eax
34843+ movw %ax,__KERNEL_PERCPU + 0(%edi)
34844+#endif
34845 #else
34846 mov %rsi,xen_start_info
34847 mov $init_thread_union+THREAD_SIZE,%rsp
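
The SMP block added to startup_xen patches the __KERNEL_PERCPU descriptor in cpu_gdt_table in place: the per-cpu load address goes into the descriptor's base field (bytes 2-4 and 7) and the segment size minus one into the low limit field (bytes 0-1; x86 segment limits are inclusive). A C sketch of that byte layout, with a hypothetical helper:

    /* Sketch: write base/limit of an 8-byte x86 segment descriptor,
     * mirroring the bytes the assembly above patches.  Type, flags and
     * limit bits 16..19 (bytes 5-6) are left untouched, as in the patch. */
    #include <stdint.h>

    static void set_desc_base_limit(uint8_t desc[8],
                                    uint32_t base, uint32_t limit)
    {
        desc[0] = limit & 0xff;          /* limit  0..7   */
        desc[1] = (limit >> 8) & 0xff;   /* limit  8..15  */
        desc[2] = base & 0xff;           /* base   0..7   */
        desc[3] = (base >> 8) & 0xff;    /* base   8..15  */
        desc[4] = (base >> 16) & 0xff;   /* base  16..23  */
        desc[7] = (base >> 24) & 0xff;   /* base  24..31  */
    }
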
34848diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
34849index 95f8c61..611d6e8 100644
34850--- a/arch/x86/xen/xen-ops.h
34851+++ b/arch/x86/xen/xen-ops.h
34852@@ -10,8 +10,6 @@
34853 extern const char xen_hypervisor_callback[];
34854 extern const char xen_failsafe_callback[];
34855
34856-extern void *xen_initial_gdt;
34857-
34858 struct trap_info;
34859 void xen_copy_trap_info(struct trap_info *traps);
34860
34861diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
34862index 525bd3d..ef888b1 100644
34863--- a/arch/xtensa/variants/dc232b/include/variant/core.h
34864+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
34865@@ -119,9 +119,9 @@
34866 ----------------------------------------------------------------------*/
34867
34868 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
34869-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
34870 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
34871 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
34872+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
34873
34874 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
34875 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
34876diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
34877index 2f33760..835e50a 100644
34878--- a/arch/xtensa/variants/fsf/include/variant/core.h
34879+++ b/arch/xtensa/variants/fsf/include/variant/core.h
34880@@ -11,6 +11,7 @@
34881 #ifndef _XTENSA_CORE_H
34882 #define _XTENSA_CORE_H
34883
34884+#include <linux/const.h>
34885
34886 /****************************************************************************
34887 Parameters Useful for Any Code, USER or PRIVILEGED
34888@@ -112,9 +113,9 @@
34889 ----------------------------------------------------------------------*/
34890
34891 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
34892-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
34893 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
34894 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
34895+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
34896
34897 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
34898 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
34899diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
34900index af00795..2bb8105 100644
34901--- a/arch/xtensa/variants/s6000/include/variant/core.h
34902+++ b/arch/xtensa/variants/s6000/include/variant/core.h
34903@@ -11,6 +11,7 @@
34904 #ifndef _XTENSA_CORE_CONFIGURATION_H
34905 #define _XTENSA_CORE_CONFIGURATION_H
34906
34907+#include <linux/const.h>
34908
34909 /****************************************************************************
34910 Parameters Useful for Any Code, USER or PRIVILEGED
34911@@ -118,9 +119,9 @@
34912 ----------------------------------------------------------------------*/
34913
34914 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
34915-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
34916 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
34917 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
34918+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
34919
34920 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
34921 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
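
All three xtensa variant headers get the same treatment: XCHAL_DCACHE_LINESIZE is derived from XCHAL_DCACHE_LINEWIDTH rather than maintained as a second literal that could silently drift, and _AC() gives the constant the UL type in C while staying a bare number in assembly. A minimal illustration of the idiom:

    /* Sketch: derive a size from its log2 so the pair cannot disagree.
     * _AC() is the kernel's <linux/const.h> helper, reproduced here. */
    #ifndef __ASSEMBLY__
    # define __AC(X, Y) (X##Y)
    # define _AC(X, Y)  __AC(X, Y)   /* C: typed literal, e.g. 1UL */
    #else
    # define _AC(X, Y)  X            /* assembly: plain number */
    #endif

    #define DCACHE_LINEWIDTH 5                                 /* log2(bytes) */
    #define DCACHE_LINESIZE  (_AC(1, UL) << DCACHE_LINEWIDTH)  /* 32 bytes */
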
34922diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
34923index 4e491d9..c8e18e4 100644
34924--- a/block/blk-cgroup.c
34925+++ b/block/blk-cgroup.c
34926@@ -812,7 +812,7 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
34927 static struct cgroup_subsys_state *
34928 blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
34929 {
34930- static atomic64_t id_seq = ATOMIC64_INIT(0);
34931+ static atomic64_unchecked_t id_seq = ATOMIC64_INIT(0);
34932 struct blkcg *blkcg;
34933
34934 if (!parent_css) {
34935@@ -826,7 +826,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
34936
34937 blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
34938 blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
34939- blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
34940+ blkcg->id = atomic64_inc_return_unchecked(&id_seq); /* root is 0, start from 1 */
34941 done:
34942 spin_lock_init(&blkcg->lock);
34943 INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
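
This is the first of many atomic_t -> atomic_unchecked_t conversions in the patch. Under PAX_REFCOUNT, ordinary atomic increments trap on signed overflow to stop refcount-overflow exploits; counters where wraparound is harmless by design (here, a cgroup ID sequence) are moved to the _unchecked variants so they cannot raise false positives. A conceptual sketch of the split -- not grsecurity's implementation, which uses arch-specific inline asm, and with atomicity omitted for brevity:

    /* Conceptual sketch of checked vs. unchecked increments. */
    #include <stdint.h>
    #include <stdlib.h>

    typedef struct { int64_t v; } atomic64_t;            /* checked   */
    typedef struct { int64_t v; } atomic64_unchecked_t;  /* unchecked */

    static int64_t atomic64_inc_return(atomic64_t *a)
    {
        int64_t n;
        if (__builtin_add_overflow(a->v, 1, &n))
            abort();              /* kernel: trap + refcount report */
        return a->v = n;
    }

    static int64_t atomic64_inc_return_unchecked(atomic64_unchecked_t *a)
    {
        return ++a->v;            /* plain two's-complement wraparound */
    }
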
34944diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
34945index 4b8d9b54..a7178c0 100644
34946--- a/block/blk-iopoll.c
34947+++ b/block/blk-iopoll.c
34948@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
34949 }
34950 EXPORT_SYMBOL(blk_iopoll_complete);
34951
34952-static void blk_iopoll_softirq(struct softirq_action *h)
34953+static __latent_entropy void blk_iopoll_softirq(void)
34954 {
34955 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
34956 int rearm = 0, budget = blk_iopoll_budget;
34957diff --git a/block/blk-map.c b/block/blk-map.c
34958index 623e1cd..ca1e109 100644
34959--- a/block/blk-map.c
34960+++ b/block/blk-map.c
34961@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
34962 if (!len || !kbuf)
34963 return -EINVAL;
34964
34965- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
34966+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
34967 if (do_copy)
34968 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
34969 else
34970diff --git a/block/blk-softirq.c b/block/blk-softirq.c
34971index ec9e606..3f38839 100644
34972--- a/block/blk-softirq.c
34973+++ b/block/blk-softirq.c
34974@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
34975 * Softirq action handler - move entries to local list and loop over them
34976 * while passing them to the queue registered handler.
34977 */
34978-static void blk_done_softirq(struct softirq_action *h)
34979+static __latent_entropy void blk_done_softirq(void)
34980 {
34981 struct list_head *cpu_list, local_list;
34982
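
Both block softirq handlers lose their unused struct softirq_action argument and gain __latent_entropy, the marker for the grsecurity/PaX latent_entropy GCC plugin: the plugin instruments marked functions to fold hard-to-predict runtime state into an entropy pool, which helps early boot when interrupt entropy is scarce. Very roughly, and purely as a conceptual sketch with hypothetical names:

    /* Conceptual sketch only; the real plugin injects per-function
     * random constants and pool updates at compile time. */
    static unsigned long latent_entropy_pool;

    static void blk_done_softirq_sketch(void)
    {
        unsigned long mix = latent_entropy_pool;
        mix ^= (unsigned long)&mix;          /* stack address varies */
        mix = mix * 2654435761UL + 0x9e3779b9UL;
        latent_entropy_pool = mix;
        /* ... original handler body ... */
    }
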
34983diff --git a/block/bsg.c b/block/bsg.c
34984index 420a5a9..23834aa 100644
34985--- a/block/bsg.c
34986+++ b/block/bsg.c
34987@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
34988 struct sg_io_v4 *hdr, struct bsg_device *bd,
34989 fmode_t has_write_perm)
34990 {
34991+ unsigned char tmpcmd[sizeof(rq->__cmd)];
34992+ unsigned char *cmdptr;
34993+
34994 if (hdr->request_len > BLK_MAX_CDB) {
34995 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
34996 if (!rq->cmd)
34997 return -ENOMEM;
34998- }
34999+ cmdptr = rq->cmd;
35000+ } else
35001+ cmdptr = tmpcmd;
35002
35003- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
35004+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
35005 hdr->request_len))
35006 return -EFAULT;
35007
35008+ if (cmdptr != rq->cmd)
35009+ memcpy(rq->cmd, cmdptr, hdr->request_len);
35010+
35011 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
35012 if (blk_verify_command(rq->cmd, has_write_perm))
35013 return -EPERM;
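
The bsg hunk above and the scsi_ioctl.c hunks further down share one hardening pattern: when the command might land in the fixed-size inline buffer rq->__cmd, the user copy goes into a stack bounce buffer of exactly sizeof(rq->__cmd) first, and only then into rq->cmd, so the copy length is always checked against a known object size. A userspace sketch of the shape, with a hypothetical struct and sizes (memcpy() stands in for copy_from_user()):

    /* Sketch of the bounce-buffer pattern; struct and sizes hypothetical. */
    #include <string.h>

    #define FIXED_CMD_LEN 16

    struct request {
        unsigned char __cmd[FIXED_CMD_LEN];  /* inline buffer          */
        unsigned char *cmd;                  /* == __cmd, or heap copy */
    };

    int fill_cmd(struct request *rq, const unsigned char *src, size_t len)
    {
        unsigned char tmpcmd[FIXED_CMD_LEN];
        unsigned char *cmdptr;

        /* Long commands already point at a big-enough heap buffer;
         * short ones bounce through a buffer of the inline size. */
        cmdptr = (rq->cmd != rq->__cmd) ? rq->cmd : tmpcmd;
        if (cmdptr == tmpcmd && len > sizeof(tmpcmd))
            return -1;                       /* would overflow inline buf */

        memcpy(cmdptr, src, len);            /* kernel: copy_from_user() */
        if (cmdptr != rq->cmd)
            memcpy(rq->cmd, cmdptr, len);
        return 0;
    }
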
35014diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
35015index fbd5a67..5d631b5 100644
35016--- a/block/compat_ioctl.c
35017+++ b/block/compat_ioctl.c
35018@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
35019 err |= __get_user(f->spec1, &uf->spec1);
35020 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
35021 err |= __get_user(name, &uf->name);
35022- f->name = compat_ptr(name);
35023+ f->name = (void __force_kernel *)compat_ptr(name);
35024 if (err) {
35025 err = -EFAULT;
35026 goto out;
35027diff --git a/block/genhd.c b/block/genhd.c
35028index 791f419..89f21c4 100644
35029--- a/block/genhd.c
35030+++ b/block/genhd.c
35031@@ -467,21 +467,24 @@ static char *bdevt_str(dev_t devt, char *buf)
35032
35033 /*
35034 * Register device numbers dev..(dev+range-1)
35035- * range must be nonzero
35036+ * No-op if @range is zero.
35037 * The hash chain is sorted on range, so that subranges can override.
35038 */
35039 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
35040 struct kobject *(*probe)(dev_t, int *, void *),
35041 int (*lock)(dev_t, void *), void *data)
35042 {
35043- kobj_map(bdev_map, devt, range, module, probe, lock, data);
35044+ if (range)
35045+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
35046 }
35047
35048 EXPORT_SYMBOL(blk_register_region);
35049
35050+/* undo blk_register_region(); no-op if @range is zero */
35051 void blk_unregister_region(dev_t devt, unsigned long range)
35052 {
35053- kobj_unmap(bdev_map, devt, range);
35054+ if (range)
35055+ kobj_unmap(bdev_map, devt, range);
35056 }
35057
35058 EXPORT_SYMBOL(blk_unregister_region);
35059diff --git a/block/partitions/efi.c b/block/partitions/efi.c
35060index a8287b4..241a48e 100644
35061--- a/block/partitions/efi.c
35062+++ b/block/partitions/efi.c
35063@@ -292,14 +292,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
35064 if (!gpt)
35065 return NULL;
35066
35067+ if (!le32_to_cpu(gpt->num_partition_entries))
35068+ return NULL;
35069+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
35070+ if (!pte)
35071+ return NULL;
35072+
35073 count = le32_to_cpu(gpt->num_partition_entries) *
35074 le32_to_cpu(gpt->sizeof_partition_entry);
35075- if (!count)
35076- return NULL;
35077- pte = kmalloc(count, GFP_KERNEL);
35078- if (!pte)
35079- return NULL;
35080-
35081 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
35082 (u8 *) pte, count) < count) {
35083 kfree(pte);
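
The efi.c change is a classic multiplication-overflow fix: num_partition_entries and sizeof_partition_entry both come from on-disk GPT data, so their product can wrap and make kmalloc() return a buffer smaller than the read_lba() that follows. kcalloc(n, size) performs a checked multiply and fails instead. A userspace sketch of the difference (calloc() is likewise required to reject overflowing products):

    /* Sketch: checked vs. unchecked size computation. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        size_t n = SIZE_MAX / 2 + 1, size = 4;

        size_t raw = n * size;       /* wraps to 0: undersized buffer */
        printf("unchecked product: %zu\n", raw);

        void *p = calloc(n, size);   /* overflow-checked multiply */
        printf("calloc: %s\n", p ? "allocated (!)" : "rejected");
        free(p);
        return 0;
    }
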
35084diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
35085index a5ffcc9..e057498 100644
35086--- a/block/scsi_ioctl.c
35087+++ b/block/scsi_ioctl.c
35088@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
35089 return put_user(0, p);
35090 }
35091
35092-static int sg_get_timeout(struct request_queue *q)
35093+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
35094 {
35095 return jiffies_to_clock_t(q->sg_timeout);
35096 }
35097@@ -224,8 +224,20 @@ EXPORT_SYMBOL(blk_verify_command);
35098 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
35099 struct sg_io_hdr *hdr, fmode_t mode)
35100 {
35101- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
35102+ unsigned char tmpcmd[sizeof(rq->__cmd)];
35103+ unsigned char *cmdptr;
35104+
35105+ if (rq->cmd != rq->__cmd)
35106+ cmdptr = rq->cmd;
35107+ else
35108+ cmdptr = tmpcmd;
35109+
35110+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
35111 return -EFAULT;
35112+
35113+ if (cmdptr != rq->cmd)
35114+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
35115+
35116 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
35117 return -EPERM;
35118
35119@@ -434,6 +446,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
35120 int err;
35121 unsigned int in_len, out_len, bytes, opcode, cmdlen;
35122 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
35123+ unsigned char tmpcmd[sizeof(rq->__cmd)];
35124+ unsigned char *cmdptr;
35125
35126 if (!sic)
35127 return -EINVAL;
35128@@ -467,9 +481,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
35129 */
35130 err = -EFAULT;
35131 rq->cmd_len = cmdlen;
35132- if (copy_from_user(rq->cmd, sic->data, cmdlen))
35133+
35134+ if (rq->cmd != rq->__cmd)
35135+ cmdptr = rq->cmd;
35136+ else
35137+ cmdptr = tmpcmd;
35138+
35139+ if (copy_from_user(cmdptr, sic->data, cmdlen))
35140 goto error;
35141
35142+ if (rq->cmd != cmdptr)
35143+ memcpy(rq->cmd, cmdptr, cmdlen);
35144+
35145 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
35146 goto error;
35147
35148diff --git a/crypto/cryptd.c b/crypto/cryptd.c
35149index 7bdd61b..afec999 100644
35150--- a/crypto/cryptd.c
35151+++ b/crypto/cryptd.c
35152@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
35153
35154 struct cryptd_blkcipher_request_ctx {
35155 crypto_completion_t complete;
35156-};
35157+} __no_const;
35158
35159 struct cryptd_hash_ctx {
35160 struct crypto_shash *child;
35161@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
35162
35163 struct cryptd_aead_request_ctx {
35164 crypto_completion_t complete;
35165-};
35166+} __no_const;
35167
35168 static void cryptd_queue_worker(struct work_struct *work);
35169
35170diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
35171index f8c920c..ab2cb5a 100644
35172--- a/crypto/pcrypt.c
35173+++ b/crypto/pcrypt.c
35174@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
35175 int ret;
35176
35177 pinst->kobj.kset = pcrypt_kset;
35178- ret = kobject_add(&pinst->kobj, NULL, name);
35179+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
35180 if (!ret)
35181 kobject_uevent(&pinst->kobj, KOBJ_ADD);
35182
35183diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
35184index f220d64..d359ad6 100644
35185--- a/drivers/acpi/apei/apei-internal.h
35186+++ b/drivers/acpi/apei/apei-internal.h
35187@@ -20,7 +20,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
35188 struct apei_exec_ins_type {
35189 u32 flags;
35190 apei_exec_ins_func_t run;
35191-};
35192+} __do_const;
35193
35194 struct apei_exec_context {
35195 u32 ip;
35196diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
35197index 33dc6a0..4b24b47 100644
35198--- a/drivers/acpi/apei/cper.c
35199+++ b/drivers/acpi/apei/cper.c
35200@@ -39,12 +39,12 @@
35201 */
35202 u64 cper_next_record_id(void)
35203 {
35204- static atomic64_t seq;
35205+ static atomic64_unchecked_t seq;
35206
35207- if (!atomic64_read(&seq))
35208- atomic64_set(&seq, ((u64)get_seconds()) << 32);
35209+ if (!atomic64_read_unchecked(&seq))
35210+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
35211
35212- return atomic64_inc_return(&seq);
35213+ return atomic64_inc_return_unchecked(&seq);
35214 }
35215 EXPORT_SYMBOL_GPL(cper_next_record_id);
35216
35217diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
35218index 8ec37bb..b0716e5 100644
35219--- a/drivers/acpi/apei/ghes.c
35220+++ b/drivers/acpi/apei/ghes.c
35221@@ -498,7 +498,7 @@ static void __ghes_print_estatus(const char *pfx,
35222 const struct acpi_hest_generic *generic,
35223 const struct acpi_hest_generic_status *estatus)
35224 {
35225- static atomic_t seqno;
35226+ static atomic_unchecked_t seqno;
35227 unsigned int curr_seqno;
35228 char pfx_seq[64];
35229
35230@@ -509,7 +509,7 @@ static void __ghes_print_estatus(const char *pfx,
35231 else
35232 pfx = KERN_ERR;
35233 }
35234- curr_seqno = atomic_inc_return(&seqno);
35235+ curr_seqno = atomic_inc_return_unchecked(&seqno);
35236 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
35237 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
35238 pfx_seq, generic->header.source_id);
35239diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
35240index a83e3c6..c3d617f 100644
35241--- a/drivers/acpi/bgrt.c
35242+++ b/drivers/acpi/bgrt.c
35243@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
35244 if (!bgrt_image)
35245 return -ENODEV;
35246
35247- bin_attr_image.private = bgrt_image;
35248- bin_attr_image.size = bgrt_image_size;
35249+ pax_open_kernel();
35250+ *(void **)&bin_attr_image.private = bgrt_image;
35251+ *(size_t *)&bin_attr_image.size = bgrt_image_size;
35252+ pax_close_kernel();
35253
35254 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
35255 if (!bgrt_kobj)
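
bin_attr_image is constified elsewhere in this patch, so the two fields that genuinely must be set at runtime are written inside a pax_open_kernel()/pax_close_kernel() window, during which KERNEXEC permits writes to otherwise read-only kernel data (the `*(void **)&` casts strip the const so the assignments compile). A rough userspace analogue of the open-write-close window, using mprotect() on a page-aligned object:

    /* Conceptual analogue only: briefly re-enable writes to a read-only
     * page, write, re-protect.  Return-value checks abbreviated. */
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static void write_ro(void *page, const void *src, size_t len)
    {
        size_t psz = (size_t)sysconf(_SC_PAGESIZE);

        mprotect(page, psz, PROT_READ | PROT_WRITE);   /* "open"  */
        memcpy(page, src, len);
        mprotect(page, psz, PROT_READ);                /* "close" */
    }
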
35256diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
35257index 9515f18..4b149c9 100644
35258--- a/drivers/acpi/blacklist.c
35259+++ b/drivers/acpi/blacklist.c
35260@@ -52,7 +52,7 @@ struct acpi_blacklist_item {
35261 u32 is_critical_error;
35262 };
35263
35264-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
35265+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
35266
35267 /*
35268 * POLICY: If *anything* doesn't work, put it on the blacklist.
35269@@ -199,7 +199,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
35270 return 0;
35271 }
35272
35273-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
35274+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
35275 {
35276 .callback = dmi_disable_osi_vista,
35277 .ident = "Fujitsu Siemens",
35278diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
35279index 12b62f2..dc2aac8 100644
35280--- a/drivers/acpi/custom_method.c
35281+++ b/drivers/acpi/custom_method.c
35282@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
35283 struct acpi_table_header table;
35284 acpi_status status;
35285
35286+#ifdef CONFIG_GRKERNSEC_KMEM
35287+ return -EPERM;
35288+#endif
35289+
35290 if (!(*ppos)) {
35291 /* parse the table header to get the table length */
35292 if (count <= sizeof(struct acpi_table_header))
35293diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
35294index c7414a5..d5afd71 100644
35295--- a/drivers/acpi/processor_idle.c
35296+++ b/drivers/acpi/processor_idle.c
35297@@ -966,7 +966,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
35298 {
35299 int i, count = CPUIDLE_DRIVER_STATE_START;
35300 struct acpi_processor_cx *cx;
35301- struct cpuidle_state *state;
35302+ cpuidle_state_no_const *state;
35303 struct cpuidle_driver *drv = &acpi_idle_driver;
35304
35305 if (!pr->flags.power_setup_done)
35306diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
35307index 05306a5..733d1f0 100644
35308--- a/drivers/acpi/sysfs.c
35309+++ b/drivers/acpi/sysfs.c
35310@@ -423,11 +423,11 @@ static u32 num_counters;
35311 static struct attribute **all_attrs;
35312 static u32 acpi_gpe_count;
35313
35314-static struct attribute_group interrupt_stats_attr_group = {
35315+static attribute_group_no_const interrupt_stats_attr_group = {
35316 .name = "interrupts",
35317 };
35318
35319-static struct kobj_attribute *counter_attrs;
35320+static kobj_attribute_no_const *counter_attrs;
35321
35322 static void delete_gpe_attr_array(void)
35323 {
35324diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
35325index cfb7447..98f2149 100644
35326--- a/drivers/ata/libahci.c
35327+++ b/drivers/ata/libahci.c
35328@@ -1239,7 +1239,7 @@ int ahci_kick_engine(struct ata_port *ap)
35329 }
35330 EXPORT_SYMBOL_GPL(ahci_kick_engine);
35331
35332-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
35333+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
35334 struct ata_taskfile *tf, int is_cmd, u16 flags,
35335 unsigned long timeout_msec)
35336 {
35337diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
35338index 81a94a3..b711c74 100644
35339--- a/drivers/ata/libata-core.c
35340+++ b/drivers/ata/libata-core.c
35341@@ -98,7 +98,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
35342 static void ata_dev_xfermask(struct ata_device *dev);
35343 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
35344
35345-atomic_t ata_print_id = ATOMIC_INIT(0);
35346+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
35347
35348 struct ata_force_param {
35349 const char *name;
35350@@ -4809,7 +4809,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
35351 struct ata_port *ap;
35352 unsigned int tag;
35353
35354- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
35355+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
35356 ap = qc->ap;
35357
35358 qc->flags = 0;
35359@@ -4825,7 +4825,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
35360 struct ata_port *ap;
35361 struct ata_link *link;
35362
35363- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
35364+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
35365 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
35366 ap = qc->ap;
35367 link = qc->dev->link;
35368@@ -5944,6 +5944,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
35369 return;
35370
35371 spin_lock(&lock);
35372+ pax_open_kernel();
35373
35374 for (cur = ops->inherits; cur; cur = cur->inherits) {
35375 void **inherit = (void **)cur;
35376@@ -5957,8 +5958,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
35377 if (IS_ERR(*pp))
35378 *pp = NULL;
35379
35380- ops->inherits = NULL;
35381+ *(struct ata_port_operations **)&ops->inherits = NULL;
35382
35383+ pax_close_kernel();
35384 spin_unlock(&lock);
35385 }
35386
35387@@ -6151,7 +6153,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
35388
35389 /* give ports names and add SCSI hosts */
35390 for (i = 0; i < host->n_ports; i++) {
35391- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
35392+ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
35393 host->ports[i]->local_port_no = i + 1;
35394 }
35395
35396diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
35397index ab58556..ed19dd2 100644
35398--- a/drivers/ata/libata-scsi.c
35399+++ b/drivers/ata/libata-scsi.c
35400@@ -4114,7 +4114,7 @@ int ata_sas_port_init(struct ata_port *ap)
35401
35402 if (rc)
35403 return rc;
35404- ap->print_id = atomic_inc_return(&ata_print_id);
35405+ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
35406 return 0;
35407 }
35408 EXPORT_SYMBOL_GPL(ata_sas_port_init);
35409diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
35410index 45b5ab3..98446b8 100644
35411--- a/drivers/ata/libata.h
35412+++ b/drivers/ata/libata.h
35413@@ -53,7 +53,7 @@ enum {
35414 ATA_DNXFER_QUIET = (1 << 31),
35415 };
35416
35417-extern atomic_t ata_print_id;
35418+extern atomic_unchecked_t ata_print_id;
35419 extern int atapi_passthru16;
35420 extern int libata_fua;
35421 extern int libata_noacpi;
35422diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
35423index 853f610..97d24da 100644
35424--- a/drivers/ata/pata_arasan_cf.c
35425+++ b/drivers/ata/pata_arasan_cf.c
35426@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
35427 /* Handle platform specific quirks */
35428 if (quirk) {
35429 if (quirk & CF_BROKEN_PIO) {
35430- ap->ops->set_piomode = NULL;
35431+ pax_open_kernel();
35432+ *(void **)&ap->ops->set_piomode = NULL;
35433+ pax_close_kernel();
35434 ap->pio_mask = 0;
35435 }
35436 if (quirk & CF_BROKEN_MWDMA)
35437diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
35438index f9b983a..887b9d8 100644
35439--- a/drivers/atm/adummy.c
35440+++ b/drivers/atm/adummy.c
35441@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
35442 vcc->pop(vcc, skb);
35443 else
35444 dev_kfree_skb_any(skb);
35445- atomic_inc(&vcc->stats->tx);
35446+ atomic_inc_unchecked(&vcc->stats->tx);
35447
35448 return 0;
35449 }
35450diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
35451index 62a7607..cc4be104 100644
35452--- a/drivers/atm/ambassador.c
35453+++ b/drivers/atm/ambassador.c
35454@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
35455 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
35456
35457 // VC layer stats
35458- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
35459+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
35460
35461 // free the descriptor
35462 kfree (tx_descr);
35463@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
35464 dump_skb ("<<<", vc, skb);
35465
35466 // VC layer stats
35467- atomic_inc(&atm_vcc->stats->rx);
35468+ atomic_inc_unchecked(&atm_vcc->stats->rx);
35469 __net_timestamp(skb);
35470 // end of our responsibility
35471 atm_vcc->push (atm_vcc, skb);
35472@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
35473 } else {
35474 PRINTK (KERN_INFO, "dropped over-size frame");
35475 // should we count this?
35476- atomic_inc(&atm_vcc->stats->rx_drop);
35477+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
35478 }
35479
35480 } else {
35481@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
35482 }
35483
35484 if (check_area (skb->data, skb->len)) {
35485- atomic_inc(&atm_vcc->stats->tx_err);
35486+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
35487 return -ENOMEM; // ?
35488 }
35489
35490diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
35491index 0e3f8f9..765a7a5 100644
35492--- a/drivers/atm/atmtcp.c
35493+++ b/drivers/atm/atmtcp.c
35494@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
35495 if (vcc->pop) vcc->pop(vcc,skb);
35496 else dev_kfree_skb(skb);
35497 if (dev_data) return 0;
35498- atomic_inc(&vcc->stats->tx_err);
35499+ atomic_inc_unchecked(&vcc->stats->tx_err);
35500 return -ENOLINK;
35501 }
35502 size = skb->len+sizeof(struct atmtcp_hdr);
35503@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
35504 if (!new_skb) {
35505 if (vcc->pop) vcc->pop(vcc,skb);
35506 else dev_kfree_skb(skb);
35507- atomic_inc(&vcc->stats->tx_err);
35508+ atomic_inc_unchecked(&vcc->stats->tx_err);
35509 return -ENOBUFS;
35510 }
35511 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
35512@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
35513 if (vcc->pop) vcc->pop(vcc,skb);
35514 else dev_kfree_skb(skb);
35515 out_vcc->push(out_vcc,new_skb);
35516- atomic_inc(&vcc->stats->tx);
35517- atomic_inc(&out_vcc->stats->rx);
35518+ atomic_inc_unchecked(&vcc->stats->tx);
35519+ atomic_inc_unchecked(&out_vcc->stats->rx);
35520 return 0;
35521 }
35522
35523@@ -299,7 +299,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
35524 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
35525 read_unlock(&vcc_sklist_lock);
35526 if (!out_vcc) {
35527- atomic_inc(&vcc->stats->tx_err);
35528+ atomic_inc_unchecked(&vcc->stats->tx_err);
35529 goto done;
35530 }
35531 skb_pull(skb,sizeof(struct atmtcp_hdr));
35532@@ -311,8 +311,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
35533 __net_timestamp(new_skb);
35534 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
35535 out_vcc->push(out_vcc,new_skb);
35536- atomic_inc(&vcc->stats->tx);
35537- atomic_inc(&out_vcc->stats->rx);
35538+ atomic_inc_unchecked(&vcc->stats->tx);
35539+ atomic_inc_unchecked(&out_vcc->stats->rx);
35540 done:
35541 if (vcc->pop) vcc->pop(vcc,skb);
35542 else dev_kfree_skb(skb);
35543diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
35544index b1955ba..b179940 100644
35545--- a/drivers/atm/eni.c
35546+++ b/drivers/atm/eni.c
35547@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
35548 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
35549 vcc->dev->number);
35550 length = 0;
35551- atomic_inc(&vcc->stats->rx_err);
35552+ atomic_inc_unchecked(&vcc->stats->rx_err);
35553 }
35554 else {
35555 length = ATM_CELL_SIZE-1; /* no HEC */
35556@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
35557 size);
35558 }
35559 eff = length = 0;
35560- atomic_inc(&vcc->stats->rx_err);
35561+ atomic_inc_unchecked(&vcc->stats->rx_err);
35562 }
35563 else {
35564 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
35565@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
35566 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
35567 vcc->dev->number,vcc->vci,length,size << 2,descr);
35568 length = eff = 0;
35569- atomic_inc(&vcc->stats->rx_err);
35570+ atomic_inc_unchecked(&vcc->stats->rx_err);
35571 }
35572 }
35573 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
35574@@ -767,7 +767,7 @@ rx_dequeued++;
35575 vcc->push(vcc,skb);
35576 pushed++;
35577 }
35578- atomic_inc(&vcc->stats->rx);
35579+ atomic_inc_unchecked(&vcc->stats->rx);
35580 }
35581 wake_up(&eni_dev->rx_wait);
35582 }
35583@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
35584 PCI_DMA_TODEVICE);
35585 if (vcc->pop) vcc->pop(vcc,skb);
35586 else dev_kfree_skb_irq(skb);
35587- atomic_inc(&vcc->stats->tx);
35588+ atomic_inc_unchecked(&vcc->stats->tx);
35589 wake_up(&eni_dev->tx_wait);
35590 dma_complete++;
35591 }
35592diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
35593index b41c948..a002b17 100644
35594--- a/drivers/atm/firestream.c
35595+++ b/drivers/atm/firestream.c
35596@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
35597 }
35598 }
35599
35600- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
35601+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
35602
35603 fs_dprintk (FS_DEBUG_TXMEM, "i");
35604 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
35605@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
35606 #endif
35607 skb_put (skb, qe->p1 & 0xffff);
35608 ATM_SKB(skb)->vcc = atm_vcc;
35609- atomic_inc(&atm_vcc->stats->rx);
35610+ atomic_inc_unchecked(&atm_vcc->stats->rx);
35611 __net_timestamp(skb);
35612 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
35613 atm_vcc->push (atm_vcc, skb);
35614@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
35615 kfree (pe);
35616 }
35617 if (atm_vcc)
35618- atomic_inc(&atm_vcc->stats->rx_drop);
35619+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
35620 break;
35621 case 0x1f: /* Reassembly abort: no buffers. */
35622 /* Silently increment error counter. */
35623 if (atm_vcc)
35624- atomic_inc(&atm_vcc->stats->rx_drop);
35625+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
35626 break;
35627 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
35628 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
35629diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
35630index 204814e..cede831 100644
35631--- a/drivers/atm/fore200e.c
35632+++ b/drivers/atm/fore200e.c
35633@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
35634 #endif
35635 /* check error condition */
35636 if (*entry->status & STATUS_ERROR)
35637- atomic_inc(&vcc->stats->tx_err);
35638+ atomic_inc_unchecked(&vcc->stats->tx_err);
35639 else
35640- atomic_inc(&vcc->stats->tx);
35641+ atomic_inc_unchecked(&vcc->stats->tx);
35642 }
35643 }
35644
35645@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
35646 if (skb == NULL) {
35647 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
35648
35649- atomic_inc(&vcc->stats->rx_drop);
35650+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35651 return -ENOMEM;
35652 }
35653
35654@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
35655
35656 dev_kfree_skb_any(skb);
35657
35658- atomic_inc(&vcc->stats->rx_drop);
35659+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35660 return -ENOMEM;
35661 }
35662
35663 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
35664
35665 vcc->push(vcc, skb);
35666- atomic_inc(&vcc->stats->rx);
35667+ atomic_inc_unchecked(&vcc->stats->rx);
35668
35669 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
35670
35671@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
35672 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
35673 fore200e->atm_dev->number,
35674 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
35675- atomic_inc(&vcc->stats->rx_err);
35676+ atomic_inc_unchecked(&vcc->stats->rx_err);
35677 }
35678 }
35679
35680@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
35681 goto retry_here;
35682 }
35683
35684- atomic_inc(&vcc->stats->tx_err);
35685+ atomic_inc_unchecked(&vcc->stats->tx_err);
35686
35687 fore200e->tx_sat++;
35688 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
35689diff --git a/drivers/atm/he.c b/drivers/atm/he.c
35690index 8557adc..3fb5d55 100644
35691--- a/drivers/atm/he.c
35692+++ b/drivers/atm/he.c
35693@@ -1691,7 +1691,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
35694
35695 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
35696 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
35697- atomic_inc(&vcc->stats->rx_drop);
35698+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35699 goto return_host_buffers;
35700 }
35701
35702@@ -1718,7 +1718,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
35703 RBRQ_LEN_ERR(he_dev->rbrq_head)
35704 ? "LEN_ERR" : "",
35705 vcc->vpi, vcc->vci);
35706- atomic_inc(&vcc->stats->rx_err);
35707+ atomic_inc_unchecked(&vcc->stats->rx_err);
35708 goto return_host_buffers;
35709 }
35710
35711@@ -1770,7 +1770,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
35712 vcc->push(vcc, skb);
35713 spin_lock(&he_dev->global_lock);
35714
35715- atomic_inc(&vcc->stats->rx);
35716+ atomic_inc_unchecked(&vcc->stats->rx);
35717
35718 return_host_buffers:
35719 ++pdus_assembled;
35720@@ -2096,7 +2096,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
35721 tpd->vcc->pop(tpd->vcc, tpd->skb);
35722 else
35723 dev_kfree_skb_any(tpd->skb);
35724- atomic_inc(&tpd->vcc->stats->tx_err);
35725+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
35726 }
35727 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
35728 return;
35729@@ -2508,7 +2508,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35730 vcc->pop(vcc, skb);
35731 else
35732 dev_kfree_skb_any(skb);
35733- atomic_inc(&vcc->stats->tx_err);
35734+ atomic_inc_unchecked(&vcc->stats->tx_err);
35735 return -EINVAL;
35736 }
35737
35738@@ -2519,7 +2519,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35739 vcc->pop(vcc, skb);
35740 else
35741 dev_kfree_skb_any(skb);
35742- atomic_inc(&vcc->stats->tx_err);
35743+ atomic_inc_unchecked(&vcc->stats->tx_err);
35744 return -EINVAL;
35745 }
35746 #endif
35747@@ -2531,7 +2531,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35748 vcc->pop(vcc, skb);
35749 else
35750 dev_kfree_skb_any(skb);
35751- atomic_inc(&vcc->stats->tx_err);
35752+ atomic_inc_unchecked(&vcc->stats->tx_err);
35753 spin_unlock_irqrestore(&he_dev->global_lock, flags);
35754 return -ENOMEM;
35755 }
35756@@ -2573,7 +2573,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35757 vcc->pop(vcc, skb);
35758 else
35759 dev_kfree_skb_any(skb);
35760- atomic_inc(&vcc->stats->tx_err);
35761+ atomic_inc_unchecked(&vcc->stats->tx_err);
35762 spin_unlock_irqrestore(&he_dev->global_lock, flags);
35763 return -ENOMEM;
35764 }
35765@@ -2604,7 +2604,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35766 __enqueue_tpd(he_dev, tpd, cid);
35767 spin_unlock_irqrestore(&he_dev->global_lock, flags);
35768
35769- atomic_inc(&vcc->stats->tx);
35770+ atomic_inc_unchecked(&vcc->stats->tx);
35771
35772 return 0;
35773 }
35774diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
35775index 1dc0519..1aadaf7 100644
35776--- a/drivers/atm/horizon.c
35777+++ b/drivers/atm/horizon.c
35778@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
35779 {
35780 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
35781 // VC layer stats
35782- atomic_inc(&vcc->stats->rx);
35783+ atomic_inc_unchecked(&vcc->stats->rx);
35784 __net_timestamp(skb);
35785 // end of our responsibility
35786 vcc->push (vcc, skb);
35787@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
35788 dev->tx_iovec = NULL;
35789
35790 // VC layer stats
35791- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
35792+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
35793
35794 // free the skb
35795 hrz_kfree_skb (skb);
35796diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
35797index 1bdf104..9dc44b1 100644
35798--- a/drivers/atm/idt77252.c
35799+++ b/drivers/atm/idt77252.c
35800@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
35801 else
35802 dev_kfree_skb(skb);
35803
35804- atomic_inc(&vcc->stats->tx);
35805+ atomic_inc_unchecked(&vcc->stats->tx);
35806 }
35807
35808 atomic_dec(&scq->used);
35809@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35810 if ((sb = dev_alloc_skb(64)) == NULL) {
35811 printk("%s: Can't allocate buffers for aal0.\n",
35812 card->name);
35813- atomic_add(i, &vcc->stats->rx_drop);
35814+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
35815 break;
35816 }
35817 if (!atm_charge(vcc, sb->truesize)) {
35818 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
35819 card->name);
35820- atomic_add(i - 1, &vcc->stats->rx_drop);
35821+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
35822 dev_kfree_skb(sb);
35823 break;
35824 }
35825@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35826 ATM_SKB(sb)->vcc = vcc;
35827 __net_timestamp(sb);
35828 vcc->push(vcc, sb);
35829- atomic_inc(&vcc->stats->rx);
35830+ atomic_inc_unchecked(&vcc->stats->rx);
35831
35832 cell += ATM_CELL_PAYLOAD;
35833 }
35834@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35835 "(CDC: %08x)\n",
35836 card->name, len, rpp->len, readl(SAR_REG_CDC));
35837 recycle_rx_pool_skb(card, rpp);
35838- atomic_inc(&vcc->stats->rx_err);
35839+ atomic_inc_unchecked(&vcc->stats->rx_err);
35840 return;
35841 }
35842 if (stat & SAR_RSQE_CRC) {
35843 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
35844 recycle_rx_pool_skb(card, rpp);
35845- atomic_inc(&vcc->stats->rx_err);
35846+ atomic_inc_unchecked(&vcc->stats->rx_err);
35847 return;
35848 }
35849 if (skb_queue_len(&rpp->queue) > 1) {
35850@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35851 RXPRINTK("%s: Can't alloc RX skb.\n",
35852 card->name);
35853 recycle_rx_pool_skb(card, rpp);
35854- atomic_inc(&vcc->stats->rx_err);
35855+ atomic_inc_unchecked(&vcc->stats->rx_err);
35856 return;
35857 }
35858 if (!atm_charge(vcc, skb->truesize)) {
35859@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35860 __net_timestamp(skb);
35861
35862 vcc->push(vcc, skb);
35863- atomic_inc(&vcc->stats->rx);
35864+ atomic_inc_unchecked(&vcc->stats->rx);
35865
35866 return;
35867 }
35868@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35869 __net_timestamp(skb);
35870
35871 vcc->push(vcc, skb);
35872- atomic_inc(&vcc->stats->rx);
35873+ atomic_inc_unchecked(&vcc->stats->rx);
35874
35875 if (skb->truesize > SAR_FB_SIZE_3)
35876 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
35877@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
35878 if (vcc->qos.aal != ATM_AAL0) {
35879 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
35880 card->name, vpi, vci);
35881- atomic_inc(&vcc->stats->rx_drop);
35882+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35883 goto drop;
35884 }
35885
35886 if ((sb = dev_alloc_skb(64)) == NULL) {
35887 printk("%s: Can't allocate buffers for AAL0.\n",
35888 card->name);
35889- atomic_inc(&vcc->stats->rx_err);
35890+ atomic_inc_unchecked(&vcc->stats->rx_err);
35891 goto drop;
35892 }
35893
35894@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
35895 ATM_SKB(sb)->vcc = vcc;
35896 __net_timestamp(sb);
35897 vcc->push(vcc, sb);
35898- atomic_inc(&vcc->stats->rx);
35899+ atomic_inc_unchecked(&vcc->stats->rx);
35900
35901 drop:
35902 skb_pull(queue, 64);
35903@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
35904
35905 if (vc == NULL) {
35906 printk("%s: NULL connection in send().\n", card->name);
35907- atomic_inc(&vcc->stats->tx_err);
35908+ atomic_inc_unchecked(&vcc->stats->tx_err);
35909 dev_kfree_skb(skb);
35910 return -EINVAL;
35911 }
35912 if (!test_bit(VCF_TX, &vc->flags)) {
35913 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
35914- atomic_inc(&vcc->stats->tx_err);
35915+ atomic_inc_unchecked(&vcc->stats->tx_err);
35916 dev_kfree_skb(skb);
35917 return -EINVAL;
35918 }
35919@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
35920 break;
35921 default:
35922 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
35923- atomic_inc(&vcc->stats->tx_err);
35924+ atomic_inc_unchecked(&vcc->stats->tx_err);
35925 dev_kfree_skb(skb);
35926 return -EINVAL;
35927 }
35928
35929 if (skb_shinfo(skb)->nr_frags != 0) {
35930 printk("%s: No scatter-gather yet.\n", card->name);
35931- atomic_inc(&vcc->stats->tx_err);
35932+ atomic_inc_unchecked(&vcc->stats->tx_err);
35933 dev_kfree_skb(skb);
35934 return -EINVAL;
35935 }
35936@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
35937
35938 err = queue_skb(card, vc, skb, oam);
35939 if (err) {
35940- atomic_inc(&vcc->stats->tx_err);
35941+ atomic_inc_unchecked(&vcc->stats->tx_err);
35942 dev_kfree_skb(skb);
35943 return err;
35944 }
35945@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
35946 skb = dev_alloc_skb(64);
35947 if (!skb) {
35948 printk("%s: Out of memory in send_oam().\n", card->name);
35949- atomic_inc(&vcc->stats->tx_err);
35950+ atomic_inc_unchecked(&vcc->stats->tx_err);
35951 return -ENOMEM;
35952 }
35953 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
35954diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
35955index 4217f29..88f547a 100644
35956--- a/drivers/atm/iphase.c
35957+++ b/drivers/atm/iphase.c
35958@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
35959 status = (u_short) (buf_desc_ptr->desc_mode);
35960 if (status & (RX_CER | RX_PTE | RX_OFL))
35961 {
35962- atomic_inc(&vcc->stats->rx_err);
35963+ atomic_inc_unchecked(&vcc->stats->rx_err);
35964 IF_ERR(printk("IA: bad packet, dropping it");)
35965 if (status & RX_CER) {
35966 IF_ERR(printk(" cause: packet CRC error\n");)
35967@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
35968 len = dma_addr - buf_addr;
35969 if (len > iadev->rx_buf_sz) {
35970 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
35971- atomic_inc(&vcc->stats->rx_err);
35972+ atomic_inc_unchecked(&vcc->stats->rx_err);
35973 goto out_free_desc;
35974 }
35975
35976@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
35977 ia_vcc = INPH_IA_VCC(vcc);
35978 if (ia_vcc == NULL)
35979 {
35980- atomic_inc(&vcc->stats->rx_err);
35981+ atomic_inc_unchecked(&vcc->stats->rx_err);
35982 atm_return(vcc, skb->truesize);
35983 dev_kfree_skb_any(skb);
35984 goto INCR_DLE;
35985@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
35986 if ((length > iadev->rx_buf_sz) || (length >
35987 (skb->len - sizeof(struct cpcs_trailer))))
35988 {
35989- atomic_inc(&vcc->stats->rx_err);
35990+ atomic_inc_unchecked(&vcc->stats->rx_err);
35991 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
35992 length, skb->len);)
35993 atm_return(vcc, skb->truesize);
35994@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
35995
35996 IF_RX(printk("rx_dle_intr: skb push");)
35997 vcc->push(vcc,skb);
35998- atomic_inc(&vcc->stats->rx);
35999+ atomic_inc_unchecked(&vcc->stats->rx);
36000 iadev->rx_pkt_cnt++;
36001 }
36002 INCR_DLE:
36003@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
36004 {
36005 struct k_sonet_stats *stats;
36006 stats = &PRIV(_ia_dev[board])->sonet_stats;
36007- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
36008- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
36009- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
36010- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
36011- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
36012- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
36013- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
36014- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
36015- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
36016+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
36017+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
36018+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
36019+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
36020+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
36021+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
36022+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
36023+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
36024+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
36025 }
36026 ia_cmds.status = 0;
36027 break;
36028@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
36029 if ((desc == 0) || (desc > iadev->num_tx_desc))
36030 {
36031 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
36032- atomic_inc(&vcc->stats->tx);
36033+ atomic_inc_unchecked(&vcc->stats->tx);
36034 if (vcc->pop)
36035 vcc->pop(vcc, skb);
36036 else
36037@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
36038 ATM_DESC(skb) = vcc->vci;
36039 skb_queue_tail(&iadev->tx_dma_q, skb);
36040
36041- atomic_inc(&vcc->stats->tx);
36042+ atomic_inc_unchecked(&vcc->stats->tx);
36043 iadev->tx_pkt_cnt++;
36044 /* Increment transaction counter */
36045 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
36046
36047 #if 0
36048 /* add flow control logic */
36049- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
36050+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
36051 if (iavcc->vc_desc_cnt > 10) {
36052 vcc->tx_quota = vcc->tx_quota * 3 / 4;
36053 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
36054diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
36055index fa7d701..1e404c7 100644
36056--- a/drivers/atm/lanai.c
36057+++ b/drivers/atm/lanai.c
36058@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
36059 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
36060 lanai_endtx(lanai, lvcc);
36061 lanai_free_skb(lvcc->tx.atmvcc, skb);
36062- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
36063+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
36064 }
36065
36066 /* Try to fill the buffer - don't call unless there is backlog */
36067@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
36068 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
36069 __net_timestamp(skb);
36070 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
36071- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
36072+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
36073 out:
36074 lvcc->rx.buf.ptr = end;
36075 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
36076@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
36077 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
36078 "vcc %d\n", lanai->number, (unsigned int) s, vci);
36079 lanai->stats.service_rxnotaal5++;
36080- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
36081+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
36082 return 0;
36083 }
36084 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
36085@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
36086 int bytes;
36087 read_unlock(&vcc_sklist_lock);
36088 DPRINTK("got trashed rx pdu on vci %d\n", vci);
36089- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
36090+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
36091 lvcc->stats.x.aal5.service_trash++;
36092 bytes = (SERVICE_GET_END(s) * 16) -
36093 (((unsigned long) lvcc->rx.buf.ptr) -
36094@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
36095 }
36096 if (s & SERVICE_STREAM) {
36097 read_unlock(&vcc_sklist_lock);
36098- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
36099+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
36100 lvcc->stats.x.aal5.service_stream++;
36101 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
36102 "PDU on VCI %d!\n", lanai->number, vci);
36103@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
36104 return 0;
36105 }
36106 DPRINTK("got rx crc error on vci %d\n", vci);
36107- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
36108+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
36109 lvcc->stats.x.aal5.service_rxcrc++;
36110 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
36111 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
36112diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
36113index 5aca5f4..ce3a6b0 100644
36114--- a/drivers/atm/nicstar.c
36115+++ b/drivers/atm/nicstar.c
36116@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
36117 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
36118 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
36119 card->index);
36120- atomic_inc(&vcc->stats->tx_err);
36121+ atomic_inc_unchecked(&vcc->stats->tx_err);
36122 dev_kfree_skb_any(skb);
36123 return -EINVAL;
36124 }
36125@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
36126 if (!vc->tx) {
36127 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
36128 card->index);
36129- atomic_inc(&vcc->stats->tx_err);
36130+ atomic_inc_unchecked(&vcc->stats->tx_err);
36131 dev_kfree_skb_any(skb);
36132 return -EINVAL;
36133 }
36134@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
36135 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
36136 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
36137 card->index);
36138- atomic_inc(&vcc->stats->tx_err);
36139+ atomic_inc_unchecked(&vcc->stats->tx_err);
36140 dev_kfree_skb_any(skb);
36141 return -EINVAL;
36142 }
36143
36144 if (skb_shinfo(skb)->nr_frags != 0) {
36145 printk("nicstar%d: No scatter-gather yet.\n", card->index);
36146- atomic_inc(&vcc->stats->tx_err);
36147+ atomic_inc_unchecked(&vcc->stats->tx_err);
36148 dev_kfree_skb_any(skb);
36149 return -EINVAL;
36150 }
36151@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
36152 }
36153
36154 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
36155- atomic_inc(&vcc->stats->tx_err);
36156+ atomic_inc_unchecked(&vcc->stats->tx_err);
36157 dev_kfree_skb_any(skb);
36158 return -EIO;
36159 }
36160- atomic_inc(&vcc->stats->tx);
36161+ atomic_inc_unchecked(&vcc->stats->tx);
36162
36163 return 0;
36164 }
36165@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36166 printk
36167 ("nicstar%d: Can't allocate buffers for aal0.\n",
36168 card->index);
36169- atomic_add(i, &vcc->stats->rx_drop);
36170+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
36171 break;
36172 }
36173 if (!atm_charge(vcc, sb->truesize)) {
36174 RXPRINTK
36175 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
36176 card->index);
36177- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
36178+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
36179 dev_kfree_skb_any(sb);
36180 break;
36181 }
36182@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36183 ATM_SKB(sb)->vcc = vcc;
36184 __net_timestamp(sb);
36185 vcc->push(vcc, sb);
36186- atomic_inc(&vcc->stats->rx);
36187+ atomic_inc_unchecked(&vcc->stats->rx);
36188 cell += ATM_CELL_PAYLOAD;
36189 }
36190
36191@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36192 if (iovb == NULL) {
36193 printk("nicstar%d: Out of iovec buffers.\n",
36194 card->index);
36195- atomic_inc(&vcc->stats->rx_drop);
36196+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36197 recycle_rx_buf(card, skb);
36198 return;
36199 }
36200@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36201 small or large buffer itself. */
36202 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
36203 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
36204- atomic_inc(&vcc->stats->rx_err);
36205+ atomic_inc_unchecked(&vcc->stats->rx_err);
36206 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
36207 NS_MAX_IOVECS);
36208 NS_PRV_IOVCNT(iovb) = 0;
36209@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36210 ("nicstar%d: Expected a small buffer, and this is not one.\n",
36211 card->index);
36212 which_list(card, skb);
36213- atomic_inc(&vcc->stats->rx_err);
36214+ atomic_inc_unchecked(&vcc->stats->rx_err);
36215 recycle_rx_buf(card, skb);
36216 vc->rx_iov = NULL;
36217 recycle_iov_buf(card, iovb);
36218@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36219 ("nicstar%d: Expected a large buffer, and this is not one.\n",
36220 card->index);
36221 which_list(card, skb);
36222- atomic_inc(&vcc->stats->rx_err);
36223+ atomic_inc_unchecked(&vcc->stats->rx_err);
36224 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
36225 NS_PRV_IOVCNT(iovb));
36226 vc->rx_iov = NULL;
36227@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36228 printk(" - PDU size mismatch.\n");
36229 else
36230 printk(".\n");
36231- atomic_inc(&vcc->stats->rx_err);
36232+ atomic_inc_unchecked(&vcc->stats->rx_err);
36233 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
36234 NS_PRV_IOVCNT(iovb));
36235 vc->rx_iov = NULL;
36236@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36237 /* skb points to a small buffer */
36238 if (!atm_charge(vcc, skb->truesize)) {
36239 push_rxbufs(card, skb);
36240- atomic_inc(&vcc->stats->rx_drop);
36241+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36242 } else {
36243 skb_put(skb, len);
36244 dequeue_sm_buf(card, skb);
36245@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36246 ATM_SKB(skb)->vcc = vcc;
36247 __net_timestamp(skb);
36248 vcc->push(vcc, skb);
36249- atomic_inc(&vcc->stats->rx);
36250+ atomic_inc_unchecked(&vcc->stats->rx);
36251 }
36252 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
36253 struct sk_buff *sb;
36254@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36255 if (len <= NS_SMBUFSIZE) {
36256 if (!atm_charge(vcc, sb->truesize)) {
36257 push_rxbufs(card, sb);
36258- atomic_inc(&vcc->stats->rx_drop);
36259+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36260 } else {
36261 skb_put(sb, len);
36262 dequeue_sm_buf(card, sb);
36263@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36264 ATM_SKB(sb)->vcc = vcc;
36265 __net_timestamp(sb);
36266 vcc->push(vcc, sb);
36267- atomic_inc(&vcc->stats->rx);
36268+ atomic_inc_unchecked(&vcc->stats->rx);
36269 }
36270
36271 push_rxbufs(card, skb);
36272@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36273
36274 if (!atm_charge(vcc, skb->truesize)) {
36275 push_rxbufs(card, skb);
36276- atomic_inc(&vcc->stats->rx_drop);
36277+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36278 } else {
36279 dequeue_lg_buf(card, skb);
36280 #ifdef NS_USE_DESTRUCTORS
36281@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36282 ATM_SKB(skb)->vcc = vcc;
36283 __net_timestamp(skb);
36284 vcc->push(vcc, skb);
36285- atomic_inc(&vcc->stats->rx);
36286+ atomic_inc_unchecked(&vcc->stats->rx);
36287 }
36288
36289 push_rxbufs(card, sb);
36290@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36291 printk
36292 ("nicstar%d: Out of huge buffers.\n",
36293 card->index);
36294- atomic_inc(&vcc->stats->rx_drop);
36295+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36296 recycle_iovec_rx_bufs(card,
36297 (struct iovec *)
36298 iovb->data,
36299@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36300 card->hbpool.count++;
36301 } else
36302 dev_kfree_skb_any(hb);
36303- atomic_inc(&vcc->stats->rx_drop);
36304+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36305 } else {
36306 /* Copy the small buffer to the huge buffer */
36307 sb = (struct sk_buff *)iov->iov_base;
36308@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36309 #endif /* NS_USE_DESTRUCTORS */
36310 __net_timestamp(hb);
36311 vcc->push(vcc, hb);
36312- atomic_inc(&vcc->stats->rx);
36313+ atomic_inc_unchecked(&vcc->stats->rx);
36314 }
36315 }
36316
36317diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
36318index 32784d1..4a8434a 100644
36319--- a/drivers/atm/solos-pci.c
36320+++ b/drivers/atm/solos-pci.c
36321@@ -838,7 +838,7 @@ void solos_bh(unsigned long card_arg)
36322 }
36323 atm_charge(vcc, skb->truesize);
36324 vcc->push(vcc, skb);
36325- atomic_inc(&vcc->stats->rx);
36326+ atomic_inc_unchecked(&vcc->stats->rx);
36327 break;
36328
36329 case PKT_STATUS:
36330@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
36331 vcc = SKB_CB(oldskb)->vcc;
36332
36333 if (vcc) {
36334- atomic_inc(&vcc->stats->tx);
36335+ atomic_inc_unchecked(&vcc->stats->tx);
36336 solos_pop(vcc, oldskb);
36337 } else {
36338 dev_kfree_skb_irq(oldskb);
36339diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
36340index 0215934..ce9f5b1 100644
36341--- a/drivers/atm/suni.c
36342+++ b/drivers/atm/suni.c
36343@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
36344
36345
36346 #define ADD_LIMITED(s,v) \
36347- atomic_add((v),&stats->s); \
36348- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
36349+ atomic_add_unchecked((v),&stats->s); \
36350+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
36351
36352
36353 static void suni_hz(unsigned long from_timer)
36354diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
36355index 5120a96..e2572bd 100644
36356--- a/drivers/atm/uPD98402.c
36357+++ b/drivers/atm/uPD98402.c
36358@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
36359 struct sonet_stats tmp;
36360 int error = 0;
36361
36362- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
36363+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
36364 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
36365 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
36366 if (zero && !error) {
36367@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
36368
36369
36370 #define ADD_LIMITED(s,v) \
36371- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
36372- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
36373- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
36374+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
36375+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
36376+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
36377
36378
36379 static void stat_event(struct atm_dev *dev)
36380@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
36381 if (reason & uPD98402_INT_PFM) stat_event(dev);
36382 if (reason & uPD98402_INT_PCO) {
36383 (void) GET(PCOCR); /* clear interrupt cause */
36384- atomic_add(GET(HECCT),
36385+ atomic_add_unchecked(GET(HECCT),
36386 &PRIV(dev)->sonet_stats.uncorr_hcs);
36387 }
36388 if ((reason & uPD98402_INT_RFO) &&
36389@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
36390 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
36391 uPD98402_INT_LOS),PIMR); /* enable them */
36392 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
36393- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
36394- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
36395- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
36396+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
36397+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
36398+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
36399 return 0;
36400 }
36401
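Both ADD_LIMITED macros just converted (suni.c and uPD98402.c) build the same saturating counter on top of the unchecked ops: add the hardware delta, and if the signed value has wrapped negative, pin it at INT_MAX. Restated as a function for readability (sonet_add_limited is a hypothetical name, not in the patch):

    #include <linux/atomic.h>
    #include <linux/kernel.h>       /* INT_MAX */

    static inline void sonet_add_limited(atomic_unchecked_t *s, int v)
    {
            atomic_add_unchecked(v, s);
            if (atomic_read_unchecked(s) < 0)       /* wrapped past INT_MAX */
                    atomic_set_unchecked(s, INT_MAX);
    }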
36402diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
36403index 969c3c2..9b72956 100644
36404--- a/drivers/atm/zatm.c
36405+++ b/drivers/atm/zatm.c
36406@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
36407 }
36408 if (!size) {
36409 dev_kfree_skb_irq(skb);
36410- if (vcc) atomic_inc(&vcc->stats->rx_err);
36411+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
36412 continue;
36413 }
36414 if (!atm_charge(vcc,skb->truesize)) {
36415@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
36416 skb->len = size;
36417 ATM_SKB(skb)->vcc = vcc;
36418 vcc->push(vcc,skb);
36419- atomic_inc(&vcc->stats->rx);
36420+ atomic_inc_unchecked(&vcc->stats->rx);
36421 }
36422 zout(pos & 0xffff,MTA(mbx));
36423 #if 0 /* probably a stupid idea */
36424@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
36425 skb_queue_head(&zatm_vcc->backlog,skb);
36426 break;
36427 }
36428- atomic_inc(&vcc->stats->tx);
36429+ atomic_inc_unchecked(&vcc->stats->tx);
36430 wake_up(&zatm_vcc->tx_wait);
36431 }
36432
36433diff --git a/drivers/base/bus.c b/drivers/base/bus.c
36434index 4c289ab..de1c333 100644
36435--- a/drivers/base/bus.c
36436+++ b/drivers/base/bus.c
36437@@ -1193,7 +1193,7 @@ int subsys_interface_register(struct subsys_interface *sif)
36438 return -EINVAL;
36439
36440 mutex_lock(&subsys->p->mutex);
36441- list_add_tail(&sif->node, &subsys->p->interfaces);
36442+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
36443 if (sif->add_dev) {
36444 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
36445 while ((dev = subsys_dev_iter_next(&iter)))
36446@@ -1218,7 +1218,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
36447 subsys = sif->subsys;
36448
36449 mutex_lock(&subsys->p->mutex);
36450- list_del_init(&sif->node);
36451+ pax_list_del_init((struct list_head *)&sif->node);
36452 if (sif->remove_dev) {
36453 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
36454 while ((dev = subsys_dev_iter_next(&iter)))
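The pax_list_* calls above exist because, once the constify plugin is active, structures such as subsys_interface can live in read-only memory, and splicing their embedded list_head into a list is then a store into rodata. A sketch of what the helpers amount to, assuming the usual PaX open/close-kernel bracket (the real helpers also sanity-check the list pointers):

    void pax_list_add_tail(struct list_head *new, struct list_head *head)
    {
            pax_open_kernel();      /* briefly permit writes to read-only data */
            list_add_tail(new, head);
            pax_close_kernel();     /* restore write protection */
    }

    void pax_list_del_init(struct list_head *entry)
    {
            pax_open_kernel();
            list_del_init(entry);
            pax_close_kernel();
    }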
36455diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
36456index 7413d06..79155fa 100644
36457--- a/drivers/base/devtmpfs.c
36458+++ b/drivers/base/devtmpfs.c
36459@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
36460 if (!thread)
36461 return 0;
36462
36463- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
36464+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
36465 if (err)
36466 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
36467 else
36468@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
36469 *err = sys_unshare(CLONE_NEWNS);
36470 if (*err)
36471 goto out;
36472- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
36473+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
36474 if (*err)
36475 goto out;
36476- sys_chdir("/.."); /* will traverse into overmounted root */
36477- sys_chroot(".");
36478+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
36479+ sys_chroot((char __force_user *)".");
36480 complete(&setup_done);
36481 while (1) {
36482 spin_lock(&req_lock);
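devtmpfs runs as a kernel thread and passes kernel strings straight into syscall entry points whose parameters are declared __user. The __force_user casts affect static checking only (sparse and the PaX checker plugin see a deliberate address-space crossing); no code is generated. In effect:

    /* sketch: __force and __user themselves expand to sparse
     * attributes only under __CHECKER__, so the compiler sees nothing */
    #define __force_user    __force __user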
36483diff --git a/drivers/base/node.c b/drivers/base/node.c
36484index bc9f43b..29703b8 100644
36485--- a/drivers/base/node.c
36486+++ b/drivers/base/node.c
36487@@ -620,7 +620,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
36488 struct node_attr {
36489 struct device_attribute attr;
36490 enum node_states state;
36491-};
36492+} __do_const;
36493
36494 static ssize_t show_node_state(struct device *dev,
36495 struct device_attribute *attr, char *buf)
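__do_const is an annotation consumed by the constify gcc plugin: it forces a structure type into read-only memory even when, as with struct node_attr here, the type mixes plain data with the function-pointer-bearing member. Presumably wired up along these lines:

    #ifdef CONSTIFY_PLUGIN
    #define __do_const      __attribute__((do_const))
    #else
    #define __do_const
    #endif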
36496diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
36497index bfb8955..42c9b9a 100644
36498--- a/drivers/base/power/domain.c
36499+++ b/drivers/base/power/domain.c
36500@@ -1850,7 +1850,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
36501 {
36502 struct cpuidle_driver *cpuidle_drv;
36503 struct gpd_cpu_data *cpu_data;
36504- struct cpuidle_state *idle_state;
36505+ cpuidle_state_no_const *idle_state;
36506 int ret = 0;
36507
36508 if (IS_ERR_OR_NULL(genpd) || state < 0)
36509@@ -1918,7 +1918,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
36510 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
36511 {
36512 struct gpd_cpu_data *cpu_data;
36513- struct cpuidle_state *idle_state;
36514+ cpuidle_state_no_const *idle_state;
36515 int ret = 0;
36516
36517 if (IS_ERR_OR_NULL(genpd))
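cpuidle_state_no_const is the matching escape hatch: a typedef of struct cpuidle_state that the constify plugin leaves writable, for the few spots (like these locals, which are assigned through) where the constified type would reject legitimate stores. Presumably declared next to the struct as:

    /* sketch; __no_const is the plugin attribute opting a type back out */
    typedef struct cpuidle_state __no_const cpuidle_state_no_const;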
36518diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
36519index 03e089a..0e9560c 100644
36520--- a/drivers/base/power/sysfs.c
36521+++ b/drivers/base/power/sysfs.c
36522@@ -185,7 +185,7 @@ static ssize_t rtpm_status_show(struct device *dev,
36523 return -EIO;
36524 }
36525 }
36526- return sprintf(buf, p);
36527+ return sprintf(buf, "%s", p);
36528 }
36529
36530 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
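This sysfs hunk (and the matching ones in intel-rng.c and cdrom.c further down) is plain format-string hardening, independent of PaX: passing a variable as the format argument lets any '%' inside it be parsed as a conversion specifier. The safe idiom costs nothing (show_status is a hypothetical wrapper, for illustration):

    static ssize_t show_status(char *buf, const char *p)
    {
            /* any '%' in p is now data, never a format directive */
            return sprintf(buf, "%s", p);
    }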
36531diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
36532index 2d56f41..8830f19 100644
36533--- a/drivers/base/power/wakeup.c
36534+++ b/drivers/base/power/wakeup.c
36535@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
36536 * They need to be modified together atomically, so it's better to use one
36537 * atomic variable to hold them both.
36538 */
36539-static atomic_t combined_event_count = ATOMIC_INIT(0);
36540+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
36541
36542 #define IN_PROGRESS_BITS (sizeof(int) * 4)
36543 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
36544
36545 static void split_counters(unsigned int *cnt, unsigned int *inpr)
36546 {
36547- unsigned int comb = atomic_read(&combined_event_count);
36548+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
36549
36550 *cnt = (comb >> IN_PROGRESS_BITS);
36551 *inpr = comb & MAX_IN_PROGRESS;
36552@@ -395,7 +395,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
36553 ws->start_prevent_time = ws->last_time;
36554
36555 /* Increment the counter of events in progress. */
36556- cec = atomic_inc_return(&combined_event_count);
36557+ cec = atomic_inc_return_unchecked(&combined_event_count);
36558
36559 trace_wakeup_source_activate(ws->name, cec);
36560 }
36561@@ -521,7 +521,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
36562 * Increment the counter of registered wakeup events and decrement the
36563 * couter of wakeup events in progress simultaneously.
36564 */
36565- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
36566+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
36567 trace_wakeup_source_deactivate(ws->name, cec);
36568
36569 split_counters(&cnt, &inpr);
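combined_event_count packs two counters into one word, the low IN_PROGRESS_BITS holding events in flight and the high half the running event total, so both can change in one atomic operation; the total is a statistic that may wrap, hence atomic_unchecked_t. The deactivate path exploits the packing (report_deactivation is a hypothetical name):

    static unsigned int report_deactivation(atomic_unchecked_t *combined)
    {
            /* MAX_IN_PROGRESS == (1 << IN_PROGRESS_BITS) - 1, so a single
             * atomic add bumps the high (total-events) half by one while
             * dropping the in-progress half by one, with no window in
             * which the two fields disagree */
            return atomic_add_return_unchecked(MAX_IN_PROGRESS, combined);
    }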
36570diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
36571index e8d11b6..7b1b36f 100644
36572--- a/drivers/base/syscore.c
36573+++ b/drivers/base/syscore.c
36574@@ -21,7 +21,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
36575 void register_syscore_ops(struct syscore_ops *ops)
36576 {
36577 mutex_lock(&syscore_ops_lock);
36578- list_add_tail(&ops->node, &syscore_ops_list);
36579+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
36580 mutex_unlock(&syscore_ops_lock);
36581 }
36582 EXPORT_SYMBOL_GPL(register_syscore_ops);
36583@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
36584 void unregister_syscore_ops(struct syscore_ops *ops)
36585 {
36586 mutex_lock(&syscore_ops_lock);
36587- list_del(&ops->node);
36588+ pax_list_del((struct list_head *)&ops->node);
36589 mutex_unlock(&syscore_ops_lock);
36590 }
36591 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
36592diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
36593index edfa251..1734d4d 100644
36594--- a/drivers/block/cciss.c
36595+++ b/drivers/block/cciss.c
36596@@ -3011,7 +3011,7 @@ static void start_io(ctlr_info_t *h)
36597 while (!list_empty(&h->reqQ)) {
36598 c = list_entry(h->reqQ.next, CommandList_struct, list);
36599 /* can't do anything if fifo is full */
36600- if ((h->access.fifo_full(h))) {
36601+ if ((h->access->fifo_full(h))) {
36602 dev_warn(&h->pdev->dev, "fifo full\n");
36603 break;
36604 }
36605@@ -3021,7 +3021,7 @@ static void start_io(ctlr_info_t *h)
36606 h->Qdepth--;
36607
36608 /* Tell the controller execute command */
36609- h->access.submit_command(h, c);
36610+ h->access->submit_command(h, c);
36611
36612 /* Put job onto the completed Q */
36613 addQ(&h->cmpQ, c);
36614@@ -3447,17 +3447,17 @@ startio:
36615
36616 static inline unsigned long get_next_completion(ctlr_info_t *h)
36617 {
36618- return h->access.command_completed(h);
36619+ return h->access->command_completed(h);
36620 }
36621
36622 static inline int interrupt_pending(ctlr_info_t *h)
36623 {
36624- return h->access.intr_pending(h);
36625+ return h->access->intr_pending(h);
36626 }
36627
36628 static inline long interrupt_not_for_us(ctlr_info_t *h)
36629 {
36630- return ((h->access.intr_pending(h) == 0) ||
36631+ return ((h->access->intr_pending(h) == 0) ||
36632 (h->interrupts_enabled == 0));
36633 }
36634
36635@@ -3490,7 +3490,7 @@ static inline u32 next_command(ctlr_info_t *h)
36636 u32 a;
36637
36638 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
36639- return h->access.command_completed(h);
36640+ return h->access->command_completed(h);
36641
36642 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
36643 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
36644@@ -4047,7 +4047,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
36645 trans_support & CFGTBL_Trans_use_short_tags);
36646
36647 /* Change the access methods to the performant access methods */
36648- h->access = SA5_performant_access;
36649+ h->access = &SA5_performant_access;
36650 h->transMethod = CFGTBL_Trans_Performant;
36651
36652 return;
36653@@ -4327,7 +4327,7 @@ static int cciss_pci_init(ctlr_info_t *h)
36654 if (prod_index < 0)
36655 return -ENODEV;
36656 h->product_name = products[prod_index].product_name;
36657- h->access = *(products[prod_index].access);
36658+ h->access = products[prod_index].access;
36659
36660 if (cciss_board_disabled(h)) {
36661 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
36662@@ -5059,7 +5059,7 @@ reinit_after_soft_reset:
36663 }
36664
36665 /* make sure the board interrupts are off */
36666- h->access.set_intr_mask(h, CCISS_INTR_OFF);
36667+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
36668 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
36669 if (rc)
36670 goto clean2;
36671@@ -5109,7 +5109,7 @@ reinit_after_soft_reset:
36672 * fake ones to scoop up any residual completions.
36673 */
36674 spin_lock_irqsave(&h->lock, flags);
36675- h->access.set_intr_mask(h, CCISS_INTR_OFF);
36676+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
36677 spin_unlock_irqrestore(&h->lock, flags);
36678 free_irq(h->intr[h->intr_mode], h);
36679 rc = cciss_request_irq(h, cciss_msix_discard_completions,
36680@@ -5129,9 +5129,9 @@ reinit_after_soft_reset:
36681 dev_info(&h->pdev->dev, "Board READY.\n");
36682 dev_info(&h->pdev->dev,
36683 "Waiting for stale completions to drain.\n");
36684- h->access.set_intr_mask(h, CCISS_INTR_ON);
36685+ h->access->set_intr_mask(h, CCISS_INTR_ON);
36686 msleep(10000);
36687- h->access.set_intr_mask(h, CCISS_INTR_OFF);
36688+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
36689
36690 rc = controller_reset_failed(h->cfgtable);
36691 if (rc)
36692@@ -5154,7 +5154,7 @@ reinit_after_soft_reset:
36693 cciss_scsi_setup(h);
36694
36695 /* Turn the interrupts on so we can service requests */
36696- h->access.set_intr_mask(h, CCISS_INTR_ON);
36697+ h->access->set_intr_mask(h, CCISS_INTR_ON);
36698
36699 /* Get the firmware version */
36700 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
36701@@ -5226,7 +5226,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
36702 kfree(flush_buf);
36703 if (return_code != IO_OK)
36704 dev_warn(&h->pdev->dev, "Error flushing cache\n");
36705- h->access.set_intr_mask(h, CCISS_INTR_OFF);
36706+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
36707 free_irq(h->intr[h->intr_mode], h);
36708 }
36709
36710diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
36711index 7fda30e..eb5dfe0 100644
36712--- a/drivers/block/cciss.h
36713+++ b/drivers/block/cciss.h
36714@@ -101,7 +101,7 @@ struct ctlr_info
36715 /* information about each logical volume */
36716 drive_info_struct *drv[CISS_MAX_LUN];
36717
36718- struct access_method access;
36719+ struct access_method *access;
36720
36721 /* queue and queue Info */
36722 struct list_head reqQ;
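The cciss hunks above and the cpqarray hunks below are one refactor seen from many call sites: struct access_method holds only function pointers, so once static tables like SA5_performant_access are constified into rodata, each controller keeps a pointer to the shared table instead of a writable per-controller copy, and h->access.fn(h) becomes h->access->fn(h). The shape of the change as a minimal sketch:

    struct ctlr;

    struct ops {
            unsigned long (*fifo_full)(struct ctlr *h);
    };

    static unsigned long sa5_fifo_full(struct ctlr *h)
    {
            return 0;               /* stub: report "FIFO not full" */
    }

    static const struct ops sa5_ops = {     /* rodata once constified */
            .fifo_full = sa5_fifo_full,
    };

    struct ctlr {
            const struct ops *access;       /* was: struct ops access; */
    };

    static void probe_one(struct ctlr *h)
    {
            h->access = &sa5_ops;   /* was: h->access = *(products[i].access); */
    }

    static void start_io(struct ctlr *h)
    {
            if (h->access->fifo_full(h))    /* was: h->access.fifo_full(h) */
                    return;
            /* ...submit commands... */
    }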
36723diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
36724index 2b94403..fd6ad1f 100644
36725--- a/drivers/block/cpqarray.c
36726+++ b/drivers/block/cpqarray.c
36727@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
36728 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
36729 goto Enomem4;
36730 }
36731- hba[i]->access.set_intr_mask(hba[i], 0);
36732+ hba[i]->access->set_intr_mask(hba[i], 0);
36733 if (request_irq(hba[i]->intr, do_ida_intr,
36734 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
36735 {
36736@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
36737 add_timer(&hba[i]->timer);
36738
36739 /* Enable IRQ now that spinlock and rate limit timer are set up */
36740- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
36741+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
36742
36743 for(j=0; j<NWD; j++) {
36744 struct gendisk *disk = ida_gendisk[i][j];
36745@@ -694,7 +694,7 @@ DBGINFO(
36746 for(i=0; i<NR_PRODUCTS; i++) {
36747 if (board_id == products[i].board_id) {
36748 c->product_name = products[i].product_name;
36749- c->access = *(products[i].access);
36750+ c->access = products[i].access;
36751 break;
36752 }
36753 }
36754@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
36755 hba[ctlr]->intr = intr;
36756 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
36757 hba[ctlr]->product_name = products[j].product_name;
36758- hba[ctlr]->access = *(products[j].access);
36759+ hba[ctlr]->access = products[j].access;
36760 hba[ctlr]->ctlr = ctlr;
36761 hba[ctlr]->board_id = board_id;
36762 hba[ctlr]->pci_dev = NULL; /* not PCI */
36763@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
36764
36765 while((c = h->reqQ) != NULL) {
36766 /* Can't do anything if we're busy */
36767- if (h->access.fifo_full(h) == 0)
36768+ if (h->access->fifo_full(h) == 0)
36769 return;
36770
36771 /* Get the first entry from the request Q */
36772@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
36773 h->Qdepth--;
36774
36775 /* Tell the controller to do our bidding */
36776- h->access.submit_command(h, c);
36777+ h->access->submit_command(h, c);
36778
36779 /* Get onto the completion Q */
36780 addQ(&h->cmpQ, c);
36781@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
36782 unsigned long flags;
36783 __u32 a,a1;
36784
36785- istat = h->access.intr_pending(h);
36786+ istat = h->access->intr_pending(h);
36787 /* Is this interrupt for us? */
36788 if (istat == 0)
36789 return IRQ_NONE;
36790@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
36791 */
36792 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
36793 if (istat & FIFO_NOT_EMPTY) {
36794- while((a = h->access.command_completed(h))) {
36795+ while((a = h->access->command_completed(h))) {
36796 a1 = a; a &= ~3;
36797 if ((c = h->cmpQ) == NULL)
36798 {
36799@@ -1448,11 +1448,11 @@ static int sendcmd(
36800 /*
36801 * Disable interrupt
36802 */
36803- info_p->access.set_intr_mask(info_p, 0);
36804+ info_p->access->set_intr_mask(info_p, 0);
36805 /* Make sure there is room in the command FIFO */
36806 /* Actually it should be completely empty at this time. */
36807 for (i = 200000; i > 0; i--) {
36808- temp = info_p->access.fifo_full(info_p);
36809+ temp = info_p->access->fifo_full(info_p);
36810 if (temp != 0) {
36811 break;
36812 }
36813@@ -1465,7 +1465,7 @@ DBG(
36814 /*
36815 * Send the cmd
36816 */
36817- info_p->access.submit_command(info_p, c);
36818+ info_p->access->submit_command(info_p, c);
36819 complete = pollcomplete(ctlr);
36820
36821 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
36822@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
36823 * we check the new geometry. Then turn interrupts back on when
36824 * we're done.
36825 */
36826- host->access.set_intr_mask(host, 0);
36827+ host->access->set_intr_mask(host, 0);
36828 getgeometry(ctlr);
36829- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
36830+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
36831
36832 for(i=0; i<NWD; i++) {
36833 struct gendisk *disk = ida_gendisk[ctlr][i];
36834@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
36835 /* Wait (up to 2 seconds) for a command to complete */
36836
36837 for (i = 200000; i > 0; i--) {
36838- done = hba[ctlr]->access.command_completed(hba[ctlr]);
36839+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
36840 if (done == 0) {
36841 udelay(10); /* a short fixed delay */
36842 } else
36843diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
36844index be73e9d..7fbf140 100644
36845--- a/drivers/block/cpqarray.h
36846+++ b/drivers/block/cpqarray.h
36847@@ -99,7 +99,7 @@ struct ctlr_info {
36848 drv_info_t drv[NWD];
36849 struct proc_dir_entry *proc;
36850
36851- struct access_method access;
36852+ struct access_method *access;
36853
36854 cmdlist_t *reqQ;
36855 cmdlist_t *cmpQ;
36856diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
36857index 2d7f608..11245fe 100644
36858--- a/drivers/block/drbd/drbd_int.h
36859+++ b/drivers/block/drbd/drbd_int.h
36860@@ -582,7 +582,7 @@ struct drbd_epoch {
36861 struct drbd_tconn *tconn;
36862 struct list_head list;
36863 unsigned int barrier_nr;
36864- atomic_t epoch_size; /* increased on every request added. */
36865+ atomic_unchecked_t epoch_size; /* increased on every request added. */
36866 atomic_t active; /* increased on every req. added, and dec on every finished. */
36867 unsigned long flags;
36868 };
36869@@ -1022,7 +1022,7 @@ struct drbd_conf {
36870 unsigned int al_tr_number;
36871 int al_tr_cycle;
36872 wait_queue_head_t seq_wait;
36873- atomic_t packet_seq;
36874+ atomic_unchecked_t packet_seq;
36875 unsigned int peer_seq;
36876 spinlock_t peer_seq_lock;
36877 unsigned int minor;
36878@@ -1572,7 +1572,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
36879 char __user *uoptval;
36880 int err;
36881
36882- uoptval = (char __user __force *)optval;
36883+ uoptval = (char __force_user *)optval;
36884
36885 set_fs(KERNEL_DS);
36886 if (level == SOL_SOCKET)
36887diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
36888index 55635ed..40e837c 100644
36889--- a/drivers/block/drbd/drbd_main.c
36890+++ b/drivers/block/drbd/drbd_main.c
36891@@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
36892 p->sector = sector;
36893 p->block_id = block_id;
36894 p->blksize = blksize;
36895- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
36896+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
36897 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
36898 }
36899
36900@@ -1619,7 +1619,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
36901 return -EIO;
36902 p->sector = cpu_to_be64(req->i.sector);
36903 p->block_id = (unsigned long)req;
36904- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
36905+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
36906 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
36907 if (mdev->state.conn >= C_SYNC_SOURCE &&
36908 mdev->state.conn <= C_PAUSED_SYNC_T)
36909@@ -2574,8 +2574,8 @@ void conn_destroy(struct kref *kref)
36910 {
36911 struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
36912
36913- if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
36914- conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
36915+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size) != 0)
36916+ conn_err(tconn, "epoch_size:%d\n", atomic_read_unchecked(&tconn->current_epoch->epoch_size));
36917 kfree(tconn->current_epoch);
36918
36919 idr_destroy(&tconn->volumes);
36920diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
36921index 8cc1e64..ba7ffa9 100644
36922--- a/drivers/block/drbd/drbd_nl.c
36923+++ b/drivers/block/drbd/drbd_nl.c
36924@@ -3440,7 +3440,7 @@ out:
36925
36926 void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
36927 {
36928- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
36929+ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
36930 struct sk_buff *msg;
36931 struct drbd_genlmsghdr *d_out;
36932 unsigned seq;
36933@@ -3453,7 +3453,7 @@ void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
36934 return;
36935 }
36936
36937- seq = atomic_inc_return(&drbd_genl_seq);
36938+ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
36939 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
36940 if (!msg)
36941 goto failed;
36942diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
36943index cc29cd3..d4b058b 100644
36944--- a/drivers/block/drbd/drbd_receiver.c
36945+++ b/drivers/block/drbd/drbd_receiver.c
36946@@ -834,7 +834,7 @@ int drbd_connected(struct drbd_conf *mdev)
36947 {
36948 int err;
36949
36950- atomic_set(&mdev->packet_seq, 0);
36951+ atomic_set_unchecked(&mdev->packet_seq, 0);
36952 mdev->peer_seq = 0;
36953
36954 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
36955@@ -1193,7 +1193,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
36956 do {
36957 next_epoch = NULL;
36958
36959- epoch_size = atomic_read(&epoch->epoch_size);
36960+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
36961
36962 switch (ev & ~EV_CLEANUP) {
36963 case EV_PUT:
36964@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
36965 rv = FE_DESTROYED;
36966 } else {
36967 epoch->flags = 0;
36968- atomic_set(&epoch->epoch_size, 0);
36969+ atomic_set_unchecked(&epoch->epoch_size, 0);
36970 /* atomic_set(&epoch->active, 0); is already zero */
36971 if (rv == FE_STILL_LIVE)
36972 rv = FE_RECYCLED;
36973@@ -1451,7 +1451,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
36974 conn_wait_active_ee_empty(tconn);
36975 drbd_flush(tconn);
36976
36977- if (atomic_read(&tconn->current_epoch->epoch_size)) {
36978+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
36979 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
36980 if (epoch)
36981 break;
36982@@ -1464,11 +1464,11 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
36983 }
36984
36985 epoch->flags = 0;
36986- atomic_set(&epoch->epoch_size, 0);
36987+ atomic_set_unchecked(&epoch->epoch_size, 0);
36988 atomic_set(&epoch->active, 0);
36989
36990 spin_lock(&tconn->epoch_lock);
36991- if (atomic_read(&tconn->current_epoch->epoch_size)) {
36992+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
36993 list_add(&epoch->list, &tconn->current_epoch->list);
36994 tconn->current_epoch = epoch;
36995 tconn->epochs++;
36996@@ -2172,7 +2172,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
36997
36998 err = wait_for_and_update_peer_seq(mdev, peer_seq);
36999 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
37000- atomic_inc(&tconn->current_epoch->epoch_size);
37001+ atomic_inc_unchecked(&tconn->current_epoch->epoch_size);
37002 err2 = drbd_drain_block(mdev, pi->size);
37003 if (!err)
37004 err = err2;
37005@@ -2206,7 +2206,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
37006
37007 spin_lock(&tconn->epoch_lock);
37008 peer_req->epoch = tconn->current_epoch;
37009- atomic_inc(&peer_req->epoch->epoch_size);
37010+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
37011 atomic_inc(&peer_req->epoch->active);
37012 spin_unlock(&tconn->epoch_lock);
37013
37014@@ -4347,7 +4347,7 @@ struct data_cmd {
37015 int expect_payload;
37016 size_t pkt_size;
37017 int (*fn)(struct drbd_tconn *, struct packet_info *);
37018-};
37019+} __do_const;
37020
37021 static struct data_cmd drbd_cmd_handler[] = {
37022 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
37023@@ -4467,7 +4467,7 @@ static void conn_disconnect(struct drbd_tconn *tconn)
37024 if (!list_empty(&tconn->current_epoch->list))
37025 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
37026 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
37027- atomic_set(&tconn->current_epoch->epoch_size, 0);
37028+ atomic_set_unchecked(&tconn->current_epoch->epoch_size, 0);
37029 tconn->send.seen_any_write_yet = false;
37030
37031 conn_info(tconn, "Connection closed\n");
37032@@ -5223,7 +5223,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
37033 struct asender_cmd {
37034 size_t pkt_size;
37035 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
37036-};
37037+} __do_const;
37038
37039 static struct asender_cmd asender_tbl[] = {
37040 [P_PING] = { 0, got_Ping },
37041diff --git a/drivers/block/loop.c b/drivers/block/loop.c
37042index c8dac73..1800093 100644
37043--- a/drivers/block/loop.c
37044+++ b/drivers/block/loop.c
37045@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
37046
37047 file_start_write(file);
37048 set_fs(get_ds());
37049- bw = file->f_op->write(file, buf, len, &pos);
37050+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
37051 set_fs(old_fs);
37052 file_end_write(file);
37053 if (likely(bw == len))
37054diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
37055index 5618847..5a46f3b 100644
37056--- a/drivers/block/pktcdvd.c
37057+++ b/drivers/block/pktcdvd.c
37058@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
37059
37060 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
37061 {
37062- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
37063+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
37064 }
37065
37066 /*
37067@@ -1897,7 +1897,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
37068 return -EROFS;
37069 }
37070 pd->settings.fp = ti.fp;
37071- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
37072+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
37073
37074 if (ti.nwa_v) {
37075 pd->nwa = be32_to_cpu(ti.next_writable);
37076diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
37077index 60abf59..80789e1 100644
37078--- a/drivers/bluetooth/btwilink.c
37079+++ b/drivers/bluetooth/btwilink.c
37080@@ -293,7 +293,7 @@ static int ti_st_send_frame(struct sk_buff *skb)
37081
37082 static int bt_ti_probe(struct platform_device *pdev)
37083 {
37084- static struct ti_st *hst;
37085+ struct ti_st *hst;
37086 struct hci_dev *hdev;
37087 int err;
37088
37089diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
37090index 2009266..7be9ca2 100644
37091--- a/drivers/bus/arm-cci.c
37092+++ b/drivers/bus/arm-cci.c
37093@@ -405,7 +405,7 @@ static int __init cci_probe(void)
37094
37095 nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite;
37096
37097- ports = kcalloc(sizeof(*ports), nb_cci_ports, GFP_KERNEL);
37098+ ports = kcalloc(nb_cci_ports, sizeof(*ports), GFP_KERNEL);
37099 if (!ports)
37100 return -ENOMEM;
37101
37102diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
37103index 8a3aff7..d7538c2 100644
37104--- a/drivers/cdrom/cdrom.c
37105+++ b/drivers/cdrom/cdrom.c
37106@@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
37107 ENSURE(reset, CDC_RESET);
37108 ENSURE(generic_packet, CDC_GENERIC_PACKET);
37109 cdi->mc_flags = 0;
37110- cdo->n_minors = 0;
37111 cdi->options = CDO_USE_FFLAGS;
37112
37113 if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
37114@@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
37115 else
37116 cdi->cdda_method = CDDA_OLD;
37117
37118- if (!cdo->generic_packet)
37119- cdo->generic_packet = cdrom_dummy_generic_packet;
37120+ if (!cdo->generic_packet) {
37121+ pax_open_kernel();
37122+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
37123+ pax_close_kernel();
37124+ }
37125
37126 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
37127 mutex_lock(&cdrom_mutex);
37128@@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
37129 if (cdi->exit)
37130 cdi->exit(cdi);
37131
37132- cdi->ops->n_minors--;
37133 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
37134 }
37135
37136@@ -2107,7 +2108,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
37137 */
37138 nr = nframes;
37139 do {
37140- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
37141+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
37142 if (cgc.buffer)
37143 break;
37144
37145@@ -3429,7 +3430,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
37146 struct cdrom_device_info *cdi;
37147 int ret;
37148
37149- ret = scnprintf(info + *pos, max_size - *pos, header);
37150+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
37151 if (!ret)
37152 return 1;
37153
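register_cdrom() now receives a constified cdrom_device_ops, so installing the default generic_packet handler becomes a deliberate store into read-only data, bracketed by pax_open_kernel()/pax_close_kernel(), with the *(void **)& cast stripping const for that single write. The dropped n_minors updates here and in gdrom.c below disappear for the same reason: drivers may no longer scribble on their ops struct at runtime. On x86 the bracket is roughly (a sketch; the real helpers also handle preemption and KERNEXEC state):

    static inline unsigned long pax_open_kernel(void)
    {
            unsigned long cr0 = read_cr0();

            write_cr0(cr0 & ~X86_CR0_WP);   /* let stores reach read-only pages */
            return cr0;
    }

    static inline unsigned long pax_close_kernel(void)
    {
            unsigned long cr0 = read_cr0() | X86_CR0_WP;

            write_cr0(cr0);                 /* re-arm write protection */
            return cr0;
    }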
37154diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
37155index 5980cb9..6d7bd7e 100644
37156--- a/drivers/cdrom/gdrom.c
37157+++ b/drivers/cdrom/gdrom.c
37158@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
37159 .audio_ioctl = gdrom_audio_ioctl,
37160 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
37161 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
37162- .n_minors = 1,
37163 };
37164
37165 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
37166diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
37167index 1421997..33f5d6d 100644
37168--- a/drivers/char/Kconfig
37169+++ b/drivers/char/Kconfig
37170@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
37171
37172 config DEVKMEM
37173 bool "/dev/kmem virtual device support"
37174- default y
37175+ default n
37176+ depends on !GRKERNSEC_KMEM
37177 help
37178 Say Y here if you want to support the /dev/kmem device. The
37179 /dev/kmem device is rarely used, but can be used for certain
37180@@ -570,6 +571,7 @@ config DEVPORT
37181 bool
37182 depends on !M68K
37183 depends on ISA || PCI
37184+ depends on !GRKERNSEC_KMEM
37185 default y
37186
37187 source "drivers/s390/char/Kconfig"
37188diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
37189index a48e05b..6bac831 100644
37190--- a/drivers/char/agp/compat_ioctl.c
37191+++ b/drivers/char/agp/compat_ioctl.c
37192@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
37193 return -ENOMEM;
37194 }
37195
37196- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
37197+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
37198 sizeof(*usegment) * ureserve.seg_count)) {
37199 kfree(usegment);
37200 kfree(ksegment);
37201diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
37202index 1b19239..b87b143 100644
37203--- a/drivers/char/agp/frontend.c
37204+++ b/drivers/char/agp/frontend.c
37205@@ -819,7 +819,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
37206 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
37207 return -EFAULT;
37208
37209- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
37210+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
37211 return -EFAULT;
37212
37213 client = agp_find_client_by_pid(reserve.pid);
37214@@ -849,7 +849,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
37215 if (segment == NULL)
37216 return -ENOMEM;
37217
37218- if (copy_from_user(segment, (void __user *) reserve.seg_list,
37219+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
37220 sizeof(struct agp_segment) * reserve.seg_count)) {
37221 kfree(segment);
37222 return -EFAULT;
37223diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
37224index 4f94375..413694e 100644
37225--- a/drivers/char/genrtc.c
37226+++ b/drivers/char/genrtc.c
37227@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
37228 switch (cmd) {
37229
37230 case RTC_PLL_GET:
37231+ memset(&pll, 0, sizeof(pll));
37232 if (get_rtc_pll(&pll))
37233 return -EINVAL;
37234 else
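The added memset() in gen_rtc_ioctl() closes a potential kernel-to-user infoleak: pll lives on the kernel stack, and any field or padding byte that get_rtc_pll() leaves unwritten would otherwise be copied out to userspace as stale stack contents. Zeroing the whole struct first is the standard pattern; a userspace analogue:

#include <stdio.h>
#include <string.h>

struct reply {
	int  a;
	char b;   /* usually followed by 3 padding bytes the code never writes */
	long c;
};

static void fill_reply(struct reply *r)
{
	memset(r, 0, sizeof(*r)); /* the fix: no stale bytes can survive */
	r->a = 1;
	r->b = 'x';
	r->c = 2;
}

int main(void)
{
	struct reply r;
	fill_reply(&r);
	printf("%d %c %ld\n", r.a, r.b, r.c);
	return 0;
}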
37235diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
37236index 448ce5e..3a76625 100644
37237--- a/drivers/char/hpet.c
37238+++ b/drivers/char/hpet.c
37239@@ -559,7 +559,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
37240 }
37241
37242 static int
37243-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
37244+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
37245 struct hpet_info *info)
37246 {
37247 struct hpet_timer __iomem *timer;
37248diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
37249index 86fe45c..c0ea948 100644
37250--- a/drivers/char/hw_random/intel-rng.c
37251+++ b/drivers/char/hw_random/intel-rng.c
37252@@ -314,7 +314,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
37253
37254 if (no_fwh_detect)
37255 return -ENODEV;
37256- printk(warning);
37257+ printk("%s", warning);
37258 return -EBUSY;
37259 }
37260
37261diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
37262index ec4e10f..f2a763b 100644
37263--- a/drivers/char/ipmi/ipmi_msghandler.c
37264+++ b/drivers/char/ipmi/ipmi_msghandler.c
37265@@ -420,7 +420,7 @@ struct ipmi_smi {
37266 struct proc_dir_entry *proc_dir;
37267 char proc_dir_name[10];
37268
37269- atomic_t stats[IPMI_NUM_STATS];
37270+ atomic_unchecked_t stats[IPMI_NUM_STATS];
37271
37272 /*
37273 * run_to_completion duplicate of smb_info, smi_info
37274@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
37275
37276
37277 #define ipmi_inc_stat(intf, stat) \
37278- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
37279+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
37280 #define ipmi_get_stat(intf, stat) \
37281- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
37282+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
37283
37284 static int is_lan_addr(struct ipmi_addr *addr)
37285 {
37286@@ -2883,7 +2883,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
37287 INIT_LIST_HEAD(&intf->cmd_rcvrs);
37288 init_waitqueue_head(&intf->waitq);
37289 for (i = 0; i < IPMI_NUM_STATS; i++)
37290- atomic_set(&intf->stats[i], 0);
37291+ atomic_set_unchecked(&intf->stats[i], 0);
37292
37293 intf->proc_dir = NULL;
37294
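atomic_unchecked_t is a PaX/grsecurity type. Under the REFCOUNT hardening, ordinary atomic_t arithmetic is instrumented to trap on overflow so that reference-count wraps cannot be exploited; statistics counters like these IPMI ones are allowed to wrap, so they move to the _unchecked variants, which opt out of the trap. A rough sketch of the interface shape only (the real implementation is per-architecture assembly):

/* Interface shape only; not the real kernel implementation. */
typedef struct { int counter; } atomic_unchecked_t;

#define atomic_read_unchecked(v)   ((v)->counter)
#define atomic_set_unchecked(v, i) (((v)->counter) = (i))

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* plain wrapping add: no REFCOUNT overflow trap on this type */
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}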
37295diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
37296index 15e4a60..b046093 100644
37297--- a/drivers/char/ipmi/ipmi_si_intf.c
37298+++ b/drivers/char/ipmi/ipmi_si_intf.c
37299@@ -280,7 +280,7 @@ struct smi_info {
37300 unsigned char slave_addr;
37301
37302 /* Counters and things for the proc filesystem. */
37303- atomic_t stats[SI_NUM_STATS];
37304+ atomic_unchecked_t stats[SI_NUM_STATS];
37305
37306 struct task_struct *thread;
37307
37308@@ -289,9 +289,9 @@ struct smi_info {
37309 };
37310
37311 #define smi_inc_stat(smi, stat) \
37312- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
37313+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
37314 #define smi_get_stat(smi, stat) \
37315- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
37316+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
37317
37318 #define SI_MAX_PARMS 4
37319
37320@@ -3324,7 +3324,7 @@ static int try_smi_init(struct smi_info *new_smi)
37321 atomic_set(&new_smi->req_events, 0);
37322 new_smi->run_to_completion = 0;
37323 for (i = 0; i < SI_NUM_STATS; i++)
37324- atomic_set(&new_smi->stats[i], 0);
37325+ atomic_set_unchecked(&new_smi->stats[i], 0);
37326
37327 new_smi->interrupt_disabled = 1;
37328 atomic_set(&new_smi->stop_operation, 0);
37329diff --git a/drivers/char/mem.c b/drivers/char/mem.c
37330index f895a8c..2bc9147 100644
37331--- a/drivers/char/mem.c
37332+++ b/drivers/char/mem.c
37333@@ -18,6 +18,7 @@
37334 #include <linux/raw.h>
37335 #include <linux/tty.h>
37336 #include <linux/capability.h>
37337+#include <linux/security.h>
37338 #include <linux/ptrace.h>
37339 #include <linux/device.h>
37340 #include <linux/highmem.h>
37341@@ -37,6 +38,10 @@
37342
37343 #define DEVPORT_MINOR 4
37344
37345+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
37346+extern const struct file_operations grsec_fops;
37347+#endif
37348+
37349 static inline unsigned long size_inside_page(unsigned long start,
37350 unsigned long size)
37351 {
37352@@ -68,9 +73,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
37353
37354 while (cursor < to) {
37355 if (!devmem_is_allowed(pfn)) {
37356+#ifdef CONFIG_GRKERNSEC_KMEM
37357+ gr_handle_mem_readwrite(from, to);
37358+#else
37359 printk(KERN_INFO
37360 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
37361 current->comm, from, to);
37362+#endif
37363 return 0;
37364 }
37365 cursor += PAGE_SIZE;
37366@@ -78,6 +87,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
37367 }
37368 return 1;
37369 }
37370+#elif defined(CONFIG_GRKERNSEC_KMEM)
37371+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
37372+{
37373+ return 0;
37374+}
37375 #else
37376 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
37377 {
37378@@ -120,6 +134,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
37379
37380 while (count > 0) {
37381 unsigned long remaining;
37382+ char *temp;
37383
37384 sz = size_inside_page(p, count);
37385
37386@@ -135,7 +150,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
37387 if (!ptr)
37388 return -EFAULT;
37389
37390- remaining = copy_to_user(buf, ptr, sz);
37391+#ifdef CONFIG_PAX_USERCOPY
37392+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
37393+ if (!temp) {
37394+ unxlate_dev_mem_ptr(p, ptr);
37395+ return -ENOMEM;
37396+ }
37397+ memcpy(temp, ptr, sz);
37398+#else
37399+ temp = ptr;
37400+#endif
37401+
37402+ remaining = copy_to_user(buf, temp, sz);
37403+
37404+#ifdef CONFIG_PAX_USERCOPY
37405+ kfree(temp);
37406+#endif
37407+
37408 unxlate_dev_mem_ptr(p, ptr);
37409 if (remaining)
37410 return -EFAULT;
37411@@ -364,9 +395,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
37412 size_t count, loff_t *ppos)
37413 {
37414 unsigned long p = *ppos;
37415- ssize_t low_count, read, sz;
37416+ ssize_t low_count, read, sz, err = 0;
37417 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
37418- int err = 0;
37419
37420 read = 0;
37421 if (p < (unsigned long) high_memory) {
37422@@ -388,6 +418,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
37423 }
37424 #endif
37425 while (low_count > 0) {
37426+ char *temp;
37427+
37428 sz = size_inside_page(p, low_count);
37429
37430 /*
37431@@ -397,7 +429,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
37432 */
37433 kbuf = xlate_dev_kmem_ptr((char *)p);
37434
37435- if (copy_to_user(buf, kbuf, sz))
37436+#ifdef CONFIG_PAX_USERCOPY
37437+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
37438+ if (!temp)
37439+ return -ENOMEM;
37440+ memcpy(temp, kbuf, sz);
37441+#else
37442+ temp = kbuf;
37443+#endif
37444+
37445+ err = copy_to_user(buf, temp, sz);
37446+
37447+#ifdef CONFIG_PAX_USERCOPY
37448+ kfree(temp);
37449+#endif
37450+
37451+ if (err)
37452 return -EFAULT;
37453 buf += sz;
37454 p += sz;
37455@@ -822,6 +869,9 @@ static const struct memdev {
37456 #ifdef CONFIG_PRINTK
37457 [11] = { "kmsg", 0644, &kmsg_fops, NULL },
37458 #endif
37459+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
37460+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
37461+#endif
37462 };
37463
37464 static int memory_open(struct inode *inode, struct file *filp)
37465@@ -893,7 +943,7 @@ static int __init chr_dev_init(void)
37466 continue;
37467
37468 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
37469- NULL, devlist[minor].name);
37470+ NULL, "%s", devlist[minor].name);
37471 }
37472
37473 return tty_init();
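Under CONFIG_PAX_USERCOPY the read_mem()/read_kmem() hunks above stop copying straight from the translated /dev/mem mapping to userspace and instead bounce the bytes through a freshly allocated slab buffer. USERCOPY validates copy_to_user() sizes against the slab object the source pointer belongs to, and an xlate'd I/O pointer has no such object, so the bounce buffer gives the checker a well-bounded source. The pattern reduced to a skeleton (the helper name is made up, and the unxlate_dev_mem_ptr() bookkeeping is omitted):

/* Skeleton of the bounce-buffer pattern above; 'copy_io_to_user' is a
 * hypothetical name for illustration. */
static ssize_t copy_io_to_user(char __user *buf, const void *io_ptr, size_t sz)
{
	char *temp = kmalloc(sz, GFP_KERNEL | GFP_USERCOPY); /* bounded slab object */

	if (!temp)
		return -ENOMEM;
	memcpy(temp, io_ptr, sz);           /* kernel-to-kernel: not size-checked */
	if (copy_to_user(buf, temp, sz)) {  /* checked against temp's slab size */
		kfree(temp);
		return -EFAULT;
	}
	kfree(temp);
	return sz;
}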
37474diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
37475index 9df78e2..01ba9ae 100644
37476--- a/drivers/char/nvram.c
37477+++ b/drivers/char/nvram.c
37478@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
37479
37480 spin_unlock_irq(&rtc_lock);
37481
37482- if (copy_to_user(buf, contents, tmp - contents))
37483+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
37484 return -EFAULT;
37485
37486 *ppos = i;
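The extra tmp - contents > sizeof(contents) guard in nvram_read() is belt and braces: tmp is advanced while the on-stack contents[] buffer is filled, and checking the derived length against the buffer's own size before copy_to_user() means a miscomputed length can never leak bytes beyond the stack array. A userspace analogue of the guard:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char contents[16];
	char out[sizeof(contents)];
	char *tmp = contents;
	size_t len;

	memset(contents, 'a', sizeof(contents));
	tmp += 8; /* pretend the fill loop produced 8 bytes */

	len = (size_t)(tmp - contents);
	if (len > sizeof(contents)) { /* the added guard */
		fprintf(stderr, "length out of range\n");
		return 1;
	}
	memcpy(out, contents, len);
	printf("copied %zu bytes\n", len);
	return 0;
}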
37487diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
37488index d39cca6..8c1e269 100644
37489--- a/drivers/char/pcmcia/synclink_cs.c
37490+++ b/drivers/char/pcmcia/synclink_cs.c
37491@@ -2345,9 +2345,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
37492
37493 if (debug_level >= DEBUG_LEVEL_INFO)
37494 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
37495- __FILE__, __LINE__, info->device_name, port->count);
37496+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
37497
37498- WARN_ON(!port->count);
37499+ WARN_ON(!atomic_read(&port->count));
37500
37501 if (tty_port_close_start(port, tty, filp) == 0)
37502 goto cleanup;
37503@@ -2365,7 +2365,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
37504 cleanup:
37505 if (debug_level >= DEBUG_LEVEL_INFO)
37506 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
37507- tty->driver->name, port->count);
37508+ tty->driver->name, atomic_read(&port->count));
37509 }
37510
37511 /* Wait until the transmitter is empty.
37512@@ -2507,7 +2507,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
37513
37514 if (debug_level >= DEBUG_LEVEL_INFO)
37515 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
37516- __FILE__, __LINE__, tty->driver->name, port->count);
37517+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
37518
37519 /* If port is closing, signal caller to try again */
37520 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
37521@@ -2527,11 +2527,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
37522 goto cleanup;
37523 }
37524 spin_lock(&port->lock);
37525- port->count++;
37526+ atomic_inc(&port->count);
37527 spin_unlock(&port->lock);
37528 spin_unlock_irqrestore(&info->netlock, flags);
37529
37530- if (port->count == 1) {
37531+ if (atomic_read(&port->count) == 1) {
37532 /* 1st open on this device, init hardware */
37533 retval = startup(info, tty);
37534 if (retval < 0)
37535@@ -3920,7 +3920,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
37536 unsigned short new_crctype;
37537
37538 /* return error if TTY interface open */
37539- if (info->port.count)
37540+ if (atomic_read(&info->port.count))
37541 return -EBUSY;
37542
37543 switch (encoding)
37544@@ -4024,7 +4024,7 @@ static int hdlcdev_open(struct net_device *dev)
37545
37546 /* arbitrate between network and tty opens */
37547 spin_lock_irqsave(&info->netlock, flags);
37548- if (info->port.count != 0 || info->netcount != 0) {
37549+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
37550 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
37551 spin_unlock_irqrestore(&info->netlock, flags);
37552 return -EBUSY;
37553@@ -4114,7 +4114,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
37554 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
37555
37556 /* return error if TTY interface open */
37557- if (info->port.count)
37558+ if (atomic_read(&info->port.count))
37559 return -EBUSY;
37560
37561 if (cmd != SIOCWANDEV)
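The synclink_cs conversion of port->count to an atomic type matters because at least some of the hdlcdev_* reads above happen without port->lock held; with a plain int those are racy accesses, while atomic_read()/atomic_inc() keep every access well-defined whether or not the spinlock is taken. A userspace analogue using C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_count; /* analogue of the converted port->count */

static int device_busy(void)
{
	return atomic_load(&open_count) != 0; /* safe without a lock */
}

int main(void)
{
	atomic_fetch_add(&open_count, 1); /* open */
	printf("busy=%d\n", device_busy());
	atomic_fetch_sub(&open_count, 1); /* close */
	printf("busy=%d\n", device_busy());
	return 0;
}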
37562diff --git a/drivers/char/random.c b/drivers/char/random.c
37563index 7a744d3..35a177ee 100644
37564--- a/drivers/char/random.c
37565+++ b/drivers/char/random.c
37566@@ -269,8 +269,13 @@
37567 /*
37568 * Configuration information
37569 */
37570+#ifdef CONFIG_GRKERNSEC_RANDNET
37571+#define INPUT_POOL_WORDS 512
37572+#define OUTPUT_POOL_WORDS 128
37573+#else
37574 #define INPUT_POOL_WORDS 128
37575 #define OUTPUT_POOL_WORDS 32
37576+#endif
37577 #define SEC_XFER_SIZE 512
37578 #define EXTRACT_SIZE 10
37579
37580@@ -310,10 +315,17 @@ static struct poolinfo {
37581 int poolwords;
37582 int tap1, tap2, tap3, tap4, tap5;
37583 } poolinfo_table[] = {
37584+#ifdef CONFIG_GRKERNSEC_RANDNET
37585+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
37586+ { 512, 411, 308, 208, 104, 1 },
37587+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
37588+ { 128, 103, 76, 51, 25, 1 },
37589+#else
37590 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
37591 { 128, 103, 76, 51, 25, 1 },
37592 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
37593 { 32, 26, 20, 14, 7, 1 },
37594+#endif
37595 #if 0
37596 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
37597 { 2048, 1638, 1231, 819, 411, 1 },
37598@@ -521,8 +533,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
37599 input_rotate += i ? 7 : 14;
37600 }
37601
37602- ACCESS_ONCE(r->input_rotate) = input_rotate;
37603- ACCESS_ONCE(r->add_ptr) = i;
37604+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
37605+ ACCESS_ONCE_RW(r->add_ptr) = i;
37606 smp_wmb();
37607
37608 if (out)
37609@@ -1029,7 +1041,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
37610
37611 extract_buf(r, tmp);
37612 i = min_t(int, nbytes, EXTRACT_SIZE);
37613- if (copy_to_user(buf, tmp, i)) {
37614+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
37615 ret = -EFAULT;
37616 break;
37617 }
37618@@ -1365,7 +1377,7 @@ EXPORT_SYMBOL(generate_random_uuid);
37619 #include <linux/sysctl.h>
37620
37621 static int min_read_thresh = 8, min_write_thresh;
37622-static int max_read_thresh = INPUT_POOL_WORDS * 32;
37623+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
37624 static int max_write_thresh = INPUT_POOL_WORDS * 32;
37625 static char sysctl_bootid[16];
37626
37627@@ -1381,7 +1393,7 @@ static char sysctl_bootid[16];
37628 static int proc_do_uuid(struct ctl_table *table, int write,
37629 void __user *buffer, size_t *lenp, loff_t *ppos)
37630 {
37631- struct ctl_table fake_table;
37632+ ctl_table_no_const fake_table;
37633 unsigned char buf[64], tmp_uuid[16], *uuid;
37634
37635 uuid = table->data;
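Two details in the random.c hunk are worth noting. The sysctl upper bound for the read wakeup threshold now tracks OUTPUT_POOL_WORDS, presumably because reads extract from the blocking output pool and the two pool sizes diverge once GRKERNSEC_RANDNET enlarges them. And extract_entropy_user() refuses to copy more than sizeof(tmp) even though i is already capped at EXTRACT_SIZE; that invariant check turns a future logic bug into a clean -EFAULT rather than a stack over-read. The clamp idiom:

#include <stdio.h>
#include <string.h>

#define EXTRACT_SIZE 10

int main(void)
{
	unsigned char tmp[EXTRACT_SIZE];
	unsigned char user_buf[64];
	size_t nbytes = sizeof(user_buf);
	size_t i;

	memset(tmp, 0xAB, sizeof(tmp));

	i = nbytes < EXTRACT_SIZE ? nbytes : EXTRACT_SIZE;
	if (i > sizeof(tmp)) { /* the added invariant check */
		fprintf(stderr, "bound violated\n");
		return 1;
	}
	memcpy(user_buf, tmp, i);
	printf("copied %zu bytes\n", i);
	return 0;
}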
37636diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
37637index 7cc1fe22..b602d6b 100644
37638--- a/drivers/char/sonypi.c
37639+++ b/drivers/char/sonypi.c
37640@@ -54,6 +54,7 @@
37641
37642 #include <asm/uaccess.h>
37643 #include <asm/io.h>
37644+#include <asm/local.h>
37645
37646 #include <linux/sonypi.h>
37647
37648@@ -490,7 +491,7 @@ static struct sonypi_device {
37649 spinlock_t fifo_lock;
37650 wait_queue_head_t fifo_proc_list;
37651 struct fasync_struct *fifo_async;
37652- int open_count;
37653+ local_t open_count;
37654 int model;
37655 struct input_dev *input_jog_dev;
37656 struct input_dev *input_key_dev;
37657@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
37658 static int sonypi_misc_release(struct inode *inode, struct file *file)
37659 {
37660 mutex_lock(&sonypi_device.lock);
37661- sonypi_device.open_count--;
37662+ local_dec(&sonypi_device.open_count);
37663 mutex_unlock(&sonypi_device.lock);
37664 return 0;
37665 }
37666@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
37667 {
37668 mutex_lock(&sonypi_device.lock);
37669 /* Flush input queue on first open */
37670- if (!sonypi_device.open_count)
37671+ if (!local_read(&sonypi_device.open_count))
37672 kfifo_reset(&sonypi_device.fifo);
37673- sonypi_device.open_count++;
37674+ local_inc(&sonypi_device.open_count);
37675 mutex_unlock(&sonypi_device.lock);
37676
37677 return 0;
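local_t is the lighter cousin of atomic_t: updates are only guaranteed atomic with respect to the local CPU. Every access to sonypi's open_count already happens under sonypi_device.lock, so this reads as defense in depth; if a future path ever touched the counter outside the mutex, local_inc()/local_dec() would still not tear or lose updates on the owning CPU. Interface shape only (the real version is per-architecture):

/* Interface shape only; not the real kernel implementation. */
typedef struct { long counter; } local_t;

#define local_read(l) ((l)->counter)

static inline void local_inc(local_t *l)
{
	__atomic_fetch_add(&l->counter, 1, __ATOMIC_RELAXED);
}

static inline void local_dec(local_t *l)
{
	__atomic_fetch_sub(&l->counter, 1, __ATOMIC_RELAXED);
}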
37678diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
37679index 64420b3..5c40b56 100644
37680--- a/drivers/char/tpm/tpm_acpi.c
37681+++ b/drivers/char/tpm/tpm_acpi.c
37682@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
37683 virt = acpi_os_map_memory(start, len);
37684 if (!virt) {
37685 kfree(log->bios_event_log);
37686+ log->bios_event_log = NULL;
37687 printk("%s: ERROR - Unable to map memory\n", __func__);
37688 return -EIO;
37689 }
37690
37691- memcpy_fromio(log->bios_event_log, virt, len);
37692+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
37693
37694 acpi_os_unmap_memory(virt, len);
37695 return 0;
37696diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
37697index 84ddc55..1d32f1e 100644
37698--- a/drivers/char/tpm/tpm_eventlog.c
37699+++ b/drivers/char/tpm/tpm_eventlog.c
37700@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
37701 event = addr;
37702
37703 if ((event->event_type == 0 && event->event_size == 0) ||
37704- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
37705+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
37706 return NULL;
37707
37708 return addr;
37709@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
37710 return NULL;
37711
37712 if ((event->event_type == 0 && event->event_size == 0) ||
37713- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
37714+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
37715 return NULL;
37716
37717 (*pos)++;
37718@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
37719 int i;
37720
37721 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
37722- seq_putc(m, data[i]);
37723+ if (!seq_putc(m, data[i]))
37724+ return -EFAULT;
37725
37726 return 0;
37727 }
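The tpm_eventlog changes are the textbook overflow-safe rewrite of a bounds check. With an attacker-influenced event_size, the old expression addr + sizeof(struct tcpa_event) + event_size can wrap, making the sum compare small and the check pass. Moving the untrusted term to the left, event_size >= limit - addr - sizeof(...), keeps all arithmetic on values already known to be in range. A self-contained demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uintptr_t addr  = 0x1000;
	uintptr_t limit = 0x2000;
	size_t hdr  = 32;
	size_t size = SIZE_MAX - 16; /* hostile, huge length */

	/* broken: the sum wraps around and compares small */
	int broken_in_bounds = (addr + hdr + size) < limit;

	/* fixed: subtract known-good values, then compare the untrusted term */
	int fixed_in_bounds = size < (size_t)(limit - addr - hdr);

	printf("broken check accepts: %d (the bug)\n", broken_in_bounds);
	printf("fixed check accepts:  %d\n", fixed_in_bounds);
	return 0;
}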
37728diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
37729index b79cf3e..de172d64f 100644
37730--- a/drivers/char/virtio_console.c
37731+++ b/drivers/char/virtio_console.c
37732@@ -682,7 +682,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
37733 if (to_user) {
37734 ssize_t ret;
37735
37736- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
37737+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
37738 if (ret)
37739 return -EFAULT;
37740 } else {
37741@@ -785,7 +785,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
37742 if (!port_has_data(port) && !port->host_connected)
37743 return 0;
37744
37745- return fill_readbuf(port, ubuf, count, true);
37746+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
37747 }
37748
37749 static int wait_port_writable(struct port *port, bool nonblock)
37750diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
37751index a33f46f..a720eed 100644
37752--- a/drivers/clk/clk-composite.c
37753+++ b/drivers/clk/clk-composite.c
37754@@ -122,7 +122,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
37755 struct clk *clk;
37756 struct clk_init_data init;
37757 struct clk_composite *composite;
37758- struct clk_ops *clk_composite_ops;
37759+ clk_ops_no_const *clk_composite_ops;
37760
37761 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
37762 if (!composite) {
37763diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
37764index 81dd31a..ef5c542 100644
37765--- a/drivers/clk/socfpga/clk.c
37766+++ b/drivers/clk/socfpga/clk.c
37767@@ -22,6 +22,7 @@
37768 #include <linux/clk-provider.h>
37769 #include <linux/io.h>
37770 #include <linux/of.h>
37771+#include <asm/pgtable.h>
37772
37773 /* Clock Manager offsets */
37774 #define CLKMGR_CTRL 0x0
37775@@ -152,8 +153,10 @@ static __init struct clk *socfpga_clk_init(struct device_node *node,
37776 streq(clk_name, "periph_pll") ||
37777 streq(clk_name, "sdram_pll")) {
37778 socfpga_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
37779- clk_pll_ops.enable = clk_gate_ops.enable;
37780- clk_pll_ops.disable = clk_gate_ops.disable;
37781+ pax_open_kernel();
37782+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
37783+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
37784+ pax_close_kernel();
37785 }
37786
37787 clk = clk_register(NULL, &socfpga_clk->hw.hw);
37788@@ -244,7 +247,7 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
37789 return parent_rate / div;
37790 }
37791
37792-static struct clk_ops gateclk_ops = {
37793+static clk_ops_no_const gateclk_ops __read_only = {
37794 .recalc_rate = socfpga_clk_recalc_rate,
37795 .get_parent = socfpga_clk_get_parent,
37796 .set_parent = socfpga_clk_set_parent,
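gateclk_ops is now declared __read_only, so after boot its function pointers sit on pages the kernel maps read-only. The pax_open_kernel()/pax_close_kernel() pair in the socfpga hunk (and repeatedly below for the cpufreq drivers) briefly lifts that write protection, on x86 by clearing CR0.WP, performs the assignment, and re-seals the page; the *(void **)& casts are needed because the constified type no longer admits plain stores. Shape of the idea, with a hypothetical helper name:

/* Conceptual only: pax_open_kernel() is per-architecture, and
 * 'set_ro_ptr' is a made-up name for illustration. */
static void set_ro_ptr(void **slot, void *val)
{
	pax_open_kernel();   /* make read-only kernel data writable */
	*slot = val;
	pax_close_kernel();  /* restore the protection */
}

/* usage matching the hunk above:
 *   set_ro_ptr((void **)&clk_pll_ops.enable, clk_gate_ops.enable);
 */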
37797diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
37798index 506fd23..01a593f 100644
37799--- a/drivers/cpufreq/acpi-cpufreq.c
37800+++ b/drivers/cpufreq/acpi-cpufreq.c
37801@@ -172,7 +172,7 @@ static ssize_t show_global_boost(struct kobject *kobj,
37802 return sprintf(buf, "%u\n", boost_enabled);
37803 }
37804
37805-static struct global_attr global_boost = __ATTR(boost, 0644,
37806+static global_attr_no_const global_boost = __ATTR(boost, 0644,
37807 show_global_boost,
37808 store_global_boost);
37809
37810@@ -721,8 +721,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
37811 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
37812 per_cpu(acfreq_data, cpu) = data;
37813
37814- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
37815- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
37816+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
37817+ pax_open_kernel();
37818+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
37819+ pax_close_kernel();
37820+ }
37821
37822 result = acpi_processor_register_performance(data->acpi_data, cpu);
37823 if (result)
37824@@ -850,7 +853,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
37825 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
37826 break;
37827 case ACPI_ADR_SPACE_FIXED_HARDWARE:
37828- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
37829+ pax_open_kernel();
37830+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
37831+ pax_close_kernel();
37832 policy->cur = get_cur_freq_on_cpu(cpu);
37833 break;
37834 default:
37835diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
37836index 04548f7..457a342 100644
37837--- a/drivers/cpufreq/cpufreq.c
37838+++ b/drivers/cpufreq/cpufreq.c
37839@@ -2069,7 +2069,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
37840 return NOTIFY_OK;
37841 }
37842
37843-static struct notifier_block __refdata cpufreq_cpu_notifier = {
37844+static struct notifier_block cpufreq_cpu_notifier = {
37845 .notifier_call = cpufreq_cpu_callback,
37846 };
37847
37848@@ -2101,8 +2101,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
37849
37850 pr_debug("trying to register driver %s\n", driver_data->name);
37851
37852- if (driver_data->setpolicy)
37853- driver_data->flags |= CPUFREQ_CONST_LOOPS;
37854+ if (driver_data->setpolicy) {
37855+ pax_open_kernel();
37856+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
37857+ pax_close_kernel();
37858+ }
37859
37860 write_lock_irqsave(&cpufreq_driver_lock, flags);
37861 if (cpufreq_driver) {
37862diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
37863index 0806c31..6a73276 100644
37864--- a/drivers/cpufreq/cpufreq_governor.c
37865+++ b/drivers/cpufreq/cpufreq_governor.c
37866@@ -187,7 +187,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
37867 struct dbs_data *dbs_data;
37868 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
37869 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
37870- struct od_ops *od_ops = NULL;
37871+ const struct od_ops *od_ops = NULL;
37872 struct od_dbs_tuners *od_tuners = NULL;
37873 struct cs_dbs_tuners *cs_tuners = NULL;
37874 struct cpu_dbs_common_info *cpu_cdbs;
37875@@ -253,7 +253,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
37876
37877 if ((cdata->governor == GOV_CONSERVATIVE) &&
37878 (!policy->governor->initialized)) {
37879- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
37880+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
37881
37882 cpufreq_register_notifier(cs_ops->notifier_block,
37883 CPUFREQ_TRANSITION_NOTIFIER);
37884@@ -273,7 +273,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
37885
37886 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
37887 (policy->governor->initialized == 1)) {
37888- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
37889+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
37890
37891 cpufreq_unregister_notifier(cs_ops->notifier_block,
37892 CPUFREQ_TRANSITION_NOTIFIER);
37893diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
37894index 88cd39f..87f0393 100644
37895--- a/drivers/cpufreq/cpufreq_governor.h
37896+++ b/drivers/cpufreq/cpufreq_governor.h
37897@@ -202,7 +202,7 @@ struct common_dbs_data {
37898 void (*exit)(struct dbs_data *dbs_data);
37899
37900 /* Governor specific ops, see below */
37901- void *gov_ops;
37902+ const void *gov_ops;
37903 };
37904
37905 /* Governor Per policy data */
37906@@ -222,7 +222,7 @@ struct od_ops {
37907 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
37908 unsigned int freq_next, unsigned int relation);
37909 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
37910-};
37911+} __no_const;
37912
37913 struct cs_ops {
37914 struct notifier_block *notifier_block;
37915diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
37916index 32f26f6..feb657b 100644
37917--- a/drivers/cpufreq/cpufreq_ondemand.c
37918+++ b/drivers/cpufreq/cpufreq_ondemand.c
37919@@ -522,7 +522,7 @@ static void od_exit(struct dbs_data *dbs_data)
37920
37921 define_get_cpu_dbs_routines(od_cpu_dbs_info);
37922
37923-static struct od_ops od_ops = {
37924+static struct od_ops od_ops __read_only = {
37925 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
37926 .powersave_bias_target = generic_powersave_bias_target,
37927 .freq_increase = dbs_freq_increase,
37928@@ -577,14 +577,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
37929 (struct cpufreq_policy *, unsigned int, unsigned int),
37930 unsigned int powersave_bias)
37931 {
37932- od_ops.powersave_bias_target = f;
37933+ pax_open_kernel();
37934+ *(void **)&od_ops.powersave_bias_target = f;
37935+ pax_close_kernel();
37936 od_set_powersave_bias(powersave_bias);
37937 }
37938 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
37939
37940 void od_unregister_powersave_bias_handler(void)
37941 {
37942- od_ops.powersave_bias_target = generic_powersave_bias_target;
37943+ pax_open_kernel();
37944+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
37945+ pax_close_kernel();
37946 od_set_powersave_bias(0);
37947 }
37948 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
37949diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
37950index 4cf0d28..5830372 100644
37951--- a/drivers/cpufreq/cpufreq_stats.c
37952+++ b/drivers/cpufreq/cpufreq_stats.c
37953@@ -352,7 +352,7 @@ static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
37954 }
37955
37956 /* priority=1 so this will get called before cpufreq_remove_dev */
37957-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
37958+static struct notifier_block cpufreq_stat_cpu_notifier = {
37959 .notifier_call = cpufreq_stat_cpu_callback,
37960 .priority = 1,
37961 };
37962diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
37963index 2f0a2a6..93d728e 100644
37964--- a/drivers/cpufreq/p4-clockmod.c
37965+++ b/drivers/cpufreq/p4-clockmod.c
37966@@ -160,10 +160,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
37967 case 0x0F: /* Core Duo */
37968 case 0x16: /* Celeron Core */
37969 case 0x1C: /* Atom */
37970- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37971+ pax_open_kernel();
37972+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37973+ pax_close_kernel();
37974 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
37975 case 0x0D: /* Pentium M (Dothan) */
37976- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37977+ pax_open_kernel();
37978+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37979+ pax_close_kernel();
37980 /* fall through */
37981 case 0x09: /* Pentium M (Banias) */
37982 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
37983@@ -175,7 +179,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
37984
37985 /* on P-4s, the TSC runs with constant frequency independent whether
37986 * throttling is active or not. */
37987- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37988+ pax_open_kernel();
37989+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37990+ pax_close_kernel();
37991
37992 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
37993 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
37994diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
37995index ac76b48..2445bc6 100644
37996--- a/drivers/cpufreq/sparc-us3-cpufreq.c
37997+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
37998@@ -18,14 +18,12 @@
37999 #include <asm/head.h>
38000 #include <asm/timer.h>
38001
38002-static struct cpufreq_driver *cpufreq_us3_driver;
38003-
38004 struct us3_freq_percpu_info {
38005 struct cpufreq_frequency_table table[4];
38006 };
38007
38008 /* Indexed by cpu number. */
38009-static struct us3_freq_percpu_info *us3_freq_table;
38010+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
38011
38012 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
38013 * in the Safari config register.
38014@@ -186,12 +184,25 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
38015
38016 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
38017 {
38018- if (cpufreq_us3_driver)
38019- us3_set_cpu_divider_index(policy, 0);
38020+ us3_set_cpu_divider_index(policy->cpu, 0);
38021
38022 return 0;
38023 }
38024
38025+static int __init us3_freq_init(void);
38026+static void __exit us3_freq_exit(void);
38027+
38028+static struct cpufreq_driver cpufreq_us3_driver = {
38029+ .init = us3_freq_cpu_init,
38030+ .verify = us3_freq_verify,
38031+ .target = us3_freq_target,
38032+ .get = us3_freq_get,
38033+ .exit = us3_freq_cpu_exit,
38034+ .owner = THIS_MODULE,
38035+ .name = "UltraSPARC-III",
38036+
38037+};
38038+
38039 static int __init us3_freq_init(void)
38040 {
38041 unsigned long manuf, impl, ver;
38042@@ -208,55 +219,15 @@ static int __init us3_freq_init(void)
38043 (impl == CHEETAH_IMPL ||
38044 impl == CHEETAH_PLUS_IMPL ||
38045 impl == JAGUAR_IMPL ||
38046- impl == PANTHER_IMPL)) {
38047- struct cpufreq_driver *driver;
38048-
38049- ret = -ENOMEM;
38050- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
38051- if (!driver)
38052- goto err_out;
38053-
38054- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
38055- GFP_KERNEL);
38056- if (!us3_freq_table)
38057- goto err_out;
38058-
38059- driver->init = us3_freq_cpu_init;
38060- driver->verify = us3_freq_verify;
38061- driver->target = us3_freq_target;
38062- driver->get = us3_freq_get;
38063- driver->exit = us3_freq_cpu_exit;
38064- strcpy(driver->name, "UltraSPARC-III");
38065-
38066- cpufreq_us3_driver = driver;
38067- ret = cpufreq_register_driver(driver);
38068- if (ret)
38069- goto err_out;
38070-
38071- return 0;
38072-
38073-err_out:
38074- if (driver) {
38075- kfree(driver);
38076- cpufreq_us3_driver = NULL;
38077- }
38078- kfree(us3_freq_table);
38079- us3_freq_table = NULL;
38080- return ret;
38081- }
38082+ impl == PANTHER_IMPL))
38083+ return cpufreq_register_driver(&cpufreq_us3_driver);
38084
38085 return -ENODEV;
38086 }
38087
38088 static void __exit us3_freq_exit(void)
38089 {
38090- if (cpufreq_us3_driver) {
38091- cpufreq_unregister_driver(cpufreq_us3_driver);
38092- kfree(cpufreq_us3_driver);
38093- cpufreq_us3_driver = NULL;
38094- kfree(us3_freq_table);
38095- us3_freq_table = NULL;
38096- }
38097+ cpufreq_unregister_driver(&cpufreq_us3_driver);
38098 }
38099
38100 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
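The sparc-us3 rewrite trades a kzalloc'd cpufreq_driver plus a kmalloc'd per-cpu table, and all the matching error/exit bookkeeping, for file-scope static objects initialized at compile time. Beyond deleting the NULL paths, this fits the constification strategy used throughout the patch: a static driver struct can be placed in protected memory, while a heap copy keeps its function pointers writable forever. Shape of the storage change:

/* Shape of the change (NR_CPUS value is illustrative). */
#define NR_CPUS 64

struct us3_freq_percpu_info { int table[4]; };

/* before: tbl = kzalloc(NR_CPUS * sizeof(*tbl), GFP_KERNEL); plus kfree */
static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
/* after: no allocation, no free, no NULL failure path */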
38101diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
38102index f897d51..15da295 100644
38103--- a/drivers/cpufreq/speedstep-centrino.c
38104+++ b/drivers/cpufreq/speedstep-centrino.c
38105@@ -353,8 +353,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
38106 !cpu_has(cpu, X86_FEATURE_EST))
38107 return -ENODEV;
38108
38109- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
38110- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
38111+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
38112+ pax_open_kernel();
38113+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
38114+ pax_close_kernel();
38115+ }
38116
38117 if (policy->cpu != 0)
38118 return -ENODEV;
38119diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
38120index 22c07fb..9dff5ac 100644
38121--- a/drivers/cpuidle/cpuidle.c
38122+++ b/drivers/cpuidle/cpuidle.c
38123@@ -252,7 +252,7 @@ static int poll_idle(struct cpuidle_device *dev,
38124
38125 static void poll_idle_init(struct cpuidle_driver *drv)
38126 {
38127- struct cpuidle_state *state = &drv->states[0];
38128+ cpuidle_state_no_const *state = &drv->states[0];
38129
38130 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
38131 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
38132diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
38133index ea2f8e7..70ac501 100644
38134--- a/drivers/cpuidle/governor.c
38135+++ b/drivers/cpuidle/governor.c
38136@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
38137 mutex_lock(&cpuidle_lock);
38138 if (__cpuidle_find_governor(gov->name) == NULL) {
38139 ret = 0;
38140- list_add_tail(&gov->governor_list, &cpuidle_governors);
38141+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
38142 if (!cpuidle_curr_governor ||
38143 cpuidle_curr_governor->rating < gov->rating)
38144 cpuidle_switch_governor(gov);
38145@@ -135,7 +135,7 @@ void cpuidle_unregister_governor(struct cpuidle_governor *gov)
38146 new_gov = cpuidle_replace_governor(gov->rating);
38147 cpuidle_switch_governor(new_gov);
38148 }
38149- list_del(&gov->governor_list);
38150+ pax_list_del((struct list_head *)&gov->governor_list);
38151 mutex_unlock(&cpuidle_lock);
38152 }
38153
38154diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
38155index 8739cc0..dc859d0 100644
38156--- a/drivers/cpuidle/sysfs.c
38157+++ b/drivers/cpuidle/sysfs.c
38158@@ -134,7 +134,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
38159 NULL
38160 };
38161
38162-static struct attribute_group cpuidle_attr_group = {
38163+static attribute_group_no_const cpuidle_attr_group = {
38164 .attrs = cpuidle_default_attrs,
38165 .name = "cpuidle",
38166 };
38167diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
38168index 12fea3e..1e28f47 100644
38169--- a/drivers/crypto/hifn_795x.c
38170+++ b/drivers/crypto/hifn_795x.c
38171@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
38172 MODULE_PARM_DESC(hifn_pll_ref,
38173 "PLL reference clock (pci[freq] or ext[freq], default ext)");
38174
38175-static atomic_t hifn_dev_number;
38176+static atomic_unchecked_t hifn_dev_number;
38177
38178 #define ACRYPTO_OP_DECRYPT 0
38179 #define ACRYPTO_OP_ENCRYPT 1
38180@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
38181 goto err_out_disable_pci_device;
38182
38183 snprintf(name, sizeof(name), "hifn%d",
38184- atomic_inc_return(&hifn_dev_number)-1);
38185+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
38186
38187 err = pci_request_regions(pdev, name);
38188 if (err)
38189diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
38190index c99c00d..990a4b2 100644
38191--- a/drivers/devfreq/devfreq.c
38192+++ b/drivers/devfreq/devfreq.c
38193@@ -607,7 +607,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
38194 goto err_out;
38195 }
38196
38197- list_add(&governor->node, &devfreq_governor_list);
38198+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
38199
38200 list_for_each_entry(devfreq, &devfreq_list, node) {
38201 int ret = 0;
38202@@ -695,7 +695,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
38203 }
38204 }
38205
38206- list_del(&governor->node);
38207+ pax_list_del((struct list_head *)&governor->node);
38208 err_out:
38209 mutex_unlock(&devfreq_list_lock);
38210
38211diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
38212index 1069e88..dfcd642 100644
38213--- a/drivers/dma/sh/shdmac.c
38214+++ b/drivers/dma/sh/shdmac.c
38215@@ -511,7 +511,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
38216 return ret;
38217 }
38218
38219-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
38220+static struct notifier_block sh_dmae_nmi_notifier = {
38221 .notifier_call = sh_dmae_nmi_handler,
38222
38223 /* Run before NMI debug handler and KGDB */
38224diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
38225index 211021d..201d47f 100644
38226--- a/drivers/edac/edac_device.c
38227+++ b/drivers/edac/edac_device.c
38228@@ -474,9 +474,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
38229 */
38230 int edac_device_alloc_index(void)
38231 {
38232- static atomic_t device_indexes = ATOMIC_INIT(0);
38233+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
38234
38235- return atomic_inc_return(&device_indexes) - 1;
38236+ return atomic_inc_return_unchecked(&device_indexes) - 1;
38237 }
38238 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
38239
38240diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
38241index 9f7e0e60..348c875 100644
38242--- a/drivers/edac/edac_mc_sysfs.c
38243+++ b/drivers/edac/edac_mc_sysfs.c
38244@@ -150,7 +150,7 @@ static const char * const edac_caps[] = {
38245 struct dev_ch_attribute {
38246 struct device_attribute attr;
38247 int channel;
38248-};
38249+} __do_const;
38250
38251 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
38252 struct dev_ch_attribute dev_attr_legacy_##_name = \
38253@@ -1007,14 +1007,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
38254 }
38255
38256 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
38257+ pax_open_kernel();
38258 if (mci->get_sdram_scrub_rate) {
38259- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
38260- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
38261+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
38262+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
38263 }
38264 if (mci->set_sdram_scrub_rate) {
38265- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
38266- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
38267+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
38268+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
38269 }
38270+ pax_close_kernel();
38271 err = device_create_file(&mci->dev,
38272 &dev_attr_sdram_scrub_rate);
38273 if (err) {
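__do_const marks a struct type for the PaX constify gcc plugin: every instance is treated as if declared const and placed in read-only memory, so the function pointers inside these attribute wrappers cannot be overwritten at runtime. That is also why the sdram_scrub_rate hunk just above needs pax_open_kernel() to fill its fields in. What the plugin achieves automatically, written out by hand with plain const:

#include <stdio.h>
#include <sys/types.h>

struct dev_attr {
	const char *name;
	ssize_t (*show)(char *buf); /* function pointer worth protecting */
};

static ssize_t show_chan(char *buf)
{
	return sprintf(buf, "ch0\n");
}

/* The object lands in .rodata; any runtime store to it faults. */
static const struct dev_attr dev_attr_ch0 = {
	.name = "channel0",
	.show = show_chan,
};

int main(void)
{
	char buf[16];
	dev_attr_ch0.show(buf);
	printf("%s", buf);
	return 0;
}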
38274diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
38275index dd370f9..0281629 100644
38276--- a/drivers/edac/edac_pci.c
38277+++ b/drivers/edac/edac_pci.c
38278@@ -29,7 +29,7 @@
38279
38280 static DEFINE_MUTEX(edac_pci_ctls_mutex);
38281 static LIST_HEAD(edac_pci_list);
38282-static atomic_t pci_indexes = ATOMIC_INIT(0);
38283+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
38284
38285 /*
38286 * edac_pci_alloc_ctl_info
38287@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
38288 */
38289 int edac_pci_alloc_index(void)
38290 {
38291- return atomic_inc_return(&pci_indexes) - 1;
38292+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
38293 }
38294 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
38295
38296diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
38297index e8658e4..22746d6 100644
38298--- a/drivers/edac/edac_pci_sysfs.c
38299+++ b/drivers/edac/edac_pci_sysfs.c
38300@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
38301 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
38302 static int edac_pci_poll_msec = 1000; /* one second workq period */
38303
38304-static atomic_t pci_parity_count = ATOMIC_INIT(0);
38305-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
38306+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
38307+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
38308
38309 static struct kobject *edac_pci_top_main_kobj;
38310 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
38311@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
38312 void *value;
38313 ssize_t(*show) (void *, char *);
38314 ssize_t(*store) (void *, const char *, size_t);
38315-};
38316+} __do_const;
38317
38318 /* Set of show/store abstract level functions for PCI Parity object */
38319 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
38320@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
38321 edac_printk(KERN_CRIT, EDAC_PCI,
38322 "Signaled System Error on %s\n",
38323 pci_name(dev));
38324- atomic_inc(&pci_nonparity_count);
38325+ atomic_inc_unchecked(&pci_nonparity_count);
38326 }
38327
38328 if (status & (PCI_STATUS_PARITY)) {
38329@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
38330 "Master Data Parity Error on %s\n",
38331 pci_name(dev));
38332
38333- atomic_inc(&pci_parity_count);
38334+ atomic_inc_unchecked(&pci_parity_count);
38335 }
38336
38337 if (status & (PCI_STATUS_DETECTED_PARITY)) {
38338@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
38339 "Detected Parity Error on %s\n",
38340 pci_name(dev));
38341
38342- atomic_inc(&pci_parity_count);
38343+ atomic_inc_unchecked(&pci_parity_count);
38344 }
38345 }
38346
38347@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
38348 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
38349 "Signaled System Error on %s\n",
38350 pci_name(dev));
38351- atomic_inc(&pci_nonparity_count);
38352+ atomic_inc_unchecked(&pci_nonparity_count);
38353 }
38354
38355 if (status & (PCI_STATUS_PARITY)) {
38356@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
38357 "Master Data Parity Error on "
38358 "%s\n", pci_name(dev));
38359
38360- atomic_inc(&pci_parity_count);
38361+ atomic_inc_unchecked(&pci_parity_count);
38362 }
38363
38364 if (status & (PCI_STATUS_DETECTED_PARITY)) {
38365@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
38366 "Detected Parity Error on %s\n",
38367 pci_name(dev));
38368
38369- atomic_inc(&pci_parity_count);
38370+ atomic_inc_unchecked(&pci_parity_count);
38371 }
38372 }
38373 }
38374@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
38375 if (!check_pci_errors)
38376 return;
38377
38378- before_count = atomic_read(&pci_parity_count);
38379+ before_count = atomic_read_unchecked(&pci_parity_count);
38380
38381 /* scan all PCI devices looking for a Parity Error on devices and
38382 * bridges.
38383@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
38384 /* Only if operator has selected panic on PCI Error */
38385 if (edac_pci_get_panic_on_pe()) {
38386 /* If the count is different 'after' from 'before' */
38387- if (before_count != atomic_read(&pci_parity_count))
38388+ if (before_count != atomic_read_unchecked(&pci_parity_count))
38389 panic("EDAC: PCI Parity Error");
38390 }
38391 }
38392diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
38393index 51b7e3a..aa8a3e8 100644
38394--- a/drivers/edac/mce_amd.h
38395+++ b/drivers/edac/mce_amd.h
38396@@ -77,7 +77,7 @@ struct amd_decoder_ops {
38397 bool (*mc0_mce)(u16, u8);
38398 bool (*mc1_mce)(u16, u8);
38399 bool (*mc2_mce)(u16, u8);
38400-};
38401+} __no_const;
38402
38403 void amd_report_gart_errors(bool);
38404 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
38405diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
38406index 57ea7f4..af06b76 100644
38407--- a/drivers/firewire/core-card.c
38408+++ b/drivers/firewire/core-card.c
38409@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
38410 const struct fw_card_driver *driver,
38411 struct device *device)
38412 {
38413- static atomic_t index = ATOMIC_INIT(-1);
38414+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
38415
38416- card->index = atomic_inc_return(&index);
38417+ card->index = atomic_inc_return_unchecked(&index);
38418 card->driver = driver;
38419 card->device = device;
38420 card->current_tlabel = 0;
38421@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
38422
38423 void fw_core_remove_card(struct fw_card *card)
38424 {
38425- struct fw_card_driver dummy_driver = dummy_driver_template;
38426+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
38427
38428 card->driver->update_phy_reg(card, 4,
38429 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
38430diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
38431index de4aa40..49ab1f2 100644
38432--- a/drivers/firewire/core-device.c
38433+++ b/drivers/firewire/core-device.c
38434@@ -253,7 +253,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
38435 struct config_rom_attribute {
38436 struct device_attribute attr;
38437 u32 key;
38438-};
38439+} __do_const;
38440
38441 static ssize_t show_immediate(struct device *dev,
38442 struct device_attribute *dattr, char *buf)
38443diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
38444index e5af0e3..d318058 100644
38445--- a/drivers/firewire/core-transaction.c
38446+++ b/drivers/firewire/core-transaction.c
38447@@ -38,6 +38,7 @@
38448 #include <linux/timer.h>
38449 #include <linux/types.h>
38450 #include <linux/workqueue.h>
38451+#include <linux/sched.h>
38452
38453 #include <asm/byteorder.h>
38454
38455diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
38456index 515a42c..5ecf3ba 100644
38457--- a/drivers/firewire/core.h
38458+++ b/drivers/firewire/core.h
38459@@ -111,6 +111,7 @@ struct fw_card_driver {
38460
38461 int (*stop_iso)(struct fw_iso_context *ctx);
38462 };
38463+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
38464
38465 void fw_card_initialize(struct fw_card *card,
38466 const struct fw_card_driver *driver, struct device *device);
38467diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
38468index 94a58a0..f5eba42 100644
38469--- a/drivers/firmware/dmi-id.c
38470+++ b/drivers/firmware/dmi-id.c
38471@@ -16,7 +16,7 @@
38472 struct dmi_device_attribute{
38473 struct device_attribute dev_attr;
38474 int field;
38475-};
38476+} __do_const;
38477 #define to_dmi_dev_attr(_dev_attr) \
38478 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
38479
38480diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
38481index fa0affb..aa448eb 100644
38482--- a/drivers/firmware/dmi_scan.c
38483+++ b/drivers/firmware/dmi_scan.c
38484@@ -791,7 +791,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
38485 if (buf == NULL)
38486 return -1;
38487
38488- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
38489+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
38490
38491 iounmap(buf);
38492 return 0;
38493diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
38494index 5145fa3..0d3babd 100644
38495--- a/drivers/firmware/efi/efi.c
38496+++ b/drivers/firmware/efi/efi.c
38497@@ -65,14 +65,16 @@ static struct attribute_group efi_subsys_attr_group = {
38498 };
38499
38500 static struct efivars generic_efivars;
38501-static struct efivar_operations generic_ops;
38502+static efivar_operations_no_const generic_ops __read_only;
38503
38504 static int generic_ops_register(void)
38505 {
38506- generic_ops.get_variable = efi.get_variable;
38507- generic_ops.set_variable = efi.set_variable;
38508- generic_ops.get_next_variable = efi.get_next_variable;
38509- generic_ops.query_variable_store = efi_query_variable_store;
38510+ pax_open_kernel();
38511+ *(void **)&generic_ops.get_variable = efi.get_variable;
38512+ *(void **)&generic_ops.set_variable = efi.set_variable;
38513+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
38514+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
38515+ pax_close_kernel();
38516
38517 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
38518 }
38519diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
38520index 8c5a61a..cf07bd0 100644
38521--- a/drivers/firmware/efi/efivars.c
38522+++ b/drivers/firmware/efi/efivars.c
38523@@ -456,7 +456,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
38524 static int
38525 create_efivars_bin_attributes(void)
38526 {
38527- struct bin_attribute *attr;
38528+ bin_attribute_no_const *attr;
38529 int error;
38530
38531 /* new_var */
38532diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
38533index 2a90ba6..07f3733 100644
38534--- a/drivers/firmware/google/memconsole.c
38535+++ b/drivers/firmware/google/memconsole.c
38536@@ -147,7 +147,9 @@ static int __init memconsole_init(void)
38537 if (!found_memconsole())
38538 return -ENODEV;
38539
38540- memconsole_bin_attr.size = memconsole_length;
38541+ pax_open_kernel();
38542+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
38543+ pax_close_kernel();
38544
38545 ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
38546
38547diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
38548index 814addb..0937d7f 100644
38549--- a/drivers/gpio/gpio-ich.c
38550+++ b/drivers/gpio/gpio-ich.c
38551@@ -71,7 +71,7 @@ struct ichx_desc {
38552 /* Some chipsets have quirks, let these use their own request/get */
38553 int (*request)(struct gpio_chip *chip, unsigned offset);
38554 int (*get)(struct gpio_chip *chip, unsigned offset);
38555-};
38556+} __do_const;
38557
38558 static struct {
38559 spinlock_t lock;
38560diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
38561index 9902732..64b62dd 100644
38562--- a/drivers/gpio/gpio-vr41xx.c
38563+++ b/drivers/gpio/gpio-vr41xx.c
38564@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
38565 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
38566 maskl, pendl, maskh, pendh);
38567
38568- atomic_inc(&irq_err_count);
38569+ atomic_inc_unchecked(&irq_err_count);
38570
38571 return -EINVAL;
38572 }
38573diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
38574index c722c3b..2ec6040 100644
38575--- a/drivers/gpu/drm/drm_crtc_helper.c
38576+++ b/drivers/gpu/drm/drm_crtc_helper.c
38577@@ -328,7 +328,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
38578 struct drm_crtc *tmp;
38579 int crtc_mask = 1;
38580
38581- WARN(!crtc, "checking null crtc?\n");
38582+ BUG_ON(!crtc);
38583
38584 dev = crtc->dev;
38585
38586diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
38587index fe58d08..07bc38e 100644
38588--- a/drivers/gpu/drm/drm_drv.c
38589+++ b/drivers/gpu/drm/drm_drv.c
38590@@ -186,7 +186,7 @@ static void drm_legacy_dev_reinit(struct drm_device *dev)
38591 atomic_set(&dev->vma_count, 0);
38592
38593 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
38594- atomic_set(&dev->counts[i], 0);
38595+ atomic_set_unchecked(&dev->counts[i], 0);
38596
38597 dev->sigdata.lock = NULL;
38598
38599@@ -302,7 +302,7 @@ module_exit(drm_core_exit);
38600 /**
38601 * Copy and IOCTL return string to user space
38602 */
38603-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
38604+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
38605 {
38606 int len;
38607
38608@@ -372,7 +372,7 @@ long drm_ioctl(struct file *filp,
38609 struct drm_file *file_priv = filp->private_data;
38610 struct drm_device *dev;
38611 const struct drm_ioctl_desc *ioctl = NULL;
38612- drm_ioctl_t *func;
38613+ drm_ioctl_no_const_t func;
38614 unsigned int nr = DRM_IOCTL_NR(cmd);
38615 int retcode = -EINVAL;
38616 char stack_kdata[128];
38617@@ -385,7 +385,7 @@ long drm_ioctl(struct file *filp,
38618 return -ENODEV;
38619
38620 atomic_inc(&dev->ioctl_count);
38621- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
38622+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
38623 ++file_priv->ioctl_count;
38624
38625 if ((nr >= DRM_CORE_IOCTL_COUNT) &&
38626diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
38627index 3f84277..c627c54 100644
38628--- a/drivers/gpu/drm/drm_fops.c
38629+++ b/drivers/gpu/drm/drm_fops.c
38630@@ -97,7 +97,7 @@ int drm_open(struct inode *inode, struct file *filp)
38631 if (drm_device_is_unplugged(dev))
38632 return -ENODEV;
38633
38634- if (!dev->open_count++)
38635+ if (local_inc_return(&dev->open_count) == 1)
38636 need_setup = 1;
38637 mutex_lock(&dev->struct_mutex);
38638 old_imapping = inode->i_mapping;
38639@@ -113,7 +113,7 @@ int drm_open(struct inode *inode, struct file *filp)
38640 retcode = drm_open_helper(inode, filp, dev);
38641 if (retcode)
38642 goto err_undo;
38643- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
38644+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
38645 if (need_setup) {
38646 retcode = drm_setup(dev);
38647 if (retcode)
38648@@ -128,7 +128,7 @@ err_undo:
38649 iput(container_of(dev->dev_mapping, struct inode, i_data));
38650 dev->dev_mapping = old_mapping;
38651 mutex_unlock(&dev->struct_mutex);
38652- dev->open_count--;
38653+ local_dec(&dev->open_count);
38654 return retcode;
38655 }
38656 EXPORT_SYMBOL(drm_open);
38657@@ -405,7 +405,7 @@ int drm_release(struct inode *inode, struct file *filp)
38658
38659 mutex_lock(&drm_global_mutex);
38660
38661- DRM_DEBUG("open_count = %d\n", dev->open_count);
38662+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
38663
38664 if (dev->driver->preclose)
38665 dev->driver->preclose(dev, file_priv);
38666@@ -414,10 +414,10 @@ int drm_release(struct inode *inode, struct file *filp)
38667 * Begin inline drm_release
38668 */
38669
38670- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
38671+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
38672 task_pid_nr(current),
38673 (long)old_encode_dev(file_priv->minor->device),
38674- dev->open_count);
38675+ local_read(&dev->open_count));
38676
38677 /* Release any auth tokens that might point to this file_priv,
38678 (do that under the drm_global_mutex) */
38679@@ -516,8 +516,8 @@ int drm_release(struct inode *inode, struct file *filp)
38680 * End inline drm_release
38681 */
38682
38683- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
38684- if (!--dev->open_count) {
38685+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
38686+ if (local_dec_and_test(&dev->open_count)) {
38687 if (atomic_read(&dev->ioctl_count)) {
38688 DRM_ERROR("Device busy: %d\n",
38689 atomic_read(&dev->ioctl_count));
38690diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
38691index f731116..629842c 100644
38692--- a/drivers/gpu/drm/drm_global.c
38693+++ b/drivers/gpu/drm/drm_global.c
38694@@ -36,7 +36,7 @@
38695 struct drm_global_item {
38696 struct mutex mutex;
38697 void *object;
38698- int refcount;
38699+ atomic_t refcount;
38700 };
38701
38702 static struct drm_global_item glob[DRM_GLOBAL_NUM];
38703@@ -49,7 +49,7 @@ void drm_global_init(void)
38704 struct drm_global_item *item = &glob[i];
38705 mutex_init(&item->mutex);
38706 item->object = NULL;
38707- item->refcount = 0;
38708+ atomic_set(&item->refcount, 0);
38709 }
38710 }
38711
38712@@ -59,7 +59,7 @@ void drm_global_release(void)
38713 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
38714 struct drm_global_item *item = &glob[i];
38715 BUG_ON(item->object != NULL);
38716- BUG_ON(item->refcount != 0);
38717+ BUG_ON(atomic_read(&item->refcount) != 0);
38718 }
38719 }
38720
38721@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
38722 void *object;
38723
38724 mutex_lock(&item->mutex);
38725- if (item->refcount == 0) {
38726+ if (atomic_read(&item->refcount) == 0) {
38727 item->object = kzalloc(ref->size, GFP_KERNEL);
38728 if (unlikely(item->object == NULL)) {
38729 ret = -ENOMEM;
38730@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
38731 goto out_err;
38732
38733 }
38734- ++item->refcount;
38735+ atomic_inc(&item->refcount);
38736 ref->object = item->object;
38737 object = item->object;
38738 mutex_unlock(&item->mutex);
38739@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
38740 struct drm_global_item *item = &glob[ref->global_type];
38741
38742 mutex_lock(&item->mutex);
38743- BUG_ON(item->refcount == 0);
38744+ BUG_ON(atomic_read(&item->refcount) == 0);
38745 BUG_ON(ref->object != item->object);
38746- if (--item->refcount == 0) {
38747+ if (atomic_dec_and_test(&item->refcount)) {
38748 ref->release(ref);
38749 item->object = NULL;
38750 }
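
In drm_global.c the refcount is already serialized by item->mutex, so the atomic_t conversion is not a locking fix; it routes every increment and decrement through the overflow-checked atomic ops. A compact sketch of the resulting unref path, assuming standard kernel atomics (struct item is a hypothetical reduction of drm_global_item):

    #include <linux/atomic.h>
    #include <linux/bug.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    struct item {
            struct mutex mutex;
            atomic_t refcount;              /* was: int refcount */
            void *object;
    };

    static void item_unref(struct item *item)
    {
            mutex_lock(&item->mutex);
            BUG_ON(atomic_read(&item->refcount) == 0);
            /* atomic_dec_and_test() folds "--refcount == 0" into one
             * overflow-checked operation */
            if (atomic_dec_and_test(&item->refcount)) {
                    kfree(item->object);
                    item->object = NULL;
            }
            mutex_unlock(&item->mutex);
    }
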
38751diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
38752index 5329832..b503f49 100644
38753--- a/drivers/gpu/drm/drm_info.c
38754+++ b/drivers/gpu/drm/drm_info.c
38755@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
38756 struct drm_local_map *map;
38757 struct drm_map_list *r_list;
38758
38759- /* Hardcoded from _DRM_FRAME_BUFFER,
38760- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
38761- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
38762- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
38763+ static const char * const types[] = {
38764+ [_DRM_FRAME_BUFFER] = "FB",
38765+ [_DRM_REGISTERS] = "REG",
38766+ [_DRM_SHM] = "SHM",
38767+ [_DRM_AGP] = "AGP",
38768+ [_DRM_SCATTER_GATHER] = "SG",
38769+ [_DRM_CONSISTENT] = "PCI",
38770+ [_DRM_GEM] = "GEM" };
38771 const char *type;
38772 int i;
38773
38774@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
38775 map = r_list->map;
38776 if (!map)
38777 continue;
38778- if (map->type < 0 || map->type > 5)
38779+ if (map->type >= ARRAY_SIZE(types))
38780 type = "??";
38781 else
38782 type = types[map->type];
38783@@ -257,7 +261,11 @@ int drm_vma_info(struct seq_file *m, void *data)
38784 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
38785 vma->vm_flags & VM_LOCKED ? 'l' : '-',
38786 vma->vm_flags & VM_IO ? 'i' : '-',
38787+#ifdef CONFIG_GRKERNSEC_HIDESYM
38788+ 0);
38789+#else
38790 vma->vm_pgoff);
38791+#endif
38792
38793 #if defined(__i386__)
38794 pgprot = pgprot_val(vma->vm_page_prot);
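
The drm_vm_info() rewrite replaces a positional string table and a hard-coded "type > 5" bound with designated initializers plus ARRAY_SIZE(), so adding _DRM_GEM extends the table instead of silently indexing past it. The same pattern in self-contained C (the enum is a stand-in for the _DRM_* map types):

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    enum map_type { MAP_FB, MAP_REG, MAP_SHM, MAP_AGP, MAP_SG, MAP_PCI, MAP_GEM };

    static const char * const types[] = {
            [MAP_FB]  = "FB",
            [MAP_REG] = "REG",
            [MAP_SHM] = "SHM",
            [MAP_AGP] = "AGP",
            [MAP_SG]  = "SG",
            [MAP_PCI] = "PCI",
            [MAP_GEM] = "GEM",      /* new entries grow the bound automatically */
    };

    static const char *type_name(unsigned int t)
    {
            /* one unsigned compare covers both negative (wrapped) and
             * too-large values */
            return t >= ARRAY_SIZE(types) ? "??" : types[t];
    }

    int main(void)
    {
            printf("%s %s\n", type_name(MAP_GEM), type_name(42));   /* GEM ?? */
            return 0;
    }
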
38795diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
38796index 2f4c434..dd12cd2 100644
38797--- a/drivers/gpu/drm/drm_ioc32.c
38798+++ b/drivers/gpu/drm/drm_ioc32.c
38799@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
38800 request = compat_alloc_user_space(nbytes);
38801 if (!access_ok(VERIFY_WRITE, request, nbytes))
38802 return -EFAULT;
38803- list = (struct drm_buf_desc *) (request + 1);
38804+ list = (struct drm_buf_desc __user *) (request + 1);
38805
38806 if (__put_user(count, &request->count)
38807 || __put_user(list, &request->list))
38808@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
38809 request = compat_alloc_user_space(nbytes);
38810 if (!access_ok(VERIFY_WRITE, request, nbytes))
38811 return -EFAULT;
38812- list = (struct drm_buf_pub *) (request + 1);
38813+ list = (struct drm_buf_pub __user *) (request + 1);
38814
38815 if (__put_user(count, &request->count)
38816 || __put_user(list, &request->list))
38817@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
38818 return 0;
38819 }
38820
38821-drm_ioctl_compat_t *drm_compat_ioctls[] = {
38822+drm_ioctl_compat_t drm_compat_ioctls[] = {
38823 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
38824 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
38825 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
38826@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
38827 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
38828 {
38829 unsigned int nr = DRM_IOCTL_NR(cmd);
38830- drm_ioctl_compat_t *fn;
38831 int ret;
38832
38833 /* Assume that ioctls without an explicit compat routine will just
38834@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
38835 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
38836 return drm_ioctl(filp, cmd, arg);
38837
38838- fn = drm_compat_ioctls[nr];
38839-
38840- if (fn != NULL)
38841- ret = (*fn) (filp, cmd, arg);
38842+ if (drm_compat_ioctls[nr] != NULL)
38843+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
38844 else
38845 ret = drm_ioctl(filp, cmd, arg);
38846
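
Dropping the fn temporary and dereferencing drm_compat_ioctls[nr] directly goes hand in hand with turning the table into an array of the (pointer) typedef itself: a const table of handlers can live in read-only memory, and no writable function-pointer copy sits on the stack. A userspace-runnable sketch of the shape (handler names are hypothetical):

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    typedef int handler_fn(int arg);

    static int handle_version(int arg) { return arg + 1; }
    static int handle_unique(int arg)  { return arg * 2; }

    /* const table of function pointers: the compiler may emit it into
     * .rodata, so entries cannot be overwritten to hijack the dispatch */
    static handler_fn * const handlers[] = { handle_version, handle_unique };

    static int dispatch(unsigned int nr, int arg)
    {
            if (nr >= ARRAY_SIZE(handlers) || handlers[nr] == NULL)
                    return -1;              /* fall back to the default path */
            return handlers[nr](arg);       /* index directly, no temporary */
    }

    int main(void)
    {
            printf("%d %d %d\n", dispatch(0, 41), dispatch(1, 21), dispatch(9, 0));
            return 0;
    }
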
38847diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
38848index d752c96..fe08455 100644
38849--- a/drivers/gpu/drm/drm_lock.c
38850+++ b/drivers/gpu/drm/drm_lock.c
38851@@ -86,7 +86,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
38852 if (drm_lock_take(&master->lock, lock->context)) {
38853 master->lock.file_priv = file_priv;
38854 master->lock.lock_time = jiffies;
38855- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
38856+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
38857 break; /* Got lock */
38858 }
38859
38860@@ -157,7 +157,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
38861 return -EINVAL;
38862 }
38863
38864- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
38865+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
38866
38867 if (drm_lock_free(&master->lock, lock->context)) {
38868 /* FIXME: Should really bail out here. */
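
The _DRM_STAT counter conversions here and in drm_fops above introduce the split that recurs through the rest of these DRM changes: under PAX_REFCOUNT, plain atomic_t operations trap or saturate on overflow, and counters whose wraparound is harmless (statistics, vblank counts, ID cookies) are moved to atomic_unchecked_t so a busy system cannot trip the overflow handler. A sketch of the distinction, assuming the PaX definitions of the _unchecked ops:

    /* a real reference count: overflow here is a use-after-free primitive,
     * so the checked atomic_t semantics are wanted */
    static atomic_t obj_refs = ATOMIC_INIT(1);

    /* a statistic: wrap is benign, so opt out of the overflow trap */
    static atomic_unchecked_t vbl_received = ATOMIC_INIT(0);

    static void on_vblank_irq(void)
    {
            atomic_inc_unchecked(&vbl_received);    /* may wrap freely */
    }

    static int read_vblank_counter(void)
    {
            return atomic_read_unchecked(&vbl_received);
    }
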
38869diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
38870index 39d8645..59e06fa 100644
38871--- a/drivers/gpu/drm/drm_stub.c
38872+++ b/drivers/gpu/drm/drm_stub.c
38873@@ -484,7 +484,7 @@ void drm_unplug_dev(struct drm_device *dev)
38874
38875 drm_device_set_unplugged(dev);
38876
38877- if (dev->open_count == 0) {
38878+ if (local_read(&dev->open_count) == 0) {
38879 drm_put_dev(dev);
38880 }
38881 mutex_unlock(&drm_global_mutex);
38882diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
38883index 2290b3b..22056a1 100644
38884--- a/drivers/gpu/drm/drm_sysfs.c
38885+++ b/drivers/gpu/drm/drm_sysfs.c
38886@@ -524,7 +524,7 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
38887 int drm_sysfs_device_add(struct drm_minor *minor)
38888 {
38889 int err;
38890- char *minor_str;
38891+ const char *minor_str;
38892
38893 minor->kdev.parent = minor->dev->dev;
38894
38895diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
38896index ab1892eb..d7009ca 100644
38897--- a/drivers/gpu/drm/i810/i810_dma.c
38898+++ b/drivers/gpu/drm/i810/i810_dma.c
38899@@ -944,8 +944,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
38900 dma->buflist[vertex->idx],
38901 vertex->discard, vertex->used);
38902
38903- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
38904- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
38905+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
38906+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
38907 sarea_priv->last_enqueue = dev_priv->counter - 1;
38908 sarea_priv->last_dispatch = (int)hw_status[5];
38909
38910@@ -1105,8 +1105,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
38911 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
38912 mc->last_render);
38913
38914- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
38915- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
38916+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
38917+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
38918 sarea_priv->last_enqueue = dev_priv->counter - 1;
38919 sarea_priv->last_dispatch = (int)hw_status[5];
38920
38921diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
38922index d4d16ed..8fb0b51 100644
38923--- a/drivers/gpu/drm/i810/i810_drv.h
38924+++ b/drivers/gpu/drm/i810/i810_drv.h
38925@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
38926 int page_flipping;
38927
38928 wait_queue_head_t irq_queue;
38929- atomic_t irq_received;
38930- atomic_t irq_emitted;
38931+ atomic_unchecked_t irq_received;
38932+ atomic_unchecked_t irq_emitted;
38933
38934 int front_offset;
38935 } drm_i810_private_t;
38936diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
38937index a6f4cb5..6b2beb2 100644
38938--- a/drivers/gpu/drm/i915/i915_debugfs.c
38939+++ b/drivers/gpu/drm/i915/i915_debugfs.c
38940@@ -624,7 +624,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
38941 I915_READ(GTIMR));
38942 }
38943 seq_printf(m, "Interrupts received: %d\n",
38944- atomic_read(&dev_priv->irq_received));
38945+ atomic_read_unchecked(&dev_priv->irq_received));
38946 for_each_ring(ring, dev_priv, i) {
38947 if (IS_GEN6(dev) || IS_GEN7(dev)) {
38948 seq_printf(m,
38949diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
38950index d5c784d..06e5c36 100644
38951--- a/drivers/gpu/drm/i915/i915_dma.c
38952+++ b/drivers/gpu/drm/i915/i915_dma.c
38953@@ -1263,7 +1263,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
38954 bool can_switch;
38955
38956 spin_lock(&dev->count_lock);
38957- can_switch = (dev->open_count == 0);
38958+ can_switch = (local_read(&dev->open_count) == 0);
38959 spin_unlock(&dev->count_lock);
38960 return can_switch;
38961 }
38962diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
38963index ab0f2c0..53c1bda 100644
38964--- a/drivers/gpu/drm/i915/i915_drv.h
38965+++ b/drivers/gpu/drm/i915/i915_drv.h
38966@@ -1181,7 +1181,7 @@ typedef struct drm_i915_private {
38967 drm_dma_handle_t *status_page_dmah;
38968 struct resource mch_res;
38969
38970- atomic_t irq_received;
38971+ atomic_unchecked_t irq_received;
38972
38973 /* protects the irq masks */
38974 spinlock_t irq_lock;
38975diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
38976index bf34577..3fd2ffa 100644
38977--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
38978+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
38979@@ -768,9 +768,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
38980
38981 static int
38982 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
38983- int count)
38984+ unsigned int count)
38985 {
38986- int i;
38987+ unsigned int i;
38988 int relocs_total = 0;
38989 int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
38990
38991diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
38992index 3c59584..500f2e9 100644
38993--- a/drivers/gpu/drm/i915/i915_ioc32.c
38994+++ b/drivers/gpu/drm/i915/i915_ioc32.c
38995@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
38996 (unsigned long)request);
38997 }
38998
38999-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
39000+static drm_ioctl_compat_t i915_compat_ioctls[] = {
39001 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
39002 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
39003 [DRM_I915_GETPARAM] = compat_i915_getparam,
39004@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
39005 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
39006 {
39007 unsigned int nr = DRM_IOCTL_NR(cmd);
39008- drm_ioctl_compat_t *fn = NULL;
39009 int ret;
39010
39011 if (nr < DRM_COMMAND_BASE)
39012 return drm_compat_ioctl(filp, cmd, arg);
39013
39014- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
39015- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
39016-
39017- if (fn != NULL)
39018+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) {
39019+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
39020 ret = (*fn) (filp, cmd, arg);
39021- else
39022+ } else
39023 ret = drm_ioctl(filp, cmd, arg);
39024
39025 return ret;
39026diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
39027index 4b91228..590c643 100644
39028--- a/drivers/gpu/drm/i915/i915_irq.c
39029+++ b/drivers/gpu/drm/i915/i915_irq.c
39030@@ -1085,7 +1085,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
39031 int pipe;
39032 u32 pipe_stats[I915_MAX_PIPES];
39033
39034- atomic_inc(&dev_priv->irq_received);
39035+ atomic_inc_unchecked(&dev_priv->irq_received);
39036
39037 while (true) {
39038 iir = I915_READ(VLV_IIR);
39039@@ -1390,7 +1390,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
39040 irqreturn_t ret = IRQ_NONE;
39041 bool err_int_reenable = false;
39042
39043- atomic_inc(&dev_priv->irq_received);
39044+ atomic_inc_unchecked(&dev_priv->irq_received);
39045
39046 /* We get interrupts on unclaimed registers, so check for this before we
39047 * do any I915_{READ,WRITE}. */
39048@@ -2146,7 +2146,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
39049 {
39050 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
39051
39052- atomic_set(&dev_priv->irq_received, 0);
39053+ atomic_set_unchecked(&dev_priv->irq_received, 0);
39054
39055 I915_WRITE(HWSTAM, 0xeffe);
39056
39057@@ -2164,7 +2164,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
39058 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
39059 int pipe;
39060
39061- atomic_set(&dev_priv->irq_received, 0);
39062+ atomic_set_unchecked(&dev_priv->irq_received, 0);
39063
39064 /* VLV magic */
39065 I915_WRITE(VLV_IMR, 0);
39066@@ -2452,7 +2452,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
39067 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
39068 int pipe;
39069
39070- atomic_set(&dev_priv->irq_received, 0);
39071+ atomic_set_unchecked(&dev_priv->irq_received, 0);
39072
39073 for_each_pipe(pipe)
39074 I915_WRITE(PIPESTAT(pipe), 0);
39075@@ -2530,7 +2530,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
39076 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
39077 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
39078
39079- atomic_inc(&dev_priv->irq_received);
39080+ atomic_inc_unchecked(&dev_priv->irq_received);
39081
39082 iir = I915_READ16(IIR);
39083 if (iir == 0)
39084@@ -2604,7 +2604,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
39085 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
39086 int pipe;
39087
39088- atomic_set(&dev_priv->irq_received, 0);
39089+ atomic_set_unchecked(&dev_priv->irq_received, 0);
39090
39091 if (I915_HAS_HOTPLUG(dev)) {
39092 I915_WRITE(PORT_HOTPLUG_EN, 0);
39093@@ -2703,7 +2703,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
39094 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
39095 int pipe, ret = IRQ_NONE;
39096
39097- atomic_inc(&dev_priv->irq_received);
39098+ atomic_inc_unchecked(&dev_priv->irq_received);
39099
39100 iir = I915_READ(IIR);
39101 do {
39102@@ -2827,7 +2827,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
39103 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
39104 int pipe;
39105
39106- atomic_set(&dev_priv->irq_received, 0);
39107+ atomic_set_unchecked(&dev_priv->irq_received, 0);
39108
39109 I915_WRITE(PORT_HOTPLUG_EN, 0);
39110 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
39111@@ -2941,7 +2941,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
39112 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
39113 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
39114
39115- atomic_inc(&dev_priv->irq_received);
39116+ atomic_inc_unchecked(&dev_priv->irq_received);
39117
39118 iir = I915_READ(IIR);
39119
39120diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
39121index f535670..bde09e2 100644
39122--- a/drivers/gpu/drm/i915/intel_display.c
39123+++ b/drivers/gpu/drm/i915/intel_display.c
39124@@ -10019,13 +10019,13 @@ struct intel_quirk {
39125 int subsystem_vendor;
39126 int subsystem_device;
39127 void (*hook)(struct drm_device *dev);
39128-};
39129+} __do_const;
39130
39131 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
39132 struct intel_dmi_quirk {
39133 void (*hook)(struct drm_device *dev);
39134 const struct dmi_system_id (*dmi_id_list)[];
39135-};
39136+} __do_const;
39137
39138 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
39139 {
39140@@ -10033,18 +10033,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
39141 return 1;
39142 }
39143
39144-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
39145+static const struct dmi_system_id intel_dmi_quirks_table[] = {
39146 {
39147- .dmi_id_list = &(const struct dmi_system_id[]) {
39148- {
39149- .callback = intel_dmi_reverse_brightness,
39150- .ident = "NCR Corporation",
39151- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
39152- DMI_MATCH(DMI_PRODUCT_NAME, ""),
39153- },
39154- },
39155- { } /* terminating entry */
39156+ .callback = intel_dmi_reverse_brightness,
39157+ .ident = "NCR Corporation",
39158+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
39159+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
39160 },
39161+ },
39162+ { } /* terminating entry */
39163+};
39164+
39165+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
39166+ {
39167+ .dmi_id_list = &intel_dmi_quirks_table,
39168 .hook = quirk_invert_brightness,
39169 },
39170 };
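
The intel_display.c hunk hoists the anonymous compound-literal DMI list into a named file-scope table; with __do_const applied to the structs, every pointer in intel_dmi_quirks[] then refers to genuinely constant storage. The same restructuring in standalone C (types reduced to hypothetical stand-ins):

    #include <stdio.h>

    struct dmi_id { const char *vendor; };

    struct quirk {
            const struct dmi_id (*dmi_id_list)[];   /* pointer to whole table */
            void (*hook)(void);
    };

    static void reverse_brightness(void) { puts("quirk hook ran"); }

    /* named table instead of an in-place compound literal */
    static const struct dmi_id quirks_table[] = {
            { .vendor = "NCR Corporation" },
            { }                             /* terminating entry */
    };

    static const struct quirk quirks[] = {
            { .dmi_id_list = &quirks_table, .hook = reverse_brightness },
    };

    int main(void)
    {
            for (unsigned int i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
                    quirks[i].hook();
            return 0;
    }
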
39171diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
39172index ca4bc54..ee598a2 100644
39173--- a/drivers/gpu/drm/mga/mga_drv.h
39174+++ b/drivers/gpu/drm/mga/mga_drv.h
39175@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
39176 u32 clear_cmd;
39177 u32 maccess;
39178
39179- atomic_t vbl_received; /**< Number of vblanks received. */
39180+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
39181 wait_queue_head_t fence_queue;
39182- atomic_t last_fence_retired;
39183+ atomic_unchecked_t last_fence_retired;
39184 u32 next_fence_to_post;
39185
39186 unsigned int fb_cpp;
39187diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
39188index 709e90d..89a1c0d 100644
39189--- a/drivers/gpu/drm/mga/mga_ioc32.c
39190+++ b/drivers/gpu/drm/mga/mga_ioc32.c
39191@@ -189,7 +189,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
39192 return 0;
39193 }
39194
39195-drm_ioctl_compat_t *mga_compat_ioctls[] = {
39196+drm_ioctl_compat_t mga_compat_ioctls[] = {
39197 [DRM_MGA_INIT] = compat_mga_init,
39198 [DRM_MGA_GETPARAM] = compat_mga_getparam,
39199 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
39200@@ -207,18 +207,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
39201 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
39202 {
39203 unsigned int nr = DRM_IOCTL_NR(cmd);
39204- drm_ioctl_compat_t *fn = NULL;
39205 int ret;
39206
39207 if (nr < DRM_COMMAND_BASE)
39208 return drm_compat_ioctl(filp, cmd, arg);
39209
39210- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
39211- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
39212-
39213- if (fn != NULL)
39214+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) {
39215+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
39216 ret = (*fn) (filp, cmd, arg);
39217- else
39218+ } else
39219 ret = drm_ioctl(filp, cmd, arg);
39220
39221 return ret;
39222diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
39223index 598c281..60d590e 100644
39224--- a/drivers/gpu/drm/mga/mga_irq.c
39225+++ b/drivers/gpu/drm/mga/mga_irq.c
39226@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
39227 if (crtc != 0)
39228 return 0;
39229
39230- return atomic_read(&dev_priv->vbl_received);
39231+ return atomic_read_unchecked(&dev_priv->vbl_received);
39232 }
39233
39234
39235@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
39236 /* VBLANK interrupt */
39237 if (status & MGA_VLINEPEN) {
39238 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
39239- atomic_inc(&dev_priv->vbl_received);
39240+ atomic_inc_unchecked(&dev_priv->vbl_received);
39241 drm_handle_vblank(dev, 0);
39242 handled = 1;
39243 }
39244@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
39245 if ((prim_start & ~0x03) != (prim_end & ~0x03))
39246 MGA_WRITE(MGA_PRIMEND, prim_end);
39247
39248- atomic_inc(&dev_priv->last_fence_retired);
39249+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
39250 DRM_WAKEUP(&dev_priv->fence_queue);
39251 handled = 1;
39252 }
39253@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
39254 * using fences.
39255 */
39256 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
39257- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
39258+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
39259 - *sequence) <= (1 << 23)));
39260
39261 *sequence = cur_fence;
39262diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
39263index 3e72876..d1c15ad 100644
39264--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
39265+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
39266@@ -965,7 +965,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
39267 struct bit_table {
39268 const char id;
39269 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
39270-};
39271+} __no_const;
39272
39273 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
39274
39275diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
39276index 994fd6e..6e12565 100644
39277--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
39278+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
39279@@ -94,7 +94,6 @@ struct nouveau_drm {
39280 struct drm_global_reference mem_global_ref;
39281 struct ttm_bo_global_ref bo_global_ref;
39282 struct ttm_bo_device bdev;
39283- atomic_t validate_sequence;
39284 int (*move)(struct nouveau_channel *,
39285 struct ttm_buffer_object *,
39286 struct ttm_mem_reg *, struct ttm_mem_reg *);
39287diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
39288index c1a7e5a..38b8539 100644
39289--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
39290+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
39291@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
39292 unsigned long arg)
39293 {
39294 unsigned int nr = DRM_IOCTL_NR(cmd);
39295- drm_ioctl_compat_t *fn = NULL;
39296+ drm_ioctl_compat_t fn = NULL;
39297 int ret;
39298
39299 if (nr < DRM_COMMAND_BASE)
39300diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
39301index 81638d7..2e45854 100644
39302--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
39303+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
39304@@ -65,7 +65,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
39305 bool can_switch;
39306
39307 spin_lock(&dev->count_lock);
39308- can_switch = (dev->open_count == 0);
39309+ can_switch = (local_read(&dev->open_count) == 0);
39310 spin_unlock(&dev->count_lock);
39311 return can_switch;
39312 }
39313diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
39314index eb89653..613cf71 100644
39315--- a/drivers/gpu/drm/qxl/qxl_cmd.c
39316+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
39317@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
39318 int ret;
39319
39320 mutex_lock(&qdev->async_io_mutex);
39321- irq_num = atomic_read(&qdev->irq_received_io_cmd);
39322+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
39323 if (qdev->last_sent_io_cmd > irq_num) {
39324 if (intr)
39325 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
39326- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39327+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39328 else
39329 ret = wait_event_timeout(qdev->io_cmd_event,
39330- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39331+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39332 /* 0 is timeout, just bail the "hw" has gone away */
39333 if (ret <= 0)
39334 goto out;
39335- irq_num = atomic_read(&qdev->irq_received_io_cmd);
39336+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
39337 }
39338 outb(val, addr);
39339 qdev->last_sent_io_cmd = irq_num + 1;
39340 if (intr)
39341 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
39342- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39343+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39344 else
39345 ret = wait_event_timeout(qdev->io_cmd_event,
39346- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39347+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39348 out:
39349 if (ret > 0)
39350 ret = 0;
39351diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
39352index c3c2bbd..bc3c0fb 100644
39353--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
39354+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
39355@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
39356 struct drm_info_node *node = (struct drm_info_node *) m->private;
39357 struct qxl_device *qdev = node->minor->dev->dev_private;
39358
39359- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
39360- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
39361- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
39362- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
39363+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
39364+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
39365+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
39366+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
39367 seq_printf(m, "%d\n", qdev->irq_received_error);
39368 return 0;
39369 }
39370diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
39371index f7c9add..fb971d2 100644
39372--- a/drivers/gpu/drm/qxl/qxl_drv.h
39373+++ b/drivers/gpu/drm/qxl/qxl_drv.h
39374@@ -290,10 +290,10 @@ struct qxl_device {
39375 unsigned int last_sent_io_cmd;
39376
39377 /* interrupt handling */
39378- atomic_t irq_received;
39379- atomic_t irq_received_display;
39380- atomic_t irq_received_cursor;
39381- atomic_t irq_received_io_cmd;
39382+ atomic_unchecked_t irq_received;
39383+ atomic_unchecked_t irq_received_display;
39384+ atomic_unchecked_t irq_received_cursor;
39385+ atomic_unchecked_t irq_received_io_cmd;
39386 unsigned irq_received_error;
39387 wait_queue_head_t display_event;
39388 wait_queue_head_t cursor_event;
39389diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
39390index 21393dc..329f3a9 100644
39391--- a/drivers/gpu/drm/qxl/qxl_irq.c
39392+++ b/drivers/gpu/drm/qxl/qxl_irq.c
39393@@ -33,19 +33,19 @@ irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS)
39394
39395 pending = xchg(&qdev->ram_header->int_pending, 0);
39396
39397- atomic_inc(&qdev->irq_received);
39398+ atomic_inc_unchecked(&qdev->irq_received);
39399
39400 if (pending & QXL_INTERRUPT_DISPLAY) {
39401- atomic_inc(&qdev->irq_received_display);
39402+ atomic_inc_unchecked(&qdev->irq_received_display);
39403 wake_up_all(&qdev->display_event);
39404 qxl_queue_garbage_collect(qdev, false);
39405 }
39406 if (pending & QXL_INTERRUPT_CURSOR) {
39407- atomic_inc(&qdev->irq_received_cursor);
39408+ atomic_inc_unchecked(&qdev->irq_received_cursor);
39409 wake_up_all(&qdev->cursor_event);
39410 }
39411 if (pending & QXL_INTERRUPT_IO_CMD) {
39412- atomic_inc(&qdev->irq_received_io_cmd);
39413+ atomic_inc_unchecked(&qdev->irq_received_io_cmd);
39414 wake_up_all(&qdev->io_cmd_event);
39415 }
39416 if (pending & QXL_INTERRUPT_ERROR) {
39417@@ -82,10 +82,10 @@ int qxl_irq_init(struct qxl_device *qdev)
39418 init_waitqueue_head(&qdev->io_cmd_event);
39419 INIT_WORK(&qdev->client_monitors_config_work,
39420 qxl_client_monitors_config_work_func);
39421- atomic_set(&qdev->irq_received, 0);
39422- atomic_set(&qdev->irq_received_display, 0);
39423- atomic_set(&qdev->irq_received_cursor, 0);
39424- atomic_set(&qdev->irq_received_io_cmd, 0);
39425+ atomic_set_unchecked(&qdev->irq_received, 0);
39426+ atomic_set_unchecked(&qdev->irq_received_display, 0);
39427+ atomic_set_unchecked(&qdev->irq_received_cursor, 0);
39428+ atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
39429 qdev->irq_received_error = 0;
39430 ret = drm_irq_install(qdev->ddev);
39431 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
39432diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
39433index 037786d..2a95e33 100644
39434--- a/drivers/gpu/drm/qxl/qxl_ttm.c
39435+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
39436@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
39437 }
39438 }
39439
39440-static struct vm_operations_struct qxl_ttm_vm_ops;
39441+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
39442 static const struct vm_operations_struct *ttm_vm_ops;
39443
39444 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
39445@@ -147,8 +147,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
39446 return r;
39447 if (unlikely(ttm_vm_ops == NULL)) {
39448 ttm_vm_ops = vma->vm_ops;
39449+ pax_open_kernel();
39450 qxl_ttm_vm_ops = *ttm_vm_ops;
39451 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
39452+ pax_close_kernel();
39453 }
39454 vma->vm_ops = &qxl_ttm_vm_ops;
39455 return 0;
39456@@ -558,25 +560,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
39457 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
39458 {
39459 #if defined(CONFIG_DEBUG_FS)
39460- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
39461- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
39462- unsigned i;
39463+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
39464+ {
39465+ .name = "qxl_mem_mm",
39466+ .show = &qxl_mm_dump_table,
39467+ },
39468+ {
39469+ .name = "qxl_surf_mm",
39470+ .show = &qxl_mm_dump_table,
39471+ }
39472+ };
39473
39474- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
39475- if (i == 0)
39476- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
39477- else
39478- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
39479- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
39480- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
39481- qxl_mem_types_list[i].driver_features = 0;
39482- if (i == 0)
39483- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
39484- else
39485- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
39486+ pax_open_kernel();
39487+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
39488+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
39489+ pax_close_kernel();
39490
39491- }
39492- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
39493+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
39494 #else
39495 return 0;
39496 #endif
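
qxl_ttm.c shows two idioms used throughout the patch: structures that used to be filled in at runtime become static tables with designated initializers, and the few writes that must still happen at runtime target __read_only data inside a pax_open_kernel()/pax_close_kernel() window, which briefly lifts kernel write protection on the local CPU. A sketch of the vm_ops variant, using the PaX-specific names as they appear above (not compilable outside a PaX kernel):

    static int demo_fault(struct vm_area_struct *vma, struct vm_fault *vmf);

    static vm_operations_struct_no_const demo_vm_ops __read_only;
    static const struct vm_operations_struct *saved_vm_ops;

    static int demo_mmap_setup(struct vm_area_struct *vma)
    {
            if (unlikely(saved_vm_ops == NULL)) {
                    saved_vm_ops = vma->vm_ops;
                    pax_open_kernel();              /* allow the one-time write */
                    demo_vm_ops = *saved_vm_ops;    /* copy the template */
                    demo_vm_ops.fault = &demo_fault;/* install the hook */
                    pax_close_kernel();             /* re-arm write protection */
            }
            vma->vm_ops = &demo_vm_ops;
            return 0;
    }
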
39497diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
39498index c451257..0ad2134 100644
39499--- a/drivers/gpu/drm/r128/r128_cce.c
39500+++ b/drivers/gpu/drm/r128/r128_cce.c
39501@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
39502
39503 /* GH: Simple idle check.
39504 */
39505- atomic_set(&dev_priv->idle_count, 0);
39506+ atomic_set_unchecked(&dev_priv->idle_count, 0);
39507
39508 /* We don't support anything other than bus-mastering ring mode,
39509 * but the ring can be in either AGP or PCI space for the ring
39510diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
39511index 56eb5e3..c4ec43d 100644
39512--- a/drivers/gpu/drm/r128/r128_drv.h
39513+++ b/drivers/gpu/drm/r128/r128_drv.h
39514@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
39515 int is_pci;
39516 unsigned long cce_buffers_offset;
39517
39518- atomic_t idle_count;
39519+ atomic_unchecked_t idle_count;
39520
39521 int page_flipping;
39522 int current_page;
39523 u32 crtc_offset;
39524 u32 crtc_offset_cntl;
39525
39526- atomic_t vbl_received;
39527+ atomic_unchecked_t vbl_received;
39528
39529 u32 color_fmt;
39530 unsigned int front_offset;
39531diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
39532index a954c54..9cc595c 100644
39533--- a/drivers/gpu/drm/r128/r128_ioc32.c
39534+++ b/drivers/gpu/drm/r128/r128_ioc32.c
39535@@ -177,7 +177,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
39536 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
39537 }
39538
39539-drm_ioctl_compat_t *r128_compat_ioctls[] = {
39540+drm_ioctl_compat_t r128_compat_ioctls[] = {
39541 [DRM_R128_INIT] = compat_r128_init,
39542 [DRM_R128_DEPTH] = compat_r128_depth,
39543 [DRM_R128_STIPPLE] = compat_r128_stipple,
39544@@ -196,18 +196,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
39545 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
39546 {
39547 unsigned int nr = DRM_IOCTL_NR(cmd);
39548- drm_ioctl_compat_t *fn = NULL;
39549 int ret;
39550
39551 if (nr < DRM_COMMAND_BASE)
39552 return drm_compat_ioctl(filp, cmd, arg);
39553
39554- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
39555- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
39556-
39557- if (fn != NULL)
39558+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) {
39559+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
39560 ret = (*fn) (filp, cmd, arg);
39561- else
39562+ } else
39563 ret = drm_ioctl(filp, cmd, arg);
39564
39565 return ret;
39566diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
39567index 2ea4f09..d391371 100644
39568--- a/drivers/gpu/drm/r128/r128_irq.c
39569+++ b/drivers/gpu/drm/r128/r128_irq.c
39570@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
39571 if (crtc != 0)
39572 return 0;
39573
39574- return atomic_read(&dev_priv->vbl_received);
39575+ return atomic_read_unchecked(&dev_priv->vbl_received);
39576 }
39577
39578 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
39579@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
39580 /* VBLANK interrupt */
39581 if (status & R128_CRTC_VBLANK_INT) {
39582 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
39583- atomic_inc(&dev_priv->vbl_received);
39584+ atomic_inc_unchecked(&dev_priv->vbl_received);
39585 drm_handle_vblank(dev, 0);
39586 return IRQ_HANDLED;
39587 }
39588diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
39589index 01dd9ae..6352f04 100644
39590--- a/drivers/gpu/drm/r128/r128_state.c
39591+++ b/drivers/gpu/drm/r128/r128_state.c
39592@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
39593
39594 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
39595 {
39596- if (atomic_read(&dev_priv->idle_count) == 0)
39597+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
39598 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
39599 else
39600- atomic_set(&dev_priv->idle_count, 0);
39601+ atomic_set_unchecked(&dev_priv->idle_count, 0);
39602 }
39603
39604 #endif
39605diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
39606index af85299..ed9ac8d 100644
39607--- a/drivers/gpu/drm/radeon/mkregtable.c
39608+++ b/drivers/gpu/drm/radeon/mkregtable.c
39609@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
39610 regex_t mask_rex;
39611 regmatch_t match[4];
39612 char buf[1024];
39613- size_t end;
39614+ long end;
39615 int len;
39616 int done = 0;
39617 int r;
39618 unsigned o;
39619 struct offset *offset;
39620 char last_reg_s[10];
39621- int last_reg;
39622+ unsigned long last_reg;
39623
39624 if (regcomp
39625 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
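
The mkregtable.c change (size_t end becomes long, int last_reg becomes unsigned long) appears to be a plain integer width/signedness hardening in a host-side build tool: a size_t can never go negative, and comparing a signed int against an unsigned long silently converts the int. A runnable illustration of both traps:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            size_t end = 0;
            end--;                  /* "negative" size_t wraps to SIZE_MAX */
            printf("wrapped size_t: %zu\n", end);

            int last_reg = -1;
            unsigned long reg = strtoul("0xffffffff", NULL, 0);
            /* usual arithmetic conversions turn last_reg into a huge
             * unsigned value, so this comparison is true */
            if (last_reg >= reg)
                    printf("signedness trap: -1 compared as %lu\n",
                           (unsigned long)last_reg);
            return 0;
    }
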
39626diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
39627index 841d0e0..9eaa268 100644
39628--- a/drivers/gpu/drm/radeon/radeon_device.c
39629+++ b/drivers/gpu/drm/radeon/radeon_device.c
39630@@ -1117,7 +1117,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
39631 bool can_switch;
39632
39633 spin_lock(&dev->count_lock);
39634- can_switch = (dev->open_count == 0);
39635+ can_switch = (local_read(&dev->open_count) == 0);
39636 spin_unlock(&dev->count_lock);
39637 return can_switch;
39638 }
39639diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
39640index b369d42..8dd04eb 100644
39641--- a/drivers/gpu/drm/radeon/radeon_drv.h
39642+++ b/drivers/gpu/drm/radeon/radeon_drv.h
39643@@ -258,7 +258,7 @@ typedef struct drm_radeon_private {
39644
39645 /* SW interrupt */
39646 wait_queue_head_t swi_queue;
39647- atomic_t swi_emitted;
39648+ atomic_unchecked_t swi_emitted;
39649 int vblank_crtc;
39650 uint32_t irq_enable_reg;
39651 uint32_t r500_disp_irq_reg;
39652diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
39653index c180df8..5fd8186 100644
39654--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
39655+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
39656@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
39657 request = compat_alloc_user_space(sizeof(*request));
39658 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
39659 || __put_user(req32.param, &request->param)
39660- || __put_user((void __user *)(unsigned long)req32.value,
39661+ || __put_user((unsigned long)req32.value,
39662 &request->value))
39663 return -EFAULT;
39664
39665@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
39666 #define compat_radeon_cp_setparam NULL
39667 #endif /* X86_64 || IA64 */
39668
39669-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
39670+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
39671 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
39672 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
39673 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
39674@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
39675 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
39676 {
39677 unsigned int nr = DRM_IOCTL_NR(cmd);
39678- drm_ioctl_compat_t *fn = NULL;
39679 int ret;
39680
39681 if (nr < DRM_COMMAND_BASE)
39682 return drm_compat_ioctl(filp, cmd, arg);
39683
39684- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
39685- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
39686-
39687- if (fn != NULL)
39688+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) {
39689+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
39690 ret = (*fn) (filp, cmd, arg);
39691- else
39692+ } else
39693 ret = drm_ioctl(filp, cmd, arg);
39694
39695 return ret;
39696diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
39697index 8d68e97..9dcfed8 100644
39698--- a/drivers/gpu/drm/radeon/radeon_irq.c
39699+++ b/drivers/gpu/drm/radeon/radeon_irq.c
39700@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
39701 unsigned int ret;
39702 RING_LOCALS;
39703
39704- atomic_inc(&dev_priv->swi_emitted);
39705- ret = atomic_read(&dev_priv->swi_emitted);
39706+ atomic_inc_unchecked(&dev_priv->swi_emitted);
39707+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
39708
39709 BEGIN_RING(4);
39710 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
39711@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
39712 drm_radeon_private_t *dev_priv =
39713 (drm_radeon_private_t *) dev->dev_private;
39714
39715- atomic_set(&dev_priv->swi_emitted, 0);
39716+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
39717 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
39718
39719 dev->max_vblank_count = 0x001fffff;
39720diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
39721index 4d20910..6726b6d 100644
39722--- a/drivers/gpu/drm/radeon/radeon_state.c
39723+++ b/drivers/gpu/drm/radeon/radeon_state.c
39724@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
39725 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
39726 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
39727
39728- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
39729+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
39730 sarea_priv->nbox * sizeof(depth_boxes[0])))
39731 return -EFAULT;
39732
39733@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
39734 {
39735 drm_radeon_private_t *dev_priv = dev->dev_private;
39736 drm_radeon_getparam_t *param = data;
39737- int value;
39738+ int value = 0;
39739
39740 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
39741
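
The radeon_cp_clear() change re-validates sarea_priv->nbox in the same expression that sizes the copy. The SAREA is mapped into userspace, so the clamp a few lines earlier is not enough: the value can change between check and use, and the copy size must be bounded at the moment it is consumed. A self-contained sketch of check-at-use (NR_CLIPRECTS and struct rect are stand-ins):

    #include <stdio.h>
    #include <string.h>

    #define NR_CLIPRECTS 12
    struct rect { int x1, y1, x2, y2; };

    /* n originates in memory the caller can still scribble on, so it is
     * bounded in the same step that performs the copy */
    static int copy_boxes(struct rect *dst, const struct rect *src, unsigned int n)
    {
            if (n > NR_CLIPRECTS)
                    return -1;
            memcpy(dst, src, n * sizeof(*src));
            return 0;
    }

    int main(void)
    {
            struct rect dst[NR_CLIPRECTS], src[NR_CLIPRECTS] = { { 0, 0, 1, 1 } };
            printf("%d %d\n", copy_boxes(dst, src, 4), copy_boxes(dst, src, 4096));
            return 0;
    }
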
39742diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
39743index 71245d6..94c556d 100644
39744--- a/drivers/gpu/drm/radeon/radeon_ttm.c
39745+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
39746@@ -784,7 +784,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
39747 man->size = size >> PAGE_SHIFT;
39748 }
39749
39750-static struct vm_operations_struct radeon_ttm_vm_ops;
39751+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
39752 static const struct vm_operations_struct *ttm_vm_ops = NULL;
39753
39754 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
39755@@ -825,8 +825,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
39756 }
39757 if (unlikely(ttm_vm_ops == NULL)) {
39758 ttm_vm_ops = vma->vm_ops;
39759+ pax_open_kernel();
39760 radeon_ttm_vm_ops = *ttm_vm_ops;
39761 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
39762+ pax_close_kernel();
39763 }
39764 vma->vm_ops = &radeon_ttm_vm_ops;
39765 return 0;
39766@@ -855,38 +857,33 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
39767 static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
39768 {
39769 #if defined(CONFIG_DEBUG_FS)
39770- static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
39771- static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
39772- unsigned i;
39773+ static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2] = {
39774+ {
39775+ .name = "radeon_vram_mm",
39776+ .show = &radeon_mm_dump_table,
39777+ },
39778+ {
39779+ .name = "radeon_gtt_mm",
39780+ .show = &radeon_mm_dump_table,
39781+ },
39782+ {
39783+ .name = "ttm_page_pool",
39784+ .show = &ttm_page_alloc_debugfs,
39785+ },
39786+ {
39787+ .name = "ttm_dma_page_pool",
39788+ .show = &ttm_dma_page_alloc_debugfs,
39789+ },
39790+ };
39791+ unsigned i = RADEON_DEBUGFS_MEM_TYPES + 1;
39792
39793- for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
39794- if (i == 0)
39795- sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
39796- else
39797- sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
39798- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
39799- radeon_mem_types_list[i].show = &radeon_mm_dump_table;
39800- radeon_mem_types_list[i].driver_features = 0;
39801- if (i == 0)
39802- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
39803- else
39804- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
39805-
39806- }
39807- /* Add ttm page pool to debugfs */
39808- sprintf(radeon_mem_types_names[i], "ttm_page_pool");
39809- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
39810- radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
39811- radeon_mem_types_list[i].driver_features = 0;
39812- radeon_mem_types_list[i++].data = NULL;
39813+ pax_open_kernel();
39814+ *(void **)&radeon_mem_types_list[0].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
39815+ *(void **)&radeon_mem_types_list[1].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
39816+ pax_close_kernel();
39817 #ifdef CONFIG_SWIOTLB
39818- if (swiotlb_nr_tbl()) {
39819- sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
39820- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
39821- radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
39822- radeon_mem_types_list[i].driver_features = 0;
39823- radeon_mem_types_list[i++].data = NULL;
39824- }
39825+ if (swiotlb_nr_tbl())
39826+ i++;
39827 #endif
39828 return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
39829
39830diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
39831index 1447d79..40b2a5b 100644
39832--- a/drivers/gpu/drm/radeon/rs690.c
39833+++ b/drivers/gpu/drm/radeon/rs690.c
39834@@ -345,9 +345,11 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
39835 if (max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
39836 rdev->pm.sideport_bandwidth.full)
39837 max_bandwidth = rdev->pm.sideport_bandwidth;
39838- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
39839+ read_delay_latency.full = dfixed_const(800 * 1000);
39840 read_delay_latency.full = dfixed_div(read_delay_latency,
39841 rdev->pm.igp_sideport_mclk);
39842+ a.full = dfixed_const(370);
39843+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
39844 } else {
39845 if (max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
39846 rdev->pm.k8_bandwidth.full)
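
The rs690.c change fixes a 32-bit overflow in 20.12 fixed-point math: dfixed_const(370 * 800 * 1000) shifts 296,000,000 left by 12 bits, which needs about 40 bits, so the constant silently wraps. The fix shifts only the small factor, divides by the memory clock, then folds the 370 back in with a fixed-point multiply. A runnable model with the dfixed helpers reduced to their arithmetic (the 400 MHz clock is an assumed example value):

    #include <stdio.h>
    #include <stdint.h>

    /* 20.12 fixed point, as in the radeon dfixed_* helpers */
    static uint32_t fx_const(uint32_t a) { return a << 12; }
    static uint32_t fx_div(uint32_t a, uint32_t b)
    {
            return (uint32_t)(((uint64_t)a << 12) / b);
    }
    static uint32_t fx_mul(uint32_t a, uint32_t b)
    {
            return (uint32_t)(((uint64_t)a * b) >> 12);
    }

    int main(void)
    {
            uint32_t mclk = fx_const(400);  /* assumed sideport clock */

            /* broken: the shift inside fx_const() wraps modulo 2^32 */
            uint32_t bad = fx_div(fx_const(370u * 800u * 1000u), mclk);

            /* fixed: shift the small constant, multiply by 370 afterwards */
            uint32_t good = fx_mul(fx_div(fx_const(800u * 1000u), mclk),
                                   fx_const(370));

            printf("integer parts: bad=%u good=%u\n", bad >> 12, good >> 12);
            return 0;
    }
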
39847diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
39848index dbc2def..0a9f710 100644
39849--- a/drivers/gpu/drm/ttm/ttm_memory.c
39850+++ b/drivers/gpu/drm/ttm/ttm_memory.c
39851@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
39852 zone->glob = glob;
39853 glob->zone_kernel = zone;
39854 ret = kobject_init_and_add(
39855- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
39856+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
39857 if (unlikely(ret != 0)) {
39858 kobject_put(&zone->kobj);
39859 return ret;
39860@@ -347,7 +347,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
39861 zone->glob = glob;
39862 glob->zone_dma32 = zone;
39863 ret = kobject_init_and_add(
39864- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
39865+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
39866 if (unlikely(ret != 0)) {
39867 kobject_put(&zone->kobj);
39868 return ret;
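
The ttm_memory.c change pins zone->name behind an explicit "%s": kobject_init_and_add() takes a printf-style format, so passing the name directly would let any '%' in it be interpreted as a conversion. A minimal standalone model of the same API shape:

    #include <stdarg.h>
    #include <stdio.h>

    /* models an API whose trailing parameters are a printf-style format,
     * like kobject_init_and_add() */
    static void set_name(const char *fmt, ...)
    {
            char buf[64];
            va_list ap;

            va_start(ap, fmt);
            vsnprintf(buf, sizeof(buf), fmt, ap);
            va_end(ap);
            printf("name: %s\n", buf);
    }

    int main(void)
    {
            const char *zone_name = "dma32 %x %x";  /* data that looks like a format */

            /* set_name(zone_name); -- wrong: the %x directives would pull
             * nonexistent varargs (undefined behavior) */
            set_name("%s", zone_name);      /* right: the name stays plain data */
            return 0;
    }
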
39869diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
39870index 863bef9..cba15cf 100644
39871--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
39872+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
39873@@ -391,9 +391,9 @@ out:
39874 static unsigned long
39875 ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
39876 {
39877- static atomic_t start_pool = ATOMIC_INIT(0);
39878+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
39879 unsigned i;
39880- unsigned pool_offset = atomic_add_return(1, &start_pool);
39881+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
39882 struct ttm_page_pool *pool;
39883 int shrink_pages = sc->nr_to_scan;
39884 unsigned long freed = 0;
39885diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
39886index 97e9d61..bf23c461 100644
39887--- a/drivers/gpu/drm/udl/udl_fb.c
39888+++ b/drivers/gpu/drm/udl/udl_fb.c
39889@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
39890 fb_deferred_io_cleanup(info);
39891 kfree(info->fbdefio);
39892 info->fbdefio = NULL;
39893- info->fbops->fb_mmap = udl_fb_mmap;
39894 }
39895
39896 pr_warn("released /dev/fb%d user=%d count=%d\n",
39897diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
39898index a811ef2..ff99b05 100644
39899--- a/drivers/gpu/drm/via/via_drv.h
39900+++ b/drivers/gpu/drm/via/via_drv.h
39901@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
39902 typedef uint32_t maskarray_t[5];
39903
39904 typedef struct drm_via_irq {
39905- atomic_t irq_received;
39906+ atomic_unchecked_t irq_received;
39907 uint32_t pending_mask;
39908 uint32_t enable_mask;
39909 wait_queue_head_t irq_queue;
39910@@ -75,7 +75,7 @@ typedef struct drm_via_private {
39911 struct timeval last_vblank;
39912 int last_vblank_valid;
39913 unsigned usec_per_vblank;
39914- atomic_t vbl_received;
39915+ atomic_unchecked_t vbl_received;
39916 drm_via_state_t hc_state;
39917 char pci_buf[VIA_PCI_BUF_SIZE];
39918 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
39919diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
39920index ac98964..5dbf512 100644
39921--- a/drivers/gpu/drm/via/via_irq.c
39922+++ b/drivers/gpu/drm/via/via_irq.c
39923@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
39924 if (crtc != 0)
39925 return 0;
39926
39927- return atomic_read(&dev_priv->vbl_received);
39928+ return atomic_read_unchecked(&dev_priv->vbl_received);
39929 }
39930
39931 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
39932@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
39933
39934 status = VIA_READ(VIA_REG_INTERRUPT);
39935 if (status & VIA_IRQ_VBLANK_PENDING) {
39936- atomic_inc(&dev_priv->vbl_received);
39937- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
39938+ atomic_inc_unchecked(&dev_priv->vbl_received);
39939+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
39940 do_gettimeofday(&cur_vblank);
39941 if (dev_priv->last_vblank_valid) {
39942 dev_priv->usec_per_vblank =
39943@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
39944 dev_priv->last_vblank = cur_vblank;
39945 dev_priv->last_vblank_valid = 1;
39946 }
39947- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
39948+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
39949 DRM_DEBUG("US per vblank is: %u\n",
39950 dev_priv->usec_per_vblank);
39951 }
39952@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
39953
39954 for (i = 0; i < dev_priv->num_irqs; ++i) {
39955 if (status & cur_irq->pending_mask) {
39956- atomic_inc(&cur_irq->irq_received);
39957+ atomic_inc_unchecked(&cur_irq->irq_received);
39958 DRM_WAKEUP(&cur_irq->irq_queue);
39959 handled = 1;
39960 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
39961@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
39962 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
39963 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
39964 masks[irq][4]));
39965- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
39966+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
39967 } else {
39968 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
39969 (((cur_irq_sequence =
39970- atomic_read(&cur_irq->irq_received)) -
39971+ atomic_read_unchecked(&cur_irq->irq_received)) -
39972 *sequence) <= (1 << 23)));
39973 }
39974 *sequence = cur_irq_sequence;
39975@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
39976 }
39977
39978 for (i = 0; i < dev_priv->num_irqs; ++i) {
39979- atomic_set(&cur_irq->irq_received, 0);
39980+ atomic_set_unchecked(&cur_irq->irq_received, 0);
39981 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
39982 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
39983 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
39984@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
39985 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
39986 case VIA_IRQ_RELATIVE:
39987 irqwait->request.sequence +=
39988- atomic_read(&cur_irq->irq_received);
39989+ atomic_read_unchecked(&cur_irq->irq_received);
39990 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
39991 case VIA_IRQ_ABSOLUTE:
39992 break;
39993diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
39994index 150ec64..f5165f2 100644
39995--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
39996+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
39997@@ -290,7 +290,7 @@ struct vmw_private {
39998 * Fencing and IRQs.
39999 */
40000
40001- atomic_t marker_seq;
40002+ atomic_unchecked_t marker_seq;
40003 wait_queue_head_t fence_queue;
40004 wait_queue_head_t fifo_queue;
40005 int fence_queue_waiters; /* Protected by hw_mutex */
40006diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
40007index 3eb1486..0a47ee9 100644
40008--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
40009+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
40010@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
40011 (unsigned int) min,
40012 (unsigned int) fifo->capabilities);
40013
40014- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
40015+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
40016 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
40017 vmw_marker_queue_init(&fifo->marker_queue);
40018 return vmw_fifo_send_fence(dev_priv, &dummy);
40019@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
40020 if (reserveable)
40021 iowrite32(bytes, fifo_mem +
40022 SVGA_FIFO_RESERVED);
40023- return fifo_mem + (next_cmd >> 2);
40024+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
40025 } else {
40026 need_bounce = true;
40027 }
40028@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
40029
40030 fm = vmw_fifo_reserve(dev_priv, bytes);
40031 if (unlikely(fm == NULL)) {
40032- *seqno = atomic_read(&dev_priv->marker_seq);
40033+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
40034 ret = -ENOMEM;
40035 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
40036 false, 3*HZ);
40037@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
40038 }
40039
40040 do {
40041- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
40042+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
40043 } while (*seqno == 0);
40044
40045 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
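
The vmwgfx marker_seq conversion illustrates why these counters must keep wrapping semantics: the stale-fence test above, (marker_seq - seqno) > VMW_FENCE_WRAP, relies on unsigned subtraction modulo 2^32, so a saturating atomic would break it; hence atomic_unchecked_t rather than a checked counter. A runnable sketch of wrap-safe sequence comparison:

    #include <stdio.h>
    #include <stdint.h>

    /* wrap-safe "a is ahead of b": the unsigned difference is interpreted
     * as signed, correct whenever the two values are < 2^31 apart */
    static int seq_after(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) > 0;
    }

    int main(void)
    {
            printf("%d\n", seq_after(5, 3));                /* 1 */
            printf("%d\n", seq_after(3, 5));                /* 0 */
            printf("%d\n", seq_after(2, 0xfffffffeu));      /* 1: across the wrap */
            return 0;
    }
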
40046diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
40047index c509d40..3b640c3 100644
40048--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
40049+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
40050@@ -138,7 +138,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
40051 int ret;
40052
40053 num_clips = arg->num_clips;
40054- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
40055+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
40056
40057 if (unlikely(num_clips == 0))
40058 return 0;
40059@@ -222,7 +222,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
40060 int ret;
40061
40062 num_clips = arg->num_clips;
40063- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
40064+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
40065
40066 if (unlikely(num_clips == 0))
40067 return 0;
40068diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
40069index 4640adb..e1384ed 100644
40070--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
40071+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
40072@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
40073 * emitted. Then the fence is stale and signaled.
40074 */
40075
40076- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
40077+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
40078 > VMW_FENCE_WRAP);
40079
40080 return ret;
40081@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
40082
40083 if (fifo_idle)
40084 down_read(&fifo_state->rwsem);
40085- signal_seq = atomic_read(&dev_priv->marker_seq);
40086+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
40087 ret = 0;
40088
40089 for (;;) {
40090diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
40091index 8a8725c2..afed796 100644
40092--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
40093+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
40094@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
40095 while (!vmw_lag_lt(queue, us)) {
40096 spin_lock(&queue->lock);
40097 if (list_empty(&queue->head))
40098- seqno = atomic_read(&dev_priv->marker_seq);
40099+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
40100 else {
40101 marker = list_first_entry(&queue->head,
40102 struct vmw_marker, head);
40103diff --git a/drivers/gpu/host1x/drm/dc.c b/drivers/gpu/host1x/drm/dc.c
40104index b1a05ad..1c9d899 100644
40105--- a/drivers/gpu/host1x/drm/dc.c
40106+++ b/drivers/gpu/host1x/drm/dc.c
40107@@ -1004,7 +1004,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
40108 }
40109
40110 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
40111- dc->debugfs_files[i].data = dc;
40112+ *(void **)&dc->debugfs_files[i].data = dc;
40113
40114 err = drm_debugfs_create_files(dc->debugfs_files,
40115 ARRAY_SIZE(debugfs_files),
40116diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
40117index ec0ae2d..dc0780b 100644
40118--- a/drivers/gpu/vga/vga_switcheroo.c
40119+++ b/drivers/gpu/vga/vga_switcheroo.c
40120@@ -643,7 +643,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
40121
40122 /* this version is for the case where the power switch is separate
40123 to the device being powered down. */
40124-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
40125+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
40126 {
40127 /* copy over all the bus versions */
40128 if (dev->bus && dev->bus->pm) {
40129@@ -688,7 +688,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
40130 return ret;
40131 }
40132
40133-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
40134+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
40135 {
40136 /* copy over all the bus versions */
40137 if (dev->bus && dev->bus->pm) {
40138diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
40139index aedfe50..1dc929b 100644
40140--- a/drivers/hid/hid-core.c
40141+++ b/drivers/hid/hid-core.c
40142@@ -2416,7 +2416,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
40143
40144 int hid_add_device(struct hid_device *hdev)
40145 {
40146- static atomic_t id = ATOMIC_INIT(0);
40147+ static atomic_unchecked_t id = ATOMIC_INIT(0);
40148 int ret;
40149
40150 if (WARN_ON(hdev->status & HID_STAT_ADDED))
40151@@ -2450,7 +2450,7 @@ int hid_add_device(struct hid_device *hdev)
40152 /* XXX hack, any other cleaner solution after the driver core
40153 * is converted to allow more than 20 bytes as the device name? */
40154 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
40155- hdev->vendor, hdev->product, atomic_inc_return(&id));
40156+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
40157
40158 hid_debug_register(hdev, dev_name(&hdev->dev));
40159 ret = device_add(&hdev->dev);
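
This is the single most common conversion in the patch: under PaX's REFCOUNT hardening, plain atomic_t operations are instrumented to trap on signed overflow, and counters where wrapping is harmless (device IDs, statistics, transaction serials) are moved to the atomic_unchecked_t variants, which keep ordinary wrapping arithmetic. A minimal sketch of the split, using a GCC builtin in place of the real per-arch assembly:

typedef struct { int counter; } atomic_unchecked_t;

/* Plain wrapping arithmetic: no overflow trap here, because a wrapped
 * device-ID counter is benign, unlike a wrapped reference count. */
static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
{
	return __sync_add_and_fetch(&v->counter, 1);
}
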
40160diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
40161index c13fb5b..55a3802 100644
40162--- a/drivers/hid/hid-wiimote-debug.c
40163+++ b/drivers/hid/hid-wiimote-debug.c
40164@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
40165 else if (size == 0)
40166 return -EIO;
40167
40168- if (copy_to_user(u, buf, size))
40169+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
40170 return -EFAULT;
40171
40172 *off += size;
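
The added size > sizeof(buf) test is a belt-and-braces guard: buf is a fixed on-stack buffer, and bounding the length before copy_to_user() means a miscomputed size can never leak adjacent stack memory to userspace. A generic sketch of the idiom (format_into is a hypothetical producer, not from the patch):

#include <linux/uaccess.h>

static ssize_t format_into(char *dst, size_t len, loff_t pos);	/* hypothetical */

static ssize_t debugfs_read(char __user *u, loff_t *off)
{
	char buf[16];
	ssize_t size;

	size = format_into(buf, sizeof(buf), *off);
	if (size <= 0)
		return size;
	/* never trust a computed length when the source buffer is on the stack */
	if (size > sizeof(buf) || copy_to_user(u, buf, size))
		return -EFAULT;
	*off += size;
	return size;
}
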
40173diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
40174index cedc6da..2c3da2a 100644
40175--- a/drivers/hid/uhid.c
40176+++ b/drivers/hid/uhid.c
40177@@ -47,7 +47,7 @@ struct uhid_device {
40178 struct mutex report_lock;
40179 wait_queue_head_t report_wait;
40180 atomic_t report_done;
40181- atomic_t report_id;
40182+ atomic_unchecked_t report_id;
40183 struct uhid_event report_buf;
40184 };
40185
40186@@ -163,7 +163,7 @@ static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum,
40187
40188 spin_lock_irqsave(&uhid->qlock, flags);
40189 ev->type = UHID_FEATURE;
40190- ev->u.feature.id = atomic_inc_return(&uhid->report_id);
40191+ ev->u.feature.id = atomic_inc_return_unchecked(&uhid->report_id);
40192 ev->u.feature.rnum = rnum;
40193 ev->u.feature.rtype = report_type;
40194
40195@@ -446,7 +446,7 @@ static int uhid_dev_feature_answer(struct uhid_device *uhid,
40196 spin_lock_irqsave(&uhid->qlock, flags);
40197
40198 /* id for old report; drop it silently */
40199- if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id)
40200+ if (atomic_read_unchecked(&uhid->report_id) != ev->u.feature_answer.id)
40201 goto unlock;
40202 if (atomic_read(&uhid->report_done))
40203 goto unlock;
40204diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
40205index 6de6c98..18319e9 100644
40206--- a/drivers/hv/channel.c
40207+++ b/drivers/hv/channel.c
40208@@ -406,8 +406,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
40209 int ret = 0;
40210 int t;
40211
40212- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
40213- atomic_inc(&vmbus_connection.next_gpadl_handle);
40214+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
40215+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
40216
40217 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
40218 if (ret)
40219diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
40220index 88f4096..e50452e 100644
40221--- a/drivers/hv/hv.c
40222+++ b/drivers/hv/hv.c
40223@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
40224 u64 output_address = (output) ? virt_to_phys(output) : 0;
40225 u32 output_address_hi = output_address >> 32;
40226 u32 output_address_lo = output_address & 0xFFFFFFFF;
40227- void *hypercall_page = hv_context.hypercall_page;
40228+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
40229
40230 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
40231 "=a"(hv_status_lo) : "d" (control_hi),
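
ktva_ktla() comes from PaX KERNEXEC, which keeps the kernel's executable mapping separate from its writable linear view; the indirect call through the stored hypercall-page pointer has to go via the alias that is actually mapped executable. A sketch of the idea only — the real conversion is arch-specific and the offset below is made up:

/* Illustration only: the executable alias sits at a fixed displacement
 * from the writable view of the same pages. */
#define KTEXT_DELTA	0x01000000UL			/* made-up value */
#define ktva_ktla(p)	((void *)((unsigned long)(p) - KTEXT_DELTA))
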
40232diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
40233index 7e17a54..a50a33d 100644
40234--- a/drivers/hv/hv_balloon.c
40235+++ b/drivers/hv/hv_balloon.c
40236@@ -464,7 +464,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
40237
40238 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
40239 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
40240-static atomic_t trans_id = ATOMIC_INIT(0);
40241+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
40242
40243 static int dm_ring_size = (5 * PAGE_SIZE);
40244
40245@@ -886,7 +886,7 @@ static void hot_add_req(struct work_struct *dummy)
40246 pr_info("Memory hot add failed\n");
40247
40248 dm->state = DM_INITIALIZED;
40249- resp.hdr.trans_id = atomic_inc_return(&trans_id);
40250+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
40251 vmbus_sendpacket(dm->dev->channel, &resp,
40252 sizeof(struct dm_hot_add_response),
40253 (unsigned long)NULL,
40254@@ -960,7 +960,7 @@ static void post_status(struct hv_dynmem_device *dm)
40255 memset(&status, 0, sizeof(struct dm_status));
40256 status.hdr.type = DM_STATUS_REPORT;
40257 status.hdr.size = sizeof(struct dm_status);
40258- status.hdr.trans_id = atomic_inc_return(&trans_id);
40259+ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
40260
40261 /*
40262 * The host expects the guest to report free memory.
40263@@ -980,7 +980,7 @@ static void post_status(struct hv_dynmem_device *dm)
40264 * send the status. This can happen if we were interrupted
40265 * after we picked our transaction ID.
40266 */
40267- if (status.hdr.trans_id != atomic_read(&trans_id))
40268+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
40269 return;
40270
40271 vmbus_sendpacket(dm->dev->channel, &status,
40272@@ -1108,7 +1108,7 @@ static void balloon_up(struct work_struct *dummy)
40273 */
40274
40275 do {
40276- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
40277+ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
40278 ret = vmbus_sendpacket(dm_device.dev->channel,
40279 bl_resp,
40280 bl_resp->hdr.size,
40281@@ -1152,7 +1152,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
40282
40283 memset(&resp, 0, sizeof(struct dm_unballoon_response));
40284 resp.hdr.type = DM_UNBALLOON_RESPONSE;
40285- resp.hdr.trans_id = atomic_inc_return(&trans_id);
40286+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
40287 resp.hdr.size = sizeof(struct dm_unballoon_response);
40288
40289 vmbus_sendpacket(dm_device.dev->channel, &resp,
40290@@ -1215,7 +1215,7 @@ static void version_resp(struct hv_dynmem_device *dm,
40291 memset(&version_req, 0, sizeof(struct dm_version_request));
40292 version_req.hdr.type = DM_VERSION_REQUEST;
40293 version_req.hdr.size = sizeof(struct dm_version_request);
40294- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
40295+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
40296 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
40297 version_req.is_last_attempt = 1;
40298
40299@@ -1385,7 +1385,7 @@ static int balloon_probe(struct hv_device *dev,
40300 memset(&version_req, 0, sizeof(struct dm_version_request));
40301 version_req.hdr.type = DM_VERSION_REQUEST;
40302 version_req.hdr.size = sizeof(struct dm_version_request);
40303- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
40304+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
40305 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
40306 version_req.is_last_attempt = 0;
40307
40308@@ -1416,7 +1416,7 @@ static int balloon_probe(struct hv_device *dev,
40309 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
40310 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
40311 cap_msg.hdr.size = sizeof(struct dm_capabilities);
40312- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
40313+ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
40314
40315 cap_msg.caps.cap_bits.balloon = 1;
40316 cap_msg.caps.cap_bits.hot_add = 1;
40317diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
40318index d84918f..7f38f9f 100644
40319--- a/drivers/hv/hyperv_vmbus.h
40320+++ b/drivers/hv/hyperv_vmbus.h
40321@@ -595,7 +595,7 @@ enum vmbus_connect_state {
40322 struct vmbus_connection {
40323 enum vmbus_connect_state conn_state;
40324
40325- atomic_t next_gpadl_handle;
40326+ atomic_unchecked_t next_gpadl_handle;
40327
40328 /*
40329 * Represents channel interrupts. Each bit position represents a
40330diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
40331index f9fe46f..356b119 100644
40332--- a/drivers/hv/vmbus_drv.c
40333+++ b/drivers/hv/vmbus_drv.c
40334@@ -672,10 +672,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
40335 {
40336 int ret = 0;
40337
40338- static atomic_t device_num = ATOMIC_INIT(0);
40339+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
40340
40341 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
40342- atomic_inc_return(&device_num));
40343+ atomic_inc_return_unchecked(&device_num));
40344
40345 child_device_obj->device.bus = &hv_bus;
40346 child_device_obj->device.parent = &hv_acpi_dev->dev;
40347diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
40348index a9e3d01..9dd246e 100644
40349--- a/drivers/hwmon/acpi_power_meter.c
40350+++ b/drivers/hwmon/acpi_power_meter.c
40351@@ -117,7 +117,7 @@ struct sensor_template {
40352 struct device_attribute *devattr,
40353 const char *buf, size_t count);
40354 int index;
40355-};
40356+} __do_const;
40357
40358 /* Averaging interval */
40359 static int update_avg_interval(struct acpi_power_meter_resource *resource)
40360@@ -629,7 +629,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
40361 struct sensor_template *attrs)
40362 {
40363 struct device *dev = &resource->acpi_dev->dev;
40364- struct sensor_device_attribute *sensors =
40365+ sensor_device_attribute_no_const *sensors =
40366 &resource->sensors[resource->num_sensors];
40367 int res = 0;
40368
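
This pair of changes shows both halves of the constify-plugin contract: __do_const asks the GCC plugin to force instances of a mostly-function-pointer structure into read-only memory, while the *_no_const typedef is the escape hatch for the few instances that genuinely must be written at runtime (here, sensor attributes built per device). A schematic of the two annotations, assuming the attribute names used throughout this patch:

#define __do_const	__attribute__((do_const))	/* consumed by the plugin */
#define __no_const	__attribute__((no_const))

struct sensor_template {
	ssize_t (*show)(void *dev, char *buf);
	int index;
} __do_const;				/* every instance becomes const */

typedef struct sensor_template __no_const sensor_template_no_const;
					/* writable variant for runtime init */
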
40369diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
40370index 3288f13..71cfb4e 100644
40371--- a/drivers/hwmon/applesmc.c
40372+++ b/drivers/hwmon/applesmc.c
40373@@ -1106,7 +1106,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
40374 {
40375 struct applesmc_node_group *grp;
40376 struct applesmc_dev_attr *node;
40377- struct attribute *attr;
40378+ attribute_no_const *attr;
40379 int ret, i;
40380
40381 for (grp = groups; grp->format; grp++) {
40382diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
40383index b25c643..a13460d 100644
40384--- a/drivers/hwmon/asus_atk0110.c
40385+++ b/drivers/hwmon/asus_atk0110.c
40386@@ -152,10 +152,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
40387 struct atk_sensor_data {
40388 struct list_head list;
40389 struct atk_data *data;
40390- struct device_attribute label_attr;
40391- struct device_attribute input_attr;
40392- struct device_attribute limit1_attr;
40393- struct device_attribute limit2_attr;
40394+ device_attribute_no_const label_attr;
40395+ device_attribute_no_const input_attr;
40396+ device_attribute_no_const limit1_attr;
40397+ device_attribute_no_const limit2_attr;
40398 char label_attr_name[ATTR_NAME_SIZE];
40399 char input_attr_name[ATTR_NAME_SIZE];
40400 char limit1_attr_name[ATTR_NAME_SIZE];
40401@@ -275,7 +275,7 @@ static ssize_t atk_name_show(struct device *dev,
40402 static struct device_attribute atk_name_attr =
40403 __ATTR(name, 0444, atk_name_show, NULL);
40404
40405-static void atk_init_attribute(struct device_attribute *attr, char *name,
40406+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
40407 sysfs_show_func show)
40408 {
40409 sysfs_attr_init(&attr->attr);
40410diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
40411index 78be661..4dd032f 100644
40412--- a/drivers/hwmon/coretemp.c
40413+++ b/drivers/hwmon/coretemp.c
40414@@ -797,7 +797,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
40415 return NOTIFY_OK;
40416 }
40417
40418-static struct notifier_block coretemp_cpu_notifier __refdata = {
40419+static struct notifier_block coretemp_cpu_notifier = {
40420 .notifier_call = coretemp_cpu_callback,
40421 };
40422
40423diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
40424index 632f1dc..57e6a58 100644
40425--- a/drivers/hwmon/ibmaem.c
40426+++ b/drivers/hwmon/ibmaem.c
40427@@ -926,7 +926,7 @@ static int aem_register_sensors(struct aem_data *data,
40428 struct aem_rw_sensor_template *rw)
40429 {
40430 struct device *dev = &data->pdev->dev;
40431- struct sensor_device_attribute *sensors = data->sensors;
40432+ sensor_device_attribute_no_const *sensors = data->sensors;
40433 int err;
40434
40435 /* Set up read-only sensors */
40436diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
40437index 708081b..fe2d4ab 100644
40438--- a/drivers/hwmon/iio_hwmon.c
40439+++ b/drivers/hwmon/iio_hwmon.c
40440@@ -73,7 +73,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
40441 {
40442 struct device *dev = &pdev->dev;
40443 struct iio_hwmon_state *st;
40444- struct sensor_device_attribute *a;
40445+ sensor_device_attribute_no_const *a;
40446 int ret, i;
40447 int in_i = 1, temp_i = 1, curr_i = 1;
40448 enum iio_chan_type type;
40449diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
40450index 6eb03ce..bea7e3e 100644
40451--- a/drivers/hwmon/nct6775.c
40452+++ b/drivers/hwmon/nct6775.c
40453@@ -936,10 +936,10 @@ static struct attribute_group *
40454 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
40455 int repeat)
40456 {
40457- struct attribute_group *group;
40458+ attribute_group_no_const *group;
40459 struct sensor_device_attr_u *su;
40460- struct sensor_device_attribute *a;
40461- struct sensor_device_attribute_2 *a2;
40462+ sensor_device_attribute_no_const *a;
40463+ sensor_device_attribute_2_no_const *a2;
40464 struct attribute **attrs;
40465 struct sensor_device_template **t;
40466 int err, i, j, count;
40467diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
40468index 9319fcf..189ff45 100644
40469--- a/drivers/hwmon/pmbus/pmbus_core.c
40470+++ b/drivers/hwmon/pmbus/pmbus_core.c
40471@@ -781,7 +781,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
40472 return 0;
40473 }
40474
40475-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
40476+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
40477 const char *name,
40478 umode_t mode,
40479 ssize_t (*show)(struct device *dev,
40480@@ -798,7 +798,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
40481 dev_attr->store = store;
40482 }
40483
40484-static void pmbus_attr_init(struct sensor_device_attribute *a,
40485+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
40486 const char *name,
40487 umode_t mode,
40488 ssize_t (*show)(struct device *dev,
40489@@ -820,7 +820,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
40490 u16 reg, u8 mask)
40491 {
40492 struct pmbus_boolean *boolean;
40493- struct sensor_device_attribute *a;
40494+ sensor_device_attribute_no_const *a;
40495
40496 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
40497 if (!boolean)
40498@@ -845,7 +845,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
40499 bool update, bool readonly)
40500 {
40501 struct pmbus_sensor *sensor;
40502- struct device_attribute *a;
40503+ device_attribute_no_const *a;
40504
40505 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
40506 if (!sensor)
40507@@ -876,7 +876,7 @@ static int pmbus_add_label(struct pmbus_data *data,
40508 const char *lstring, int index)
40509 {
40510 struct pmbus_label *label;
40511- struct device_attribute *a;
40512+ device_attribute_no_const *a;
40513
40514 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
40515 if (!label)
40516diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
40517index 97cd45a..ac54d8b 100644
40518--- a/drivers/hwmon/sht15.c
40519+++ b/drivers/hwmon/sht15.c
40520@@ -169,7 +169,7 @@ struct sht15_data {
40521 int supply_uv;
40522 bool supply_uv_valid;
40523 struct work_struct update_supply_work;
40524- atomic_t interrupt_handled;
40525+ atomic_unchecked_t interrupt_handled;
40526 };
40527
40528 /**
40529@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
40530 ret = gpio_direction_input(data->pdata->gpio_data);
40531 if (ret)
40532 return ret;
40533- atomic_set(&data->interrupt_handled, 0);
40534+ atomic_set_unchecked(&data->interrupt_handled, 0);
40535
40536 enable_irq(gpio_to_irq(data->pdata->gpio_data));
40537 if (gpio_get_value(data->pdata->gpio_data) == 0) {
40538 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
40539 /* Only relevant if the interrupt hasn't occurred. */
40540- if (!atomic_read(&data->interrupt_handled))
40541+ if (!atomic_read_unchecked(&data->interrupt_handled))
40542 schedule_work(&data->read_work);
40543 }
40544 ret = wait_event_timeout(data->wait_queue,
40545@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
40546
40547 /* First disable the interrupt */
40548 disable_irq_nosync(irq);
40549- atomic_inc(&data->interrupt_handled);
40550+ atomic_inc_unchecked(&data->interrupt_handled);
40551 /* Then schedule a reading work struct */
40552 if (data->state != SHT15_READING_NOTHING)
40553 schedule_work(&data->read_work);
40554@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
40555 * If not, then start the interrupt again - care here as could
40556 * have gone low in meantime so verify it hasn't!
40557 */
40558- atomic_set(&data->interrupt_handled, 0);
40559+ atomic_set_unchecked(&data->interrupt_handled, 0);
40560 enable_irq(gpio_to_irq(data->pdata->gpio_data));
40561 /* If still not occurred or another handler was scheduled */
40562 if (gpio_get_value(data->pdata->gpio_data)
40563- || atomic_read(&data->interrupt_handled))
40564+ || atomic_read_unchecked(&data->interrupt_handled))
40565 return;
40566 }
40567
40568diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
40569index 38944e9..ae9e5ed 100644
40570--- a/drivers/hwmon/via-cputemp.c
40571+++ b/drivers/hwmon/via-cputemp.c
40572@@ -296,7 +296,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
40573 return NOTIFY_OK;
40574 }
40575
40576-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
40577+static struct notifier_block via_cputemp_cpu_notifier = {
40578 .notifier_call = via_cputemp_cpu_callback,
40579 };
40580
40581diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
40582index 07f01ac..d79ad3d 100644
40583--- a/drivers/i2c/busses/i2c-amd756-s4882.c
40584+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
40585@@ -43,7 +43,7 @@
40586 extern struct i2c_adapter amd756_smbus;
40587
40588 static struct i2c_adapter *s4882_adapter;
40589-static struct i2c_algorithm *s4882_algo;
40590+static i2c_algorithm_no_const *s4882_algo;
40591
40592 /* Wrapper access functions for multiplexed SMBus */
40593 static DEFINE_MUTEX(amd756_lock);
40594diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
40595index dae3ddf..26e21d1 100644
40596--- a/drivers/i2c/busses/i2c-diolan-u2c.c
40597+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
40598@@ -99,7 +99,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
40599 /* usb layer */
40600
40601 /* Send command to device, and get response. */
40602-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
40603+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
40604 {
40605 int ret = 0;
40606 int actual;
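
__intentional_overflow(-1) is an annotation consumed by the size_overflow GCC plugin: functions whose arithmetic is allowed to wrap, or whose return value is deliberately used in overflowing expressions, are whitelisted so the plugin does not instrument them with runtime overflow checks. To a plain compiler the marker is inert; a plausible definition, sketched:

#ifdef SIZE_OVERFLOW_PLUGIN
# define __intentional_overflow(...) \
	__attribute__((intentional_overflow(__VA_ARGS__)))
#else
# define __intentional_overflow(...)	/* no-op without the plugin */
#endif
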
40607diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
40608index 2ca268d..c6acbdf 100644
40609--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
40610+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
40611@@ -41,7 +41,7 @@
40612 extern struct i2c_adapter *nforce2_smbus;
40613
40614 static struct i2c_adapter *s4985_adapter;
40615-static struct i2c_algorithm *s4985_algo;
40616+static i2c_algorithm_no_const *s4985_algo;
40617
40618 /* Wrapper access functions for multiplexed SMBus */
40619 static DEFINE_MUTEX(nforce2_lock);
40620diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
40621index c3ccdea..5b3dc1a 100644
40622--- a/drivers/i2c/i2c-dev.c
40623+++ b/drivers/i2c/i2c-dev.c
40624@@ -271,7 +271,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
40625 break;
40626 }
40627
40628- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
40629+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
40630 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
40631 if (IS_ERR(rdwr_pa[i].buf)) {
40632 res = PTR_ERR(rdwr_pa[i].buf);
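
__force_user is the deliberate-cast counterpart of the __user annotation seen earlier: where a kernel-typed value is intentionally reinterpreted as a userspace pointer, the __force marker tells sparse the address-space change is on purpose rather than a bug. Roughly, and with a hypothetical helper:

#define __force_user	__force __user

/* hypothetical: the buf field holds a user pointer typed as kernel memory;
 * the forced cast records that the reinterpretation is deliberate */
static u8 __user *user_view(u8 *buf)
{
	return (u8 __force_user *)buf;
}
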
40633diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
40634index 0b510ba..4fbb5085 100644
40635--- a/drivers/ide/ide-cd.c
40636+++ b/drivers/ide/ide-cd.c
40637@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
40638 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
40639 if ((unsigned long)buf & alignment
40640 || blk_rq_bytes(rq) & q->dma_pad_mask
40641- || object_is_on_stack(buf))
40642+ || object_starts_on_stack(buf))
40643 drive->dma = 0;
40644 }
40645 }
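
object_starts_on_stack() is grsecurity's replacement predicate: rather than asking whether the pointer lands anywhere in the current stack, it checks that the object begins on the stack, which is the question this DMA-suitability test actually needs. A sketch close to the hardened definition:

#include <linux/sched.h>

static inline int object_starts_on_stack(const void *obj)
{
	const void *stack = task_stack_page(current);

	return (obj >= stack) && (obj < stack + THREAD_SIZE);
}
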
40646diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
40647index f95c697..0a1b05c 100644
40648--- a/drivers/iio/industrialio-core.c
40649+++ b/drivers/iio/industrialio-core.c
40650@@ -506,7 +506,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
40651 }
40652
40653 static
40654-int __iio_device_attr_init(struct device_attribute *dev_attr,
40655+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
40656 const char *postfix,
40657 struct iio_chan_spec const *chan,
40658 ssize_t (*readfunc)(struct device *dev,
40659diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
40660index 784b97c..c9ceadf 100644
40661--- a/drivers/infiniband/core/cm.c
40662+++ b/drivers/infiniband/core/cm.c
40663@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
40664
40665 struct cm_counter_group {
40666 struct kobject obj;
40667- atomic_long_t counter[CM_ATTR_COUNT];
40668+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
40669 };
40670
40671 struct cm_counter_attribute {
40672@@ -1395,7 +1395,7 @@ static void cm_dup_req_handler(struct cm_work *work,
40673 struct ib_mad_send_buf *msg = NULL;
40674 int ret;
40675
40676- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40677+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40678 counter[CM_REQ_COUNTER]);
40679
40680 /* Quick state check to discard duplicate REQs. */
40681@@ -1779,7 +1779,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
40682 if (!cm_id_priv)
40683 return;
40684
40685- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40686+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40687 counter[CM_REP_COUNTER]);
40688 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
40689 if (ret)
40690@@ -1946,7 +1946,7 @@ static int cm_rtu_handler(struct cm_work *work)
40691 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
40692 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
40693 spin_unlock_irq(&cm_id_priv->lock);
40694- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40695+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40696 counter[CM_RTU_COUNTER]);
40697 goto out;
40698 }
40699@@ -2129,7 +2129,7 @@ static int cm_dreq_handler(struct cm_work *work)
40700 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
40701 dreq_msg->local_comm_id);
40702 if (!cm_id_priv) {
40703- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40704+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40705 counter[CM_DREQ_COUNTER]);
40706 cm_issue_drep(work->port, work->mad_recv_wc);
40707 return -EINVAL;
40708@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_work *work)
40709 case IB_CM_MRA_REP_RCVD:
40710 break;
40711 case IB_CM_TIMEWAIT:
40712- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40713+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40714 counter[CM_DREQ_COUNTER]);
40715 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
40716 goto unlock;
40717@@ -2168,7 +2168,7 @@ static int cm_dreq_handler(struct cm_work *work)
40718 cm_free_msg(msg);
40719 goto deref;
40720 case IB_CM_DREQ_RCVD:
40721- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40722+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40723 counter[CM_DREQ_COUNTER]);
40724 goto unlock;
40725 default:
40726@@ -2535,7 +2535,7 @@ static int cm_mra_handler(struct cm_work *work)
40727 ib_modify_mad(cm_id_priv->av.port->mad_agent,
40728 cm_id_priv->msg, timeout)) {
40729 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
40730- atomic_long_inc(&work->port->
40731+ atomic_long_inc_unchecked(&work->port->
40732 counter_group[CM_RECV_DUPLICATES].
40733 counter[CM_MRA_COUNTER]);
40734 goto out;
40735@@ -2544,7 +2544,7 @@ static int cm_mra_handler(struct cm_work *work)
40736 break;
40737 case IB_CM_MRA_REQ_RCVD:
40738 case IB_CM_MRA_REP_RCVD:
40739- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40740+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40741 counter[CM_MRA_COUNTER]);
40742 /* fall through */
40743 default:
40744@@ -2706,7 +2706,7 @@ static int cm_lap_handler(struct cm_work *work)
40745 case IB_CM_LAP_IDLE:
40746 break;
40747 case IB_CM_MRA_LAP_SENT:
40748- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40749+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40750 counter[CM_LAP_COUNTER]);
40751 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
40752 goto unlock;
40753@@ -2722,7 +2722,7 @@ static int cm_lap_handler(struct cm_work *work)
40754 cm_free_msg(msg);
40755 goto deref;
40756 case IB_CM_LAP_RCVD:
40757- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40758+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40759 counter[CM_LAP_COUNTER]);
40760 goto unlock;
40761 default:
40762@@ -3006,7 +3006,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
40763 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
40764 if (cur_cm_id_priv) {
40765 spin_unlock_irq(&cm.lock);
40766- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40767+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40768 counter[CM_SIDR_REQ_COUNTER]);
40769 goto out; /* Duplicate message. */
40770 }
40771@@ -3218,10 +3218,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
40772 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
40773 msg->retries = 1;
40774
40775- atomic_long_add(1 + msg->retries,
40776+ atomic_long_add_unchecked(1 + msg->retries,
40777 &port->counter_group[CM_XMIT].counter[attr_index]);
40778 if (msg->retries)
40779- atomic_long_add(msg->retries,
40780+ atomic_long_add_unchecked(msg->retries,
40781 &port->counter_group[CM_XMIT_RETRIES].
40782 counter[attr_index]);
40783
40784@@ -3431,7 +3431,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
40785 }
40786
40787 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
40788- atomic_long_inc(&port->counter_group[CM_RECV].
40789+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
40790 counter[attr_id - CM_ATTR_ID_OFFSET]);
40791
40792 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
40793@@ -3636,7 +3636,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
40794 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
40795
40796 return sprintf(buf, "%ld\n",
40797- atomic_long_read(&group->counter[cm_attr->index]));
40798+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
40799 }
40800
40801 static const struct sysfs_ops cm_counter_ops = {
40802diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
40803index 9f5ad7c..588cd84 100644
40804--- a/drivers/infiniband/core/fmr_pool.c
40805+++ b/drivers/infiniband/core/fmr_pool.c
40806@@ -98,8 +98,8 @@ struct ib_fmr_pool {
40807
40808 struct task_struct *thread;
40809
40810- atomic_t req_ser;
40811- atomic_t flush_ser;
40812+ atomic_unchecked_t req_ser;
40813+ atomic_unchecked_t flush_ser;
40814
40815 wait_queue_head_t force_wait;
40816 };
40817@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
40818 struct ib_fmr_pool *pool = pool_ptr;
40819
40820 do {
40821- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
40822+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
40823 ib_fmr_batch_release(pool);
40824
40825- atomic_inc(&pool->flush_ser);
40826+ atomic_inc_unchecked(&pool->flush_ser);
40827 wake_up_interruptible(&pool->force_wait);
40828
40829 if (pool->flush_function)
40830@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
40831 }
40832
40833 set_current_state(TASK_INTERRUPTIBLE);
40834- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
40835+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
40836 !kthread_should_stop())
40837 schedule();
40838 __set_current_state(TASK_RUNNING);
40839@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
40840 pool->dirty_watermark = params->dirty_watermark;
40841 pool->dirty_len = 0;
40842 spin_lock_init(&pool->pool_lock);
40843- atomic_set(&pool->req_ser, 0);
40844- atomic_set(&pool->flush_ser, 0);
40845+ atomic_set_unchecked(&pool->req_ser, 0);
40846+ atomic_set_unchecked(&pool->flush_ser, 0);
40847 init_waitqueue_head(&pool->force_wait);
40848
40849 pool->thread = kthread_run(ib_fmr_cleanup_thread,
40850@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
40851 }
40852 spin_unlock_irq(&pool->pool_lock);
40853
40854- serial = atomic_inc_return(&pool->req_ser);
40855+ serial = atomic_inc_return_unchecked(&pool->req_ser);
40856 wake_up_process(pool->thread);
40857
40858 if (wait_event_interruptible(pool->force_wait,
40859- atomic_read(&pool->flush_ser) - serial >= 0))
40860+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
40861 return -EINTR;
40862
40863 return 0;
40864@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
40865 } else {
40866 list_add_tail(&fmr->list, &pool->dirty_list);
40867 if (++pool->dirty_len >= pool->dirty_watermark) {
40868- atomic_inc(&pool->req_ser);
40869+ atomic_inc_unchecked(&pool->req_ser);
40870 wake_up_process(pool->thread);
40871 }
40872 }
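
The fmr_pool counters are a textbook case for the unchecked variants: req_ser and flush_ser are free-running serial numbers ordered by subtraction (flush_ser - req_ser < 0), a scheme that depends on well-defined wraparound, so PaX's overflow trapping must stay out of the way. The comparison idiom, sketched:

/* serial-number ordering that stays correct across wraparound:
 * "a precedes b" iff the (deliberately wrapping) signed difference is
 * negative; the kernel builds with -fno-strict-overflow, so this is
 * well defined */
static inline int serial_before(int a, int b)
{
	return a - b < 0;
}
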
40873diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
40874index 4cb8eb2..146bf60 100644
40875--- a/drivers/infiniband/hw/cxgb4/mem.c
40876+++ b/drivers/infiniband/hw/cxgb4/mem.c
40877@@ -249,7 +249,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
40878 int err;
40879 struct fw_ri_tpte tpt;
40880 u32 stag_idx;
40881- static atomic_t key;
40882+ static atomic_unchecked_t key;
40883
40884 if (c4iw_fatal_error(rdev))
40885 return -EIO;
40886@@ -266,7 +266,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
40887 if (rdev->stats.stag.cur > rdev->stats.stag.max)
40888 rdev->stats.stag.max = rdev->stats.stag.cur;
40889 mutex_unlock(&rdev->stats.lock);
40890- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
40891+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
40892 }
40893 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
40894 __func__, stag_state, type, pdid, stag_idx);
40895diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
40896index 79b3dbc..96e5fcc 100644
40897--- a/drivers/infiniband/hw/ipath/ipath_rc.c
40898+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
40899@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
40900 struct ib_atomic_eth *ateth;
40901 struct ipath_ack_entry *e;
40902 u64 vaddr;
40903- atomic64_t *maddr;
40904+ atomic64_unchecked_t *maddr;
40905 u64 sdata;
40906 u32 rkey;
40907 u8 next;
40908@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
40909 IB_ACCESS_REMOTE_ATOMIC)))
40910 goto nack_acc_unlck;
40911 /* Perform atomic OP and save result. */
40912- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
40913+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
40914 sdata = be64_to_cpu(ateth->swap_data);
40915 e = &qp->s_ack_queue[qp->r_head_ack_queue];
40916 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
40917- (u64) atomic64_add_return(sdata, maddr) - sdata :
40918+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
40919 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
40920 be64_to_cpu(ateth->compare_data),
40921 sdata);
40922diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
40923index 1f95bba..9530f87 100644
40924--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
40925+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
40926@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
40927 unsigned long flags;
40928 struct ib_wc wc;
40929 u64 sdata;
40930- atomic64_t *maddr;
40931+ atomic64_unchecked_t *maddr;
40932 enum ib_wc_status send_status;
40933
40934 /*
40935@@ -382,11 +382,11 @@ again:
40936 IB_ACCESS_REMOTE_ATOMIC)))
40937 goto acc_err;
40938 /* Perform atomic OP and save result. */
40939- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
40940+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
40941 sdata = wqe->wr.wr.atomic.compare_add;
40942 *(u64 *) sqp->s_sge.sge.vaddr =
40943 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
40944- (u64) atomic64_add_return(sdata, maddr) - sdata :
40945+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
40946 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
40947 sdata, wqe->wr.wr.atomic.swap);
40948 goto send_comp;
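
The ipath conversions illustrate a different justification for _unchecked than the counter cases: an RDMA fetch-and-add applies arbitrary remote-supplied operands to memory, and the verbs semantics require plain modular 64-bit arithmetic, so an overflow trap would break the protocol rather than harden anything. Schematically, assuming a builtin in place of the arch implementation:

typedef struct { long long counter; } atomic64_unchecked_t;

/* RDMA FETCH_ADD: wraps modulo 2^64 like the wire semantics, and
 * returns the prior value (new value minus the addend, as the patch
 * computes it) */
static inline u64 fetch_add64(atomic64_unchecked_t *m, u64 add)
{
	return (u64)__sync_add_and_fetch(&m->counter, (long long)add) - add;
}
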
40949diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
40950index f2a3f48..673ec79 100644
40951--- a/drivers/infiniband/hw/mlx4/mad.c
40952+++ b/drivers/infiniband/hw/mlx4/mad.c
40953@@ -98,7 +98,7 @@ __be64 mlx4_ib_gen_node_guid(void)
40954
40955 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
40956 {
40957- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
40958+ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
40959 cpu_to_be64(0xff00000000000000LL);
40960 }
40961
40962diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
40963index 25b2cdf..099ff97 100644
40964--- a/drivers/infiniband/hw/mlx4/mcg.c
40965+++ b/drivers/infiniband/hw/mlx4/mcg.c
40966@@ -1040,7 +1040,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
40967 {
40968 char name[20];
40969
40970- atomic_set(&ctx->tid, 0);
40971+ atomic_set_unchecked(&ctx->tid, 0);
40972 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
40973 ctx->mcg_wq = create_singlethread_workqueue(name);
40974 if (!ctx->mcg_wq)
40975diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
40976index 036b663..c9a8c73 100644
40977--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
40978+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
40979@@ -404,7 +404,7 @@ struct mlx4_ib_demux_ctx {
40980 struct list_head mcg_mgid0_list;
40981 struct workqueue_struct *mcg_wq;
40982 struct mlx4_ib_demux_pv_ctx **tun;
40983- atomic_t tid;
40984+ atomic_unchecked_t tid;
40985 int flushing; /* flushing the work queue */
40986 };
40987
40988diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
40989index 9d3e5c1..6f166df 100644
40990--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
40991+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
40992@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
40993 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
40994 }
40995
40996-int mthca_QUERY_FW(struct mthca_dev *dev)
40997+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
40998 {
40999 struct mthca_mailbox *mailbox;
41000 u32 *outbox;
41001@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
41002 CMD_TIME_CLASS_B);
41003 }
41004
41005-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
41006+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
41007 int num_mtt)
41008 {
41009 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
41010@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
41011 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
41012 }
41013
41014-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
41015+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
41016 int eq_num)
41017 {
41018 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
41019@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
41020 CMD_TIME_CLASS_B);
41021 }
41022
41023-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
41024+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
41025 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
41026 void *in_mad, void *response_mad)
41027 {
41028diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
41029index 87897b9..7e79542 100644
41030--- a/drivers/infiniband/hw/mthca/mthca_main.c
41031+++ b/drivers/infiniband/hw/mthca/mthca_main.c
41032@@ -692,7 +692,7 @@ err_close:
41033 return err;
41034 }
41035
41036-static int mthca_setup_hca(struct mthca_dev *dev)
41037+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
41038 {
41039 int err;
41040
41041diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
41042index ed9a989..6aa5dc2 100644
41043--- a/drivers/infiniband/hw/mthca/mthca_mr.c
41044+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
41045@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
41046 * through the bitmaps)
41047 */
41048
41049-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
41050+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
41051 {
41052 int o;
41053 int m;
41054@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
41055 return key;
41056 }
41057
41058-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
41059+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
41060 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
41061 {
41062 struct mthca_mailbox *mailbox;
41063@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
41064 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
41065 }
41066
41067-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
41068+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
41069 u64 *buffer_list, int buffer_size_shift,
41070 int list_len, u64 iova, u64 total_size,
41071 u32 access, struct mthca_mr *mr)
41072diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
41073index 5b71d43..35a9e14 100644
41074--- a/drivers/infiniband/hw/mthca/mthca_provider.c
41075+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
41076@@ -763,7 +763,7 @@ unlock:
41077 return 0;
41078 }
41079
41080-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
41081+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
41082 {
41083 struct mthca_dev *dev = to_mdev(ibcq->device);
41084 struct mthca_cq *cq = to_mcq(ibcq);
41085diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
41086index 4291410..d2ab1fb 100644
41087--- a/drivers/infiniband/hw/nes/nes.c
41088+++ b/drivers/infiniband/hw/nes/nes.c
41089@@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
41090 LIST_HEAD(nes_adapter_list);
41091 static LIST_HEAD(nes_dev_list);
41092
41093-atomic_t qps_destroyed;
41094+atomic_unchecked_t qps_destroyed;
41095
41096 static unsigned int ee_flsh_adapter;
41097 static unsigned int sysfs_nonidx_addr;
41098@@ -269,7 +269,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
41099 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
41100 struct nes_adapter *nesadapter = nesdev->nesadapter;
41101
41102- atomic_inc(&qps_destroyed);
41103+ atomic_inc_unchecked(&qps_destroyed);
41104
41105 /* Free the control structures */
41106
41107diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
41108index 33cc589..3bd6538 100644
41109--- a/drivers/infiniband/hw/nes/nes.h
41110+++ b/drivers/infiniband/hw/nes/nes.h
41111@@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
41112 extern unsigned int wqm_quanta;
41113 extern struct list_head nes_adapter_list;
41114
41115-extern atomic_t cm_connects;
41116-extern atomic_t cm_accepts;
41117-extern atomic_t cm_disconnects;
41118-extern atomic_t cm_closes;
41119-extern atomic_t cm_connecteds;
41120-extern atomic_t cm_connect_reqs;
41121-extern atomic_t cm_rejects;
41122-extern atomic_t mod_qp_timouts;
41123-extern atomic_t qps_created;
41124-extern atomic_t qps_destroyed;
41125-extern atomic_t sw_qps_destroyed;
41126+extern atomic_unchecked_t cm_connects;
41127+extern atomic_unchecked_t cm_accepts;
41128+extern atomic_unchecked_t cm_disconnects;
41129+extern atomic_unchecked_t cm_closes;
41130+extern atomic_unchecked_t cm_connecteds;
41131+extern atomic_unchecked_t cm_connect_reqs;
41132+extern atomic_unchecked_t cm_rejects;
41133+extern atomic_unchecked_t mod_qp_timouts;
41134+extern atomic_unchecked_t qps_created;
41135+extern atomic_unchecked_t qps_destroyed;
41136+extern atomic_unchecked_t sw_qps_destroyed;
41137 extern u32 mh_detected;
41138 extern u32 mh_pauses_sent;
41139 extern u32 cm_packets_sent;
41140@@ -196,16 +196,16 @@ extern u32 cm_packets_created;
41141 extern u32 cm_packets_received;
41142 extern u32 cm_packets_dropped;
41143 extern u32 cm_packets_retrans;
41144-extern atomic_t cm_listens_created;
41145-extern atomic_t cm_listens_destroyed;
41146+extern atomic_unchecked_t cm_listens_created;
41147+extern atomic_unchecked_t cm_listens_destroyed;
41148 extern u32 cm_backlog_drops;
41149-extern atomic_t cm_loopbacks;
41150-extern atomic_t cm_nodes_created;
41151-extern atomic_t cm_nodes_destroyed;
41152-extern atomic_t cm_accel_dropped_pkts;
41153-extern atomic_t cm_resets_recvd;
41154-extern atomic_t pau_qps_created;
41155-extern atomic_t pau_qps_destroyed;
41156+extern atomic_unchecked_t cm_loopbacks;
41157+extern atomic_unchecked_t cm_nodes_created;
41158+extern atomic_unchecked_t cm_nodes_destroyed;
41159+extern atomic_unchecked_t cm_accel_dropped_pkts;
41160+extern atomic_unchecked_t cm_resets_recvd;
41161+extern atomic_unchecked_t pau_qps_created;
41162+extern atomic_unchecked_t pau_qps_destroyed;
41163
41164 extern u32 int_mod_timer_init;
41165 extern u32 int_mod_cq_depth_256;
41166diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
41167index 6b29249..57081dd 100644
41168--- a/drivers/infiniband/hw/nes/nes_cm.c
41169+++ b/drivers/infiniband/hw/nes/nes_cm.c
41170@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
41171 u32 cm_packets_retrans;
41172 u32 cm_packets_created;
41173 u32 cm_packets_received;
41174-atomic_t cm_listens_created;
41175-atomic_t cm_listens_destroyed;
41176+atomic_unchecked_t cm_listens_created;
41177+atomic_unchecked_t cm_listens_destroyed;
41178 u32 cm_backlog_drops;
41179-atomic_t cm_loopbacks;
41180-atomic_t cm_nodes_created;
41181-atomic_t cm_nodes_destroyed;
41182-atomic_t cm_accel_dropped_pkts;
41183-atomic_t cm_resets_recvd;
41184+atomic_unchecked_t cm_loopbacks;
41185+atomic_unchecked_t cm_nodes_created;
41186+atomic_unchecked_t cm_nodes_destroyed;
41187+atomic_unchecked_t cm_accel_dropped_pkts;
41188+atomic_unchecked_t cm_resets_recvd;
41189
41190 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
41191 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
41192@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
41193
41194 static struct nes_cm_core *g_cm_core;
41195
41196-atomic_t cm_connects;
41197-atomic_t cm_accepts;
41198-atomic_t cm_disconnects;
41199-atomic_t cm_closes;
41200-atomic_t cm_connecteds;
41201-atomic_t cm_connect_reqs;
41202-atomic_t cm_rejects;
41203+atomic_unchecked_t cm_connects;
41204+atomic_unchecked_t cm_accepts;
41205+atomic_unchecked_t cm_disconnects;
41206+atomic_unchecked_t cm_closes;
41207+atomic_unchecked_t cm_connecteds;
41208+atomic_unchecked_t cm_connect_reqs;
41209+atomic_unchecked_t cm_rejects;
41210
41211 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
41212 {
41213@@ -1272,7 +1272,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
41214 kfree(listener);
41215 listener = NULL;
41216 ret = 0;
41217- atomic_inc(&cm_listens_destroyed);
41218+ atomic_inc_unchecked(&cm_listens_destroyed);
41219 } else {
41220 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
41221 }
41222@@ -1466,7 +1466,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
41223 cm_node->rem_mac);
41224
41225 add_hte_node(cm_core, cm_node);
41226- atomic_inc(&cm_nodes_created);
41227+ atomic_inc_unchecked(&cm_nodes_created);
41228
41229 return cm_node;
41230 }
41231@@ -1524,7 +1524,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
41232 }
41233
41234 atomic_dec(&cm_core->node_cnt);
41235- atomic_inc(&cm_nodes_destroyed);
41236+ atomic_inc_unchecked(&cm_nodes_destroyed);
41237 nesqp = cm_node->nesqp;
41238 if (nesqp) {
41239 nesqp->cm_node = NULL;
41240@@ -1588,7 +1588,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
41241
41242 static void drop_packet(struct sk_buff *skb)
41243 {
41244- atomic_inc(&cm_accel_dropped_pkts);
41245+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
41246 dev_kfree_skb_any(skb);
41247 }
41248
41249@@ -1651,7 +1651,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
41250 {
41251
41252 int reset = 0; /* whether to send reset in case of err.. */
41253- atomic_inc(&cm_resets_recvd);
41254+ atomic_inc_unchecked(&cm_resets_recvd);
41255 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
41256 " refcnt=%d\n", cm_node, cm_node->state,
41257 atomic_read(&cm_node->ref_count));
41258@@ -2292,7 +2292,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
41259 rem_ref_cm_node(cm_node->cm_core, cm_node);
41260 return NULL;
41261 }
41262- atomic_inc(&cm_loopbacks);
41263+ atomic_inc_unchecked(&cm_loopbacks);
41264 loopbackremotenode->loopbackpartner = cm_node;
41265 loopbackremotenode->tcp_cntxt.rcv_wscale =
41266 NES_CM_DEFAULT_RCV_WND_SCALE;
41267@@ -2567,7 +2567,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
41268 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
41269 else {
41270 rem_ref_cm_node(cm_core, cm_node);
41271- atomic_inc(&cm_accel_dropped_pkts);
41272+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
41273 dev_kfree_skb_any(skb);
41274 }
41275 break;
41276@@ -2875,7 +2875,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
41277
41278 if ((cm_id) && (cm_id->event_handler)) {
41279 if (issue_disconn) {
41280- atomic_inc(&cm_disconnects);
41281+ atomic_inc_unchecked(&cm_disconnects);
41282 cm_event.event = IW_CM_EVENT_DISCONNECT;
41283 cm_event.status = disconn_status;
41284 cm_event.local_addr = cm_id->local_addr;
41285@@ -2897,7 +2897,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
41286 }
41287
41288 if (issue_close) {
41289- atomic_inc(&cm_closes);
41290+ atomic_inc_unchecked(&cm_closes);
41291 nes_disconnect(nesqp, 1);
41292
41293 cm_id->provider_data = nesqp;
41294@@ -3035,7 +3035,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
41295
41296 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
41297 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
41298- atomic_inc(&cm_accepts);
41299+ atomic_inc_unchecked(&cm_accepts);
41300
41301 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
41302 netdev_refcnt_read(nesvnic->netdev));
41303@@ -3224,7 +3224,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
41304 struct nes_cm_core *cm_core;
41305 u8 *start_buff;
41306
41307- atomic_inc(&cm_rejects);
41308+ atomic_inc_unchecked(&cm_rejects);
41309 cm_node = (struct nes_cm_node *)cm_id->provider_data;
41310 loopback = cm_node->loopbackpartner;
41311 cm_core = cm_node->cm_core;
41312@@ -3286,7 +3286,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
41313 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
41314 ntohs(laddr->sin_port));
41315
41316- atomic_inc(&cm_connects);
41317+ atomic_inc_unchecked(&cm_connects);
41318 nesqp->active_conn = 1;
41319
41320 /* cache the cm_id in the qp */
41321@@ -3398,7 +3398,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
41322 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
41323 return err;
41324 }
41325- atomic_inc(&cm_listens_created);
41326+ atomic_inc_unchecked(&cm_listens_created);
41327 }
41328
41329 cm_id->add_ref(cm_id);
41330@@ -3505,7 +3505,7 @@ static void cm_event_connected(struct nes_cm_event *event)
41331
41332 if (nesqp->destroyed)
41333 return;
41334- atomic_inc(&cm_connecteds);
41335+ atomic_inc_unchecked(&cm_connecteds);
41336 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
41337 " local port 0x%04X. jiffies = %lu.\n",
41338 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
41339@@ -3686,7 +3686,7 @@ static void cm_event_reset(struct nes_cm_event *event)
41340
41341 cm_id->add_ref(cm_id);
41342 ret = cm_id->event_handler(cm_id, &cm_event);
41343- atomic_inc(&cm_closes);
41344+ atomic_inc_unchecked(&cm_closes);
41345 cm_event.event = IW_CM_EVENT_CLOSE;
41346 cm_event.status = 0;
41347 cm_event.provider_data = cm_id->provider_data;
41348@@ -3726,7 +3726,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
41349 return;
41350 cm_id = cm_node->cm_id;
41351
41352- atomic_inc(&cm_connect_reqs);
41353+ atomic_inc_unchecked(&cm_connect_reqs);
41354 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
41355 cm_node, cm_id, jiffies);
41356
41357@@ -3770,7 +3770,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
41358 return;
41359 cm_id = cm_node->cm_id;
41360
41361- atomic_inc(&cm_connect_reqs);
41362+ atomic_inc_unchecked(&cm_connect_reqs);
41363 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
41364 cm_node, cm_id, jiffies);
41365
41366diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
41367index 4166452..fc952c3 100644
41368--- a/drivers/infiniband/hw/nes/nes_mgt.c
41369+++ b/drivers/infiniband/hw/nes/nes_mgt.c
41370@@ -40,8 +40,8 @@
41371 #include "nes.h"
41372 #include "nes_mgt.h"
41373
41374-atomic_t pau_qps_created;
41375-atomic_t pau_qps_destroyed;
41376+atomic_unchecked_t pau_qps_created;
41377+atomic_unchecked_t pau_qps_destroyed;
41378
41379 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
41380 {
41381@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
41382 {
41383 struct sk_buff *skb;
41384 unsigned long flags;
41385- atomic_inc(&pau_qps_destroyed);
41386+ atomic_inc_unchecked(&pau_qps_destroyed);
41387
41388 /* Free packets that have not yet been forwarded */
41389 /* Lock is acquired by skb_dequeue when removing the skb */
41390@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
41391 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
41392 skb_queue_head_init(&nesqp->pau_list);
41393 spin_lock_init(&nesqp->pau_lock);
41394- atomic_inc(&pau_qps_created);
41395+ atomic_inc_unchecked(&pau_qps_created);
41396 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
41397 }
41398
41399diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
41400index 49eb511..a774366 100644
41401--- a/drivers/infiniband/hw/nes/nes_nic.c
41402+++ b/drivers/infiniband/hw/nes/nes_nic.c
41403@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
41404 target_stat_values[++index] = mh_detected;
41405 target_stat_values[++index] = mh_pauses_sent;
41406 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
41407- target_stat_values[++index] = atomic_read(&cm_connects);
41408- target_stat_values[++index] = atomic_read(&cm_accepts);
41409- target_stat_values[++index] = atomic_read(&cm_disconnects);
41410- target_stat_values[++index] = atomic_read(&cm_connecteds);
41411- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
41412- target_stat_values[++index] = atomic_read(&cm_rejects);
41413- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
41414- target_stat_values[++index] = atomic_read(&qps_created);
41415- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
41416- target_stat_values[++index] = atomic_read(&qps_destroyed);
41417- target_stat_values[++index] = atomic_read(&cm_closes);
41418+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
41419+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
41420+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
41421+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
41422+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
41423+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
41424+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
41425+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
41426+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
41427+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
41428+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
41429 target_stat_values[++index] = cm_packets_sent;
41430 target_stat_values[++index] = cm_packets_bounced;
41431 target_stat_values[++index] = cm_packets_created;
41432 target_stat_values[++index] = cm_packets_received;
41433 target_stat_values[++index] = cm_packets_dropped;
41434 target_stat_values[++index] = cm_packets_retrans;
41435- target_stat_values[++index] = atomic_read(&cm_listens_created);
41436- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
41437+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
41438+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
41439 target_stat_values[++index] = cm_backlog_drops;
41440- target_stat_values[++index] = atomic_read(&cm_loopbacks);
41441- target_stat_values[++index] = atomic_read(&cm_nodes_created);
41442- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
41443- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
41444- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
41445+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
41446+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
41447+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
41448+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
41449+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
41450 target_stat_values[++index] = nesadapter->free_4kpbl;
41451 target_stat_values[++index] = nesadapter->free_256pbl;
41452 target_stat_values[++index] = int_mod_timer_init;
41453 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
41454 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
41455 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
41456- target_stat_values[++index] = atomic_read(&pau_qps_created);
41457- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
41458+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
41459+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
41460 }
41461
41462 /**
41463diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
41464index 5b53ca5..443da3c 100644
41465--- a/drivers/infiniband/hw/nes/nes_verbs.c
41466+++ b/drivers/infiniband/hw/nes/nes_verbs.c
41467@@ -46,9 +46,9 @@
41468
41469 #include <rdma/ib_umem.h>
41470
41471-atomic_t mod_qp_timouts;
41472-atomic_t qps_created;
41473-atomic_t sw_qps_destroyed;
41474+atomic_unchecked_t mod_qp_timouts;
41475+atomic_unchecked_t qps_created;
41476+atomic_unchecked_t sw_qps_destroyed;
41477
41478 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
41479
41480@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
41481 if (init_attr->create_flags)
41482 return ERR_PTR(-EINVAL);
41483
41484- atomic_inc(&qps_created);
41485+ atomic_inc_unchecked(&qps_created);
41486 switch (init_attr->qp_type) {
41487 case IB_QPT_RC:
41488 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
41489@@ -1466,7 +1466,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
41490 struct iw_cm_event cm_event;
41491 int ret = 0;
41492
41493- atomic_inc(&sw_qps_destroyed);
41494+ atomic_inc_unchecked(&sw_qps_destroyed);
41495 nesqp->destroyed = 1;
41496
41497 /* Blow away the connection if it exists. */
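
The nes hunks above are the template for a conversion repeated throughout this patch: statistics counters that may legitimately wrap are moved from atomic_t to atomic_unchecked_t so that PaX's REFCOUNT overflow detection does not treat a wrapping counter as an exploited reference count. A minimal user-space sketch of the fallback semantics, assuming a build without the REFCOUNT feature (the kernel's real definitions are arch-specific assembly; every name below is illustrative only):

/* Sketch only: without CONFIG_PAX_REFCOUNT the *_unchecked API is
 * plain atomic_t under another name, so wrapping counters keep
 * their pre-patch behaviour. */
typedef struct { volatile int counter; } atomic_unchecked_t;

#define ATOMIC_INIT(i) { (i) }

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return v->counter;
}

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	__sync_fetch_and_add(&v->counter, 1);	/* stand-in for the arch asm */
}

static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
{
	return __sync_add_and_fetch(&v->counter, 1);
}
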
41498diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
41499index 1946101..09766d2 100644
41500--- a/drivers/infiniband/hw/qib/qib.h
41501+++ b/drivers/infiniband/hw/qib/qib.h
41502@@ -52,6 +52,7 @@
41503 #include <linux/kref.h>
41504 #include <linux/sched.h>
41505 #include <linux/kthread.h>
41506+#include <linux/slab.h>
41507
41508 #include "qib_common.h"
41509 #include "qib_verbs.h"
41510diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
41511index 922a7fe..bb035db 100644
41512--- a/drivers/input/gameport/gameport.c
41513+++ b/drivers/input/gameport/gameport.c
41514@@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
41515 */
41516 static void gameport_init_port(struct gameport *gameport)
41517 {
41518- static atomic_t gameport_no = ATOMIC_INIT(0);
41519+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
41520
41521 __module_get(THIS_MODULE);
41522
41523 mutex_init(&gameport->drv_mutex);
41524 device_initialize(&gameport->dev);
41525 dev_set_name(&gameport->dev, "gameport%lu",
41526- (unsigned long)atomic_inc_return(&gameport_no) - 1);
41527+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
41528 gameport->dev.bus = &gameport_bus;
41529 gameport->dev.release = gameport_release_port;
41530 if (gameport->parent)
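
gameport, input, serio and the other input buses below all mint device names from a static, ever-increasing counter. The value is used only to keep names unique, so wraparound is harmless, which is exactly the case the unchecked atomic is meant for. The idiom in miniature (user-space sketch; the __sync builtin models atomic_inc_return_unchecked):

#include <stdio.h>

static int gameport_no;	/* models the static atomic_unchecked_t */

static void make_name(char *buf, size_t len)
{
	/* inc_return minus one, so numbering starts at gameport0 */
	unsigned long n = (unsigned long)__sync_add_and_fetch(&gameport_no, 1) - 1;
	snprintf(buf, len, "gameport%lu", n);
}
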
41531diff --git a/drivers/input/input.c b/drivers/input/input.c
41532index e75d015..57d1c28 100644
41533--- a/drivers/input/input.c
41534+++ b/drivers/input/input.c
41535@@ -1734,7 +1734,7 @@ EXPORT_SYMBOL_GPL(input_class);
41536 */
41537 struct input_dev *input_allocate_device(void)
41538 {
41539- static atomic_t input_no = ATOMIC_INIT(0);
41540+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
41541 struct input_dev *dev;
41542
41543 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
41544@@ -1749,7 +1749,7 @@ struct input_dev *input_allocate_device(void)
41545 INIT_LIST_HEAD(&dev->node);
41546
41547 dev_set_name(&dev->dev, "input%ld",
41548- (unsigned long) atomic_inc_return(&input_no) - 1);
41549+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
41550
41551 __module_get(THIS_MODULE);
41552 }
41553diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
41554index 04c69af..5f92d00 100644
41555--- a/drivers/input/joystick/sidewinder.c
41556+++ b/drivers/input/joystick/sidewinder.c
41557@@ -30,6 +30,7 @@
41558 #include <linux/kernel.h>
41559 #include <linux/module.h>
41560 #include <linux/slab.h>
41561+#include <linux/sched.h>
41562 #include <linux/init.h>
41563 #include <linux/input.h>
41564 #include <linux/gameport.h>
41565diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
41566index 75e3b10..fb390fd 100644
41567--- a/drivers/input/joystick/xpad.c
41568+++ b/drivers/input/joystick/xpad.c
41569@@ -736,7 +736,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
41570
41571 static int xpad_led_probe(struct usb_xpad *xpad)
41572 {
41573- static atomic_t led_seq = ATOMIC_INIT(0);
41574+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
41575 long led_no;
41576 struct xpad_led *led;
41577 struct led_classdev *led_cdev;
41578@@ -749,7 +749,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
41579 if (!led)
41580 return -ENOMEM;
41581
41582- led_no = (long)atomic_inc_return(&led_seq) - 1;
41583+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
41584
41585 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
41586 led->xpad = xpad;
41587diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
41588index e204f26..8459f15 100644
41589--- a/drivers/input/misc/ims-pcu.c
41590+++ b/drivers/input/misc/ims-pcu.c
41591@@ -1621,7 +1621,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
41592
41593 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
41594 {
41595- static atomic_t device_no = ATOMIC_INIT(0);
41596+ static atomic_unchecked_t device_no = ATOMIC_INIT(0);
41597
41598 const struct ims_pcu_device_info *info;
41599 u8 device_id;
41600@@ -1653,7 +1653,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
41601 }
41602
41603 /* Device appears to be operable, complete initialization */
41604- pcu->device_no = atomic_inc_return(&device_no) - 1;
41605+ pcu->device_no = atomic_inc_return_unchecked(&device_no) - 1;
41606
41607 error = ims_pcu_setup_backlight(pcu);
41608 if (error)
41609diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
41610index 2f0b39d..7370f13 100644
41611--- a/drivers/input/mouse/psmouse.h
41612+++ b/drivers/input/mouse/psmouse.h
41613@@ -116,7 +116,7 @@ struct psmouse_attribute {
41614 ssize_t (*set)(struct psmouse *psmouse, void *data,
41615 const char *buf, size_t count);
41616 bool protect;
41617-};
41618+} __do_const;
41619 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
41620
41621 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
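
__do_const is consumed by PaX's constify gcc plugin: structures made up (almost) entirely of function pointers are forced into read-only memory so their callbacks cannot be retargeted at run time, and the annotation opts struct psmouse_attribute in explicitly. When the plugin is not in use the macro has to compile away to nothing. A plausible shape, assumed rather than quoted from the patch:

#ifdef CONSTIFY_PLUGIN
#define __do_const	__attribute__((do_const))
#else
#define __do_const
#endif
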
41622diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
41623index 4c842c3..590b0bf 100644
41624--- a/drivers/input/mousedev.c
41625+++ b/drivers/input/mousedev.c
41626@@ -738,7 +738,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
41627
41628 spin_unlock_irq(&client->packet_lock);
41629
41630- if (copy_to_user(buffer, data, count))
41631+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
41632 return -EFAULT;
41633
41634 return count;
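
The mousedev change is a defensive clamp: count is caller-controlled, and while the surrounding logic should already keep it within the local packet buffer, rejecting count > sizeof(data) before copy_to_user() turns a latent stack info-leak into a clean -EFAULT. The guard, sketched with user-space stand-ins:

#include <string.h>

static long bounded_copy_out(char *ubuf, const char *data,
			     size_t count, size_t bufsz)
{
	if (count > bufsz)
		return -14;		/* -EFAULT, as in the patch */
	memcpy(ubuf, data, count);	/* models copy_to_user() */
	return (long)count;
}
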
41635diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
41636index 2b56855..5a55837 100644
41637--- a/drivers/input/serio/serio.c
41638+++ b/drivers/input/serio/serio.c
41639@@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
41640 */
41641 static void serio_init_port(struct serio *serio)
41642 {
41643- static atomic_t serio_no = ATOMIC_INIT(0);
41644+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
41645
41646 __module_get(THIS_MODULE);
41647
41648@@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
41649 mutex_init(&serio->drv_mutex);
41650 device_initialize(&serio->dev);
41651 dev_set_name(&serio->dev, "serio%ld",
41652- (long)atomic_inc_return(&serio_no) - 1);
41653+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
41654 serio->dev.bus = &serio_bus;
41655 serio->dev.release = serio_release_port;
41656 serio->dev.groups = serio_device_attr_groups;
41657diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
41658index 59df2e7..8f1cafb 100644
41659--- a/drivers/input/serio/serio_raw.c
41660+++ b/drivers/input/serio/serio_raw.c
41661@@ -293,7 +293,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
41662
41663 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
41664 {
41665- static atomic_t serio_raw_no = ATOMIC_INIT(0);
41666+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(0);
41667 struct serio_raw *serio_raw;
41668 int err;
41669
41670@@ -304,7 +304,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
41671 }
41672
41673 snprintf(serio_raw->name, sizeof(serio_raw->name),
41674- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no) - 1);
41675+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no) - 1);
41676 kref_init(&serio_raw->kref);
41677 INIT_LIST_HEAD(&serio_raw->client_list);
41678 init_waitqueue_head(&serio_raw->wait);
41679diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
41680index fbe9ca7..dbee61d 100644
41681--- a/drivers/iommu/iommu.c
41682+++ b/drivers/iommu/iommu.c
41683@@ -583,7 +583,7 @@ static struct notifier_block iommu_bus_nb = {
41684 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
41685 {
41686 bus_register_notifier(bus, &iommu_bus_nb);
41687- bus_for_each_dev(bus, NULL, ops, add_iommu_group);
41688+ bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
41689 }
41690
41691 /**
41692diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
41693index 39f81ae..2660096 100644
41694--- a/drivers/iommu/irq_remapping.c
41695+++ b/drivers/iommu/irq_remapping.c
41696@@ -356,7 +356,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
41697 void panic_if_irq_remap(const char *msg)
41698 {
41699 if (irq_remapping_enabled)
41700- panic(msg);
41701+ panic("%s", msg);
41702 }
41703
41704 static void ir_ack_apic_edge(struct irq_data *data)
41705@@ -377,10 +377,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
41706
41707 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
41708 {
41709- chip->irq_print_chip = ir_print_prefix;
41710- chip->irq_ack = ir_ack_apic_edge;
41711- chip->irq_eoi = ir_ack_apic_level;
41712- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
41713+ pax_open_kernel();
41714+ *(void **)&chip->irq_print_chip = ir_print_prefix;
41715+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
41716+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
41717+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
41718+ pax_close_kernel();
41719 }
41720
41721 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
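
Two separate hardenings land in irq_remapping.c. The panic() change closes a classic format-string hole: msg is not guaranteed to be a string literal, so any '%' directives it contains would be interpreted as conversions; routing it through a constant "%s" format makes it pure data. In miniature:

#include <stdio.h>

void bad(const char *msg)  { printf(msg); }		/* msg may contain %s, %n, ... */
void good(const char *msg) { printf("%s", msg); }	/* msg is printed verbatim */

The second hunk, which writes the irq_chip callbacks through pax_open_kernel(), belongs with the irq-gic constification just below; see the sketch after that diff.
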
41722diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
41723index d0e9480..d2b6340 100644
41724--- a/drivers/irqchip/irq-gic.c
41725+++ b/drivers/irqchip/irq-gic.c
41726@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
41727 * Supported arch specific GIC irq extension.
41728 * Default make them NULL.
41729 */
41730-struct irq_chip gic_arch_extn = {
41731+irq_chip_no_const gic_arch_extn = {
41732 .irq_eoi = NULL,
41733 .irq_mask = NULL,
41734 .irq_unmask = NULL,
41735@@ -333,7 +333,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
41736 chained_irq_exit(chip, desc);
41737 }
41738
41739-static struct irq_chip gic_chip = {
41740+static irq_chip_no_const gic_chip __read_only = {
41741 .name = "GIC",
41742 .irq_mask = gic_mask_irq,
41743 .irq_unmask = gic_unmask_irq,
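
With the constify plugin active, ops tables such as struct irq_chip live in read-only memory (hence the irq_chip_no_const/__read_only markings above), so the few legitimate runtime writes must open a temporary write window. pax_open_kernel()/pax_close_kernel() toggle the x86 CR0.WP bit around the store, and the *(void **)& casts defeat the const qualifier the plugin imposes. A heavily simplified sketch of the intent, not the real implementation (which is per-arch and also handles preemption):

static inline unsigned long rd_cr0(void)
{
	unsigned long v;
	asm volatile("mov %%cr0, %0" : "=r"(v));
	return v;
}

static inline void wr_cr0(unsigned long v)
{
	asm volatile("mov %0, %%cr0" : : "r"(v) : "memory");
}

#define X86_CR0_WP (1UL << 16)

static inline void pax_open_kernel(void)  { wr_cr0(rd_cr0() & ~X86_CR0_WP); }
static inline void pax_close_kernel(void) { wr_cr0(rd_cr0() |  X86_CR0_WP); }
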
41744diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
41745index ac6f72b..81150f2 100644
41746--- a/drivers/isdn/capi/capi.c
41747+++ b/drivers/isdn/capi/capi.c
41748@@ -81,8 +81,8 @@ struct capiminor {
41749
41750 struct capi20_appl *ap;
41751 u32 ncci;
41752- atomic_t datahandle;
41753- atomic_t msgid;
41754+ atomic_unchecked_t datahandle;
41755+ atomic_unchecked_t msgid;
41756
41757 struct tty_port port;
41758 int ttyinstop;
41759@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
41760 capimsg_setu16(s, 2, mp->ap->applid);
41761 capimsg_setu8 (s, 4, CAPI_DATA_B3);
41762 capimsg_setu8 (s, 5, CAPI_RESP);
41763- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
41764+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
41765 capimsg_setu32(s, 8, mp->ncci);
41766 capimsg_setu16(s, 12, datahandle);
41767 }
41768@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
41769 mp->outbytes -= len;
41770 spin_unlock_bh(&mp->outlock);
41771
41772- datahandle = atomic_inc_return(&mp->datahandle);
41773+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
41774 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
41775 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
41776 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
41777 capimsg_setu16(skb->data, 2, mp->ap->applid);
41778 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
41779 capimsg_setu8 (skb->data, 5, CAPI_REQ);
41780- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
41781+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
41782 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
41783 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
41784 capimsg_setu16(skb->data, 16, len); /* Data length */
41785diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
41786index 600c79b..3752bab 100644
41787--- a/drivers/isdn/gigaset/interface.c
41788+++ b/drivers/isdn/gigaset/interface.c
41789@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
41790 }
41791 tty->driver_data = cs;
41792
41793- ++cs->port.count;
41794+ atomic_inc(&cs->port.count);
41795
41796- if (cs->port.count == 1) {
41797+ if (atomic_read(&cs->port.count) == 1) {
41798 tty_port_tty_set(&cs->port, tty);
41799 cs->port.low_latency = 1;
41800 }
41801@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
41802
41803 if (!cs->connected)
41804 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
41805- else if (!cs->port.count)
41806+ else if (!atomic_read(&cs->port.count))
41807 dev_warn(cs->dev, "%s: device not opened\n", __func__);
41808- else if (!--cs->port.count)
41809+ else if (!atomic_dec_return(&cs->port.count))
41810 tty_port_tty_set(&cs->port, NULL);
41811
41812 mutex_unlock(&cs->mutex);
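
The gigaset hunks (and the isdn_tty ones further down) convert the tty_port open count from a plain int to atomic operations; the matching struct tty_port change to atomic_t appears elsewhere in this patch. Open/close accounting then needs no lock for the count itself. In outline, with a gcc builtin standing in for the kernel atomics:

static int port_count;

static void port_open(void)
{
	if (__sync_add_and_fetch(&port_count, 1) == 1) {
		/* first opener: bind the tty, mark it low-latency, ... */
	}
}

static void port_close(void)
{
	if (__sync_sub_and_fetch(&port_count, 1) == 0) {
		/* last closer: drop the tty reference */
	}
}
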
41813diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
41814index d0a41cb..f0cdb8c 100644
41815--- a/drivers/isdn/gigaset/usb-gigaset.c
41816+++ b/drivers/isdn/gigaset/usb-gigaset.c
41817@@ -547,7 +547,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
41818 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
41819 memcpy(cs->hw.usb->bchars, buf, 6);
41820 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
41821- 0, 0, &buf, 6, 2000);
41822+ 0, 0, buf, 6, 2000);
41823 }
41824
41825 static void gigaset_freebcshw(struct bc_state *bcs)
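
The usb-gigaset change is a plain bug fix rather than a hardening: the buf parameter is declared const unsigned char buf[6], i.e. it decays to a pointer, so &buf is a pointer to that pointer and usb_control_msg() would transmit six bytes of the stack slot holding it instead of the break characters. The type confusion in isolation:

static void send_bytes(const void *data, int len) { (void)data; (void)len; }

static void demo(const unsigned char buf[6])
{
	send_bytes(buf, 6);	/* correct: the six payload bytes */
	send_bytes(&buf, 6);	/* wrong: bytes of the pointer variable itself */
}
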
41826diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
41827index 4d9b195..455075c 100644
41828--- a/drivers/isdn/hardware/avm/b1.c
41829+++ b/drivers/isdn/hardware/avm/b1.c
41830@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
41831 }
41832 if (left) {
41833 if (t4file->user) {
41834- if (copy_from_user(buf, dp, left))
41835+ if (left > sizeof buf || copy_from_user(buf, dp, left))
41836 return -EFAULT;
41837 } else {
41838 memcpy(buf, dp, left);
41839@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
41840 }
41841 if (left) {
41842 if (config->user) {
41843- if (copy_from_user(buf, dp, left))
41844+ if (left > sizeof buf || copy_from_user(buf, dp, left))
41845 return -EFAULT;
41846 } else {
41847 memcpy(buf, dp, left);
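
b1_load_t4file() and b1_load_config() copy firmware-derived chunks into a fixed stack buffer, so bounding left against sizeof buf before copy_from_user() forecloses a kernel stack overflow even if a malformed image gets past earlier checks; icn_writecmd() below receives the same clamp. The pattern, with user-space stand-ins:

#include <string.h>

static long load_chunk(const char *src, size_t left)
{
	char buf[256];			/* fixed scratch buffer */

	if (left > sizeof(buf))
		return -14;		/* -EFAULT */
	memcpy(buf, src, left);		/* models copy_from_user() */
	return 0;
}
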
41848diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
41849index 9bb12ba..d4262f7 100644
41850--- a/drivers/isdn/i4l/isdn_common.c
41851+++ b/drivers/isdn/i4l/isdn_common.c
41852@@ -1651,6 +1651,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
41853 } else
41854 return -EINVAL;
41855 case IIOCDBGVAR:
41856+ if (!capable(CAP_SYS_RAWIO))
41857+ return -EPERM;
41858 if (arg) {
41859 if (copy_to_user(argp, &dev, sizeof(ulong)))
41860 return -EFAULT;
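
IIOCDBGVAR copies the value of the global dev pointer out to userspace, i.e. it leaks a kernel address, so the patch gates it behind CAP_SYS_RAWIO. The shape of the gate, modelled in plain C:

#include <stdbool.h>

static bool has_cap_sys_rawio;	/* models capable(CAP_SYS_RAWIO) */

static long dbgvar_ioctl(unsigned long *out, unsigned long kernel_ptr)
{
	if (!has_cap_sys_rawio)
		return -1;	/* -EPERM: unprivileged callers learn nothing */
	*out = kernel_ptr;	/* models the copy_to_user() */
	return 0;
}
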
41861diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
41862index 3c5f249..5fac4d0 100644
41863--- a/drivers/isdn/i4l/isdn_tty.c
41864+++ b/drivers/isdn/i4l/isdn_tty.c
41865@@ -1508,9 +1508,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
41866
41867 #ifdef ISDN_DEBUG_MODEM_OPEN
41868 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
41869- port->count);
41870+ atomic_read(&port->count));
41871 #endif
41872- port->count++;
41873+ atomic_inc(&port->count);
41874 port->tty = tty;
41875 /*
41876 * Start up serial port
41877@@ -1554,7 +1554,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
41878 #endif
41879 return;
41880 }
41881- if ((tty->count == 1) && (port->count != 1)) {
41882+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
41883 /*
41884 * Uh, oh. tty->count is 1, which means that the tty
41885 * structure will be freed. Info->count should always
41886@@ -1563,15 +1563,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
41887 * serial port won't be shutdown.
41888 */
41889 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
41890- "info->count is %d\n", port->count);
41891- port->count = 1;
41892+ "info->count is %d\n", atomic_read(&port->count));
41893+ atomic_set(&port->count, 1);
41894 }
41895- if (--port->count < 0) {
41896+ if (atomic_dec_return(&port->count) < 0) {
41897 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
41898- info->line, port->count);
41899- port->count = 0;
41900+ info->line, atomic_read(&port->count));
41901+ atomic_set(&port->count, 0);
41902 }
41903- if (port->count) {
41904+ if (atomic_read(&port->count)) {
41905 #ifdef ISDN_DEBUG_MODEM_OPEN
41906 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
41907 #endif
41908@@ -1625,7 +1625,7 @@ isdn_tty_hangup(struct tty_struct *tty)
41909 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
41910 return;
41911 isdn_tty_shutdown(info);
41912- port->count = 0;
41913+ atomic_set(&port->count, 0);
41914 port->flags &= ~ASYNC_NORMAL_ACTIVE;
41915 port->tty = NULL;
41916 wake_up_interruptible(&port->open_wait);
41917@@ -1970,7 +1970,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
41918 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
41919 modem_info *info = &dev->mdm.info[i];
41920
41921- if (info->port.count == 0)
41922+ if (atomic_read(&info->port.count) == 0)
41923 continue;
41924 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
41925 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
41926diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
41927index e74df7c..03a03ba 100644
41928--- a/drivers/isdn/icn/icn.c
41929+++ b/drivers/isdn/icn/icn.c
41930@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
41931 if (count > len)
41932 count = len;
41933 if (user) {
41934- if (copy_from_user(msg, buf, count))
41935+ if (count > sizeof msg || copy_from_user(msg, buf, count))
41936 return -EFAULT;
41937 } else
41938 memcpy(msg, buf, count);
41939diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
41940index a4f05c5..1433bc5 100644
41941--- a/drivers/isdn/mISDN/dsp_cmx.c
41942+++ b/drivers/isdn/mISDN/dsp_cmx.c
41943@@ -1628,7 +1628,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
41944 static u16 dsp_count; /* last sample count */
41945 static int dsp_count_valid; /* if we have last sample count */
41946
41947-void
41948+void __intentional_overflow(-1)
41949 dsp_cmx_send(void *arg)
41950 {
41951 struct dsp_conf *conf;
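
dsp_cmx_send() (and release_pmd() in the lguest diff below) gain __intentional_overflow(-1), an annotation for the size_overflow gcc plugin marking arithmetic whose wraparound is deliberate so the plugin will not instrument it. Without the plugin the macro must vanish; an assumed, not verbatim, definition:

#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...) \
	__attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)
#endif
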
41952diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
41953index d93e245..e7ece6b 100644
41954--- a/drivers/leds/leds-clevo-mail.c
41955+++ b/drivers/leds/leds-clevo-mail.c
41956@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
41957 * detected as working, but in reality it is not) as low as
41958 * possible.
41959 */
41960-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
41961+static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
41962 {
41963 .callback = clevo_mail_led_dmi_callback,
41964 .ident = "Clevo D410J",
41965diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
41966index 5b8f938..b73d657 100644
41967--- a/drivers/leds/leds-ss4200.c
41968+++ b/drivers/leds/leds-ss4200.c
41969@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
41970 * detected as working, but in reality it is not) as low as
41971 * possible.
41972 */
41973-static struct dmi_system_id nas_led_whitelist[] __initdata = {
41974+static struct dmi_system_id nas_led_whitelist[] __initconst = {
41975 {
41976 .callback = ss4200_led_dmi_callback,
41977 .ident = "Intel SS4200-E",
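
The DMI whitelists are read-only and touched only during boot, so __initdata becomes __initconst: both sections are discarded after init, but the const variant is mapped read-only while it exists. The two markers differ only in the section they select; roughly (section names assumed):

#define __initdata	__attribute__((__section__(".init.data")))
#define __initconst	__attribute__((__section__(".init.rodata")))

static const char greeting[] __initconst = "boot-time only";
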
41978diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
41979index 0bf1e4e..b4bf44e 100644
41980--- a/drivers/lguest/core.c
41981+++ b/drivers/lguest/core.c
41982@@ -97,9 +97,17 @@ static __init int map_switcher(void)
41983 * The end address needs +1 because __get_vm_area allocates an
41984 * extra guard page, so we need space for that.
41985 */
41986+
41987+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
41988+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
41989+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
41990+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
41991+#else
41992 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
41993 VM_ALLOC, switcher_addr, switcher_addr
41994 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
41995+#endif
41996+
41997 if (!switcher_vma) {
41998 err = -ENOMEM;
41999 printk("lguest: could not map switcher pages high\n");
42000@@ -124,7 +132,7 @@ static __init int map_switcher(void)
42001 * Now the Switcher is mapped at the right address, we can't fail!
42002 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
42003 */
42004- memcpy(switcher_vma->addr, start_switcher_text,
42005+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
42006 end_switcher_text - start_switcher_text);
42007
42008 printk(KERN_INFO "lguest: mapped switcher at %p\n",
42009diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
42010index bfb39bb..08a603b 100644
42011--- a/drivers/lguest/page_tables.c
42012+++ b/drivers/lguest/page_tables.c
42013@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
42014 /*:*/
42015
42016 #ifdef CONFIG_X86_PAE
42017-static void release_pmd(pmd_t *spmd)
42018+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
42019 {
42020 /* If the entry's not present, there's nothing to release. */
42021 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
42022diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
42023index 5169239..47cb4db 100644
42024--- a/drivers/lguest/x86/core.c
42025+++ b/drivers/lguest/x86/core.c
42026@@ -59,7 +59,7 @@ static struct {
42027 /* Offset from where switcher.S was compiled to where we've copied it */
42028 static unsigned long switcher_offset(void)
42029 {
42030- return switcher_addr - (unsigned long)start_switcher_text;
42031+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
42032 }
42033
42034 /* This cpu's struct lguest_pages (after the Switcher text page) */
42035@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
42036 * These copies are pretty cheap, so we do them unconditionally: */
42037 /* Save the current Host top-level page directory.
42038 */
42039+
42040+#ifdef CONFIG_PAX_PER_CPU_PGD
42041+ pages->state.host_cr3 = read_cr3();
42042+#else
42043 pages->state.host_cr3 = __pa(current->mm->pgd);
42044+#endif
42045+
42046 /*
42047 * Set up the Guest's page tables to see this CPU's pages (and no
42048 * other CPU's pages).
42049@@ -475,7 +481,7 @@ void __init lguest_arch_host_init(void)
42050 * compiled-in switcher code and the high-mapped copy we just made.
42051 */
42052 for (i = 0; i < IDT_ENTRIES; i++)
42053- default_idt_entries[i] += switcher_offset();
42054+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
42055
42056 /*
42057 * Set up the Switcher's per-cpu areas.
42058@@ -558,7 +564,7 @@ void __init lguest_arch_host_init(void)
42059 * it will be undisturbed when we switch. To change %cs and jump we
42060 * need this structure to feed to Intel's "lcall" instruction.
42061 */
42062- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
42063+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
42064 lguest_entry.segment = LGUEST_CS;
42065
42066 /*
42067diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
42068index 40634b0..4f5855e 100644
42069--- a/drivers/lguest/x86/switcher_32.S
42070+++ b/drivers/lguest/x86/switcher_32.S
42071@@ -87,6 +87,7 @@
42072 #include <asm/page.h>
42073 #include <asm/segment.h>
42074 #include <asm/lguest.h>
42075+#include <asm/processor-flags.h>
42076
42077 // We mark the start of the code to copy
42078 // It's placed in .text tho it's never run here
42079@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
42080 // Changes type when we load it: damn Intel!
42081 // For after we switch over our page tables
42082 // That entry will be read-only: we'd crash.
42083+
42084+#ifdef CONFIG_PAX_KERNEXEC
42085+ mov %cr0, %edx
42086+ xor $X86_CR0_WP, %edx
42087+ mov %edx, %cr0
42088+#endif
42089+
42090 movl $(GDT_ENTRY_TSS*8), %edx
42091 ltr %dx
42092
42093@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
42094 // Let's clear it again for our return.
42095 // The GDT descriptor of the Host
42096 // Points to the table after two "size" bytes
42097- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
42098+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
42099 // Clear "used" from type field (byte 5, bit 2)
42100- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
42101+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
42102+
42103+#ifdef CONFIG_PAX_KERNEXEC
42104+ mov %cr0, %eax
42105+ xor $X86_CR0_WP, %eax
42106+ mov %eax, %cr0
42107+#endif
42108
42109 // Once our page table's switched, the Guest is live!
42110 // The Host fades as we run this final step.
42111@@ -295,13 +309,12 @@ deliver_to_host:
42112 // I consulted gcc, and it gave
42113 // These instructions, which I gladly credit:
42114 leal (%edx,%ebx,8), %eax
42115- movzwl (%eax),%edx
42116- movl 4(%eax), %eax
42117- xorw %ax, %ax
42118- orl %eax, %edx
42119+ movl 4(%eax), %edx
42120+ movw (%eax), %dx
42121 // Now the address of the handler's in %edx
42122 // We call it now: its "iret" drops us home.
42123- jmp *%edx
42124+ ljmp $__KERNEL_CS, $1f
42125+1: jmp *%edx
42126
42127 // Every interrupt can come to us here
42128 // But we must truly tell each apart.
42129diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
42130index 0003992..854bbce 100644
42131--- a/drivers/md/bcache/closure.h
42132+++ b/drivers/md/bcache/closure.h
42133@@ -622,7 +622,7 @@ static inline void closure_wake_up(struct closure_waitlist *list)
42134 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
42135 struct workqueue_struct *wq)
42136 {
42137- BUG_ON(object_is_on_stack(cl));
42138+ BUG_ON(object_starts_on_stack(cl));
42139 closure_set_ip(cl);
42140 cl->fn = fn;
42141 cl->wq = wq;
42142diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
42143index 547c4c5..5be1de4 100644
42144--- a/drivers/md/bcache/super.c
42145+++ b/drivers/md/bcache/super.c
42146@@ -1644,7 +1644,7 @@ err_unlock_gc:
42147 err:
42148 closure_sync(&op.cl);
42149 /* XXX: test this, it's broken */
42150- bch_cache_set_error(c, err);
42151+ bch_cache_set_error(c, "%s", err);
42152 }
42153
42154 static bool can_attach_cache(struct cache *ca, struct cache_set *c)
42155diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
42156index a7fd821..9dcf6c3 100644
42157--- a/drivers/md/bitmap.c
42158+++ b/drivers/md/bitmap.c
42159@@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
42160 chunk_kb ? "KB" : "B");
42161 if (bitmap->storage.file) {
42162 seq_printf(seq, ", file: ");
42163- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
42164+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
42165 }
42166
42167 seq_printf(seq, "\n");
42168diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
42169index afe0814..8cf3794 100644
42170--- a/drivers/md/dm-ioctl.c
42171+++ b/drivers/md/dm-ioctl.c
42172@@ -1745,7 +1745,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
42173 cmd == DM_LIST_VERSIONS_CMD)
42174 return 0;
42175
42176- if ((cmd == DM_DEV_CREATE_CMD)) {
42177+ if (cmd == DM_DEV_CREATE_CMD) {
42178 if (!*param->name) {
42179 DMWARN("name not supplied when creating device");
42180 return -EINVAL;
42181diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
42182index 9584443..9fc9ac9 100644
42183--- a/drivers/md/dm-raid1.c
42184+++ b/drivers/md/dm-raid1.c
42185@@ -40,7 +40,7 @@ enum dm_raid1_error {
42186
42187 struct mirror {
42188 struct mirror_set *ms;
42189- atomic_t error_count;
42190+ atomic_unchecked_t error_count;
42191 unsigned long error_type;
42192 struct dm_dev *dev;
42193 sector_t offset;
42194@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
42195 struct mirror *m;
42196
42197 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
42198- if (!atomic_read(&m->error_count))
42199+ if (!atomic_read_unchecked(&m->error_count))
42200 return m;
42201
42202 return NULL;
42203@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
42204 * simple way to tell if a device has encountered
42205 * errors.
42206 */
42207- atomic_inc(&m->error_count);
42208+ atomic_inc_unchecked(&m->error_count);
42209
42210 if (test_and_set_bit(error_type, &m->error_type))
42211 return;
42212@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
42213 struct mirror *m = get_default_mirror(ms);
42214
42215 do {
42216- if (likely(!atomic_read(&m->error_count)))
42217+ if (likely(!atomic_read_unchecked(&m->error_count)))
42218 return m;
42219
42220 if (m-- == ms->mirror)
42221@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
42222 {
42223 struct mirror *default_mirror = get_default_mirror(m->ms);
42224
42225- return !atomic_read(&default_mirror->error_count);
42226+ return !atomic_read_unchecked(&default_mirror->error_count);
42227 }
42228
42229 static int mirror_available(struct mirror_set *ms, struct bio *bio)
42230@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
42231 */
42232 if (likely(region_in_sync(ms, region, 1)))
42233 m = choose_mirror(ms, bio->bi_sector);
42234- else if (m && atomic_read(&m->error_count))
42235+ else if (m && atomic_read_unchecked(&m->error_count))
42236 m = NULL;
42237
42238 if (likely(m))
42239@@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
42240 }
42241
42242 ms->mirror[mirror].ms = ms;
42243- atomic_set(&(ms->mirror[mirror].error_count), 0);
42244+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
42245 ms->mirror[mirror].error_type = 0;
42246 ms->mirror[mirror].offset = offset;
42247
42248@@ -1339,7 +1339,7 @@ static void mirror_resume(struct dm_target *ti)
42249 */
42250 static char device_status_char(struct mirror *m)
42251 {
42252- if (!atomic_read(&(m->error_count)))
42253+ if (!atomic_read_unchecked(&(m->error_count)))
42254 return 'A';
42255
42256 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
42257diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
42258index 28a9012..9c0f6a5 100644
42259--- a/drivers/md/dm-stats.c
42260+++ b/drivers/md/dm-stats.c
42261@@ -382,7 +382,7 @@ do_sync_free:
42262 synchronize_rcu_expedited();
42263 dm_stat_free(&s->rcu_head);
42264 } else {
42265- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
42266+ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
42267 call_rcu(&s->rcu_head, dm_stat_free);
42268 }
42269 return 0;
42270@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
42271 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
42272 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
42273 ));
42274- ACCESS_ONCE(last->last_sector) = end_sector;
42275- ACCESS_ONCE(last->last_rw) = bi_rw;
42276+ ACCESS_ONCE_RW(last->last_sector) = end_sector;
42277+ ACCESS_ONCE_RW(last->last_rw) = bi_rw;
42278 }
42279
42280 rcu_read_lock();
42281diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
42282index 73c1712..7347292 100644
42283--- a/drivers/md/dm-stripe.c
42284+++ b/drivers/md/dm-stripe.c
42285@@ -21,7 +21,7 @@ struct stripe {
42286 struct dm_dev *dev;
42287 sector_t physical_start;
42288
42289- atomic_t error_count;
42290+ atomic_unchecked_t error_count;
42291 };
42292
42293 struct stripe_c {
42294@@ -186,7 +186,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
42295 kfree(sc);
42296 return r;
42297 }
42298- atomic_set(&(sc->stripe[i].error_count), 0);
42299+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
42300 }
42301
42302 ti->private = sc;
42303@@ -327,7 +327,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
42304 DMEMIT("%d ", sc->stripes);
42305 for (i = 0; i < sc->stripes; i++) {
42306 DMEMIT("%s ", sc->stripe[i].dev->name);
42307- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
42308+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
42309 'D' : 'A';
42310 }
42311 buffer[i] = '\0';
42312@@ -372,8 +372,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
42313 */
42314 for (i = 0; i < sc->stripes; i++)
42315 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
42316- atomic_inc(&(sc->stripe[i].error_count));
42317- if (atomic_read(&(sc->stripe[i].error_count)) <
42318+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
42319+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
42320 DM_IO_ERROR_THRESHOLD)
42321 schedule_work(&sc->trigger_event);
42322 }
42323diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
42324index 20a8cc0..5447b11 100644
42325--- a/drivers/md/dm-table.c
42326+++ b/drivers/md/dm-table.c
42327@@ -291,7 +291,7 @@ static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
42328 static int open_dev(struct dm_dev_internal *d, dev_t dev,
42329 struct mapped_device *md)
42330 {
42331- static char *_claim_ptr = "I belong to device-mapper";
42332+ static char _claim_ptr[] = "I belong to device-mapper";
42333 struct block_device *bdev;
42334
42335 int r;
42336@@ -359,7 +359,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
42337 if (!dev_size)
42338 return 0;
42339
42340- if ((start >= dev_size) || (start + len > dev_size)) {
42341+ if ((start >= dev_size) || (len > dev_size - start)) {
42342 DMWARN("%s: %s too small for target: "
42343 "start=%llu, len=%llu, dev_size=%llu",
42344 dm_device_name(ti->table->md), bdevname(bdev, b),
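
device_area_is_invalid() gets the canonical integer-overflow rewrite: start + len can wrap when len is attacker-influenced, silently passing the old check, whereas len > dev_size - start cannot misbehave because start < dev_size has already been established by the first clause. Isolated:

#include <stdbool.h>
#include <stdint.h>

/* True iff [start, start + len) fits inside [0, size); the order of
 * the tests keeps size - start from underflowing. */
static bool range_ok(uint64_t start, uint64_t len, uint64_t size)
{
	return start < size && len <= size - start;
}
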
42345diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
42346index 8a30ad5..72792d3 100644
42347--- a/drivers/md/dm-thin-metadata.c
42348+++ b/drivers/md/dm-thin-metadata.c
42349@@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
42350 {
42351 pmd->info.tm = pmd->tm;
42352 pmd->info.levels = 2;
42353- pmd->info.value_type.context = pmd->data_sm;
42354+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
42355 pmd->info.value_type.size = sizeof(__le64);
42356 pmd->info.value_type.inc = data_block_inc;
42357 pmd->info.value_type.dec = data_block_dec;
42358@@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
42359
42360 pmd->bl_info.tm = pmd->tm;
42361 pmd->bl_info.levels = 1;
42362- pmd->bl_info.value_type.context = pmd->data_sm;
42363+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
42364 pmd->bl_info.value_type.size = sizeof(__le64);
42365 pmd->bl_info.value_type.inc = data_block_inc;
42366 pmd->bl_info.value_type.dec = data_block_dec;
42367diff --git a/drivers/md/dm.c b/drivers/md/dm.c
42368index b3e26c7..1efca94 100644
42369--- a/drivers/md/dm.c
42370+++ b/drivers/md/dm.c
42371@@ -179,9 +179,9 @@ struct mapped_device {
42372 /*
42373 * Event handling.
42374 */
42375- atomic_t event_nr;
42376+ atomic_unchecked_t event_nr;
42377 wait_queue_head_t eventq;
42378- atomic_t uevent_seq;
42379+ atomic_unchecked_t uevent_seq;
42380 struct list_head uevent_list;
42381 spinlock_t uevent_lock; /* Protect access to uevent_list */
42382
42383@@ -1985,8 +1985,8 @@ static struct mapped_device *alloc_dev(int minor)
42384 spin_lock_init(&md->deferred_lock);
42385 atomic_set(&md->holders, 1);
42386 atomic_set(&md->open_count, 0);
42387- atomic_set(&md->event_nr, 0);
42388- atomic_set(&md->uevent_seq, 0);
42389+ atomic_set_unchecked(&md->event_nr, 0);
42390+ atomic_set_unchecked(&md->uevent_seq, 0);
42391 INIT_LIST_HEAD(&md->uevent_list);
42392 spin_lock_init(&md->uevent_lock);
42393
42394@@ -2139,7 +2139,7 @@ static void event_callback(void *context)
42395
42396 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
42397
42398- atomic_inc(&md->event_nr);
42399+ atomic_inc_unchecked(&md->event_nr);
42400 wake_up(&md->eventq);
42401 }
42402
42403@@ -2832,18 +2832,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
42404
42405 uint32_t dm_next_uevent_seq(struct mapped_device *md)
42406 {
42407- return atomic_add_return(1, &md->uevent_seq);
42408+ return atomic_add_return_unchecked(1, &md->uevent_seq);
42409 }
42410
42411 uint32_t dm_get_event_nr(struct mapped_device *md)
42412 {
42413- return atomic_read(&md->event_nr);
42414+ return atomic_read_unchecked(&md->event_nr);
42415 }
42416
42417 int dm_wait_event(struct mapped_device *md, int event_nr)
42418 {
42419 return wait_event_interruptible(md->eventq,
42420- (event_nr != atomic_read(&md->event_nr)));
42421+ (event_nr != atomic_read_unchecked(&md->event_nr)));
42422 }
42423
42424 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
42425diff --git a/drivers/md/md.c b/drivers/md/md.c
42426index ba46d97..f8f5019 100644
42427--- a/drivers/md/md.c
42428+++ b/drivers/md/md.c
42429@@ -234,10 +234,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
42430 * start build, activate spare
42431 */
42432 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
42433-static atomic_t md_event_count;
42434+static atomic_unchecked_t md_event_count;
42435 void md_new_event(struct mddev *mddev)
42436 {
42437- atomic_inc(&md_event_count);
42438+ atomic_inc_unchecked(&md_event_count);
42439 wake_up(&md_event_waiters);
42440 }
42441 EXPORT_SYMBOL_GPL(md_new_event);
42442@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
42443 */
42444 static void md_new_event_inintr(struct mddev *mddev)
42445 {
42446- atomic_inc(&md_event_count);
42447+ atomic_inc_unchecked(&md_event_count);
42448 wake_up(&md_event_waiters);
42449 }
42450
42451@@ -1502,7 +1502,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
42452 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
42453 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
42454 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
42455- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
42456+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
42457
42458 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
42459 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
42460@@ -1746,7 +1746,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
42461 else
42462 sb->resync_offset = cpu_to_le64(0);
42463
42464- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
42465+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
42466
42467 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
42468 sb->size = cpu_to_le64(mddev->dev_sectors);
42469@@ -2751,7 +2751,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
42470 static ssize_t
42471 errors_show(struct md_rdev *rdev, char *page)
42472 {
42473- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
42474+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
42475 }
42476
42477 static ssize_t
42478@@ -2760,7 +2760,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
42479 char *e;
42480 unsigned long n = simple_strtoul(buf, &e, 10);
42481 if (*buf && (*e == 0 || *e == '\n')) {
42482- atomic_set(&rdev->corrected_errors, n);
42483+ atomic_set_unchecked(&rdev->corrected_errors, n);
42484 return len;
42485 }
42486 return -EINVAL;
42487@@ -3208,8 +3208,8 @@ int md_rdev_init(struct md_rdev *rdev)
42488 rdev->sb_loaded = 0;
42489 rdev->bb_page = NULL;
42490 atomic_set(&rdev->nr_pending, 0);
42491- atomic_set(&rdev->read_errors, 0);
42492- atomic_set(&rdev->corrected_errors, 0);
42493+ atomic_set_unchecked(&rdev->read_errors, 0);
42494+ atomic_set_unchecked(&rdev->corrected_errors, 0);
42495
42496 INIT_LIST_HEAD(&rdev->same_set);
42497 init_waitqueue_head(&rdev->blocked_wait);
42498@@ -7043,7 +7043,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
42499
42500 spin_unlock(&pers_lock);
42501 seq_printf(seq, "\n");
42502- seq->poll_event = atomic_read(&md_event_count);
42503+ seq->poll_event = atomic_read_unchecked(&md_event_count);
42504 return 0;
42505 }
42506 if (v == (void*)2) {
42507@@ -7146,7 +7146,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
42508 return error;
42509
42510 seq = file->private_data;
42511- seq->poll_event = atomic_read(&md_event_count);
42512+ seq->poll_event = atomic_read_unchecked(&md_event_count);
42513 return error;
42514 }
42515
42516@@ -7160,7 +7160,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
42517 /* always allow read */
42518 mask = POLLIN | POLLRDNORM;
42519
42520- if (seq->poll_event != atomic_read(&md_event_count))
42521+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
42522 mask |= POLLERR | POLLPRI;
42523 return mask;
42524 }
42525@@ -7204,7 +7204,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
42526 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
42527 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
42528 (int)part_stat_read(&disk->part0, sectors[1]) -
42529- atomic_read(&disk->sync_io);
42530+ atomic_read_unchecked(&disk->sync_io);
42531 /* sync IO will cause sync_io to increase before the disk_stats
42532 * as sync_io is counted when a request starts, and
42533 * disk_stats is counted when it completes.
42534diff --git a/drivers/md/md.h b/drivers/md/md.h
42535index 608050c..6e77db5d 100644
42536--- a/drivers/md/md.h
42537+++ b/drivers/md/md.h
42538@@ -94,13 +94,13 @@ struct md_rdev {
42539 * only maintained for arrays that
42540 * support hot removal
42541 */
42542- atomic_t read_errors; /* number of consecutive read errors that
42543+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
42544 * we have tried to ignore.
42545 */
42546 struct timespec last_read_error; /* monotonic time since our
42547 * last read error
42548 */
42549- atomic_t corrected_errors; /* number of corrected read errors,
42550+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
42551 * for reporting to userspace and storing
42552 * in superblock.
42553 */
42554@@ -446,7 +446,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
42555
42556 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
42557 {
42558- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
42559+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
42560 }
42561
42562 struct md_personality
42563diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
42564index 3e6d115..ffecdeb 100644
42565--- a/drivers/md/persistent-data/dm-space-map.h
42566+++ b/drivers/md/persistent-data/dm-space-map.h
42567@@ -71,6 +71,7 @@ struct dm_space_map {
42568 dm_sm_threshold_fn fn,
42569 void *context);
42570 };
42571+typedef struct dm_space_map __no_const dm_space_map_no_const;
42572
42573 /*----------------------------------------------------------------*/
42574
42575diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
42576index aacf6bf..67d63f2 100644
42577--- a/drivers/md/raid1.c
42578+++ b/drivers/md/raid1.c
42579@@ -1824,7 +1824,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
42580 if (r1_sync_page_io(rdev, sect, s,
42581 bio->bi_io_vec[idx].bv_page,
42582 READ) != 0)
42583- atomic_add(s, &rdev->corrected_errors);
42584+ atomic_add_unchecked(s, &rdev->corrected_errors);
42585 }
42586 sectors -= s;
42587 sect += s;
42588@@ -2051,7 +2051,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
42589 test_bit(In_sync, &rdev->flags)) {
42590 if (r1_sync_page_io(rdev, sect, s,
42591 conf->tmppage, READ)) {
42592- atomic_add(s, &rdev->corrected_errors);
42593+ atomic_add_unchecked(s, &rdev->corrected_errors);
42594 printk(KERN_INFO
42595 "md/raid1:%s: read error corrected "
42596 "(%d sectors at %llu on %s)\n",
42597diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
42598index 73dc8a3..bdd515a 100644
42599--- a/drivers/md/raid10.c
42600+++ b/drivers/md/raid10.c
42601@@ -1963,7 +1963,7 @@ static void end_sync_read(struct bio *bio, int error)
42602 /* The write handler will notice the lack of
42603 * R10BIO_Uptodate and record any errors etc
42604 */
42605- atomic_add(r10_bio->sectors,
42606+ atomic_add_unchecked(r10_bio->sectors,
42607 &conf->mirrors[d].rdev->corrected_errors);
42608
42609 /* for reconstruct, we always reschedule after a read.
42610@@ -2321,7 +2321,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
42611 {
42612 struct timespec cur_time_mon;
42613 unsigned long hours_since_last;
42614- unsigned int read_errors = atomic_read(&rdev->read_errors);
42615+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
42616
42617 ktime_get_ts(&cur_time_mon);
42618
42619@@ -2343,9 +2343,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
42620 * overflowing the shift of read_errors by hours_since_last.
42621 */
42622 if (hours_since_last >= 8 * sizeof(read_errors))
42623- atomic_set(&rdev->read_errors, 0);
42624+ atomic_set_unchecked(&rdev->read_errors, 0);
42625 else
42626- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
42627+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
42628 }
42629
42630 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
42631@@ -2399,8 +2399,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
42632 return;
42633
42634 check_decay_read_errors(mddev, rdev);
42635- atomic_inc(&rdev->read_errors);
42636- if (atomic_read(&rdev->read_errors) > max_read_errors) {
42637+ atomic_inc_unchecked(&rdev->read_errors);
42638+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
42639 char b[BDEVNAME_SIZE];
42640 bdevname(rdev->bdev, b);
42641
42642@@ -2408,7 +2408,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
42643 "md/raid10:%s: %s: Raid device exceeded "
42644 "read_error threshold [cur %d:max %d]\n",
42645 mdname(mddev), b,
42646- atomic_read(&rdev->read_errors), max_read_errors);
42647+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
42648 printk(KERN_NOTICE
42649 "md/raid10:%s: %s: Failing raid device\n",
42650 mdname(mddev), b);
42651@@ -2563,7 +2563,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
42652 sect +
42653 choose_data_offset(r10_bio, rdev)),
42654 bdevname(rdev->bdev, b));
42655- atomic_add(s, &rdev->corrected_errors);
42656+ atomic_add_unchecked(s, &rdev->corrected_errors);
42657 }
42658
42659 rdev_dec_pending(rdev, mddev);
42660diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
42661index 8a0665d..984c46d 100644
42662--- a/drivers/md/raid5.c
42663+++ b/drivers/md/raid5.c
42664@@ -1887,21 +1887,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
42665 mdname(conf->mddev), STRIPE_SECTORS,
42666 (unsigned long long)s,
42667 bdevname(rdev->bdev, b));
42668- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
42669+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
42670 clear_bit(R5_ReadError, &sh->dev[i].flags);
42671 clear_bit(R5_ReWrite, &sh->dev[i].flags);
42672 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
42673 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
42674
42675- if (atomic_read(&rdev->read_errors))
42676- atomic_set(&rdev->read_errors, 0);
42677+ if (atomic_read_unchecked(&rdev->read_errors))
42678+ atomic_set_unchecked(&rdev->read_errors, 0);
42679 } else {
42680 const char *bdn = bdevname(rdev->bdev, b);
42681 int retry = 0;
42682 int set_bad = 0;
42683
42684 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
42685- atomic_inc(&rdev->read_errors);
42686+ atomic_inc_unchecked(&rdev->read_errors);
42687 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
42688 printk_ratelimited(
42689 KERN_WARNING
42690@@ -1929,7 +1929,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
42691 mdname(conf->mddev),
42692 (unsigned long long)s,
42693 bdn);
42694- } else if (atomic_read(&rdev->read_errors)
42695+ } else if (atomic_read_unchecked(&rdev->read_errors)
42696 > conf->max_nr_stripes)
42697 printk(KERN_WARNING
42698 "md/raid:%s: Too many read errors, failing device %s.\n",
42699diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
42700index 401ef64..836e563 100644
42701--- a/drivers/media/dvb-core/dvbdev.c
42702+++ b/drivers/media/dvb-core/dvbdev.c
42703@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
42704 const struct dvb_device *template, void *priv, int type)
42705 {
42706 struct dvb_device *dvbdev;
42707- struct file_operations *dvbdevfops;
42708+ file_operations_no_const *dvbdevfops;
42709 struct device *clsdev;
42710 int minor;
42711 int id;
42712diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
42713index 9b6c3bb..baeb5c7 100644
42714--- a/drivers/media/dvb-frontends/dib3000.h
42715+++ b/drivers/media/dvb-frontends/dib3000.h
42716@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
42717 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
42718 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
42719 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
42720-};
42721+} __no_const;
42722
42723 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
42724 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
42725diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
42726index ecf21d9..b992428d 100644
42727--- a/drivers/media/pci/cx88/cx88-video.c
42728+++ b/drivers/media/pci/cx88/cx88-video.c
42729@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
42730
42731 /* ------------------------------------------------------------------ */
42732
42733-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42734-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42735-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42736+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42737+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42738+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42739
42740 module_param_array(video_nr, int, NULL, 0444);
42741 module_param_array(vbi_nr, int, NULL, 0444);
42742diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
42743index c08ae3e..eb59af1 100644
42744--- a/drivers/media/pci/ivtv/ivtv-driver.c
42745+++ b/drivers/media/pci/ivtv/ivtv-driver.c
42746@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
42747 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
42748
42749 /* ivtv instance counter */
42750-static atomic_t ivtv_instance = ATOMIC_INIT(0);
42751+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
42752
42753 /* Parameter declarations */
42754 static int cardtype[IVTV_MAX_CARDS];
42755diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
42756index dfd0a21..6bbb465 100644
42757--- a/drivers/media/platform/omap/omap_vout.c
42758+++ b/drivers/media/platform/omap/omap_vout.c
42759@@ -63,7 +63,6 @@ enum omap_vout_channels {
42760 OMAP_VIDEO2,
42761 };
42762
42763-static struct videobuf_queue_ops video_vbq_ops;
42764 /* Variables configurable through module params*/
42765 static u32 video1_numbuffers = 3;
42766 static u32 video2_numbuffers = 3;
42767@@ -1014,6 +1013,12 @@ static int omap_vout_open(struct file *file)
42768 {
42769 struct videobuf_queue *q;
42770 struct omap_vout_device *vout = NULL;
42771+ static struct videobuf_queue_ops video_vbq_ops = {
42772+ .buf_setup = omap_vout_buffer_setup,
42773+ .buf_prepare = omap_vout_buffer_prepare,
42774+ .buf_release = omap_vout_buffer_release,
42775+ .buf_queue = omap_vout_buffer_queue,
42776+ };
42777
42778 vout = video_drvdata(file);
42779 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
42780@@ -1031,10 +1036,6 @@ static int omap_vout_open(struct file *file)
42781 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
42782
42783 q = &vout->vbq;
42784- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
42785- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
42786- video_vbq_ops.buf_release = omap_vout_buffer_release;
42787- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
42788 spin_lock_init(&vout->vbq_lock);
42789
42790 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
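
The omap_vout change removes the last runtime writes to a function-pointer table: instead of a file-scope videobuf_queue_ops whose fields are assigned in open(), the table becomes a function-local static with a designated initializer, populated at compile time and never written again (and therefore eligible for constification). The idiom in general form:

struct ops_sketch {
	int  (*setup)(void);
	void (*release)(void);
};

static int  do_setup(void)   { return 0; }
static void do_release(void) { }

static const struct ops_sketch vbq_ops = {
	.setup   = do_setup,
	.release = do_release,
};
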
42791diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
42792index 04e6490..2df65bf 100644
42793--- a/drivers/media/platform/s5p-tv/mixer.h
42794+++ b/drivers/media/platform/s5p-tv/mixer.h
42795@@ -156,7 +156,7 @@ struct mxr_layer {
42796 /** layer index (unique identifier) */
42797 int idx;
42798 /** callbacks for layer methods */
42799- struct mxr_layer_ops ops;
42800+ struct mxr_layer_ops *ops;
42801 /** format array */
42802 const struct mxr_format **fmt_array;
42803 /** size of format array */
42804diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
42805index b93a21f..2535195 100644
42806--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
42807+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
42808@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
42809 {
42810 struct mxr_layer *layer;
42811 int ret;
42812- struct mxr_layer_ops ops = {
42813+ static struct mxr_layer_ops ops = {
42814 .release = mxr_graph_layer_release,
42815 .buffer_set = mxr_graph_buffer_set,
42816 .stream_set = mxr_graph_stream_set,
42817diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
42818index b713403..53cb5ad 100644
42819--- a/drivers/media/platform/s5p-tv/mixer_reg.c
42820+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
42821@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
42822 layer->update_buf = next;
42823 }
42824
42825- layer->ops.buffer_set(layer, layer->update_buf);
42826+ layer->ops->buffer_set(layer, layer->update_buf);
42827
42828 if (done && done != layer->shadow_buf)
42829 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
42830diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
42831index 641b1f0..49cff30 100644
42832--- a/drivers/media/platform/s5p-tv/mixer_video.c
42833+++ b/drivers/media/platform/s5p-tv/mixer_video.c
42834@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
42835 layer->geo.src.height = layer->geo.src.full_height;
42836
42837 mxr_geometry_dump(mdev, &layer->geo);
42838- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
42839+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
42840 mxr_geometry_dump(mdev, &layer->geo);
42841 }
42842
42843@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
42844 layer->geo.dst.full_width = mbus_fmt.width;
42845 layer->geo.dst.full_height = mbus_fmt.height;
42846 layer->geo.dst.field = mbus_fmt.field;
42847- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
42848+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
42849
42850 mxr_geometry_dump(mdev, &layer->geo);
42851 }
42852@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
42853 /* set source size to highest accepted value */
42854 geo->src.full_width = max(geo->dst.full_width, pix->width);
42855 geo->src.full_height = max(geo->dst.full_height, pix->height);
42856- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
42857+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
42858 mxr_geometry_dump(mdev, &layer->geo);
42859 /* set cropping to total visible screen */
42860 geo->src.width = pix->width;
42861@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
42862 geo->src.x_offset = 0;
42863 geo->src.y_offset = 0;
42864 /* assure consistency of geometry */
42865- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
42866+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
42867 mxr_geometry_dump(mdev, &layer->geo);
42868 /* set full size to lowest possible value */
42869 geo->src.full_width = 0;
42870 geo->src.full_height = 0;
42871- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
42872+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
42873 mxr_geometry_dump(mdev, &layer->geo);
42874
42875 /* returning results */
42876@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
42877 target->width = s->r.width;
42878 target->height = s->r.height;
42879
42880- layer->ops.fix_geometry(layer, stage, s->flags);
42881+ layer->ops->fix_geometry(layer, stage, s->flags);
42882
42883 /* retrieve update selection rectangle */
42884 res.left = target->x_offset;
42885@@ -955,13 +955,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
42886 mxr_output_get(mdev);
42887
42888 mxr_layer_update_output(layer);
42889- layer->ops.format_set(layer);
42890+ layer->ops->format_set(layer);
42891 /* enabling layer in hardware */
42892 spin_lock_irqsave(&layer->enq_slock, flags);
42893 layer->state = MXR_LAYER_STREAMING;
42894 spin_unlock_irqrestore(&layer->enq_slock, flags);
42895
42896- layer->ops.stream_set(layer, MXR_ENABLE);
42897+ layer->ops->stream_set(layer, MXR_ENABLE);
42898 mxr_streamer_get(mdev);
42899
42900 return 0;
42901@@ -1031,7 +1031,7 @@ static int stop_streaming(struct vb2_queue *vq)
42902 spin_unlock_irqrestore(&layer->enq_slock, flags);
42903
42904 /* disabling layer in hardware */
42905- layer->ops.stream_set(layer, MXR_DISABLE);
42906+ layer->ops->stream_set(layer, MXR_DISABLE);
42907 /* remove one streamer */
42908 mxr_streamer_put(mdev);
42909 /* allow changes in output configuration */
42910@@ -1070,8 +1070,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
42911
42912 void mxr_layer_release(struct mxr_layer *layer)
42913 {
42914- if (layer->ops.release)
42915- layer->ops.release(layer);
42916+ if (layer->ops->release)
42917+ layer->ops->release(layer);
42918 }
42919
42920 void mxr_base_layer_release(struct mxr_layer *layer)
42921@@ -1097,7 +1097,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
42922
42923 layer->mdev = mdev;
42924 layer->idx = idx;
42925- layer->ops = *ops;
42926+ layer->ops = ops;
42927
42928 spin_lock_init(&layer->enq_slock);
42929 INIT_LIST_HEAD(&layer->enq_list);
42930diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
42931index 3d13a63..da31bf1 100644
42932--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
42933+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
42934@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
42935 {
42936 struct mxr_layer *layer;
42937 int ret;
42938- struct mxr_layer_ops ops = {
42939+ static struct mxr_layer_ops ops = {
42940 .release = mxr_vp_layer_release,
42941 .buffer_set = mxr_vp_buffer_set,
42942 .stream_set = mxr_vp_stream_set,
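The s5p-tv hunks above all follow from one change in mixer.h: the embedded ops struct becomes a pointer to a single static table, so every call site switches from '.' to '->' and the function pointers can live in read-only data instead of being copied into each layer. A compilable sketch of the pattern, with invented names:

#include <stdio.h>

struct layer_ops {
        void (*stream_set)(int on);
};

struct layer {
        const struct layer_ops *ops;    /* was: struct layer_ops ops; */
};

static void graph_stream_set(int on)
{
        printf("stream %s\n", on ? "on" : "off");
}

static const struct layer_ops graph_ops = {
        .stream_set = graph_stream_set,
};

int main(void)
{
        struct layer layer = { .ops = &graph_ops };     /* was: layer.ops = *ops */

        layer.ops->stream_set(1);       /* was: layer.ops.stream_set(1) */
        return 0;
}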
42943diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
42944index 545c04c..a14bded 100644
42945--- a/drivers/media/radio/radio-cadet.c
42946+++ b/drivers/media/radio/radio-cadet.c
42947@@ -324,6 +324,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
42948 unsigned char readbuf[RDS_BUFFER];
42949 int i = 0;
42950
42951+ if (count > RDS_BUFFER)
42952+ return -EFAULT;
42953 mutex_lock(&dev->lock);
42954 if (dev->rdsstat == 0)
42955 cadet_start_rds(dev);
42956@@ -339,7 +341,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
42957 while (i < count && dev->rdsin != dev->rdsout)
42958 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
42959
42960- if (i && copy_to_user(data, readbuf, i))
42961+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
42962 i = -EFAULT;
42963 unlock:
42964 mutex_unlock(&dev->lock);
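The added check bounds a user-controlled count against the on-stack staging buffer before anything is copied into it. A user-space analogue, with memcpy standing in for copy_to_user() and the error value kept as in the patch:

#include <stdio.h>
#include <string.h>
#include <errno.h>

#define RDS_BUFFER 256

static long bounded_read(char *dst, size_t count)
{
        unsigned char readbuf[RDS_BUFFER];
        size_t i;

        if (count > RDS_BUFFER)         /* reject oversized requests up front */
                return -EFAULT;

        for (i = 0; i < count; i++)
                readbuf[i] = (unsigned char)i;  /* stand-in for RDS data */

        memcpy(dst, readbuf, count);    /* stand-in for copy_to_user() */
        return (long)count;
}

int main(void)
{
        char buf[16];

        printf("%ld\n", bounded_read(buf, sizeof(buf)));        /* 16 */
        printf("%ld\n", bounded_read(buf, RDS_BUFFER + 1));     /* -EFAULT */
        return 0;
}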
42965diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
42966index 5236035..c622c74 100644
42967--- a/drivers/media/radio/radio-maxiradio.c
42968+++ b/drivers/media/radio/radio-maxiradio.c
42969@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
42970 /* TEA5757 pin mappings */
42971 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
42972
42973-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
42974+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
42975
42976 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
42977 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
42978diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
42979index b914772..7ddbf9b 100644
42980--- a/drivers/media/radio/radio-shark.c
42981+++ b/drivers/media/radio/radio-shark.c
42982@@ -79,7 +79,7 @@ struct shark_device {
42983 u32 last_val;
42984 };
42985
42986-static atomic_t shark_instance = ATOMIC_INIT(0);
42987+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
42988
42989 static void shark_write_val(struct snd_tea575x *tea, u32 val)
42990 {
42991diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
42992index 9fb6697..f167415 100644
42993--- a/drivers/media/radio/radio-shark2.c
42994+++ b/drivers/media/radio/radio-shark2.c
42995@@ -74,7 +74,7 @@ struct shark_device {
42996 u8 *transfer_buffer;
42997 };
42998
42999-static atomic_t shark_instance = ATOMIC_INIT(0);
43000+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
43001
43002 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
43003 {
43004diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
43005index 9c9084c..a9e8dfb 100644
43006--- a/drivers/media/radio/radio-si476x.c
43007+++ b/drivers/media/radio/radio-si476x.c
43008@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
43009 struct si476x_radio *radio;
43010 struct v4l2_ctrl *ctrl;
43011
43012- static atomic_t instance = ATOMIC_INIT(0);
43013+ static atomic_unchecked_t instance = ATOMIC_INIT(0);
43014
43015 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
43016 if (!radio)
43017diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
43018index 46da365..3ba4206 100644
43019--- a/drivers/media/rc/rc-main.c
43020+++ b/drivers/media/rc/rc-main.c
43021@@ -1065,7 +1065,7 @@ EXPORT_SYMBOL_GPL(rc_free_device);
43022 int rc_register_device(struct rc_dev *dev)
43023 {
43024 static bool raw_init = false; /* raw decoders loaded? */
43025- static atomic_t devno = ATOMIC_INIT(0);
43026+ static atomic_unchecked_t devno = ATOMIC_INIT(0);
43027 struct rc_map *rc_map;
43028 const char *path;
43029 int rc;
43030@@ -1096,7 +1096,7 @@ int rc_register_device(struct rc_dev *dev)
43031 */
43032 mutex_lock(&dev->lock);
43033
43034- dev->devno = (unsigned long)(atomic_inc_return(&devno) - 1);
43035+ dev->devno = (unsigned long)(atomic_inc_return_unchecked(&devno) - 1);
43036 dev_set_name(&dev->dev, "rc%ld", dev->devno);
43037 dev_set_drvdata(&dev->dev, dev);
43038 rc = device_add(&dev->dev);
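The atomic_t to atomic_unchecked_t conversions here and in the radio drivers above mark counters whose wraparound is harmless, device-instance numbers rather than reference counts, so PAX_REFCOUNT's overflow trap does not fire on them. A simplified user-space sketch of the unchecked variant (not the PaX implementation; __atomic_add_fetch is the GCC builtin):

#include <stdio.h>

typedef struct { int counter; } atomic_unchecked_sk_t;

static inline int atomic_inc_return_unchecked_sk(atomic_unchecked_sk_t *v)
{
        /* plain wrapping add; overflow detection deliberately omitted */
        return __atomic_add_fetch(&v->counter, 1, __ATOMIC_SEQ_CST);
}

int main(void)
{
        static atomic_unchecked_sk_t instance = { 0 };

        /* instances receive ids 0, 1, 2, ... just like the drivers above */
        printf("id %d\n", atomic_inc_return_unchecked_sk(&instance) - 1);
        printf("id %d\n", atomic_inc_return_unchecked_sk(&instance) - 1);
        return 0;
}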
43039diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
43040index 20e345d..da56fe4 100644
43041--- a/drivers/media/usb/dvb-usb/cxusb.c
43042+++ b/drivers/media/usb/dvb-usb/cxusb.c
43043@@ -1101,7 +1101,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
43044
43045 struct dib0700_adapter_state {
43046 int (*set_param_save) (struct dvb_frontend *);
43047-};
43048+} __no_const;
43049
43050 static int dib7070_set_param_override(struct dvb_frontend *fe)
43051 {
43052diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
43053index 71b22f5..a63b33f 100644
43054--- a/drivers/media/usb/dvb-usb/dw2102.c
43055+++ b/drivers/media/usb/dvb-usb/dw2102.c
43056@@ -121,7 +121,7 @@ struct su3000_state {
43057
43058 struct s6x0_state {
43059 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
43060-};
43061+} __no_const;
43062
43063 /* debug */
43064 static int dvb_usb_dw2102_debug;
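__no_const exempts these one-member callback-holder structs from the constify plugin, which would otherwise make any all-function-pointer struct read-only; the saved callback must remain assignable at runtime. An illustrative sketch with hypothetical names, leaving the struct writable just as the annotation would:

#include <stdio.h>

/* the real annotation is __no_const on the struct; here it is simply
 * left unqualified so the saved callback stays assignable */
struct adapter_state {
        int (*set_param_save)(int param);
};

static int saved_set_param(int param)
{
        return param * 2;
}

int main(void)
{
        struct adapter_state st = { 0 };

        st.set_param_save = saved_set_param;    /* runtime assignment */
        printf("%d\n", st.set_param_save(21));
        return 0;
}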
43065diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
43066index 8f7a6a4..59502dd 100644
43067--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
43068+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
43069@@ -326,7 +326,7 @@ struct v4l2_buffer32 {
43070 __u32 reserved;
43071 };
43072
43073-static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
43074+static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
43075 enum v4l2_memory memory)
43076 {
43077 void __user *up_pln;
43078@@ -355,7 +355,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
43079 return 0;
43080 }
43081
43082-static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
43083+static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
43084 enum v4l2_memory memory)
43085 {
43086 if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
43087@@ -772,7 +772,7 @@ static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subde
43088 put_user(kp->start_block, &up->start_block) ||
43089 put_user(kp->blocks, &up->blocks) ||
43090 put_user(tmp, &up->edid) ||
43091- copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
43092+ copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
43093 return -EFAULT;
43094 return 0;
43095 }
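Besides the __user annotations, the hunk above fixes swapped copy_to_user() arguments: the original call copied user data over the kernel structure instead of copying the kernel copy out. A user-space analogue of the corrected direction, with memcpy standing in for copy_to_user():

#include <stdio.h>
#include <string.h>

struct kernel_edid { unsigned char reserved[4]; };
struct user_edid   { unsigned char reserved[4]; };

int main(void)
{
        struct kernel_edid kp = { .reserved = { 1, 2, 3, 4 } };
        struct user_edid   up = { .reserved = { 0 } };

        /* correct direction: kernel data (kp) out to the user view (up) */
        memcpy(up.reserved, kp.reserved, sizeof(kp.reserved));

        printf("%d %d %d %d\n", up.reserved[0], up.reserved[1],
               up.reserved[2], up.reserved[3]);
        return 0;
}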
43096diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
43097index 02d1b63..5fd6b16 100644
43098--- a/drivers/media/v4l2-core/v4l2-device.c
43099+++ b/drivers/media/v4l2-core/v4l2-device.c
43100@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
43101 EXPORT_SYMBOL_GPL(v4l2_device_put);
43102
43103 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
43104- atomic_t *instance)
43105+ atomic_unchecked_t *instance)
43106 {
43107- int num = atomic_inc_return(instance) - 1;
43108+ int num = atomic_inc_return_unchecked(instance) - 1;
43109 int len = strlen(basename);
43110
43111 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
43112diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
43113index 68e6b5e..d8b923e 100644
43114--- a/drivers/media/v4l2-core/v4l2-ioctl.c
43115+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
43116@@ -1939,7 +1939,8 @@ struct v4l2_ioctl_info {
43117 struct file *file, void *fh, void *p);
43118 } u;
43119 void (*debug)(const void *arg, bool write_only);
43120-};
43121+} __do_const;
43122+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
43123
43124 /* This control needs a priority check */
43125 #define INFO_FL_PRIO (1 << 0)
43126@@ -2120,7 +2121,7 @@ static long __video_do_ioctl(struct file *file,
43127 struct video_device *vfd = video_devdata(file);
43128 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
43129 bool write_only = false;
43130- struct v4l2_ioctl_info default_info;
43131+ v4l2_ioctl_info_no_const default_info;
43132 const struct v4l2_ioctl_info *info;
43133 void *fh = file->private_data;
43134 struct v4l2_fh *vfh = NULL;
43135@@ -2194,7 +2195,7 @@ done:
43136 }
43137
43138 static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
43139- void * __user *user_ptr, void ***kernel_ptr)
43140+ void __user **user_ptr, void ***kernel_ptr)
43141 {
43142 int ret = 0;
43143
43144@@ -2210,7 +2211,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
43145 ret = -EINVAL;
43146 break;
43147 }
43148- *user_ptr = (void __user *)buf->m.planes;
43149+ *user_ptr = (void __force_user *)buf->m.planes;
43150 *kernel_ptr = (void *)&buf->m.planes;
43151 *array_size = sizeof(struct v4l2_plane) * buf->length;
43152 ret = 1;
43153@@ -2245,7 +2246,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
43154 ret = -EINVAL;
43155 break;
43156 }
43157- *user_ptr = (void __user *)ctrls->controls;
43158+ *user_ptr = (void __force_user *)ctrls->controls;
43159 *kernel_ptr = (void *)&ctrls->controls;
43160 *array_size = sizeof(struct v4l2_ext_control)
43161 * ctrls->count;
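The __do_const plus *_no_const pairing seen above constifies the ioctl handler table while keeping a writable typedef for the single stack-local entry that __video_do_ioctl builds at runtime. A compilable sketch of that split, with invented names and values:

#include <stdio.h>

struct ioctl_info {
        unsigned int ioctl;
        long (*func)(void *fh);
};

typedef struct ioctl_info ioctl_info_no_const;  /* writable twin of the type */

static long do_querycap(void *fh)
{
        (void)fh;
        return 0;
}

static const struct ioctl_info table[] = {      /* the constified array */
        { 0x5600, do_querycap },
};

int main(void)
{
        ioctl_info_no_const default_info = table[0];    /* stack copy */

        default_info.ioctl = 0x5601;    /* legal: the copy is writable */
        printf("%#x -> %ld\n", default_info.ioctl, default_info.func(NULL));
        return 0;
}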
43162diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
43163index 767ff4d..c69d259 100644
43164--- a/drivers/message/fusion/mptbase.c
43165+++ b/drivers/message/fusion/mptbase.c
43166@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
43167 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
43168 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
43169
43170+#ifdef CONFIG_GRKERNSEC_HIDESYM
43171+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
43172+#else
43173 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
43174 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
43175+#endif
43176+
43177 /*
43178 * Rounding UP to nearest 4-kB boundary here...
43179 */
43180@@ -6769,7 +6774,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
43181 ioc->facts.GlobalCredits);
43182
43183 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
43184+#ifdef CONFIG_GRKERNSEC_HIDESYM
43185+ NULL, NULL);
43186+#else
43187 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
43188+#endif
43189 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
43190 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
43191 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
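Under GRKERNSEC_HIDESYM the /proc output substitutes NULL for real kernel pointers so the addresses cannot feed infoleak-based attacks. A user-space sketch of the same conditional, with a compile-time switch standing in for the config option:

#include <stdio.h>

#define HIDESYM 1       /* stand-in for CONFIG_GRKERNSEC_HIDESYM */

static void show_frames(void *frames)
{
#if HIDESYM
        printf("RequestFrames @ %p\n", NULL);   /* hide the address */
#else
        printf("RequestFrames @ %p\n", frames);
#endif
}

int main(void)
{
        int dummy;

        show_frames(&dummy);
        return 0;
}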
43192diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
43193index dd239bd..689c4f7 100644
43194--- a/drivers/message/fusion/mptsas.c
43195+++ b/drivers/message/fusion/mptsas.c
43196@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
43197 return 0;
43198 }
43199
43200+static inline void
43201+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
43202+{
43203+ if (phy_info->port_details) {
43204+ phy_info->port_details->rphy = rphy;
43205+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
43206+ ioc->name, rphy));
43207+ }
43208+
43209+ if (rphy) {
43210+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
43211+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
43212+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
43213+ ioc->name, rphy, rphy->dev.release));
43214+ }
43215+}
43216+
43217 /* no mutex */
43218 static void
43219 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
43220@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
43221 return NULL;
43222 }
43223
43224-static inline void
43225-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
43226-{
43227- if (phy_info->port_details) {
43228- phy_info->port_details->rphy = rphy;
43229- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
43230- ioc->name, rphy));
43231- }
43232-
43233- if (rphy) {
43234- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
43235- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
43236- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
43237- ioc->name, rphy, rphy->dev.release));
43238- }
43239-}
43240-
43241 static inline struct sas_port *
43242 mptsas_get_port(struct mptsas_phyinfo *phy_info)
43243 {
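The mptsas hunks only move mptsas_set_rphy above its first caller, presumably so earlier code in the file can use the static inline without a forward declaration; the body is unchanged. The shape of the reordering as a tiny compilable example:

#include <stdio.h>

/* defined before its first use, so no forward declaration is needed */
static inline int helper(int x)
{
        return x + 1;
}

static int caller(void)
{
        return helper(41);
}

int main(void)
{
        printf("%d\n", caller());
        return 0;
}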
43244diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
43245index 727819c..ad74694 100644
43246--- a/drivers/message/fusion/mptscsih.c
43247+++ b/drivers/message/fusion/mptscsih.c
43248@@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
43249
43250 h = shost_priv(SChost);
43251
43252- if (h) {
43253- if (h->info_kbuf == NULL)
43254- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
43255- return h->info_kbuf;
43256- h->info_kbuf[0] = '\0';
43257+ if (!h)
43258+ return NULL;
43259
43260- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
43261- h->info_kbuf[size-1] = '\0';
43262- }
43263+ if (h->info_kbuf == NULL)
43264+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
43265+ return h->info_kbuf;
43266+ h->info_kbuf[0] = '\0';
43267+
43268+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
43269+ h->info_kbuf[size-1] = '\0';
43270
43271 return h->info_kbuf;
43272 }
43273diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
43274index b7d87cd..3fb36da 100644
43275--- a/drivers/message/i2o/i2o_proc.c
43276+++ b/drivers/message/i2o/i2o_proc.c
43277@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
43278 "Array Controller Device"
43279 };
43280
43281-static char *chtostr(char *tmp, u8 *chars, int n)
43282-{
43283- tmp[0] = 0;
43284- return strncat(tmp, (char *)chars, n);
43285-}
43286-
43287 static int i2o_report_query_status(struct seq_file *seq, int block_status,
43288 char *group)
43289 {
43290@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
43291 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
43292 {
43293 struct i2o_controller *c = (struct i2o_controller *)seq->private;
43294- static u32 work32[5];
43295- static u8 *work8 = (u8 *) work32;
43296- static u16 *work16 = (u16 *) work32;
43297+ u32 work32[5];
43298+ u8 *work8 = (u8 *) work32;
43299+ u16 *work16 = (u16 *) work32;
43300 int token;
43301 u32 hwcap;
43302
43303@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
43304 } *result;
43305
43306 i2o_exec_execute_ddm_table ddm_table;
43307- char tmp[28 + 1];
43308
43309 result = kmalloc(sizeof(*result), GFP_KERNEL);
43310 if (!result)
43311@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
43312
43313 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
43314 seq_printf(seq, "%-#8x", ddm_table.module_id);
43315- seq_printf(seq, "%-29s",
43316- chtostr(tmp, ddm_table.module_name_version, 28));
43317+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
43318 seq_printf(seq, "%9d ", ddm_table.data_size);
43319 seq_printf(seq, "%8d", ddm_table.code_size);
43320
43321@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
43322
43323 i2o_driver_result_table *result;
43324 i2o_driver_store_table *dst;
43325- char tmp[28 + 1];
43326
43327 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
43328 if (result == NULL)
43329@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
43330
43331 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
43332 seq_printf(seq, "%-#8x", dst->module_id);
43333- seq_printf(seq, "%-29s",
43334- chtostr(tmp, dst->module_name_version, 28));
43335- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
43336+ seq_printf(seq, "%-.28s", dst->module_name_version);
43337+ seq_printf(seq, "%-.8s", dst->date);
43338 seq_printf(seq, "%8d ", dst->module_size);
43339 seq_printf(seq, "%8d ", dst->mpb_size);
43340 seq_printf(seq, "0x%04x", dst->module_flags);
43341@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
43342 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
43343 {
43344 struct i2o_device *d = (struct i2o_device *)seq->private;
43345- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
43346+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
43347 // == (allow) 512d bytes (max)
43348- static u16 *work16 = (u16 *) work32;
43349+ u16 *work16 = (u16 *) work32;
43350 int token;
43351- char tmp[16 + 1];
43352
43353 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
43354
43355@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
43356 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
43357 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
43358 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
43359- seq_printf(seq, "Vendor info : %s\n",
43360- chtostr(tmp, (u8 *) (work32 + 2), 16));
43361- seq_printf(seq, "Product info : %s\n",
43362- chtostr(tmp, (u8 *) (work32 + 6), 16));
43363- seq_printf(seq, "Description : %s\n",
43364- chtostr(tmp, (u8 *) (work32 + 10), 16));
43365- seq_printf(seq, "Product rev. : %s\n",
43366- chtostr(tmp, (u8 *) (work32 + 14), 8));
43367+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
43368+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
43369+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
43370+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
43371
43372 seq_printf(seq, "Serial number : ");
43373 print_serial_number(seq, (u8 *) (work32 + 16),
43374@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
43375 u8 pad[256]; // allow up to 256 byte (max) serial number
43376 } result;
43377
43378- char tmp[24 + 1];
43379-
43380 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
43381
43382 if (token < 0) {
43383@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
43384 }
43385
43386 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
43387- seq_printf(seq, "Module name : %s\n",
43388- chtostr(tmp, result.module_name, 24));
43389- seq_printf(seq, "Module revision : %s\n",
43390- chtostr(tmp, result.module_rev, 8));
43391+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
43392+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
43393
43394 seq_printf(seq, "Serial number : ");
43395 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
43396@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
43397 u8 instance_number[4];
43398 } result;
43399
43400- char tmp[64 + 1];
43401-
43402 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
43403
43404 if (token < 0) {
43405@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
43406 return 0;
43407 }
43408
43409- seq_printf(seq, "Device name : %s\n",
43410- chtostr(tmp, result.device_name, 64));
43411- seq_printf(seq, "Service name : %s\n",
43412- chtostr(tmp, result.service_name, 64));
43413- seq_printf(seq, "Physical name : %s\n",
43414- chtostr(tmp, result.physical_location, 64));
43415- seq_printf(seq, "Instance number : %s\n",
43416- chtostr(tmp, result.instance_number, 4));
43417+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
43418+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
43419+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
43420+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
43421
43422 return 0;
43423 }
43424@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
43425 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
43426 {
43427 struct i2o_device *d = (struct i2o_device *)seq->private;
43428- static u32 work32[12];
43429- static u16 *work16 = (u16 *) work32;
43430- static u8 *work8 = (u8 *) work32;
43431+ u32 work32[12];
43432+ u16 *work16 = (u16 *) work32;
43433+ u8 *work8 = (u8 *) work32;
43434 int token;
43435
43436 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
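Dropping chtostr() works because printf-style precision already bounds a string argument: %.28s prints at most 28 bytes, needs no temporary buffer, and is safe on fixed-width fields that carry no terminating NUL. A minimal demonstration:

#include <stdio.h>

int main(void)
{
        /* fixed-width field with no terminating NUL, as in the DDM table */
        char module_name[8] = { 'i', '2', 'o', '_', 'b', 'l', 'k', '!' };

        /* prints at most 8 bytes, stopping early only if a NUL appears */
        printf("Module name : %.8s\n", module_name);
        return 0;
}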
43437diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
43438index a8c08f3..155fe3d 100644
43439--- a/drivers/message/i2o/iop.c
43440+++ b/drivers/message/i2o/iop.c
43441@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
43442
43443 spin_lock_irqsave(&c->context_list_lock, flags);
43444
43445- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
43446- atomic_inc(&c->context_list_counter);
43447+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
43448+ atomic_inc_unchecked(&c->context_list_counter);
43449
43450- entry->context = atomic_read(&c->context_list_counter);
43451+ entry->context = atomic_read_unchecked(&c->context_list_counter);
43452
43453 list_add(&entry->list, &c->context_list);
43454
43455@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
43456
43457 #if BITS_PER_LONG == 64
43458 spin_lock_init(&c->context_list_lock);
43459- atomic_set(&c->context_list_counter, 0);
43460+ atomic_set_unchecked(&c->context_list_counter, 0);
43461 INIT_LIST_HEAD(&c->context_list);
43462 #endif
43463
43464diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
43465index fcbb2e9..2635e11 100644
43466--- a/drivers/mfd/janz-cmodio.c
43467+++ b/drivers/mfd/janz-cmodio.c
43468@@ -13,6 +13,7 @@
43469
43470 #include <linux/kernel.h>
43471 #include <linux/module.h>
43472+#include <linux/slab.h>
43473 #include <linux/init.h>
43474 #include <linux/pci.h>
43475 #include <linux/interrupt.h>
43476diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
43477index de7fb80..7c1b931 100644
43478--- a/drivers/mfd/max8925-i2c.c
43479+++ b/drivers/mfd/max8925-i2c.c
43480@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
43481 const struct i2c_device_id *id)
43482 {
43483 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
43484- static struct max8925_chip *chip;
43485+ struct max8925_chip *chip;
43486 struct device_node *node = client->dev.of_node;
43487
43488 if (node && !pdata) {
43489diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
43490index d792772..cd73ba3 100644
43491--- a/drivers/mfd/tps65910.c
43492+++ b/drivers/mfd/tps65910.c
43493@@ -229,7 +229,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
43494 struct tps65910_platform_data *pdata)
43495 {
43496 int ret = 0;
43497- static struct regmap_irq_chip *tps6591x_irqs_chip;
43498+ struct regmap_irq_chip *tps6591x_irqs_chip;
43499
43500 if (!irq) {
43501 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
43502diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
43503index 9aa6d1e..1631bfc 100644
43504--- a/drivers/mfd/twl4030-irq.c
43505+++ b/drivers/mfd/twl4030-irq.c
43506@@ -35,6 +35,7 @@
43507 #include <linux/of.h>
43508 #include <linux/irqdomain.h>
43509 #include <linux/i2c/twl.h>
43510+#include <asm/pgtable.h>
43511
43512 #include "twl-core.h"
43513
43514@@ -726,10 +727,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
43515 * Install an irq handler for each of the SIH modules;
43516 * clone dummy irq_chip since PIH can't *do* anything
43517 */
43518- twl4030_irq_chip = dummy_irq_chip;
43519- twl4030_irq_chip.name = "twl4030";
43520+ pax_open_kernel();
43521+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
43522+ *(const char **)&twl4030_irq_chip.name = "twl4030";
43523
43524- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
43525+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
43526+ pax_close_kernel();
43527
43528 for (i = irq_base; i < irq_end; i++) {
43529 irq_set_chip_and_handler(i, &twl4030_irq_chip,
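pax_open_kernel()/pax_close_kernel() briefly lift write protection so one-time setup can write into otherwise read-only kernel objects, as in the twl4030 irq_chip cloning above. A user-space analogue using mprotect() in place of the PaX machinery (illustrative only):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        /* a page standing in for normally read-only kernel data */
        char *page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (page == MAP_FAILED)
                return 1;

        strcpy(page, "dummy");                  /* initial contents */
        mprotect(page, pagesz, PROT_READ);      /* sealed read-only */

        mprotect(page, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel() */
        strcpy(page, "twl4030");                /* the one-time setup write */
        mprotect(page, pagesz, PROT_READ);      /* pax_close_kernel() */

        printf("name = %s\n", page);
        munmap(page, pagesz);
        return 0;
}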
43530diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
43531index 464419b..64bae8d 100644
43532--- a/drivers/misc/c2port/core.c
43533+++ b/drivers/misc/c2port/core.c
43534@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
43535 goto error_idr_alloc;
43536 c2dev->id = ret;
43537
43538- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
43539+ pax_open_kernel();
43540+ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
43541+ pax_close_kernel();
43542
43543 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
43544 "c2port%d", c2dev->id);
43545diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
43546index 36f5d52..32311c3 100644
43547--- a/drivers/misc/kgdbts.c
43548+++ b/drivers/misc/kgdbts.c
43549@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
43550 char before[BREAK_INSTR_SIZE];
43551 char after[BREAK_INSTR_SIZE];
43552
43553- probe_kernel_read(before, (char *)kgdbts_break_test,
43554+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
43555 BREAK_INSTR_SIZE);
43556 init_simple_test();
43557 ts.tst = plant_and_detach_test;
43558@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
43559 /* Activate test with initial breakpoint */
43560 if (!is_early)
43561 kgdb_breakpoint();
43562- probe_kernel_read(after, (char *)kgdbts_break_test,
43563+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
43564 BREAK_INSTR_SIZE);
43565 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
43566 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
43567diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
43568index 036effe..b3a6336 100644
43569--- a/drivers/misc/lis3lv02d/lis3lv02d.c
43570+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
43571@@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
43572 * the lid is closed. This leads to interrupts as soon as a little move
43573 * is done.
43574 */
43575- atomic_inc(&lis3->count);
43576+ atomic_inc_unchecked(&lis3->count);
43577
43578 wake_up_interruptible(&lis3->misc_wait);
43579 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
43580@@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
43581 if (lis3->pm_dev)
43582 pm_runtime_get_sync(lis3->pm_dev);
43583
43584- atomic_set(&lis3->count, 0);
43585+ atomic_set_unchecked(&lis3->count, 0);
43586 return 0;
43587 }
43588
43589@@ -616,7 +616,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
43590 add_wait_queue(&lis3->misc_wait, &wait);
43591 while (true) {
43592 set_current_state(TASK_INTERRUPTIBLE);
43593- data = atomic_xchg(&lis3->count, 0);
43594+ data = atomic_xchg_unchecked(&lis3->count, 0);
43595 if (data)
43596 break;
43597
43598@@ -657,7 +657,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
43599 struct lis3lv02d, miscdev);
43600
43601 poll_wait(file, &lis3->misc_wait, wait);
43602- if (atomic_read(&lis3->count))
43603+ if (atomic_read_unchecked(&lis3->count))
43604 return POLLIN | POLLRDNORM;
43605 return 0;
43606 }
43607diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
43608index c439c82..1f20f57 100644
43609--- a/drivers/misc/lis3lv02d/lis3lv02d.h
43610+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
43611@@ -297,7 +297,7 @@ struct lis3lv02d {
43612 struct input_polled_dev *idev; /* input device */
43613 struct platform_device *pdev; /* platform device */
43614 struct regulator_bulk_data regulators[2];
43615- atomic_t count; /* interrupt count after last read */
43616+ atomic_unchecked_t count; /* interrupt count after last read */
43617 union axis_conversion ac; /* hw -> logical axis */
43618 int mapped_btns[3];
43619
43620diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
43621index 2f30bad..c4c13d0 100644
43622--- a/drivers/misc/sgi-gru/gruhandles.c
43623+++ b/drivers/misc/sgi-gru/gruhandles.c
43624@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
43625 unsigned long nsec;
43626
43627 nsec = CLKS2NSEC(clks);
43628- atomic_long_inc(&mcs_op_statistics[op].count);
43629- atomic_long_add(nsec, &mcs_op_statistics[op].total);
43630+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
43631+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
43632 if (mcs_op_statistics[op].max < nsec)
43633 mcs_op_statistics[op].max = nsec;
43634 }
43635diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
43636index 4f76359..cdfcb2e 100644
43637--- a/drivers/misc/sgi-gru/gruprocfs.c
43638+++ b/drivers/misc/sgi-gru/gruprocfs.c
43639@@ -32,9 +32,9 @@
43640
43641 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
43642
43643-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
43644+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
43645 {
43646- unsigned long val = atomic_long_read(v);
43647+ unsigned long val = atomic_long_read_unchecked(v);
43648
43649 seq_printf(s, "%16lu %s\n", val, id);
43650 }
43651@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
43652
43653 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
43654 for (op = 0; op < mcsop_last; op++) {
43655- count = atomic_long_read(&mcs_op_statistics[op].count);
43656- total = atomic_long_read(&mcs_op_statistics[op].total);
43657+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
43658+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
43659 max = mcs_op_statistics[op].max;
43660 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
43661 count ? total / count : 0, max);
43662diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
43663index 5c3ce24..4915ccb 100644
43664--- a/drivers/misc/sgi-gru/grutables.h
43665+++ b/drivers/misc/sgi-gru/grutables.h
43666@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
43667 * GRU statistics.
43668 */
43669 struct gru_stats_s {
43670- atomic_long_t vdata_alloc;
43671- atomic_long_t vdata_free;
43672- atomic_long_t gts_alloc;
43673- atomic_long_t gts_free;
43674- atomic_long_t gms_alloc;
43675- atomic_long_t gms_free;
43676- atomic_long_t gts_double_allocate;
43677- atomic_long_t assign_context;
43678- atomic_long_t assign_context_failed;
43679- atomic_long_t free_context;
43680- atomic_long_t load_user_context;
43681- atomic_long_t load_kernel_context;
43682- atomic_long_t lock_kernel_context;
43683- atomic_long_t unlock_kernel_context;
43684- atomic_long_t steal_user_context;
43685- atomic_long_t steal_kernel_context;
43686- atomic_long_t steal_context_failed;
43687- atomic_long_t nopfn;
43688- atomic_long_t asid_new;
43689- atomic_long_t asid_next;
43690- atomic_long_t asid_wrap;
43691- atomic_long_t asid_reuse;
43692- atomic_long_t intr;
43693- atomic_long_t intr_cbr;
43694- atomic_long_t intr_tfh;
43695- atomic_long_t intr_spurious;
43696- atomic_long_t intr_mm_lock_failed;
43697- atomic_long_t call_os;
43698- atomic_long_t call_os_wait_queue;
43699- atomic_long_t user_flush_tlb;
43700- atomic_long_t user_unload_context;
43701- atomic_long_t user_exception;
43702- atomic_long_t set_context_option;
43703- atomic_long_t check_context_retarget_intr;
43704- atomic_long_t check_context_unload;
43705- atomic_long_t tlb_dropin;
43706- atomic_long_t tlb_preload_page;
43707- atomic_long_t tlb_dropin_fail_no_asid;
43708- atomic_long_t tlb_dropin_fail_upm;
43709- atomic_long_t tlb_dropin_fail_invalid;
43710- atomic_long_t tlb_dropin_fail_range_active;
43711- atomic_long_t tlb_dropin_fail_idle;
43712- atomic_long_t tlb_dropin_fail_fmm;
43713- atomic_long_t tlb_dropin_fail_no_exception;
43714- atomic_long_t tfh_stale_on_fault;
43715- atomic_long_t mmu_invalidate_range;
43716- atomic_long_t mmu_invalidate_page;
43717- atomic_long_t flush_tlb;
43718- atomic_long_t flush_tlb_gru;
43719- atomic_long_t flush_tlb_gru_tgh;
43720- atomic_long_t flush_tlb_gru_zero_asid;
43721+ atomic_long_unchecked_t vdata_alloc;
43722+ atomic_long_unchecked_t vdata_free;
43723+ atomic_long_unchecked_t gts_alloc;
43724+ atomic_long_unchecked_t gts_free;
43725+ atomic_long_unchecked_t gms_alloc;
43726+ atomic_long_unchecked_t gms_free;
43727+ atomic_long_unchecked_t gts_double_allocate;
43728+ atomic_long_unchecked_t assign_context;
43729+ atomic_long_unchecked_t assign_context_failed;
43730+ atomic_long_unchecked_t free_context;
43731+ atomic_long_unchecked_t load_user_context;
43732+ atomic_long_unchecked_t load_kernel_context;
43733+ atomic_long_unchecked_t lock_kernel_context;
43734+ atomic_long_unchecked_t unlock_kernel_context;
43735+ atomic_long_unchecked_t steal_user_context;
43736+ atomic_long_unchecked_t steal_kernel_context;
43737+ atomic_long_unchecked_t steal_context_failed;
43738+ atomic_long_unchecked_t nopfn;
43739+ atomic_long_unchecked_t asid_new;
43740+ atomic_long_unchecked_t asid_next;
43741+ atomic_long_unchecked_t asid_wrap;
43742+ atomic_long_unchecked_t asid_reuse;
43743+ atomic_long_unchecked_t intr;
43744+ atomic_long_unchecked_t intr_cbr;
43745+ atomic_long_unchecked_t intr_tfh;
43746+ atomic_long_unchecked_t intr_spurious;
43747+ atomic_long_unchecked_t intr_mm_lock_failed;
43748+ atomic_long_unchecked_t call_os;
43749+ atomic_long_unchecked_t call_os_wait_queue;
43750+ atomic_long_unchecked_t user_flush_tlb;
43751+ atomic_long_unchecked_t user_unload_context;
43752+ atomic_long_unchecked_t user_exception;
43753+ atomic_long_unchecked_t set_context_option;
43754+ atomic_long_unchecked_t check_context_retarget_intr;
43755+ atomic_long_unchecked_t check_context_unload;
43756+ atomic_long_unchecked_t tlb_dropin;
43757+ atomic_long_unchecked_t tlb_preload_page;
43758+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
43759+ atomic_long_unchecked_t tlb_dropin_fail_upm;
43760+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
43761+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
43762+ atomic_long_unchecked_t tlb_dropin_fail_idle;
43763+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
43764+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
43765+ atomic_long_unchecked_t tfh_stale_on_fault;
43766+ atomic_long_unchecked_t mmu_invalidate_range;
43767+ atomic_long_unchecked_t mmu_invalidate_page;
43768+ atomic_long_unchecked_t flush_tlb;
43769+ atomic_long_unchecked_t flush_tlb_gru;
43770+ atomic_long_unchecked_t flush_tlb_gru_tgh;
43771+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
43772
43773- atomic_long_t copy_gpa;
43774- atomic_long_t read_gpa;
43775+ atomic_long_unchecked_t copy_gpa;
43776+ atomic_long_unchecked_t read_gpa;
43777
43778- atomic_long_t mesq_receive;
43779- atomic_long_t mesq_receive_none;
43780- atomic_long_t mesq_send;
43781- atomic_long_t mesq_send_failed;
43782- atomic_long_t mesq_noop;
43783- atomic_long_t mesq_send_unexpected_error;
43784- atomic_long_t mesq_send_lb_overflow;
43785- atomic_long_t mesq_send_qlimit_reached;
43786- atomic_long_t mesq_send_amo_nacked;
43787- atomic_long_t mesq_send_put_nacked;
43788- atomic_long_t mesq_page_overflow;
43789- atomic_long_t mesq_qf_locked;
43790- atomic_long_t mesq_qf_noop_not_full;
43791- atomic_long_t mesq_qf_switch_head_failed;
43792- atomic_long_t mesq_qf_unexpected_error;
43793- atomic_long_t mesq_noop_unexpected_error;
43794- atomic_long_t mesq_noop_lb_overflow;
43795- atomic_long_t mesq_noop_qlimit_reached;
43796- atomic_long_t mesq_noop_amo_nacked;
43797- atomic_long_t mesq_noop_put_nacked;
43798- atomic_long_t mesq_noop_page_overflow;
43799+ atomic_long_unchecked_t mesq_receive;
43800+ atomic_long_unchecked_t mesq_receive_none;
43801+ atomic_long_unchecked_t mesq_send;
43802+ atomic_long_unchecked_t mesq_send_failed;
43803+ atomic_long_unchecked_t mesq_noop;
43804+ atomic_long_unchecked_t mesq_send_unexpected_error;
43805+ atomic_long_unchecked_t mesq_send_lb_overflow;
43806+ atomic_long_unchecked_t mesq_send_qlimit_reached;
43807+ atomic_long_unchecked_t mesq_send_amo_nacked;
43808+ atomic_long_unchecked_t mesq_send_put_nacked;
43809+ atomic_long_unchecked_t mesq_page_overflow;
43810+ atomic_long_unchecked_t mesq_qf_locked;
43811+ atomic_long_unchecked_t mesq_qf_noop_not_full;
43812+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
43813+ atomic_long_unchecked_t mesq_qf_unexpected_error;
43814+ atomic_long_unchecked_t mesq_noop_unexpected_error;
43815+ atomic_long_unchecked_t mesq_noop_lb_overflow;
43816+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
43817+ atomic_long_unchecked_t mesq_noop_amo_nacked;
43818+ atomic_long_unchecked_t mesq_noop_put_nacked;
43819+ atomic_long_unchecked_t mesq_noop_page_overflow;
43820
43821 };
43822
43823@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
43824 tghop_invalidate, mcsop_last};
43825
43826 struct mcs_op_statistic {
43827- atomic_long_t count;
43828- atomic_long_t total;
43829+ atomic_long_unchecked_t count;
43830+ atomic_long_unchecked_t total;
43831 unsigned long max;
43832 };
43833
43834@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
43835
43836 #define STAT(id) do { \
43837 if (gru_options & OPT_STATS) \
43838- atomic_long_inc(&gru_stats.id); \
43839+ atomic_long_inc_unchecked(&gru_stats.id); \
43840 } while (0)
43841
43842 #ifdef CONFIG_SGI_GRU_DEBUG
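Every field of gru_stats_s becomes a wrapping counter because these are monotonically growing statistics, incremented through the STAT() wrapper above, where overflow trapping would add nothing. A sketch of the STAT() pattern with a plain counter standing in for atomic_long_unchecked_t:

#include <stdio.h>

#define OPT_STATS 0x01
static unsigned long gru_options = OPT_STATS;

/* plain wrapping counter standing in for atomic_long_unchecked_t */
static struct { unsigned long tlb_dropin; } gru_stats;

#define STAT(id) do {                           \
        if (gru_options & OPT_STATS)            \
                gru_stats.id++;                 \
} while (0)

int main(void)
{
        STAT(tlb_dropin);
        STAT(tlb_dropin);
        printf("tlb_dropin = %lu\n", gru_stats.tlb_dropin);
        return 0;
}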
43843diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
43844index c862cd4..0d176fe 100644
43845--- a/drivers/misc/sgi-xp/xp.h
43846+++ b/drivers/misc/sgi-xp/xp.h
43847@@ -288,7 +288,7 @@ struct xpc_interface {
43848 xpc_notify_func, void *);
43849 void (*received) (short, int, void *);
43850 enum xp_retval (*partid_to_nasids) (short, void *);
43851-};
43852+} __no_const;
43853
43854 extern struct xpc_interface xpc_interface;
43855
43856diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
43857index b94d5f7..7f494c5 100644
43858--- a/drivers/misc/sgi-xp/xpc.h
43859+++ b/drivers/misc/sgi-xp/xpc.h
43860@@ -835,6 +835,7 @@ struct xpc_arch_operations {
43861 void (*received_payload) (struct xpc_channel *, void *);
43862 void (*notify_senders_of_disconnect) (struct xpc_channel *);
43863 };
43864+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
43865
43866 /* struct xpc_partition act_state values (for XPC HB) */
43867
43868@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
43869 /* found in xpc_main.c */
43870 extern struct device *xpc_part;
43871 extern struct device *xpc_chan;
43872-extern struct xpc_arch_operations xpc_arch_ops;
43873+extern xpc_arch_operations_no_const xpc_arch_ops;
43874 extern int xpc_disengage_timelimit;
43875 extern int xpc_disengage_timedout;
43876 extern int xpc_activate_IRQ_rcvd;
43877diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
43878index 82dc574..8539ab2 100644
43879--- a/drivers/misc/sgi-xp/xpc_main.c
43880+++ b/drivers/misc/sgi-xp/xpc_main.c
43881@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
43882 .notifier_call = xpc_system_die,
43883 };
43884
43885-struct xpc_arch_operations xpc_arch_ops;
43886+xpc_arch_operations_no_const xpc_arch_ops;
43887
43888 /*
43889 * Timer function to enforce the timelimit on the partition disengage.
43890@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
43891
43892 if (((die_args->trapnr == X86_TRAP_MF) ||
43893 (die_args->trapnr == X86_TRAP_XF)) &&
43894- !user_mode_vm(die_args->regs))
43895+ !user_mode(die_args->regs))
43896 xpc_die_deactivate();
43897
43898 break;
43899diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
43900index ef18348..1b53cf0 100644
43901--- a/drivers/mmc/core/mmc_ops.c
43902+++ b/drivers/mmc/core/mmc_ops.c
43903@@ -213,7 +213,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
43904 void *data_buf;
43905 int is_on_stack;
43906
43907- is_on_stack = object_is_on_stack(buf);
43908+ is_on_stack = object_starts_on_stack(buf);
43909 if (is_on_stack) {
43910 /*
43911 * dma onto stack is unsafe/nonportable, but callers to this
43912diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
43913index 81b2994..dce857e 100644
43914--- a/drivers/mmc/host/dw_mmc.h
43915+++ b/drivers/mmc/host/dw_mmc.h
43916@@ -203,5 +203,5 @@ struct dw_mci_drv_data {
43917 void (*prepare_command)(struct dw_mci *host, u32 *cmdr);
43918 void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
43919 int (*parse_dt)(struct dw_mci *host);
43920-};
43921+} __do_const;
43922 #endif /* _DW_MMC_H_ */
43923diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
43924index c3785ed..1984c44 100644
43925--- a/drivers/mmc/host/mmci.c
43926+++ b/drivers/mmc/host/mmci.c
43927@@ -1482,7 +1482,9 @@ static int mmci_probe(struct amba_device *dev,
43928 }
43929
43930 if (variant->busy_detect) {
43931- mmci_ops.card_busy = mmci_card_busy;
43932+ pax_open_kernel();
43933+ *(void **)&mmci_ops.card_busy = mmci_card_busy;
43934+ pax_close_kernel();
43935 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
43936 }
43937
43938diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
43939index 6debda9..2ba7427 100644
43940--- a/drivers/mmc/host/sdhci-s3c.c
43941+++ b/drivers/mmc/host/sdhci-s3c.c
43942@@ -668,9 +668,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
43943 * we can use overriding functions instead of default.
43944 */
43945 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
43946- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
43947- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
43948- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
43949+ pax_open_kernel();
43950+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
43951+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
43952+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
43953+ pax_close_kernel();
43954 }
43955
43956 /* It supports additional host capabilities if needed */
43957diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
43958index 096993f..f02c23b 100644
43959--- a/drivers/mtd/chips/cfi_cmdset_0020.c
43960+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
43961@@ -669,7 +669,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
43962 size_t totlen = 0, thislen;
43963 int ret = 0;
43964 size_t buflen = 0;
43965- static char *buffer;
43966+ char *buffer;
43967
43968 if (!ECCBUF_SIZE) {
43969 /* We should fall back to a general writev implementation.
43970diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
43971index 2ed2bb3..2d0b82e 100644
43972--- a/drivers/mtd/nand/denali.c
43973+++ b/drivers/mtd/nand/denali.c
43974@@ -24,6 +24,7 @@
43975 #include <linux/slab.h>
43976 #include <linux/mtd/mtd.h>
43977 #include <linux/module.h>
43978+#include <linux/slab.h>
43979
43980 #include "denali.h"
43981
43982diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
43983index 51b9d6a..52af9a7 100644
43984--- a/drivers/mtd/nftlmount.c
43985+++ b/drivers/mtd/nftlmount.c
43986@@ -24,6 +24,7 @@
43987 #include <asm/errno.h>
43988 #include <linux/delay.h>
43989 #include <linux/slab.h>
43990+#include <linux/sched.h>
43991 #include <linux/mtd/mtd.h>
43992 #include <linux/mtd/nand.h>
43993 #include <linux/mtd/nftl.h>
43994diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
43995index 4b8e895..6b3c498 100644
43996--- a/drivers/mtd/sm_ftl.c
43997+++ b/drivers/mtd/sm_ftl.c
43998@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
43999 #define SM_CIS_VENDOR_OFFSET 0x59
44000 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
44001 {
44002- struct attribute_group *attr_group;
44003+ attribute_group_no_const *attr_group;
44004 struct attribute **attributes;
44005 struct sm_sysfs_attribute *vendor_attribute;
44006
44007diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
44008index dd8057d..22aaf36 100644
44009--- a/drivers/net/bonding/bond_main.c
44010+++ b/drivers/net/bonding/bond_main.c
44011@@ -4511,7 +4511,7 @@ static unsigned int bond_get_num_tx_queues(void)
44012 return tx_queues;
44013 }
44014
44015-static struct rtnl_link_ops bond_link_ops __read_mostly = {
44016+static struct rtnl_link_ops bond_link_ops = {
44017 .kind = "bond",
44018 .priv_size = sizeof(struct bonding),
44019 .setup = bond_setup,
44020@@ -4636,8 +4636,8 @@ static void __exit bonding_exit(void)
44021
44022 bond_destroy_debugfs();
44023
44024- rtnl_link_unregister(&bond_link_ops);
44025 unregister_pernet_subsys(&bond_net_ops);
44026+ rtnl_link_unregister(&bond_link_ops);
44027
44028 #ifdef CONFIG_NET_POLL_CONTROLLER
44029 /*
44030diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
44031index f92f001..0b2f9bf 100644
44032--- a/drivers/net/ethernet/8390/ax88796.c
44033+++ b/drivers/net/ethernet/8390/ax88796.c
44034@@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
44035 if (ax->plat->reg_offsets)
44036 ei_local->reg_offset = ax->plat->reg_offsets;
44037 else {
44038+ resource_size_t _mem_size = mem_size;
44039+ do_div(_mem_size, 0x18);
44040 ei_local->reg_offset = ax->reg_offsets;
44041 for (ret = 0; ret < 0x18; ret++)
44042- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
44043+ ax->reg_offsets[ret] = _mem_size * ret;
44044 }
44045
44046 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
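The ax88796 hunk replaces a plain division because resource_size_t can be 64-bit on 32-bit platforms, where '/' on a u64 drags in a libgcc helper; do_div() divides in place instead. A user-space stand-in that mimics do_div()'s calling convention (quotient left in the argument, remainder returned; uses the GCC statement-expression extension):

#include <stdio.h>
#include <stdint.h>

/* minimal stand-in mimicking do_div()'s calling convention */
#define do_div_sketch(n, base) ({                       \
        uint32_t __rem = (uint32_t)((n) % (base));      \
        (n) /= (base);                                  \
        __rem;                                          \
})

int main(void)
{
        uint64_t mem_size = 0x1000;

        do_div_sketch(mem_size, 0x18);  /* quotient left in mem_size */
        printf("stride = %llu\n", (unsigned long long)mem_size);
        return 0;
}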
44047diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
44048index da8fcaa..f4b5d3b 100644
44049--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
44050+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
44051@@ -1138,7 +1138,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
44052 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
44053 {
44054 /* RX_MODE controlling object */
44055- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
44056+ bnx2x_init_rx_mode_obj(bp);
44057
44058 /* multicast configuration controlling object */
44059 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
44060diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
44061index 9fbeee5..5e3e37a 100644
44062--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
44063+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
44064@@ -2590,15 +2590,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
44065 return rc;
44066 }
44067
44068-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
44069- struct bnx2x_rx_mode_obj *o)
44070+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
44071 {
44072 if (CHIP_IS_E1x(bp)) {
44073- o->wait_comp = bnx2x_empty_rx_mode_wait;
44074- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
44075+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
44076+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
44077 } else {
44078- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
44079- o->config_rx_mode = bnx2x_set_rx_mode_e2;
44080+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
44081+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
44082 }
44083 }
44084
44085diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
44086index 658f4e3..15074a6 100644
44087--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
44088+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
44089@@ -1325,8 +1325,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
44090
44091 /********************* RX MODE ****************/
44092
44093-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
44094- struct bnx2x_rx_mode_obj *o);
44095+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
44096
44097 /**
44098 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
44099diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
44100index 7025780..e55a71c 100644
44101--- a/drivers/net/ethernet/broadcom/tg3.h
44102+++ b/drivers/net/ethernet/broadcom/tg3.h
44103@@ -147,6 +147,7 @@
44104 #define CHIPREV_ID_5750_A0 0x4000
44105 #define CHIPREV_ID_5750_A1 0x4001
44106 #define CHIPREV_ID_5750_A3 0x4003
44107+#define CHIPREV_ID_5750_C1 0x4201
44108 #define CHIPREV_ID_5750_C2 0x4202
44109 #define CHIPREV_ID_5752_A0_HW 0x5000
44110 #define CHIPREV_ID_5752_A0 0x6000
44111diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
44112index 8cffcdf..aadf043 100644
44113--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
44114+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
44115@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
44116 */
44117 struct l2t_skb_cb {
44118 arp_failure_handler_func arp_failure_handler;
44119-};
44120+} __no_const;
44121
44122 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
44123
44124diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
44125index c73cabd..cd278b1 100644
44126--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
44127+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
44128@@ -2186,7 +2186,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
44129
44130 int i;
44131 struct adapter *ap = netdev2adap(dev);
44132- static const unsigned int *reg_ranges;
44133+ const unsigned int *reg_ranges;
44134 int arr_size = 0, buf_size = 0;
44135
44136 if (is_t4(ap->chip)) {
44137diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
44138index 263b92c..f05134b 100644
44139--- a/drivers/net/ethernet/dec/tulip/de4x5.c
44140+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
44141@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
44142 for (i=0; i<ETH_ALEN; i++) {
44143 tmp.addr[i] = dev->dev_addr[i];
44144 }
44145- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
44146+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
44147 break;
44148
44149 case DE4X5_SET_HWADDR: /* Set the hardware address */
44150@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
44151 spin_lock_irqsave(&lp->lock, flags);
44152 memcpy(&statbuf, &lp->pktStats, ioc->len);
44153 spin_unlock_irqrestore(&lp->lock, flags);
44154- if (copy_to_user(ioc->data, &statbuf, ioc->len))
44155+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
44156 return -EFAULT;
44157 break;
44158 }
44159diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
44160index 2c38cc4..0323f6e 100644
44161--- a/drivers/net/ethernet/emulex/benet/be_main.c
44162+++ b/drivers/net/ethernet/emulex/benet/be_main.c
44163@@ -470,7 +470,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
44164
44165 if (wrapped)
44166 newacc += 65536;
44167- ACCESS_ONCE(*acc) = newacc;
44168+ ACCESS_ONCE_RW(*acc) = newacc;
44169 }
44170
44171 static void populate_erx_stats(struct be_adapter *adapter,
44172diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
44173index 212f44b..fb69959 100644
44174--- a/drivers/net/ethernet/faraday/ftgmac100.c
44175+++ b/drivers/net/ethernet/faraday/ftgmac100.c
44176@@ -31,6 +31,8 @@
44177 #include <linux/netdevice.h>
44178 #include <linux/phy.h>
44179 #include <linux/platform_device.h>
44180+#include <linux/interrupt.h>
44181+#include <linux/irqreturn.h>
44182 #include <net/ip.h>
44183
44184 #include "ftgmac100.h"
44185diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
44186index 8be5b40..081bc1b 100644
44187--- a/drivers/net/ethernet/faraday/ftmac100.c
44188+++ b/drivers/net/ethernet/faraday/ftmac100.c
44189@@ -31,6 +31,8 @@
44190 #include <linux/module.h>
44191 #include <linux/netdevice.h>
44192 #include <linux/platform_device.h>
44193+#include <linux/interrupt.h>
44194+#include <linux/irqreturn.h>
44195
44196 #include "ftmac100.h"
44197
44198diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
44199index 5184e2a..acb28c3 100644
44200--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
44201+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
44202@@ -776,7 +776,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
44203 }
44204
44205 /* update the base incval used to calculate frequency adjustment */
44206- ACCESS_ONCE(adapter->base_incval) = incval;
44207+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
44208 smp_mb();
44209
44210 /* need lock to prevent incorrect read while modifying cyclecounter */
44211diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
44212index fbe5363..266b4e3 100644
44213--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
44214+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
44215@@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
44216 struct __vxge_hw_fifo *fifo;
44217 struct vxge_hw_fifo_config *config;
44218 u32 txdl_size, txdl_per_memblock;
44219- struct vxge_hw_mempool_cbs fifo_mp_callback;
44220+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
44221+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
44222+ };
44223+
44224 struct __vxge_hw_virtualpath *vpath;
44225
44226 if ((vp == NULL) || (attr == NULL)) {
44227@@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
44228 goto exit;
44229 }
44230
44231- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
44232-
44233 fifo->mempool =
44234 __vxge_hw_mempool_create(vpath->hldev,
44235 fifo->config->memblock_size,
44236diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
44237index f09e787..f3916a8 100644
44238--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
44239+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
44240@@ -2055,7 +2055,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
44241
44242 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
44243 ahw->nic_mode = QLC_83XX_DEFAULT_MODE;
44244- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
44245+ pax_open_kernel();
44246+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
44247+ pax_close_kernel();
44248 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
44249 } else {
44250 return -EIO;
44251diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
44252index 0248a4c..9648d96 100644
44253--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
44254+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
44255@@ -191,17 +191,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
44256 case QLCNIC_NON_PRIV_FUNC:
44257 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
44258 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
44259- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
44260+ pax_open_kernel();
44261+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
44262+ pax_close_kernel();
44263 break;
44264 case QLCNIC_PRIV_FUNC:
44265 ahw->op_mode = QLCNIC_PRIV_FUNC;
44266 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
44267- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
44268+ pax_open_kernel();
44269+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
44270+ pax_close_kernel();
44271 break;
44272 case QLCNIC_MGMT_FUNC:
44273 ahw->op_mode = QLCNIC_MGMT_FUNC;
44274 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
44275- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
44276+ pax_open_kernel();
44277+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
44278+ pax_close_kernel();
44279 break;
44280 default:
44281 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
44282diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
44283index 1551360..ed6510f 100644
44284--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
44285+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
44286@@ -1108,7 +1108,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
44287 struct qlcnic_dump_entry *entry;
44288 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
44289 struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
44290- static const struct qlcnic_dump_operations *fw_dump_ops;
44291+ const struct qlcnic_dump_operations *fw_dump_ops;
44292 struct device *dev = &adapter->pdev->dev;
44293 struct qlcnic_hardware_context *ahw;
44294 void *temp_buffer;
44295diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
44296index fb3f8dc..9d2ff38 100644
44297--- a/drivers/net/ethernet/realtek/r8169.c
44298+++ b/drivers/net/ethernet/realtek/r8169.c
44299@@ -759,22 +759,22 @@ struct rtl8169_private {
44300 struct mdio_ops {
44301 void (*write)(struct rtl8169_private *, int, int);
44302 int (*read)(struct rtl8169_private *, int);
44303- } mdio_ops;
44304+ } __no_const mdio_ops;
44305
44306 struct pll_power_ops {
44307 void (*down)(struct rtl8169_private *);
44308 void (*up)(struct rtl8169_private *);
44309- } pll_power_ops;
44310+ } __no_const pll_power_ops;
44311
44312 struct jumbo_ops {
44313 void (*enable)(struct rtl8169_private *);
44314 void (*disable)(struct rtl8169_private *);
44315- } jumbo_ops;
44316+ } __no_const jumbo_ops;
44317
44318 struct csi_ops {
44319 void (*write)(struct rtl8169_private *, int, int);
44320 u32 (*read)(struct rtl8169_private *, int);
44321- } csi_ops;
44322+ } __no_const csi_ops;
44323
44324 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
44325 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
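
The r8169 change above relies on __no_const, a grsecurity annotation that exempts a structure of function pointers from the constification plugin. These four ops structs are rewritten at probe time depending on the detected chip revision, so they must remain writable; __no_const records that exemption explicitly. The shape of the annotation, assuming the macro expands to a plugin-recognized attribute when constification is enabled and to nothing otherwise:

        /* Exempt from automatic constification: the driver fills these
         * pointers in at probe time based on the chip variant. */
        struct example_io_ops {
                void (*write)(int reg, int val);
                int  (*read)(int reg);
        } __no_const;
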
44326diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
44327index 03acf57..e1251ff 100644
44328--- a/drivers/net/ethernet/sfc/ptp.c
44329+++ b/drivers/net/ethernet/sfc/ptp.c
44330@@ -539,7 +539,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
44331 ptp->start.dma_addr);
44332
44333 /* Clear flag that signals MC ready */
44334- ACCESS_ONCE(*start) = 0;
44335+ ACCESS_ONCE_RW(*start) = 0;
44336 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
44337 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
44338 EFX_BUG_ON_PARANOID(rc);
44339diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
44340index 50617c5..b13724c 100644
44341--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
44342+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
44343@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
44344
44345 writel(value, ioaddr + MMC_CNTRL);
44346
44347- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
44348- MMC_CNTRL, value);
44349+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
44350+// MMC_CNTRL, value);
44351 }
44352
44353 /* To mask all interrupts.*/
44354diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
44355index 3169252..5d78c1d 100644
44356--- a/drivers/net/hamradio/hdlcdrv.c
44357+++ b/drivers/net/hamradio/hdlcdrv.c
44358@@ -571,6 +571,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
44359 case HDLCDRVCTL_CALIBRATE:
44360 if(!capable(CAP_SYS_RAWIO))
44361 return -EPERM;
44362+ if (bi.data.calibrate > INT_MAX / s->par.bitrate)
44363+ return -EINVAL;
44364 s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
44365 return 0;
44366
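
The added hdlcdrv check guards the multiplication on the next line: bi.data.calibrate * s->par.bitrate is computed in int, so a large user-supplied calibrate value could wrap to a negative calibration count. Testing one operand against INT_MAX divided by the other rejects exactly the values that would overflow. The idiom in isolation, as self-contained C (assumes both factors are positive, as the bitrate is here):

        #include <limits.h>

        /* Nonzero if a * b would overflow a signed int; requires a, b > 0. */
        static int mul_would_overflow(int a, int b)
        {
                return a > INT_MAX / b;
        }
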
44367diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
44368index e6fe0d8..2b7d752 100644
44369--- a/drivers/net/hyperv/hyperv_net.h
44370+++ b/drivers/net/hyperv/hyperv_net.h
44371@@ -101,7 +101,7 @@ struct rndis_device {
44372
44373 enum rndis_device_state state;
44374 bool link_state;
44375- atomic_t new_req_id;
44376+ atomic_unchecked_t new_req_id;
44377
44378 spinlock_t request_lock;
44379 struct list_head req_list;
44380diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
44381index 0775f0a..d4fb316 100644
44382--- a/drivers/net/hyperv/rndis_filter.c
44383+++ b/drivers/net/hyperv/rndis_filter.c
44384@@ -104,7 +104,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
44385 * template
44386 */
44387 set = &rndis_msg->msg.set_req;
44388- set->req_id = atomic_inc_return(&dev->new_req_id);
44389+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
44390
44391 /* Add to the request list */
44392 spin_lock_irqsave(&dev->request_lock, flags);
44393@@ -752,7 +752,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
44394
44395 /* Setup the rndis set */
44396 halt = &request->request_msg.msg.halt_req;
44397- halt->req_id = atomic_inc_return(&dev->new_req_id);
44398+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
44399
44400 /* Ignore return since this msg is optional. */
44401 rndis_filter_send_request(dev, request);
44402diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
44403index bf0d55e..82bcfbd1 100644
44404--- a/drivers/net/ieee802154/fakehard.c
44405+++ b/drivers/net/ieee802154/fakehard.c
44406@@ -364,7 +364,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
44407 phy->transmit_power = 0xbf;
44408
44409 dev->netdev_ops = &fake_ops;
44410- dev->ml_priv = &fake_mlme;
44411+ dev->ml_priv = (void *)&fake_mlme;
44412
44413 priv = netdev_priv(dev);
44414 priv->phy = phy;
44415diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
44416index 9bf46bd..bfdaa84 100644
44417--- a/drivers/net/macvlan.c
44418+++ b/drivers/net/macvlan.c
44419@@ -939,13 +939,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
44420 int macvlan_link_register(struct rtnl_link_ops *ops)
44421 {
44422 /* common fields */
44423- ops->priv_size = sizeof(struct macvlan_dev);
44424- ops->validate = macvlan_validate;
44425- ops->maxtype = IFLA_MACVLAN_MAX;
44426- ops->policy = macvlan_policy;
44427- ops->changelink = macvlan_changelink;
44428- ops->get_size = macvlan_get_size;
44429- ops->fill_info = macvlan_fill_info;
44430+ pax_open_kernel();
44431+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
44432+ *(void **)&ops->validate = macvlan_validate;
44433+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
44434+ *(const void **)&ops->policy = macvlan_policy;
44435+ *(void **)&ops->changelink = macvlan_changelink;
44436+ *(void **)&ops->get_size = macvlan_get_size;
44437+ *(void **)&ops->fill_info = macvlan_fill_info;
44438+ pax_close_kernel();
44439
44440 return rtnl_link_register(ops);
44441 };
44442@@ -1001,7 +1003,7 @@ static int macvlan_device_event(struct notifier_block *unused,
44443 return NOTIFY_DONE;
44444 }
44445
44446-static struct notifier_block macvlan_notifier_block __read_mostly = {
44447+static struct notifier_block macvlan_notifier_block = {
44448 .notifier_call = macvlan_device_event,
44449 };
44450
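
The macvlan_link_register hunk above is one instance of a pattern repeated throughout this patch (the qlcnic driver earlier; mac80211_hwsim, wl1251, wl12xx/wl18xx and the PCI hotplug code below): with the constification plugin active, rtnl_link_ops and similar ops structures live in read-only memory, so the few legitimate runtime writes to them are rewritten as field-by-field casts bracketed by pax_open_kernel()/pax_close_kernel(), which temporarily lift the write protection. A hedged sketch of the shape, with an illustrative ops type:

        struct example_ops {
                int (*probe)(void);
        };

        static struct example_ops ex_ops; /* read-only after constification */

        static void example_late_bind(int (*probe_fn)(void))
        {
                pax_open_kernel();  /* make the region temporarily writable */
                *(void **)&ex_ops.probe = probe_fn;
                pax_close_kernel(); /* restore read-only protection */
        }
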
44451diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
44452index dc76670..e18f39c 100644
44453--- a/drivers/net/macvtap.c
44454+++ b/drivers/net/macvtap.c
44455@@ -1189,7 +1189,7 @@ static int macvtap_device_event(struct notifier_block *unused,
44456 return NOTIFY_DONE;
44457 }
44458
44459-static struct notifier_block macvtap_notifier_block __read_mostly = {
44460+static struct notifier_block macvtap_notifier_block = {
44461 .notifier_call = macvtap_device_event,
44462 };
44463
44464diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
44465index daec9b0..6428fcb 100644
44466--- a/drivers/net/phy/mdio-bitbang.c
44467+++ b/drivers/net/phy/mdio-bitbang.c
44468@@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
44469 struct mdiobb_ctrl *ctrl = bus->priv;
44470
44471 module_put(ctrl->ops->owner);
44472+ mdiobus_unregister(bus);
44473 mdiobus_free(bus);
44474 }
44475 EXPORT_SYMBOL(free_mdio_bitbang);
44476diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
44477index 72ff14b..11d442d 100644
44478--- a/drivers/net/ppp/ppp_generic.c
44479+++ b/drivers/net/ppp/ppp_generic.c
44480@@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
44481 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
44482 struct ppp_stats stats;
44483 struct ppp_comp_stats cstats;
44484- char *vers;
44485
44486 switch (cmd) {
44487 case SIOCGPPPSTATS:
44488@@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
44489 break;
44490
44491 case SIOCGPPPVER:
44492- vers = PPP_VERSION;
44493- if (copy_to_user(addr, vers, strlen(vers) + 1))
44494+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
44495 break;
44496 err = 0;
44497 break;
44498diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
44499index 1252d9c..80e660b 100644
44500--- a/drivers/net/slip/slhc.c
44501+++ b/drivers/net/slip/slhc.c
44502@@ -488,7 +488,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
44503 register struct tcphdr *thp;
44504 register struct iphdr *ip;
44505 register struct cstate *cs;
44506- int len, hdrlen;
44507+ long len, hdrlen;
44508 unsigned char *cp = icp;
44509
44510 /* We've got a compressed packet; read the change byte */
44511diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
44512index 6327df2..e6e1ebe 100644
44513--- a/drivers/net/team/team.c
44514+++ b/drivers/net/team/team.c
44515@@ -2873,7 +2873,7 @@ static int team_device_event(struct notifier_block *unused,
44516 return NOTIFY_DONE;
44517 }
44518
44519-static struct notifier_block team_notifier_block __read_mostly = {
44520+static struct notifier_block team_notifier_block = {
44521 .notifier_call = team_device_event,
44522 };
44523
44524diff --git a/drivers/net/tun.c b/drivers/net/tun.c
44525index 782e38b..d076fdc 100644
44526--- a/drivers/net/tun.c
44527+++ b/drivers/net/tun.c
44528@@ -1834,7 +1834,7 @@ unlock:
44529 }
44530
44531 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
44532- unsigned long arg, int ifreq_len)
44533+ unsigned long arg, size_t ifreq_len)
44534 {
44535 struct tun_file *tfile = file->private_data;
44536 struct tun_struct *tun;
44537@@ -1847,6 +1847,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
44538 unsigned int ifindex;
44539 int ret;
44540
44541+ if (ifreq_len > sizeof ifr)
44542+ return -EFAULT;
44543+
44544 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
44545 if (copy_from_user(&ifr, argp, ifreq_len))
44546 return -EFAULT;
44547diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
44548index 86292e6..8d34433 100644
44549--- a/drivers/net/usb/hso.c
44550+++ b/drivers/net/usb/hso.c
44551@@ -71,7 +71,7 @@
44552 #include <asm/byteorder.h>
44553 #include <linux/serial_core.h>
44554 #include <linux/serial.h>
44555-
44556+#include <asm/local.h>
44557
44558 #define MOD_AUTHOR "Option Wireless"
44559 #define MOD_DESCRIPTION "USB High Speed Option driver"
44560@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
44561 struct urb *urb;
44562
44563 urb = serial->rx_urb[0];
44564- if (serial->port.count > 0) {
44565+ if (atomic_read(&serial->port.count) > 0) {
44566 count = put_rxbuf_data(urb, serial);
44567 if (count == -1)
44568 return;
44569@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
44570 DUMP1(urb->transfer_buffer, urb->actual_length);
44571
44572 /* Anyone listening? */
44573- if (serial->port.count == 0)
44574+ if (atomic_read(&serial->port.count) == 0)
44575 return;
44576
44577 if (status == 0) {
44578@@ -1298,8 +1298,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
44579 tty_port_tty_set(&serial->port, tty);
44580
44581 /* check for port already opened, if not set the termios */
44582- serial->port.count++;
44583- if (serial->port.count == 1) {
44584+ if (atomic_inc_return(&serial->port.count) == 1) {
44585 serial->rx_state = RX_IDLE;
44586 /* Force default termio settings */
44587 _hso_serial_set_termios(tty, NULL);
44588@@ -1311,7 +1310,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
44589 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
44590 if (result) {
44591 hso_stop_serial_device(serial->parent);
44592- serial->port.count--;
44593+ atomic_dec(&serial->port.count);
44594 kref_put(&serial->parent->ref, hso_serial_ref_free);
44595 }
44596 } else {
44597@@ -1348,10 +1347,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
44598
44599 /* reset the rts and dtr */
44600 /* do the actual close */
44601- serial->port.count--;
44602+ atomic_dec(&serial->port.count);
44603
44604- if (serial->port.count <= 0) {
44605- serial->port.count = 0;
44606+ if (atomic_read(&serial->port.count) <= 0) {
44607+ atomic_set(&serial->port.count, 0);
44608 tty_port_tty_set(&serial->port, NULL);
44609 if (!usb_gone)
44610 hso_stop_serial_device(serial->parent);
44611@@ -1427,7 +1426,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
44612
44613 /* the actual setup */
44614 spin_lock_irqsave(&serial->serial_lock, flags);
44615- if (serial->port.count)
44616+ if (atomic_read(&serial->port.count))
44617 _hso_serial_set_termios(tty, old);
44618 else
44619 tty->termios = *old;
44620@@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
44621 D1("Pending read interrupt on port %d\n", i);
44622 spin_lock(&serial->serial_lock);
44623 if (serial->rx_state == RX_IDLE &&
44624- serial->port.count > 0) {
44625+ atomic_read(&serial->port.count) > 0) {
44626 /* Setup and send a ctrl req read on
44627 * port i */
44628 if (!serial->rx_urb_filled[0]) {
44629@@ -3062,7 +3061,7 @@ static int hso_resume(struct usb_interface *iface)
44630 /* Start all serial ports */
44631 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
44632 if (serial_table[i] && (serial_table[i]->interface == iface)) {
44633- if (dev2ser(serial_table[i])->port.count) {
44634+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
44635 result =
44636 hso_start_serial_device(serial_table[i], GFP_NOIO);
44637 hso_kick_transmit(dev2ser(serial_table[i]));
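
Throughout the hso hunks the tty port's open count moves from a plain int to an atomic_t. Besides fitting the patch's refcount hardening, this collapses the racy two-step "count++; if (count == 1)" into a single atomic_inc_return() whose return value identifies the first opener; the remaining reads and resets become explicit atomic_read()/atomic_set(). Minimal sketch:

        static atomic_t open_count = ATOMIC_INIT(0);

        static void example_open(void)
        {
                /* Increment and test in one atomic step: exactly one
                 * caller observes the 0 -> 1 transition. */
                if (atomic_inc_return(&open_count) == 1)
                        do_first_open_setup(); /* illustrative helper */
        }
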
44638diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
44639index a79e9d3..78cd4fa 100644
44640--- a/drivers/net/usb/sierra_net.c
44641+++ b/drivers/net/usb/sierra_net.c
44642@@ -52,7 +52,7 @@ static const char driver_name[] = "sierra_net";
44643 /* atomic counter partially included in MAC address to make sure 2 devices
44644 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
44645 */
44646-static atomic_t iface_counter = ATOMIC_INIT(0);
44647+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
44648
44649 /*
44650 * SYNC Timer Delay definition used to set the expiry time
44651@@ -698,7 +698,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
44652 dev->net->netdev_ops = &sierra_net_device_ops;
44653
44654 /* change MAC addr to include, ifacenum, and to be unique */
44655- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
44656+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
44657 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
44658
44659 /* we will have to manufacture ethernet headers, prepare template */
44660diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
44661index 2ef5b62..6fa0ec3 100644
44662--- a/drivers/net/vxlan.c
44663+++ b/drivers/net/vxlan.c
44664@@ -2615,7 +2615,7 @@ nla_put_failure:
44665 return -EMSGSIZE;
44666 }
44667
44668-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
44669+static struct rtnl_link_ops vxlan_link_ops = {
44670 .kind = "vxlan",
44671 .maxtype = IFLA_VXLAN_MAX,
44672 .policy = vxlan_policy,
44673diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
44674index 0b60295..b8bfa5b 100644
44675--- a/drivers/net/wimax/i2400m/rx.c
44676+++ b/drivers/net/wimax/i2400m/rx.c
44677@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
44678 if (i2400m->rx_roq == NULL)
44679 goto error_roq_alloc;
44680
44681- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
44682+ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
44683 GFP_KERNEL);
44684 if (rd == NULL) {
44685 result = -ENOMEM;
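
The i2400m hunk swaps kcalloc()'s first two arguments; since kcalloc(n, size, flags) only multiplies them, the allocation is byte-for-byte unchanged. The reordering is presumably for the patch's size_overflow instrumentation, which keys on particular argument positions; in plain kernel code the documented order keeps the element count first. For reference, the overflow-checked allocation idiom itself:

        struct entry { u32 val; }; /* illustrative element type */

        static struct entry *alloc_table(size_t nr_entries)
        {
                /* kcalloc checks nr_entries * sizeof() for overflow and
                 * returns NULL on wrap, unlike kmalloc(n * size). */
                return kcalloc(nr_entries, sizeof(struct entry), GFP_KERNEL);
        }
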
44686diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
44687index 7fe1964..7016de0 100644
44688--- a/drivers/net/wireless/airo.c
44689+++ b/drivers/net/wireless/airo.c
44690@@ -7844,7 +7844,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
44691 struct airo_info *ai = dev->ml_priv;
44692 int ridcode;
44693 int enabled;
44694- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
44695+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
44696 unsigned char *iobuf;
44697
44698 /* Only super-user can write RIDs */
44699diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
44700index 34c8a33..3261fdc 100644
44701--- a/drivers/net/wireless/at76c50x-usb.c
44702+++ b/drivers/net/wireless/at76c50x-usb.c
44703@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
44704 }
44705
44706 /* Convert timeout from the DFU status to jiffies */
44707-static inline unsigned long at76_get_timeout(struct dfu_status *s)
44708+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
44709 {
44710 return msecs_to_jiffies((s->poll_timeout[2] << 16)
44711 | (s->poll_timeout[1] << 8)
44712diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
44713index ef3329e..c28ff5d 100644
44714--- a/drivers/net/wireless/ath/ath10k/htc.c
44715+++ b/drivers/net/wireless/ath/ath10k/htc.c
44716@@ -963,7 +963,10 @@ void ath10k_htc_stop(struct ath10k_htc *htc)
44717 /* registered target arrival callback from the HIF layer */
44718 int ath10k_htc_init(struct ath10k *ar)
44719 {
44720- struct ath10k_hif_cb htc_callbacks;
44721+ static struct ath10k_hif_cb htc_callbacks = {
44722+ .rx_completion = ath10k_htc_rx_completion_handler,
44723+ .tx_completion = ath10k_htc_tx_completion_handler,
44724+ };
44725 struct ath10k_htc_ep *ep = NULL;
44726 struct ath10k_htc *htc = &ar->htc;
44727
44728@@ -973,8 +976,6 @@ int ath10k_htc_init(struct ath10k *ar)
44729 ath10k_htc_reset_endpoint_states(htc);
44730
44731 /* setup HIF layer callbacks */
44732- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
44733- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
44734 htc->ar = ar;
44735
44736 /* Get HIF default pipe for HTC message exchange */
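
This ath10k change and the earlier vxge-config.c one are the same transformation: a callbacks structure that used to be assembled on the stack at each call becomes a static object with a designated initializer. The function-pointer table then lives in static storage, which the constification machinery can protect and which outlives the registering function, instead of in writable stack memory. Sketch with illustrative names:

        struct example_cb {
                void (*rx_done)(int id);
                void (*tx_done)(int id);
        };

        static void on_rx(int id) { }
        static void on_tx(int id) { }

        static int example_init(void)
        {
                /* Initialized at compile time; the pointers are never
                 * written at runtime. */
                static struct example_cb callbacks = {
                        .rx_done = on_rx,
                        .tx_done = on_tx,
                };

                return example_register(&callbacks); /* illustrative registrar */
        }
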
44737diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
44738index e1dd8c7..9f91b3f 100644
44739--- a/drivers/net/wireless/ath/ath10k/htc.h
44740+++ b/drivers/net/wireless/ath/ath10k/htc.h
44741@@ -271,12 +271,12 @@ enum ath10k_htc_ep_id {
44742
44743 struct ath10k_htc_ops {
44744 void (*target_send_suspend_complete)(struct ath10k *ar);
44745-};
44746+} __no_const;
44747
44748 struct ath10k_htc_ep_ops {
44749 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
44750 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
44751-};
44752+} __no_const;
44753
44754 /* service connection information */
44755 struct ath10k_htc_svc_conn_req {
44756diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
44757index 8d78253..bebbb68 100644
44758--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
44759+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
44760@@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44761 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
44762 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
44763
44764- ACCESS_ONCE(ads->ds_link) = i->link;
44765- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
44766+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
44767+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
44768
44769 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
44770 ctl6 = SM(i->keytype, AR_EncrType);
44771@@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44772
44773 if ((i->is_first || i->is_last) &&
44774 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
44775- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
44776+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
44777 | set11nTries(i->rates, 1)
44778 | set11nTries(i->rates, 2)
44779 | set11nTries(i->rates, 3)
44780 | (i->dur_update ? AR_DurUpdateEna : 0)
44781 | SM(0, AR_BurstDur);
44782
44783- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
44784+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
44785 | set11nRate(i->rates, 1)
44786 | set11nRate(i->rates, 2)
44787 | set11nRate(i->rates, 3);
44788 } else {
44789- ACCESS_ONCE(ads->ds_ctl2) = 0;
44790- ACCESS_ONCE(ads->ds_ctl3) = 0;
44791+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
44792+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
44793 }
44794
44795 if (!i->is_first) {
44796- ACCESS_ONCE(ads->ds_ctl0) = 0;
44797- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
44798- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
44799+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
44800+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
44801+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
44802 return;
44803 }
44804
44805@@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44806 break;
44807 }
44808
44809- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
44810+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
44811 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
44812 | SM(i->txpower, AR_XmitPower)
44813 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
44814@@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44815 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
44816 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
44817
44818- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
44819- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
44820+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
44821+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
44822
44823 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
44824 return;
44825
44826- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
44827+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
44828 | set11nPktDurRTSCTS(i->rates, 1);
44829
44830- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
44831+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
44832 | set11nPktDurRTSCTS(i->rates, 3);
44833
44834- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
44835+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
44836 | set11nRateFlags(i->rates, 1)
44837 | set11nRateFlags(i->rates, 2)
44838 | set11nRateFlags(i->rates, 3)
44839diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
44840index f6c5c1b..6058354 100644
44841--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
44842+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
44843@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44844 (i->qcu << AR_TxQcuNum_S) | desc_len;
44845
44846 checksum += val;
44847- ACCESS_ONCE(ads->info) = val;
44848+ ACCESS_ONCE_RW(ads->info) = val;
44849
44850 checksum += i->link;
44851- ACCESS_ONCE(ads->link) = i->link;
44852+ ACCESS_ONCE_RW(ads->link) = i->link;
44853
44854 checksum += i->buf_addr[0];
44855- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
44856+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
44857 checksum += i->buf_addr[1];
44858- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
44859+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
44860 checksum += i->buf_addr[2];
44861- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
44862+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
44863 checksum += i->buf_addr[3];
44864- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
44865+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
44866
44867 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
44868- ACCESS_ONCE(ads->ctl3) = val;
44869+ ACCESS_ONCE_RW(ads->ctl3) = val;
44870 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
44871- ACCESS_ONCE(ads->ctl5) = val;
44872+ ACCESS_ONCE_RW(ads->ctl5) = val;
44873 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
44874- ACCESS_ONCE(ads->ctl7) = val;
44875+ ACCESS_ONCE_RW(ads->ctl7) = val;
44876 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
44877- ACCESS_ONCE(ads->ctl9) = val;
44878+ ACCESS_ONCE_RW(ads->ctl9) = val;
44879
44880 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
44881- ACCESS_ONCE(ads->ctl10) = checksum;
44882+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
44883
44884 if (i->is_first || i->is_last) {
44885- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
44886+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
44887 | set11nTries(i->rates, 1)
44888 | set11nTries(i->rates, 2)
44889 | set11nTries(i->rates, 3)
44890 | (i->dur_update ? AR_DurUpdateEna : 0)
44891 | SM(0, AR_BurstDur);
44892
44893- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
44894+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
44895 | set11nRate(i->rates, 1)
44896 | set11nRate(i->rates, 2)
44897 | set11nRate(i->rates, 3);
44898 } else {
44899- ACCESS_ONCE(ads->ctl13) = 0;
44900- ACCESS_ONCE(ads->ctl14) = 0;
44901+ ACCESS_ONCE_RW(ads->ctl13) = 0;
44902+ ACCESS_ONCE_RW(ads->ctl14) = 0;
44903 }
44904
44905 ads->ctl20 = 0;
44906@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44907
44908 ctl17 = SM(i->keytype, AR_EncrType);
44909 if (!i->is_first) {
44910- ACCESS_ONCE(ads->ctl11) = 0;
44911- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
44912- ACCESS_ONCE(ads->ctl15) = 0;
44913- ACCESS_ONCE(ads->ctl16) = 0;
44914- ACCESS_ONCE(ads->ctl17) = ctl17;
44915- ACCESS_ONCE(ads->ctl18) = 0;
44916- ACCESS_ONCE(ads->ctl19) = 0;
44917+ ACCESS_ONCE_RW(ads->ctl11) = 0;
44918+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
44919+ ACCESS_ONCE_RW(ads->ctl15) = 0;
44920+ ACCESS_ONCE_RW(ads->ctl16) = 0;
44921+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
44922+ ACCESS_ONCE_RW(ads->ctl18) = 0;
44923+ ACCESS_ONCE_RW(ads->ctl19) = 0;
44924 return;
44925 }
44926
44927- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
44928+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
44929 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
44930 | SM(i->txpower, AR_XmitPower)
44931 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
44932@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44933 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
44934 ctl12 |= SM(val, AR_PAPRDChainMask);
44935
44936- ACCESS_ONCE(ads->ctl12) = ctl12;
44937- ACCESS_ONCE(ads->ctl17) = ctl17;
44938+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
44939+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
44940
44941- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
44942+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
44943 | set11nPktDurRTSCTS(i->rates, 1);
44944
44945- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
44946+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
44947 | set11nPktDurRTSCTS(i->rates, 3);
44948
44949- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
44950+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
44951 | set11nRateFlags(i->rates, 1)
44952 | set11nRateFlags(i->rates, 2)
44953 | set11nRateFlags(i->rates, 3)
44954 | SM(i->rtscts_rate, AR_RTSCTSRate);
44955
44956- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
44957+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
44958 }
44959
44960 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
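
The long runs of ACCESS_ONCE -> ACCESS_ONCE_RW conversions in these ath9k descriptor writers (and in be_main.c, ixgbe_ptp.c and sfc/ptp.c earlier) follow from the hardened definition of ACCESS_ONCE, which takes a const-qualified volatile view of its argument so that stray stores through it fail to compile; intentional stores must use the _RW variant. A plausible pair of definitions matching that split (the patch's own definitions live in include/linux/compiler.h, outside this excerpt):

        #define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x)) /* loads only */
        #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))       /* loads and stores */

        static void example_store(unsigned long *slot, unsigned long val)
        {
                ACCESS_ONCE_RW(*slot) = val; /* single, untorn volatile store */
        }
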
44961diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
44962index 69a907b..91e071c 100644
44963--- a/drivers/net/wireless/ath/ath9k/hw.h
44964+++ b/drivers/net/wireless/ath/ath9k/hw.h
44965@@ -657,7 +657,7 @@ struct ath_hw_private_ops {
44966
44967 /* ANI */
44968 void (*ani_cache_ini_regs)(struct ath_hw *ah);
44969-};
44970+} __no_const;
44971
44972 /**
44973 * struct ath_spec_scan - parameters for Atheros spectral scan
44974@@ -729,7 +729,7 @@ struct ath_hw_ops {
44975 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
44976 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
44977 #endif
44978-};
44979+} __no_const;
44980
44981 struct ath_nf_limits {
44982 s16 max;
44983diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
44984index 92190da..f3a4c4c 100644
44985--- a/drivers/net/wireless/b43/phy_lp.c
44986+++ b/drivers/net/wireless/b43/phy_lp.c
44987@@ -2514,7 +2514,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
44988 {
44989 struct ssb_bus *bus = dev->dev->sdev->bus;
44990
44991- static const struct b206x_channel *chandata = NULL;
44992+ const struct b206x_channel *chandata = NULL;
44993 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
44994 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
44995 u16 old_comm15, scale;
44996diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
44997index 9581d07..84f6a76 100644
44998--- a/drivers/net/wireless/iwlegacy/3945-mac.c
44999+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
45000@@ -3639,7 +3639,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
45001 */
45002 if (il3945_mod_params.disable_hw_scan) {
45003 D_INFO("Disabling hw_scan\n");
45004- il3945_mac_ops.hw_scan = NULL;
45005+ pax_open_kernel();
45006+ *(void **)&il3945_mac_ops.hw_scan = NULL;
45007+ pax_close_kernel();
45008 }
45009
45010 D_INFO("*** LOAD DRIVER ***\n");
45011diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
45012index d94f8ab..5b568c8 100644
45013--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
45014+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
45015@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
45016 {
45017 struct iwl_priv *priv = file->private_data;
45018 char buf[64];
45019- int buf_size;
45020+ size_t buf_size;
45021 u32 offset, len;
45022
45023 memset(buf, 0, sizeof(buf));
45024@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
45025 struct iwl_priv *priv = file->private_data;
45026
45027 char buf[8];
45028- int buf_size;
45029+ size_t buf_size;
45030 u32 reset_flag;
45031
45032 memset(buf, 0, sizeof(buf));
45033@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
45034 {
45035 struct iwl_priv *priv = file->private_data;
45036 char buf[8];
45037- int buf_size;
45038+ size_t buf_size;
45039 int ht40;
45040
45041 memset(buf, 0, sizeof(buf));
45042@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
45043 {
45044 struct iwl_priv *priv = file->private_data;
45045 char buf[8];
45046- int buf_size;
45047+ size_t buf_size;
45048 int value;
45049
45050 memset(buf, 0, sizeof(buf));
45051@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
45052 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
45053 DEBUGFS_READ_FILE_OPS(current_sleep_command);
45054
45055-static const char *fmt_value = " %-30s %10u\n";
45056-static const char *fmt_hex = " %-30s 0x%02X\n";
45057-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
45058-static const char *fmt_header =
45059+static const char fmt_value[] = " %-30s %10u\n";
45060+static const char fmt_hex[] = " %-30s 0x%02X\n";
45061+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
45062+static const char fmt_header[] =
45063 "%-32s current cumulative delta max\n";
45064
45065 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
45066@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
45067 {
45068 struct iwl_priv *priv = file->private_data;
45069 char buf[8];
45070- int buf_size;
45071+ size_t buf_size;
45072 int clear;
45073
45074 memset(buf, 0, sizeof(buf));
45075@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
45076 {
45077 struct iwl_priv *priv = file->private_data;
45078 char buf[8];
45079- int buf_size;
45080+ size_t buf_size;
45081 int trace;
45082
45083 memset(buf, 0, sizeof(buf));
45084@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
45085 {
45086 struct iwl_priv *priv = file->private_data;
45087 char buf[8];
45088- int buf_size;
45089+ size_t buf_size;
45090 int missed;
45091
45092 memset(buf, 0, sizeof(buf));
45093@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
45094
45095 struct iwl_priv *priv = file->private_data;
45096 char buf[8];
45097- int buf_size;
45098+ size_t buf_size;
45099 int plcp;
45100
45101 memset(buf, 0, sizeof(buf));
45102@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
45103
45104 struct iwl_priv *priv = file->private_data;
45105 char buf[8];
45106- int buf_size;
45107+ size_t buf_size;
45108 int flush;
45109
45110 memset(buf, 0, sizeof(buf));
45111@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
45112
45113 struct iwl_priv *priv = file->private_data;
45114 char buf[8];
45115- int buf_size;
45116+ size_t buf_size;
45117 int rts;
45118
45119 if (!priv->cfg->ht_params)
45120@@ -2205,7 +2205,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
45121 {
45122 struct iwl_priv *priv = file->private_data;
45123 char buf[8];
45124- int buf_size;
45125+ size_t buf_size;
45126
45127 memset(buf, 0, sizeof(buf));
45128 buf_size = min(count, sizeof(buf) - 1);
45129@@ -2239,7 +2239,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
45130 struct iwl_priv *priv = file->private_data;
45131 u32 event_log_flag;
45132 char buf[8];
45133- int buf_size;
45134+ size_t buf_size;
45135
45136 /* check that the interface is up */
45137 if (!iwl_is_ready(priv))
45138@@ -2293,7 +2293,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
45139 struct iwl_priv *priv = file->private_data;
45140 char buf[8];
45141 u32 calib_disabled;
45142- int buf_size;
45143+ size_t buf_size;
45144
45145 memset(buf, 0, sizeof(buf));
45146 buf_size = min(count, sizeof(buf) - 1);
45147diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
45148index 7aad766..06addb4 100644
45149--- a/drivers/net/wireless/iwlwifi/dvm/main.c
45150+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
45151@@ -1123,7 +1123,7 @@ static void iwl_option_config(struct iwl_priv *priv)
45152 static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
45153 {
45154 struct iwl_nvm_data *data = priv->nvm_data;
45155- char *debug_msg;
45156+ static const char debug_msg[] = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n";
45157
45158 if (data->sku_cap_11n_enable &&
45159 !priv->cfg->ht_params) {
45160@@ -1137,7 +1137,6 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
45161 return -EINVAL;
45162 }
45163
45164- debug_msg = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n";
45165 IWL_DEBUG_INFO(priv, debug_msg,
45166 data->sku_cap_band_24GHz_enable ? "" : "NOT", "enabled",
45167 data->sku_cap_band_52GHz_enable ? "" : "NOT", "enabled",
45168diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
45169index 6bc3100..dd1b80d 100644
45170--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
45171+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
45172@@ -1249,7 +1249,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
45173 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
45174
45175 char buf[8];
45176- int buf_size;
45177+ size_t buf_size;
45178 u32 reset_flag;
45179
45180 memset(buf, 0, sizeof(buf));
45181@@ -1270,7 +1270,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
45182 {
45183 struct iwl_trans *trans = file->private_data;
45184 char buf[8];
45185- int buf_size;
45186+ size_t buf_size;
45187 int csr;
45188
45189 memset(buf, 0, sizeof(buf));
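
Every iwlwifi debugfs write handler above receives the same one-line change, int buf_size -> size_t buf_size. The value is computed as min(count, sizeof(buf) - 1), where both operands are size_t; storing it in a signed int invites sign and truncation surprises, while size_t matches count, sizeof() and copy_from_user()'s length parameter. A sketch of the corrected shape:

        static ssize_t example_write(const char __user *ubuf, size_t count)
        {
                char buf[8] = { 0 };
                size_t buf_size = min(count, sizeof(buf) - 1); /* same type throughout */

                if (copy_from_user(buf, ubuf, buf_size))
                        return -EFAULT;
                return count;
        }
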
45190diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
45191index 2cd3f54..e936f90 100644
45192--- a/drivers/net/wireless/mac80211_hwsim.c
45193+++ b/drivers/net/wireless/mac80211_hwsim.c
45194@@ -2196,25 +2196,19 @@ static int __init init_mac80211_hwsim(void)
45195
45196 if (channels > 1) {
45197 hwsim_if_comb.num_different_channels = channels;
45198- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
45199- mac80211_hwsim_ops.cancel_hw_scan =
45200- mac80211_hwsim_cancel_hw_scan;
45201- mac80211_hwsim_ops.sw_scan_start = NULL;
45202- mac80211_hwsim_ops.sw_scan_complete = NULL;
45203- mac80211_hwsim_ops.remain_on_channel =
45204- mac80211_hwsim_roc;
45205- mac80211_hwsim_ops.cancel_remain_on_channel =
45206- mac80211_hwsim_croc;
45207- mac80211_hwsim_ops.add_chanctx =
45208- mac80211_hwsim_add_chanctx;
45209- mac80211_hwsim_ops.remove_chanctx =
45210- mac80211_hwsim_remove_chanctx;
45211- mac80211_hwsim_ops.change_chanctx =
45212- mac80211_hwsim_change_chanctx;
45213- mac80211_hwsim_ops.assign_vif_chanctx =
45214- mac80211_hwsim_assign_vif_chanctx;
45215- mac80211_hwsim_ops.unassign_vif_chanctx =
45216- mac80211_hwsim_unassign_vif_chanctx;
45217+ pax_open_kernel();
45218+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
45219+ *(void **)&mac80211_hwsim_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
45220+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
45221+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
45222+ *(void **)&mac80211_hwsim_ops.remain_on_channel = mac80211_hwsim_roc;
45223+ *(void **)&mac80211_hwsim_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
45224+ *(void **)&mac80211_hwsim_ops.add_chanctx = mac80211_hwsim_add_chanctx;
45225+ *(void **)&mac80211_hwsim_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
45226+ *(void **)&mac80211_hwsim_ops.change_chanctx = mac80211_hwsim_change_chanctx;
45227+ *(void **)&mac80211_hwsim_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
45228+ *(void **)&mac80211_hwsim_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
45229+ pax_close_kernel();
45230 }
45231
45232 spin_lock_init(&hwsim_radio_lock);
45233diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
45234index 8169a85..7fa3b47 100644
45235--- a/drivers/net/wireless/rndis_wlan.c
45236+++ b/drivers/net/wireless/rndis_wlan.c
45237@@ -1238,7 +1238,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
45238
45239 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
45240
45241- if (rts_threshold < 0 || rts_threshold > 2347)
45242+ if (rts_threshold > 2347)
45243 rts_threshold = 2347;
45244
45245 tmp = cpu_to_le32(rts_threshold);
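
The rndis_wlan hunk deletes the "rts_threshold < 0" arm: rts_threshold is a u32, so the comparison is always false, the branch is dead code, and compilers warn about it; the remaining upper clamp to 2347 covers the whole valid range. The bug class in standalone C:

        #include <stdio.h>

        int main(void)
        {
                unsigned int v = 0;

                if (v < 0) /* always false for an unsigned type */
                        printf("never printed\n");
                return 0;
        }
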
45246diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
45247index fe4c572..99dedfa 100644
45248--- a/drivers/net/wireless/rt2x00/rt2x00.h
45249+++ b/drivers/net/wireless/rt2x00/rt2x00.h
45250@@ -387,7 +387,7 @@ struct rt2x00_intf {
45251 * for hardware which doesn't support hardware
45252 * sequence counting.
45253 */
45254- atomic_t seqno;
45255+ atomic_unchecked_t seqno;
45256 };
45257
45258 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
45259diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
45260index 66a2db8..70cad04 100644
45261--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
45262+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
45263@@ -252,9 +252,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
45264 * sequence counter given by mac80211.
45265 */
45266 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
45267- seqno = atomic_add_return(0x10, &intf->seqno);
45268+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
45269 else
45270- seqno = atomic_read(&intf->seqno);
45271+ seqno = atomic_read_unchecked(&intf->seqno);
45272
45273 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
45274 hdr->seq_ctrl |= cpu_to_le16(seqno);
45275diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
45276index e2b3d9c..67a5184 100644
45277--- a/drivers/net/wireless/ti/wl1251/sdio.c
45278+++ b/drivers/net/wireless/ti/wl1251/sdio.c
45279@@ -271,13 +271,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
45280
45281 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
45282
45283- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
45284- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
45285+ pax_open_kernel();
45286+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
45287+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
45288+ pax_close_kernel();
45289
45290 wl1251_info("using dedicated interrupt line");
45291 } else {
45292- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
45293- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
45294+ pax_open_kernel();
45295+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
45296+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
45297+ pax_close_kernel();
45298
45299 wl1251_info("using SDIO interrupt");
45300 }
45301diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
45302index 1c627da..69f7d17 100644
45303--- a/drivers/net/wireless/ti/wl12xx/main.c
45304+++ b/drivers/net/wireless/ti/wl12xx/main.c
45305@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
45306 sizeof(wl->conf.mem));
45307
45308 /* read data preparation is only needed by wl127x */
45309- wl->ops->prepare_read = wl127x_prepare_read;
45310+ pax_open_kernel();
45311+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
45312+ pax_close_kernel();
45313
45314 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
45315 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
45316@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
45317 sizeof(wl->conf.mem));
45318
45319 /* read data preparation is only needed by wl127x */
45320- wl->ops->prepare_read = wl127x_prepare_read;
45321+ pax_open_kernel();
45322+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
45323+ pax_close_kernel();
45324
45325 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
45326 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
45327diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
45328index 7aa0eb8..5a9ef38 100644
45329--- a/drivers/net/wireless/ti/wl18xx/main.c
45330+++ b/drivers/net/wireless/ti/wl18xx/main.c
45331@@ -1730,8 +1730,10 @@ static int wl18xx_setup(struct wl1271 *wl)
45332 }
45333
45334 if (!checksum_param) {
45335- wl18xx_ops.set_rx_csum = NULL;
45336- wl18xx_ops.init_vif = NULL;
45337+ pax_open_kernel();
45338+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
45339+ *(void **)&wl18xx_ops.init_vif = NULL;
45340+ pax_close_kernel();
45341 }
45342
45343 /* Enable 11a Band only if we have 5G antennas */
45344diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
45345index 7ef0b4a..ff65c28 100644
45346--- a/drivers/net/wireless/zd1211rw/zd_usb.c
45347+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
45348@@ -386,7 +386,7 @@ static inline void handle_regs_int(struct urb *urb)
45349 {
45350 struct zd_usb *usb = urb->context;
45351 struct zd_usb_interrupt *intr = &usb->intr;
45352- int len;
45353+ unsigned int len;
45354 u16 int_num;
45355
45356 ZD_ASSERT(in_interrupt());
45357diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
45358index 59f95d8..53e0e7f 100644
45359--- a/drivers/nfc/nfcwilink.c
45360+++ b/drivers/nfc/nfcwilink.c
45361@@ -513,7 +513,7 @@ static struct nci_ops nfcwilink_ops = {
45362
45363 static int nfcwilink_probe(struct platform_device *pdev)
45364 {
45365- static struct nfcwilink *drv;
45366+ struct nfcwilink *drv;
45367 int rc;
45368 __u32 protocols;
45369
45370diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
45371index d93b2b6..ae50401 100644
45372--- a/drivers/oprofile/buffer_sync.c
45373+++ b/drivers/oprofile/buffer_sync.c
45374@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
45375 if (cookie == NO_COOKIE)
45376 offset = pc;
45377 if (cookie == INVALID_COOKIE) {
45378- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
45379+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
45380 offset = pc;
45381 }
45382 if (cookie != last_cookie) {
45383@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
45384 /* add userspace sample */
45385
45386 if (!mm) {
45387- atomic_inc(&oprofile_stats.sample_lost_no_mm);
45388+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
45389 return 0;
45390 }
45391
45392 cookie = lookup_dcookie(mm, s->eip, &offset);
45393
45394 if (cookie == INVALID_COOKIE) {
45395- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
45396+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
45397 return 0;
45398 }
45399
45400@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
45401 /* ignore backtraces if failed to add a sample */
45402 if (state == sb_bt_start) {
45403 state = sb_bt_ignore;
45404- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
45405+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
45406 }
45407 }
45408 release_mm(mm);
45409diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
45410index c0cc4e7..44d4e54 100644
45411--- a/drivers/oprofile/event_buffer.c
45412+++ b/drivers/oprofile/event_buffer.c
45413@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
45414 }
45415
45416 if (buffer_pos == buffer_size) {
45417- atomic_inc(&oprofile_stats.event_lost_overflow);
45418+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
45419 return;
45420 }
45421
45422diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
45423index ed2c3ec..deda85a 100644
45424--- a/drivers/oprofile/oprof.c
45425+++ b/drivers/oprofile/oprof.c
45426@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
45427 if (oprofile_ops.switch_events())
45428 return;
45429
45430- atomic_inc(&oprofile_stats.multiplex_counter);
45431+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
45432 start_switch_worker();
45433 }
45434
45435diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
45436index ee2cfce..7f8f699 100644
45437--- a/drivers/oprofile/oprofile_files.c
45438+++ b/drivers/oprofile/oprofile_files.c
45439@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
45440
45441 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
45442
45443-static ssize_t timeout_read(struct file *file, char __user *buf,
45444+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
45445 size_t count, loff_t *offset)
45446 {
45447 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
45448diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
45449index 59659ce..6c860a0 100644
45450--- a/drivers/oprofile/oprofile_stats.c
45451+++ b/drivers/oprofile/oprofile_stats.c
45452@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
45453 cpu_buf->sample_invalid_eip = 0;
45454 }
45455
45456- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
45457- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
45458- atomic_set(&oprofile_stats.event_lost_overflow, 0);
45459- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
45460- atomic_set(&oprofile_stats.multiplex_counter, 0);
45461+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
45462+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
45463+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
45464+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
45465+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
45466 }
45467
45468
45469diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
45470index 1fc622b..8c48fc3 100644
45471--- a/drivers/oprofile/oprofile_stats.h
45472+++ b/drivers/oprofile/oprofile_stats.h
45473@@ -13,11 +13,11 @@
45474 #include <linux/atomic.h>
45475
45476 struct oprofile_stat_struct {
45477- atomic_t sample_lost_no_mm;
45478- atomic_t sample_lost_no_mapping;
45479- atomic_t bt_lost_no_mapping;
45480- atomic_t event_lost_overflow;
45481- atomic_t multiplex_counter;
45482+ atomic_unchecked_t sample_lost_no_mm;
45483+ atomic_unchecked_t sample_lost_no_mapping;
45484+ atomic_unchecked_t bt_lost_no_mapping;
45485+ atomic_unchecked_t event_lost_overflow;
45486+ atomic_unchecked_t multiplex_counter;
45487 };
45488
45489 extern struct oprofile_stat_struct oprofile_stats;
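
The oprofile hunks (together with the hyperv, sierra_net and rt2x00 ones earlier) show the other half of the patch's refcount hardening: counters that are statistics or ID generators, where wrapping is harmless, switch to atomic_unchecked_t and the matching _unchecked operations, opting them out of the overflow trap reserved for true reference counts. The _unchecked API mirrors atomic_t's. Sketch, assuming the grsecurity-provided type:

        /* Pure statistic: a wrap loses accuracy, not memory safety, so it
         * is exempted from the REFCOUNT overflow check. */
        static atomic_unchecked_t samples_lost = ATOMIC_INIT(0);

        static void note_lost_sample(void)
        {
                atomic_inc_unchecked(&samples_lost);
        }
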
45490diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
45491index 3f49345..c750d0b 100644
45492--- a/drivers/oprofile/oprofilefs.c
45493+++ b/drivers/oprofile/oprofilefs.c
45494@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
45495
45496 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
45497 {
45498- atomic_t *val = file->private_data;
45499- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
45500+ atomic_unchecked_t *val = file->private_data;
45501+ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
45502 }
45503
45504
45505@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
45506
45507
45508 int oprofilefs_create_ro_atomic(struct dentry *root,
45509- char const *name, atomic_t *val)
45510+ char const *name, atomic_unchecked_t *val)
45511 {
45512 return __oprofilefs_create_file(root, name,
45513 &atomic_ro_fops, 0444, val);
45514diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
45515index 61be1d9..dec05d7 100644
45516--- a/drivers/oprofile/timer_int.c
45517+++ b/drivers/oprofile/timer_int.c
45518@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
45519 return NOTIFY_OK;
45520 }
45521
45522-static struct notifier_block __refdata oprofile_cpu_notifier = {
45523+static struct notifier_block oprofile_cpu_notifier = {
45524 .notifier_call = oprofile_cpu_notify,
45525 };
45526
45527diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
45528index 92ed045..62d39bd7 100644
45529--- a/drivers/parport/procfs.c
45530+++ b/drivers/parport/procfs.c
45531@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
45532
45533 *ppos += len;
45534
45535- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
45536+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
45537 }
45538
45539 #ifdef CONFIG_PARPORT_1284
45540@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
45541
45542 *ppos += len;
45543
45544- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
45545+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
45546 }
45547 #endif /* IEEE1284.3 support. */
45548
45549diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
45550index 2f5786c..61ab4d1 100644
45551--- a/drivers/pci/hotplug/acpiphp_ibm.c
45552+++ b/drivers/pci/hotplug/acpiphp_ibm.c
45553@@ -463,7 +463,9 @@ static int __init ibm_acpiphp_init(void)
45554 goto init_cleanup;
45555 }
45556
45557- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
45558+ pax_open_kernel();
45559+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
45560+ pax_close_kernel();
45561 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
45562
45563 return retval;
45564diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
45565index a6a71c4..c91097b 100644
45566--- a/drivers/pci/hotplug/cpcihp_generic.c
45567+++ b/drivers/pci/hotplug/cpcihp_generic.c
45568@@ -73,7 +73,6 @@ static u16 port;
45569 static unsigned int enum_bit;
45570 static u8 enum_mask;
45571
45572-static struct cpci_hp_controller_ops generic_hpc_ops;
45573 static struct cpci_hp_controller generic_hpc;
45574
45575 static int __init validate_parameters(void)
45576@@ -139,6 +138,10 @@ static int query_enum(void)
45577 return ((value & enum_mask) == enum_mask);
45578 }
45579
45580+static struct cpci_hp_controller_ops generic_hpc_ops = {
45581+ .query_enum = query_enum,
45582+};
45583+
45584 static int __init cpcihp_generic_init(void)
45585 {
45586 int status;
45587@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
45588 pci_dev_put(dev);
45589
45590 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
45591- generic_hpc_ops.query_enum = query_enum;
45592 generic_hpc.ops = &generic_hpc_ops;
45593
45594 status = cpci_hp_register_controller(&generic_hpc);
45595diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
45596index 449b4bb..257e2e8 100644
45597--- a/drivers/pci/hotplug/cpcihp_zt5550.c
45598+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
45599@@ -59,7 +59,6 @@
45600 /* local variables */
45601 static bool debug;
45602 static bool poll;
45603-static struct cpci_hp_controller_ops zt5550_hpc_ops;
45604 static struct cpci_hp_controller zt5550_hpc;
45605
45606 /* Primary cPCI bus bridge device */
45607@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
45608 return 0;
45609 }
45610
45611+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
45612+ .query_enum = zt5550_hc_query_enum,
45613+};
45614+
45615 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
45616 {
45617 int status;
45618@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
45619 dbg("returned from zt5550_hc_config");
45620
45621 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
45622- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
45623 zt5550_hpc.ops = &zt5550_hpc_ops;
45624 if(!poll) {
45625 zt5550_hpc.irq = hc_dev->irq;
45626 zt5550_hpc.irq_flags = IRQF_SHARED;
45627 zt5550_hpc.dev_id = hc_dev;
45628
45629- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
45630- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
45631- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
45632+ pax_open_kernel();
45633+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
45634+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
45635+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
45636+ pax_close_kernel();
45637 } else {
45638 info("using ENUM# polling mode");
45639 }
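The cpcihp_generic and zt5550 hunks show the patch's standard treatment of ops structures that the constify plugin turns read-only: members known at build time move into the static initializer, and members that depend on probe-time state are written through a cast inside a pax_open_kernel()/pax_close_kernel() window, which must always be closed again (leaving it open defeats KERNEXEC). A compile-only sketch of the pattern; the ops type and handlers are illustrative stand-ins, and the PaX helpers are stubbed so the sketch builds in userspace:

    /* Userspace stubs; in the patch these are the PaX helpers that
     * toggle write protection around the store. */
    static void pax_open_kernel(void)  { }
    static void pax_close_kernel(void) { }

    struct example_ops {
            int (*query)(void);
            int (*enable_irq)(void);        /* only needed in interrupt mode */
    };

    static int ex_query(void)      { return 0; }
    static int ex_enable_irq(void) { return 0; }

    /* What can be decided statically goes into the initializer... */
    static struct example_ops ex_ops = {
            .query = ex_query,
    };

    static void ex_probe(int use_irq)
    {
            if (use_irq) {
                    /* ...and probe-time decisions punch through the const-ness. */
                    pax_open_kernel();
                    *(void **)&ex_ops.enable_irq = ex_enable_irq;
                    pax_close_kernel();
            }
    }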
45640diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
45641index 76ba8a1..20ca857 100644
45642--- a/drivers/pci/hotplug/cpqphp_nvram.c
45643+++ b/drivers/pci/hotplug/cpqphp_nvram.c
45644@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
45645
45646 void compaq_nvram_init (void __iomem *rom_start)
45647 {
45648+
45649+#ifndef CONFIG_PAX_KERNEXEC
45650 if (rom_start) {
45651 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
45652 }
45653+#endif
45654+
45655 dbg("int15 entry = %p\n", compaq_int15_entry_point);
45656
45657 /* initialize our int15 lock */
45658diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
45659index ec20f74..c1d961e 100644
45660--- a/drivers/pci/hotplug/pci_hotplug_core.c
45661+++ b/drivers/pci/hotplug/pci_hotplug_core.c
45662@@ -441,8 +441,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
45663 return -EINVAL;
45664 }
45665
45666- slot->ops->owner = owner;
45667- slot->ops->mod_name = mod_name;
45668+ pax_open_kernel();
45669+ *(struct module **)&slot->ops->owner = owner;
45670+ *(const char **)&slot->ops->mod_name = mod_name;
45671+ pax_close_kernel();
45672
45673 mutex_lock(&pci_hp_mutex);
45674 /*
45675diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
45676index f4a18f5..ff2463c 100644
45677--- a/drivers/pci/hotplug/pciehp_core.c
45678+++ b/drivers/pci/hotplug/pciehp_core.c
45679@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
45680 struct slot *slot = ctrl->slot;
45681 struct hotplug_slot *hotplug = NULL;
45682 struct hotplug_slot_info *info = NULL;
45683- struct hotplug_slot_ops *ops = NULL;
45684+ hotplug_slot_ops_no_const *ops = NULL;
45685 char name[SLOT_NAME_SIZE];
45686 int retval = -ENOMEM;
45687
45688diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
45689index 7128cfd..db7c65b 100644
45690--- a/drivers/pci/pci-sysfs.c
45691+++ b/drivers/pci/pci-sysfs.c
45692@@ -634,6 +634,10 @@ pci_write_config(struct file* filp, struct kobject *kobj,
45693 loff_t init_off = off;
45694 u8 *data = (u8*) buf;
45695
45696+#ifdef CONFIG_GRKERNSEC_KMEM
45697+ return -EPERM;
45698+#endif
45699+
45700 if (off > dev->cfg_size)
45701 return 0;
45702 if (off + count > dev->cfg_size) {
45703@@ -940,6 +944,10 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
45704 resource_size_t start, end;
45705 int i;
45706
45707+#ifdef CONFIG_GRKERNSEC_KMEM
45708+ return -EPERM;
45709+#endif
45710+
45711 for (i = 0; i < PCI_ROM_RESOURCE; i++)
45712 if (res == &pdev->resource[i])
45713 break;
45714@@ -1047,6 +1055,10 @@ pci_write_resource_io(struct file *filp, struct kobject *kobj,
45715 struct bin_attribute *attr, char *buf,
45716 loff_t off, size_t count)
45717 {
45718+#ifdef CONFIG_GRKERNSEC_KMEM
45719+ return -EPERM;
45720+#endif
45721+
45722 return pci_resource_io(filp, kobj, attr, buf, off, count, true);
45723 }
45724
45725@@ -1083,7 +1095,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
45726 {
45727 /* allocate attribute structure, piggyback attribute name */
45728 int name_len = write_combine ? 13 : 10;
45729- struct bin_attribute *res_attr;
45730+ bin_attribute_no_const *res_attr;
45731 int retval;
45732
45733 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
45734@@ -1268,7 +1280,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
45735 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
45736 {
45737 int retval;
45738- struct bin_attribute *attr;
45739+ bin_attribute_no_const *attr;
45740
45741 /* If the device has VPD, try to expose it in sysfs. */
45742 if (dev->vpd) {
45743@@ -1315,7 +1327,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
45744 {
45745 int retval;
45746 int rom_size = 0;
45747- struct bin_attribute *attr;
45748+ bin_attribute_no_const *attr;
45749
45750 if (!sysfs_initialized)
45751 return -EACCES;
45752diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
45753index 8a00c06..18a9715 100644
45754--- a/drivers/pci/pci.h
45755+++ b/drivers/pci/pci.h
45756@@ -95,7 +95,7 @@ struct pci_vpd_ops {
45757 struct pci_vpd {
45758 unsigned int len;
45759 const struct pci_vpd_ops *ops;
45760- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
45761+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
45762 };
45763
45764 int pci_vpd_pci22_init(struct pci_dev *dev);
45765diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
45766index 403a443..034e050 100644
45767--- a/drivers/pci/pcie/aspm.c
45768+++ b/drivers/pci/pcie/aspm.c
45769@@ -27,9 +27,9 @@
45770 #define MODULE_PARAM_PREFIX "pcie_aspm."
45771
45772 /* Note: those are not register definitions */
45773-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
45774-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
45775-#define ASPM_STATE_L1 (4) /* L1 state */
45776+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
45777+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
45778+#define ASPM_STATE_L1 (4U) /* L1 state */
45779 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
45780 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
45781
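The aspm.c hunk gives the state masks an explicit U suffix. The composite masks and their complements are combined with unsigned state fields; with plain int constants those expressions pass through signed arithmetic, which the signedness/size_overflow instrumentation applied throughout this patch prefers not to see, so the literals are made unsigned at the source. A tiny illustration with stand-in names:

    #define STATE_L0S_UP    (1U)    /* unsigned from the start */
    #define STATE_L0S_DW    (2U)
    #define STATE_ALL       (STATE_L0S_UP | STATE_L0S_DW)

    static unsigned int clear_l0s_up(unsigned int state)
    {
            /* ~STATE_L0S_UP is computed as unsigned int; no signed intermediate */
            return state & ~STATE_L0S_UP;
    }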
45782diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
45783index 7ef0f86..17b710f 100644
45784--- a/drivers/pci/probe.c
45785+++ b/drivers/pci/probe.c
45786@@ -175,7 +175,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
45787 struct pci_bus_region region, inverted_region;
45788 bool bar_too_big = false, bar_disabled = false;
45789
45790- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
45791+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
45792
45793 /* No printks while decoding is disabled! */
45794 if (!dev->mmio_always_on) {
45795diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
45796index cdc7836..2e0eb94 100644
45797--- a/drivers/pci/proc.c
45798+++ b/drivers/pci/proc.c
45799@@ -117,6 +117,10 @@ proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, lof
45800 int size = dev->cfg_size;
45801 int cnt;
45802
45803+#ifdef CONFIG_GRKERNSEC_KMEM
45804+ return -EPERM;
45805+#endif
45806+
45807 if (pos >= size)
45808 return 0;
45809 if (nbytes >= size)
45810@@ -196,6 +200,10 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
45811 #endif /* HAVE_PCI_MMAP */
45812 int ret = 0;
45813
45814+#ifdef CONFIG_GRKERNSEC_KMEM
45815+ return -EPERM;
45816+#endif
45817+
45818 switch (cmd) {
45819 case PCIIOC_CONTROLLER:
45820 ret = pci_domain_nr(dev->bus);
45821@@ -234,6 +242,10 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
45822 struct pci_filp_private *fpriv = file->private_data;
45823 int i, ret;
45824
45825+#ifdef CONFIG_GRKERNSEC_KMEM
45826+ return -EPERM;
45827+#endif
45828+
45829 if (!capable(CAP_SYS_RAWIO))
45830 return -EPERM;
45831
45832@@ -434,7 +446,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
45833 static int __init pci_proc_init(void)
45834 {
45835 struct pci_dev *dev = NULL;
45836+
45837+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45838+#ifdef CONFIG_GRKERNSEC_PROC_USER
45839+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
45840+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45841+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
45842+#endif
45843+#else
45844 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
45845+#endif
45846 proc_create("devices", 0, proc_bus_pci_dir,
45847 &proc_bus_pci_dev_operations);
45848 proc_initialized = 1;
45849diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
45850index e1c1ec5..bef4210 100644
45851--- a/drivers/pci/syscall.c
45852+++ b/drivers/pci/syscall.c
45853@@ -92,6 +92,10 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
45854 u32 dword;
45855 int err = 0;
45856
45857+#ifdef CONFIG_GRKERNSEC_KMEM
45858+ return -EPERM;
45859+#endif
45860+
45861 if (!capable(CAP_SYS_ADMIN))
45862 return -EPERM;
45863
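pci-sysfs.c, proc.c and syscall.c all receive the same guard: with CONFIG_GRKERNSEC_KMEM enabled, every userspace entry point that can write PCI config space, perform raw I/O, or mmap device resources returns -EPERM unconditionally, before the usual CAP_SYS_ADMIN/CAP_SYS_RAWIO checks are even reached. Reduced to its shape (CONFIG_GRKERNSEC_KMEM is the real Kconfig symbol; the function is a stand-in):

    #include <errno.h>
    #include <stddef.h>

    static long pci_write_path(const void *buf, size_t count)
    {
            (void)buf;      /* sketch: real code would copy and write this */
    #ifdef CONFIG_GRKERNSEC_KMEM
            (void)count;
            return -EPERM;  /* closed outright: no capability re-opens it */
    #else
            /* ... capability check, then the actual config-space write ... */
            return (long)count;
    #endif
    }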
45864diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
45865index 19c313b..ed28b38 100644
45866--- a/drivers/platform/x86/asus-wmi.c
45867+++ b/drivers/platform/x86/asus-wmi.c
45868@@ -1618,6 +1618,10 @@ static int show_dsts(struct seq_file *m, void *data)
45869 int err;
45870 u32 retval = -1;
45871
45872+#ifdef CONFIG_GRKERNSEC_KMEM
45873+ return -EPERM;
45874+#endif
45875+
45876 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
45877
45878 if (err < 0)
45879@@ -1634,6 +1638,10 @@ static int show_devs(struct seq_file *m, void *data)
45880 int err;
45881 u32 retval = -1;
45882
45883+#ifdef CONFIG_GRKERNSEC_KMEM
45884+ return -EPERM;
45885+#endif
45886+
45887 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
45888 &retval);
45889
45890@@ -1658,6 +1666,10 @@ static int show_call(struct seq_file *m, void *data)
45891 union acpi_object *obj;
45892 acpi_status status;
45893
45894+#ifdef CONFIG_GRKERNSEC_KMEM
45895+ return -EPERM;
45896+#endif
45897+
45898 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
45899 1, asus->debug.method_id,
45900 &input, &output);
45901diff --git a/drivers/platform/x86/chromeos_laptop.c b/drivers/platform/x86/chromeos_laptop.c
45902index 3e5b4497..dcdfb70 100644
45903--- a/drivers/platform/x86/chromeos_laptop.c
45904+++ b/drivers/platform/x86/chromeos_laptop.c
45905@@ -301,7 +301,7 @@ static int __init setup_tsl2563_als(const struct dmi_system_id *id)
45906 return 0;
45907 }
45908
45909-static struct dmi_system_id __initdata chromeos_laptop_dmi_table[] = {
45910+static const struct dmi_system_id __initconst chromeos_laptop_dmi_table[] = {
45911 {
45912 .ident = "Samsung Series 5 550 - Touchpad",
45913 .matches = {
45914diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
45915index 62f8030..c7f2a45 100644
45916--- a/drivers/platform/x86/msi-laptop.c
45917+++ b/drivers/platform/x86/msi-laptop.c
45918@@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
45919
45920 if (!quirks->ec_read_only) {
45921 /* allow userland write sysfs file */
45922- dev_attr_bluetooth.store = store_bluetooth;
45923- dev_attr_wlan.store = store_wlan;
45924- dev_attr_threeg.store = store_threeg;
45925- dev_attr_bluetooth.attr.mode |= S_IWUSR;
45926- dev_attr_wlan.attr.mode |= S_IWUSR;
45927- dev_attr_threeg.attr.mode |= S_IWUSR;
45928+ pax_open_kernel();
45929+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
45930+ *(void **)&dev_attr_wlan.store = store_wlan;
45931+ *(void **)&dev_attr_threeg.store = store_threeg;
45932+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
45933+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
45934+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
45935+ pax_close_kernel();
45936 }
45937
45938 /* disable hardware control by fn key */
45939diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
45940index 70222f2..8c8ce66 100644
45941--- a/drivers/platform/x86/msi-wmi.c
45942+++ b/drivers/platform/x86/msi-wmi.c
45943@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
45944 static void msi_wmi_notify(u32 value, void *context)
45945 {
45946 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
45947- static struct key_entry *key;
45948+ struct key_entry *key;
45949 union acpi_object *obj;
45950 acpi_status status;
45951
45952diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
45953index 3484dd2..13ee730 100644
45954--- a/drivers/platform/x86/sony-laptop.c
45955+++ b/drivers/platform/x86/sony-laptop.c
45956@@ -2448,7 +2448,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
45957 }
45958
45959 /* High speed charging function */
45960-static struct device_attribute *hsc_handle;
45961+static device_attribute_no_const *hsc_handle;
45962
45963 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
45964 struct device_attribute *attr,
45965diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
45966index 4e86e97..04d50d1 100644
45967--- a/drivers/platform/x86/thinkpad_acpi.c
45968+++ b/drivers/platform/x86/thinkpad_acpi.c
45969@@ -2091,7 +2091,7 @@ static int hotkey_mask_get(void)
45970 return 0;
45971 }
45972
45973-void static hotkey_mask_warn_incomplete_mask(void)
45974+static void hotkey_mask_warn_incomplete_mask(void)
45975 {
45976 /* log only what the user can fix... */
45977 const u32 wantedmask = hotkey_driver_mask &
45978@@ -2318,11 +2318,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
45979 }
45980 }
45981
45982-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
45983- struct tp_nvram_state *newn,
45984- const u32 event_mask)
45985-{
45986-
45987 #define TPACPI_COMPARE_KEY(__scancode, __member) \
45988 do { \
45989 if ((event_mask & (1 << __scancode)) && \
45990@@ -2336,36 +2331,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
45991 tpacpi_hotkey_send_key(__scancode); \
45992 } while (0)
45993
45994- void issue_volchange(const unsigned int oldvol,
45995- const unsigned int newvol)
45996- {
45997- unsigned int i = oldvol;
45998+static void issue_volchange(const unsigned int oldvol,
45999+ const unsigned int newvol,
46000+ const u32 event_mask)
46001+{
46002+ unsigned int i = oldvol;
46003
46004- while (i > newvol) {
46005- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
46006- i--;
46007- }
46008- while (i < newvol) {
46009- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
46010- i++;
46011- }
46012+ while (i > newvol) {
46013+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
46014+ i--;
46015 }
46016+ while (i < newvol) {
46017+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
46018+ i++;
46019+ }
46020+}
46021
46022- void issue_brightnesschange(const unsigned int oldbrt,
46023- const unsigned int newbrt)
46024- {
46025- unsigned int i = oldbrt;
46026+static void issue_brightnesschange(const unsigned int oldbrt,
46027+ const unsigned int newbrt,
46028+ const u32 event_mask)
46029+{
46030+ unsigned int i = oldbrt;
46031
46032- while (i > newbrt) {
46033- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
46034- i--;
46035- }
46036- while (i < newbrt) {
46037- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
46038- i++;
46039- }
46040+ while (i > newbrt) {
46041+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
46042+ i--;
46043+ }
46044+ while (i < newbrt) {
46045+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
46046+ i++;
46047 }
46048+}
46049
46050+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
46051+ struct tp_nvram_state *newn,
46052+ const u32 event_mask)
46053+{
46054 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
46055 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
46056 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
46057@@ -2399,7 +2400,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
46058 oldn->volume_level != newn->volume_level) {
46059 /* recently muted, or repeated mute keypress, or
46060 * multiple presses ending in mute */
46061- issue_volchange(oldn->volume_level, newn->volume_level);
46062+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
46063 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
46064 }
46065 } else {
46066@@ -2409,7 +2410,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
46067 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
46068 }
46069 if (oldn->volume_level != newn->volume_level) {
46070- issue_volchange(oldn->volume_level, newn->volume_level);
46071+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
46072 } else if (oldn->volume_toggle != newn->volume_toggle) {
46073 /* repeated vol up/down keypress at end of scale ? */
46074 if (newn->volume_level == 0)
46075@@ -2422,7 +2423,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
46076 /* handle brightness */
46077 if (oldn->brightness_level != newn->brightness_level) {
46078 issue_brightnesschange(oldn->brightness_level,
46079- newn->brightness_level);
46080+ newn->brightness_level,
46081+ event_mask);
46082 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
46083 /* repeated key presses that didn't change state */
46084 if (newn->brightness_level == 0)
46085@@ -2431,10 +2433,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
46086 && !tp_features.bright_unkfw)
46087 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
46088 }
46089+}
46090
46091 #undef TPACPI_COMPARE_KEY
46092 #undef TPACPI_MAY_SEND_KEY
46093-}
46094
46095 /*
46096 * Polling driver
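The thinkpad_acpi hunk hoists two GCC nested functions (issue_volchange, issue_brightnesschange) out of hotkey_compare_and_issue_event to file scope, threading the captured event_mask through as an explicit parameter; nested functions are a GNU C extension that this patch removes wherever it finds them. The transformation in miniature (names illustrative; the "before" form needs gcc):

    /* Before: GNU nested function capturing 'mask' from the enclosing frame. */
    static void before(unsigned int mask)
    {
            void step(unsigned int key)
            {
                    if (mask & key)
                            /* send key */;
            }
            step(1u);
    }

    /* After: hoisted to file scope, the capture replaced by a parameter. */
    static void step_hoisted(unsigned int key, unsigned int mask)
    {
            if (mask & key)
                    /* send key */;
    }

    static void after(unsigned int mask)
    {
            step_hoisted(1u, mask);
    }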
46097diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
46098index 769d265..a3a05ca 100644
46099--- a/drivers/pnp/pnpbios/bioscalls.c
46100+++ b/drivers/pnp/pnpbios/bioscalls.c
46101@@ -58,7 +58,7 @@ do { \
46102 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
46103 } while(0)
46104
46105-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
46106+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
46107 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
46108
46109 /*
46110@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
46111
46112 cpu = get_cpu();
46113 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
46114+
46115+ pax_open_kernel();
46116 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
46117+ pax_close_kernel();
46118
46119 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
46120 spin_lock_irqsave(&pnp_bios_lock, flags);
46121@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
46122 :"memory");
46123 spin_unlock_irqrestore(&pnp_bios_lock, flags);
46124
46125+ pax_open_kernel();
46126 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
46127+ pax_close_kernel();
46128+
46129 put_cpu();
46130
46131 /* If we get here and this is set then the PnP BIOS faulted on us. */
46132@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
46133 return status;
46134 }
46135
46136-void pnpbios_calls_init(union pnp_bios_install_struct *header)
46137+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
46138 {
46139 int i;
46140
46141@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
46142 pnp_bios_callpoint.offset = header->fields.pm16offset;
46143 pnp_bios_callpoint.segment = PNP_CS16;
46144
46145+ pax_open_kernel();
46146+
46147 for_each_possible_cpu(i) {
46148 struct desc_struct *gdt = get_cpu_gdt_table(i);
46149 if (!gdt)
46150@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
46151 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
46152 (unsigned long)__va(header->fields.pm16dseg));
46153 }
46154+
46155+ pax_close_kernel();
46156 }
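Two things happen in bioscalls.c: every GDT write gains a pax_open_kernel()/pax_close_kernel() bracket, and bad_bios_desc becomes const with its access value changed from 0x4092 to 0x4093. The extra low bit is the segment's accessed flag; presetting it matters because on a KERNEXEC kernel the GDT can live in read-only memory, and a descriptor loaded with the accessed bit clear would force the CPU to write that bit on first use and fault. Decomposition of the low byte as used above:

    /* Access-byte breakdown for GDT_ENTRY_INIT(0x4093, ...): the high byte
     * 0x40 carries the flags; the low byte is composed of: */
    #define SEG_PRESENT     0x80    /* P: descriptor present             */
    #define SEG_NONSYS      0x10    /* S: code/data (not system)         */
    #define SEG_DATA_WR     0x02    /* type: data, writable              */
    #define SEG_ACCESSED    0x01    /* A: preset so the CPU never needs  */
                                    /*    to write it into a r/o GDT     */
    /* 0x92 = P|S|writable-data; 0x93 is the same with A already set. */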
46157diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
46158index d95e101..67f0c3f 100644
46159--- a/drivers/pnp/resource.c
46160+++ b/drivers/pnp/resource.c
46161@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
46162 return 1;
46163
46164 /* check if the resource is valid */
46165- if (*irq < 0 || *irq > 15)
46166+ if (*irq > 15)
46167 return 0;
46168
46169 /* check if the resource is reserved */
46170@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
46171 return 1;
46172
46173 /* check if the resource is valid */
46174- if (*dma < 0 || *dma == 4 || *dma > 7)
46175+ if (*dma == 4 || *dma > 7)
46176 return 0;
46177
46178 /* check if the resource is reserved */
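The resource.c hunks delete "*irq < 0" and "*dma < 0": both dereference pointers into struct resource, whose start field is unsigned (resource_size_t), so the comparisons are compile-time false and only generate -Wtype-limits noise. The remaining upper-bound tests are sufficient on their own, as in this sketch (the typedef assumes a 64-bit phys_addr_t):

    typedef unsigned long long resource_size_t;

    static int irq_is_valid(const resource_size_t *irq)
    {
            /* '*irq < 0' can never be true for an unsigned type, so the
             * check collapses to the upper bound alone. */
            return *irq <= 15;
    }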
46179diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
46180index 0c52e2a..3421ab7 100644
46181--- a/drivers/power/pda_power.c
46182+++ b/drivers/power/pda_power.c
46183@@ -37,7 +37,11 @@ static int polling;
46184
46185 #if IS_ENABLED(CONFIG_USB_PHY)
46186 static struct usb_phy *transceiver;
46187-static struct notifier_block otg_nb;
46188+static int otg_handle_notification(struct notifier_block *nb,
46189+ unsigned long event, void *unused);
46190+static struct notifier_block otg_nb = {
46191+ .notifier_call = otg_handle_notification
46192+};
46193 #endif
46194
46195 static struct regulator *ac_draw;
46196@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
46197
46198 #if IS_ENABLED(CONFIG_USB_PHY)
46199 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
46200- otg_nb.notifier_call = otg_handle_notification;
46201 ret = usb_register_notifier(transceiver, &otg_nb);
46202 if (ret) {
46203 dev_err(dev, "failure to register otg notifier\n");
46204diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
46205index cc439fd..8fa30df 100644
46206--- a/drivers/power/power_supply.h
46207+++ b/drivers/power/power_supply.h
46208@@ -16,12 +16,12 @@ struct power_supply;
46209
46210 #ifdef CONFIG_SYSFS
46211
46212-extern void power_supply_init_attrs(struct device_type *dev_type);
46213+extern void power_supply_init_attrs(void);
46214 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
46215
46216 #else
46217
46218-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
46219+static inline void power_supply_init_attrs(void) {}
46220 #define power_supply_uevent NULL
46221
46222 #endif /* CONFIG_SYSFS */
46223diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
46224index 00e6672..2642c08 100644
46225--- a/drivers/power/power_supply_core.c
46226+++ b/drivers/power/power_supply_core.c
46227@@ -24,7 +24,10 @@
46228 struct class *power_supply_class;
46229 EXPORT_SYMBOL_GPL(power_supply_class);
46230
46231-static struct device_type power_supply_dev_type;
46232+extern const struct attribute_group *power_supply_attr_groups[];
46233+static struct device_type power_supply_dev_type = {
46234+ .groups = power_supply_attr_groups,
46235+};
46236
46237 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
46238 struct power_supply *supply)
46239@@ -584,7 +587,7 @@ static int __init power_supply_class_init(void)
46240 return PTR_ERR(power_supply_class);
46241
46242 power_supply_class->dev_uevent = power_supply_uevent;
46243- power_supply_init_attrs(&power_supply_dev_type);
46244+ power_supply_init_attrs();
46245
46246 return 0;
46247 }
46248diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
46249index 44420d1..967126e 100644
46250--- a/drivers/power/power_supply_sysfs.c
46251+++ b/drivers/power/power_supply_sysfs.c
46252@@ -230,17 +230,15 @@ static struct attribute_group power_supply_attr_group = {
46253 .is_visible = power_supply_attr_is_visible,
46254 };
46255
46256-static const struct attribute_group *power_supply_attr_groups[] = {
46257+const struct attribute_group *power_supply_attr_groups[] = {
46258 &power_supply_attr_group,
46259 NULL,
46260 };
46261
46262-void power_supply_init_attrs(struct device_type *dev_type)
46263+void power_supply_init_attrs(void)
46264 {
46265 int i;
46266
46267- dev_type->groups = power_supply_attr_groups;
46268-
46269 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
46270 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
46271 }
46272diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
46273index a01b8b3..37c2afe 100644
46274--- a/drivers/regulator/core.c
46275+++ b/drivers/regulator/core.c
46276@@ -3307,7 +3307,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
46277 {
46278 const struct regulation_constraints *constraints = NULL;
46279 const struct regulator_init_data *init_data;
46280- static atomic_t regulator_no = ATOMIC_INIT(0);
46281+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
46282 struct regulator_dev *rdev;
46283 struct device *dev;
46284 int ret, i;
46285@@ -3377,7 +3377,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
46286 rdev->dev.of_node = config->of_node;
46287 rdev->dev.parent = dev;
46288 dev_set_name(&rdev->dev, "regulator.%d",
46289- atomic_inc_return(&regulator_no) - 1);
46290+ atomic_inc_return_unchecked(&regulator_no) - 1);
46291 ret = device_register(&rdev->dev);
46292 if (ret != 0) {
46293 put_device(&rdev->dev);
46294diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
46295index 144bcac..d20e7db 100644
46296--- a/drivers/regulator/max8660.c
46297+++ b/drivers/regulator/max8660.c
46298@@ -420,8 +420,10 @@ static int max8660_probe(struct i2c_client *client,
46299 max8660->shadow_regs[MAX8660_OVER1] = 5;
46300 } else {
46301 /* Otherwise devices can be toggled via software */
46302- max8660_dcdc_ops.enable = max8660_dcdc_enable;
46303- max8660_dcdc_ops.disable = max8660_dcdc_disable;
46304+ pax_open_kernel();
46305+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
46306+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
46307+ pax_close_kernel();
46308 }
46309
46310 /*
46311diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
46312index 5b77ab7..a62f061 100644
46313--- a/drivers/regulator/max8973-regulator.c
46314+++ b/drivers/regulator/max8973-regulator.c
46315@@ -406,9 +406,11 @@ static int max8973_probe(struct i2c_client *client,
46316 if (!pdata || !pdata->enable_ext_control) {
46317 max->desc.enable_reg = MAX8973_VOUT;
46318 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
46319- max->ops.enable = regulator_enable_regmap;
46320- max->ops.disable = regulator_disable_regmap;
46321- max->ops.is_enabled = regulator_is_enabled_regmap;
46322+ pax_open_kernel();
46323+ *(void **)&max->ops.enable = regulator_enable_regmap;
46324+ *(void **)&max->ops.disable = regulator_disable_regmap;
46325+ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
46326+ pax_close_kernel();
46327 }
46328
46329 if (pdata) {
46330diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
46331index 1037e07..e64dea1 100644
46332--- a/drivers/regulator/mc13892-regulator.c
46333+++ b/drivers/regulator/mc13892-regulator.c
46334@@ -582,10 +582,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
46335 }
46336 mc13xxx_unlock(mc13892);
46337
46338- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
46339+ pax_open_kernel();
46340+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
46341 = mc13892_vcam_set_mode;
46342- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
46343+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
46344 = mc13892_vcam_get_mode;
46345+ pax_close_kernel();
46346
46347 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
46348 ARRAY_SIZE(mc13892_regulators));
46349diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
46350index 24e733c..bfbaa3e 100644
46351--- a/drivers/rtc/rtc-cmos.c
46352+++ b/drivers/rtc/rtc-cmos.c
46353@@ -731,7 +731,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
46354 hpet_rtc_timer_init();
46355
46356 /* export at least the first block of NVRAM */
46357- nvram.size = address_space - NVRAM_OFFSET;
46358+ pax_open_kernel();
46359+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
46360+ pax_close_kernel();
46361 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
46362 if (retval < 0) {
46363 dev_dbg(dev, "can't create nvram file? %d\n", retval);
46364diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
46365index d049393..bb20be0 100644
46366--- a/drivers/rtc/rtc-dev.c
46367+++ b/drivers/rtc/rtc-dev.c
46368@@ -16,6 +16,7 @@
46369 #include <linux/module.h>
46370 #include <linux/rtc.h>
46371 #include <linux/sched.h>
46372+#include <linux/grsecurity.h>
46373 #include "rtc-core.h"
46374
46375 static dev_t rtc_devt;
46376@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
46377 if (copy_from_user(&tm, uarg, sizeof(tm)))
46378 return -EFAULT;
46379
46380+ gr_log_timechange();
46381+
46382 return rtc_set_time(rtc, &tm);
46383
46384 case RTC_PIE_ON:
46385diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
46386index ca18fd1..055e42d 100644
46387--- a/drivers/rtc/rtc-ds1307.c
46388+++ b/drivers/rtc/rtc-ds1307.c
46389@@ -107,7 +107,7 @@ struct ds1307 {
46390 u8 offset; /* register's offset */
46391 u8 regs[11];
46392 u16 nvram_offset;
46393- struct bin_attribute *nvram;
46394+ bin_attribute_no_const *nvram;
46395 enum ds_type type;
46396 unsigned long flags;
46397 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
46398diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
46399index fcb0329..d77b7f2 100644
46400--- a/drivers/rtc/rtc-m48t59.c
46401+++ b/drivers/rtc/rtc-m48t59.c
46402@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
46403 if (IS_ERR(m48t59->rtc))
46404 return PTR_ERR(m48t59->rtc);
46405
46406- m48t59_nvram_attr.size = pdata->offset;
46407+ pax_open_kernel();
46408+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
46409+ pax_close_kernel();
46410
46411 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
46412 if (ret)
46413diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
46414index 14b5f8d..cc9bd26 100644
46415--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
46416+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
46417@@ -827,7 +827,7 @@ ahd_pci_intr(struct ahd_softc *ahd)
46418 for (bit = 0; bit < 8; bit++) {
46419
46420 if ((pci_status[i] & (0x1 << bit)) != 0) {
46421- static const char *s;
46422+ const char *s;
46423
46424 s = pci_status_strings[bit];
46425 if (i == 7/*TARG*/ && bit == 3)
46426@@ -887,23 +887,15 @@ ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat)
46427
46428 for (bit = 0; bit < 8; bit++) {
46429
46430- if ((split_status[i] & (0x1 << bit)) != 0) {
46431- static const char *s;
46432-
46433- s = split_status_strings[bit];
46434- printk(s, ahd_name(ahd),
46435+ if ((split_status[i] & (0x1 << bit)) != 0)
46436+ printk(split_status_strings[bit], ahd_name(ahd),
46437 split_status_source[i]);
46438- }
46439
46440 if (i > 1)
46441 continue;
46442
46443- if ((sg_split_status[i] & (0x1 << bit)) != 0) {
46444- static const char *s;
46445-
46446- s = split_status_strings[bit];
46447- printk(s, ahd_name(ahd), "SG");
46448- }
46449+ if ((sg_split_status[i] & (0x1 << bit)) != 0)
46450+ printk(split_status_strings[bit], ahd_name(ahd), "SG");
46451 }
46452 }
46453 /*
46454diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
46455index e693af6..2e525b6 100644
46456--- a/drivers/scsi/bfa/bfa_fcpim.h
46457+++ b/drivers/scsi/bfa/bfa_fcpim.h
46458@@ -36,7 +36,7 @@ struct bfa_iotag_s {
46459
46460 struct bfa_itn_s {
46461 bfa_isr_func_t isr;
46462-};
46463+} __no_const;
46464
46465 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
46466 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
46467diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
46468index 90814fe..4384138 100644
46469--- a/drivers/scsi/bfa/bfa_ioc.h
46470+++ b/drivers/scsi/bfa/bfa_ioc.h
46471@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
46472 bfa_ioc_disable_cbfn_t disable_cbfn;
46473 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
46474 bfa_ioc_reset_cbfn_t reset_cbfn;
46475-};
46476+} __no_const;
46477
46478 /*
46479 * IOC event notification mechanism.
46480@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
46481 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
46482 enum bfi_ioc_state fwstate);
46483 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
46484-};
46485+} __no_const;
46486
46487 /*
46488 * Queue element to wait for room in request queue. FIFO order is
46489diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
46490index c9382d6..6619864 100644
46491--- a/drivers/scsi/fcoe/fcoe_sysfs.c
46492+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
46493@@ -33,8 +33,8 @@
46494 */
46495 #include "libfcoe.h"
46496
46497-static atomic_t ctlr_num;
46498-static atomic_t fcf_num;
46499+static atomic_unchecked_t ctlr_num;
46500+static atomic_unchecked_t fcf_num;
46501
46502 /*
46503 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
46504@@ -681,7 +681,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
46505 if (!ctlr)
46506 goto out;
46507
46508- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
46509+ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
46510 ctlr->f = f;
46511 ctlr->mode = FIP_CONN_TYPE_FABRIC;
46512 INIT_LIST_HEAD(&ctlr->fcfs);
46513@@ -898,7 +898,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
46514 fcf->dev.parent = &ctlr->dev;
46515 fcf->dev.bus = &fcoe_bus_type;
46516 fcf->dev.type = &fcoe_fcf_device_type;
46517- fcf->id = atomic_inc_return(&fcf_num) - 1;
46518+ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
46519 fcf->state = FCOE_FCF_STATE_UNKNOWN;
46520
46521 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
46522@@ -934,8 +934,8 @@ int __init fcoe_sysfs_setup(void)
46523 {
46524 int error;
46525
46526- atomic_set(&ctlr_num, 0);
46527- atomic_set(&fcf_num, 0);
46528+ atomic_set_unchecked(&ctlr_num, 0);
46529+ atomic_set_unchecked(&fcf_num, 0);
46530
46531 error = bus_register(&fcoe_bus_type);
46532 if (error)
46533diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
46534index 3cafe0d..f1e87f8 100644
46535--- a/drivers/scsi/hosts.c
46536+++ b/drivers/scsi/hosts.c
46537@@ -42,7 +42,7 @@
46538 #include "scsi_logging.h"
46539
46540
46541-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
46542+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
46543
46544
46545 static void scsi_host_cls_release(struct device *dev)
46546@@ -361,7 +361,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
46547 * subtract one because we increment first then return, but we need to
46548 * know what the next host number was before increment
46549 */
46550- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
46551+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
46552 shost->dma_channel = 0xff;
46553
46554 /* These three are default values which can be overridden */
46555diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
46556index 0eb0940..3ca9b79 100644
46557--- a/drivers/scsi/hpsa.c
46558+++ b/drivers/scsi/hpsa.c
46559@@ -579,7 +579,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
46560 unsigned long flags;
46561
46562 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
46563- return h->access.command_completed(h, q);
46564+ return h->access->command_completed(h, q);
46565
46566 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
46567 a = rq->head[rq->current_entry];
46568@@ -3445,7 +3445,7 @@ static void start_io(struct ctlr_info *h)
46569 while (!list_empty(&h->reqQ)) {
46570 c = list_entry(h->reqQ.next, struct CommandList, list);
46571 /* can't do anything if fifo is full */
46572- if ((h->access.fifo_full(h))) {
46573+ if ((h->access->fifo_full(h))) {
46574 dev_warn(&h->pdev->dev, "fifo full\n");
46575 break;
46576 }
46577@@ -3467,7 +3467,7 @@ static void start_io(struct ctlr_info *h)
46578
46579 /* Tell the controller execute command */
46580 spin_unlock_irqrestore(&h->lock, flags);
46581- h->access.submit_command(h, c);
46582+ h->access->submit_command(h, c);
46583 spin_lock_irqsave(&h->lock, flags);
46584 }
46585 spin_unlock_irqrestore(&h->lock, flags);
46586@@ -3475,17 +3475,17 @@ static void start_io(struct ctlr_info *h)
46587
46588 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
46589 {
46590- return h->access.command_completed(h, q);
46591+ return h->access->command_completed(h, q);
46592 }
46593
46594 static inline bool interrupt_pending(struct ctlr_info *h)
46595 {
46596- return h->access.intr_pending(h);
46597+ return h->access->intr_pending(h);
46598 }
46599
46600 static inline long interrupt_not_for_us(struct ctlr_info *h)
46601 {
46602- return (h->access.intr_pending(h) == 0) ||
46603+ return (h->access->intr_pending(h) == 0) ||
46604 (h->interrupts_enabled == 0);
46605 }
46606
46607@@ -4387,7 +4387,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
46608 if (prod_index < 0)
46609 return -ENODEV;
46610 h->product_name = products[prod_index].product_name;
46611- h->access = *(products[prod_index].access);
46612+ h->access = products[prod_index].access;
46613
46614 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
46615 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
46616@@ -4669,7 +4669,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
46617
46618 assert_spin_locked(&lockup_detector_lock);
46619 remove_ctlr_from_lockup_detector_list(h);
46620- h->access.set_intr_mask(h, HPSA_INTR_OFF);
46621+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
46622 spin_lock_irqsave(&h->lock, flags);
46623 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
46624 spin_unlock_irqrestore(&h->lock, flags);
46625@@ -4846,7 +4846,7 @@ reinit_after_soft_reset:
46626 }
46627
46628 /* make sure the board interrupts are off */
46629- h->access.set_intr_mask(h, HPSA_INTR_OFF);
46630+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
46631
46632 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
46633 goto clean2;
46634@@ -4880,7 +4880,7 @@ reinit_after_soft_reset:
46635 * fake ones to scoop up any residual completions.
46636 */
46637 spin_lock_irqsave(&h->lock, flags);
46638- h->access.set_intr_mask(h, HPSA_INTR_OFF);
46639+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
46640 spin_unlock_irqrestore(&h->lock, flags);
46641 free_irqs(h);
46642 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
46643@@ -4899,9 +4899,9 @@ reinit_after_soft_reset:
46644 dev_info(&h->pdev->dev, "Board READY.\n");
46645 dev_info(&h->pdev->dev,
46646 "Waiting for stale completions to drain.\n");
46647- h->access.set_intr_mask(h, HPSA_INTR_ON);
46648+ h->access->set_intr_mask(h, HPSA_INTR_ON);
46649 msleep(10000);
46650- h->access.set_intr_mask(h, HPSA_INTR_OFF);
46651+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
46652
46653 rc = controller_reset_failed(h->cfgtable);
46654 if (rc)
46655@@ -4922,7 +4922,7 @@ reinit_after_soft_reset:
46656 }
46657
46658 /* Turn the interrupts on so we can service requests */
46659- h->access.set_intr_mask(h, HPSA_INTR_ON);
46660+ h->access->set_intr_mask(h, HPSA_INTR_ON);
46661
46662 hpsa_hba_inquiry(h);
46663 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
46664@@ -4977,7 +4977,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
46665 * To write all data in the battery backed cache to disks
46666 */
46667 hpsa_flush_cache(h);
46668- h->access.set_intr_mask(h, HPSA_INTR_OFF);
46669+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
46670 hpsa_free_irqs_and_disable_msix(h);
46671 }
46672
46673@@ -5145,7 +5145,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
46674 return;
46675 }
46676 /* Change the access methods to the performant access methods */
46677- h->access = SA5_performant_access;
46678+ h->access = &SA5_performant_access;
46679 h->transMethod = CFGTBL_Trans_Performant;
46680 }
46681
46682diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
46683index bc85e72..ae04a39 100644
46684--- a/drivers/scsi/hpsa.h
46685+++ b/drivers/scsi/hpsa.h
46686@@ -79,7 +79,7 @@ struct ctlr_info {
46687 unsigned int msix_vector;
46688 unsigned int msi_vector;
46689 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
46690- struct access_method access;
46691+ struct access_method *access;
46692
46693 /* queue and queue Info */
46694 struct list_head reqQ;
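The hpsa changes convert ctlr_info's embedded struct access_method into a pointer: instead of copying the per-product template into each controller (which would require the template to stay writable), h->access now points at the shared template, and every use flips from '.' to '->'. In miniature, with stand-in names:

    struct access_method {
            void (*submit_command)(void);
    };

    static void sa5_submit(void) { }

    /* Shared per-product template; the constify plugin makes such ops
     * structures effectively read-only, which is why embedding a copy
     * had to go. */
    static struct access_method SA5_access = {
            .submit_command = sa5_submit,
    };

    struct ctlr_info {
            struct access_method *access;   /* was: struct access_method access; */
    };

    static void hpsa_style_init(struct ctlr_info *h)
    {
            h->access = &SA5_access;        /* was: h->access = SA5_access; (a copy) */
            h->access->submit_command();    /* call sites change '.' to '->'         */
    }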
46695diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
46696index 5879929..32b241d 100644
46697--- a/drivers/scsi/libfc/fc_exch.c
46698+++ b/drivers/scsi/libfc/fc_exch.c
46699@@ -100,12 +100,12 @@ struct fc_exch_mgr {
46700 u16 pool_max_index;
46701
46702 struct {
46703- atomic_t no_free_exch;
46704- atomic_t no_free_exch_xid;
46705- atomic_t xid_not_found;
46706- atomic_t xid_busy;
46707- atomic_t seq_not_found;
46708- atomic_t non_bls_resp;
46709+ atomic_unchecked_t no_free_exch;
46710+ atomic_unchecked_t no_free_exch_xid;
46711+ atomic_unchecked_t xid_not_found;
46712+ atomic_unchecked_t xid_busy;
46713+ atomic_unchecked_t seq_not_found;
46714+ atomic_unchecked_t non_bls_resp;
46715 } stats;
46716 };
46717
46718@@ -736,7 +736,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
46719 /* allocate memory for exchange */
46720 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
46721 if (!ep) {
46722- atomic_inc(&mp->stats.no_free_exch);
46723+ atomic_inc_unchecked(&mp->stats.no_free_exch);
46724 goto out;
46725 }
46726 memset(ep, 0, sizeof(*ep));
46727@@ -797,7 +797,7 @@ out:
46728 return ep;
46729 err:
46730 spin_unlock_bh(&pool->lock);
46731- atomic_inc(&mp->stats.no_free_exch_xid);
46732+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
46733 mempool_free(ep, mp->ep_pool);
46734 return NULL;
46735 }
46736@@ -940,7 +940,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
46737 xid = ntohs(fh->fh_ox_id); /* we originated exch */
46738 ep = fc_exch_find(mp, xid);
46739 if (!ep) {
46740- atomic_inc(&mp->stats.xid_not_found);
46741+ atomic_inc_unchecked(&mp->stats.xid_not_found);
46742 reject = FC_RJT_OX_ID;
46743 goto out;
46744 }
46745@@ -970,7 +970,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
46746 ep = fc_exch_find(mp, xid);
46747 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
46748 if (ep) {
46749- atomic_inc(&mp->stats.xid_busy);
46750+ atomic_inc_unchecked(&mp->stats.xid_busy);
46751 reject = FC_RJT_RX_ID;
46752 goto rel;
46753 }
46754@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
46755 }
46756 xid = ep->xid; /* get our XID */
46757 } else if (!ep) {
46758- atomic_inc(&mp->stats.xid_not_found);
46759+ atomic_inc_unchecked(&mp->stats.xid_not_found);
46760 reject = FC_RJT_RX_ID; /* XID not found */
46761 goto out;
46762 }
46763@@ -998,7 +998,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
46764 } else {
46765 sp = &ep->seq;
46766 if (sp->id != fh->fh_seq_id) {
46767- atomic_inc(&mp->stats.seq_not_found);
46768+ atomic_inc_unchecked(&mp->stats.seq_not_found);
46769 if (f_ctl & FC_FC_END_SEQ) {
46770 /*
46771 * Update sequence_id based on incoming last
46772@@ -1448,22 +1448,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
46773
46774 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
46775 if (!ep) {
46776- atomic_inc(&mp->stats.xid_not_found);
46777+ atomic_inc_unchecked(&mp->stats.xid_not_found);
46778 goto out;
46779 }
46780 if (ep->esb_stat & ESB_ST_COMPLETE) {
46781- atomic_inc(&mp->stats.xid_not_found);
46782+ atomic_inc_unchecked(&mp->stats.xid_not_found);
46783 goto rel;
46784 }
46785 if (ep->rxid == FC_XID_UNKNOWN)
46786 ep->rxid = ntohs(fh->fh_rx_id);
46787 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
46788- atomic_inc(&mp->stats.xid_not_found);
46789+ atomic_inc_unchecked(&mp->stats.xid_not_found);
46790 goto rel;
46791 }
46792 if (ep->did != ntoh24(fh->fh_s_id) &&
46793 ep->did != FC_FID_FLOGI) {
46794- atomic_inc(&mp->stats.xid_not_found);
46795+ atomic_inc_unchecked(&mp->stats.xid_not_found);
46796 goto rel;
46797 }
46798 sof = fr_sof(fp);
46799@@ -1472,7 +1472,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
46800 sp->ssb_stat |= SSB_ST_RESP;
46801 sp->id = fh->fh_seq_id;
46802 } else if (sp->id != fh->fh_seq_id) {
46803- atomic_inc(&mp->stats.seq_not_found);
46804+ atomic_inc_unchecked(&mp->stats.seq_not_found);
46805 goto rel;
46806 }
46807
46808@@ -1536,9 +1536,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
46809 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
46810
46811 if (!sp)
46812- atomic_inc(&mp->stats.xid_not_found);
46813+ atomic_inc_unchecked(&mp->stats.xid_not_found);
46814 else
46815- atomic_inc(&mp->stats.non_bls_resp);
46816+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
46817
46818 fc_frame_free(fp);
46819 }
46820@@ -2185,13 +2185,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
46821
46822 list_for_each_entry(ema, &lport->ema_list, ema_list) {
46823 mp = ema->mp;
46824- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
46825+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
46826 st->fc_no_free_exch_xid +=
46827- atomic_read(&mp->stats.no_free_exch_xid);
46828- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
46829- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
46830- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
46831- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
46832+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
46833+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
46834+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
46835+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
46836+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
46837 }
46838 }
46839 EXPORT_SYMBOL(fc_exch_update_stats);
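fc_exch.c is one of many files here migrating pure statistics counters from atomic_t to the _unchecked variants. Under PAX_REFCOUNT the ordinary atomic_t operations trap on overflow to catch reference-count bugs; counters that are allowed to wrap, like these per-manager error tallies, move to atomic_unchecked_t so legitimate wrap-around never trips the detector. A userspace model of the distinction, with the kernel types stubbed via C11 atomics:

    #include <stdatomic.h>

    /* Stand-in for the kernel type: same idea, never overflow-trapped. */
    typedef struct { atomic_int counter; } atomic_unchecked_t;

    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
            atomic_fetch_add_explicit(&v->counter, 1, memory_order_relaxed);
    }

    static inline int atomic_read_unchecked(atomic_unchecked_t *v)
    {
            return atomic_load_explicit(&v->counter, memory_order_relaxed);
    }

    static atomic_unchecked_t xid_not_found;        /* free-running statistic */

    static void record_miss(void)
    {
            atomic_inc_unchecked(&xid_not_found);   /* wrapping is acceptable */
    }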
46840diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
46841index d289583..b745eec 100644
46842--- a/drivers/scsi/libsas/sas_ata.c
46843+++ b/drivers/scsi/libsas/sas_ata.c
46844@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
46845 .postreset = ata_std_postreset,
46846 .error_handler = ata_std_error_handler,
46847 .post_internal_cmd = sas_ata_post_internal,
46848- .qc_defer = ata_std_qc_defer,
46849+ .qc_defer = ata_std_qc_defer,
46850 .qc_prep = ata_noop_qc_prep,
46851 .qc_issue = sas_ata_qc_issue,
46852 .qc_fill_rtf = sas_ata_qc_fill_rtf,
46853diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
46854index 4e1b75c..0bbdfa9 100644
46855--- a/drivers/scsi/lpfc/lpfc.h
46856+++ b/drivers/scsi/lpfc/lpfc.h
46857@@ -432,7 +432,7 @@ struct lpfc_vport {
46858 struct dentry *debug_nodelist;
46859 struct dentry *vport_debugfs_root;
46860 struct lpfc_debugfs_trc *disc_trc;
46861- atomic_t disc_trc_cnt;
46862+ atomic_unchecked_t disc_trc_cnt;
46863 #endif
46864 uint8_t stat_data_enabled;
46865 uint8_t stat_data_blocked;
46866@@ -865,8 +865,8 @@ struct lpfc_hba {
46867 struct timer_list fabric_block_timer;
46868 unsigned long bit_flags;
46869 #define FABRIC_COMANDS_BLOCKED 0
46870- atomic_t num_rsrc_err;
46871- atomic_t num_cmd_success;
46872+ atomic_unchecked_t num_rsrc_err;
46873+ atomic_unchecked_t num_cmd_success;
46874 unsigned long last_rsrc_error_time;
46875 unsigned long last_ramp_down_time;
46876 unsigned long last_ramp_up_time;
46877@@ -902,7 +902,7 @@ struct lpfc_hba {
46878
46879 struct dentry *debug_slow_ring_trc;
46880 struct lpfc_debugfs_trc *slow_ring_trc;
46881- atomic_t slow_ring_trc_cnt;
46882+ atomic_unchecked_t slow_ring_trc_cnt;
46883 /* iDiag debugfs sub-directory */
46884 struct dentry *idiag_root;
46885 struct dentry *idiag_pci_cfg;
46886diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
46887index 60084e6..0e2e700 100644
46888--- a/drivers/scsi/lpfc/lpfc_debugfs.c
46889+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
46890@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
46891
46892 #include <linux/debugfs.h>
46893
46894-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
46895+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
46896 static unsigned long lpfc_debugfs_start_time = 0L;
46897
46898 /* iDiag */
46899@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
46900 lpfc_debugfs_enable = 0;
46901
46902 len = 0;
46903- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
46904+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
46905 (lpfc_debugfs_max_disc_trc - 1);
46906 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
46907 dtp = vport->disc_trc + i;
46908@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
46909 lpfc_debugfs_enable = 0;
46910
46911 len = 0;
46912- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
46913+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
46914 (lpfc_debugfs_max_slow_ring_trc - 1);
46915 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
46916 dtp = phba->slow_ring_trc + i;
46917@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
46918 !vport || !vport->disc_trc)
46919 return;
46920
46921- index = atomic_inc_return(&vport->disc_trc_cnt) &
46922+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
46923 (lpfc_debugfs_max_disc_trc - 1);
46924 dtp = vport->disc_trc + index;
46925 dtp->fmt = fmt;
46926 dtp->data1 = data1;
46927 dtp->data2 = data2;
46928 dtp->data3 = data3;
46929- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
46930+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
46931 dtp->jif = jiffies;
46932 #endif
46933 return;
46934@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
46935 !phba || !phba->slow_ring_trc)
46936 return;
46937
46938- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
46939+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
46940 (lpfc_debugfs_max_slow_ring_trc - 1);
46941 dtp = phba->slow_ring_trc + index;
46942 dtp->fmt = fmt;
46943 dtp->data1 = data1;
46944 dtp->data2 = data2;
46945 dtp->data3 = data3;
46946- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
46947+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
46948 dtp->jif = jiffies;
46949 #endif
46950 return;
46951@@ -4168,7 +4168,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
46952 "slow_ring buffer\n");
46953 goto debug_failed;
46954 }
46955- atomic_set(&phba->slow_ring_trc_cnt, 0);
46956+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
46957 memset(phba->slow_ring_trc, 0,
46958 (sizeof(struct lpfc_debugfs_trc) *
46959 lpfc_debugfs_max_slow_ring_trc));
46960@@ -4214,7 +4214,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
46961 "buffer\n");
46962 goto debug_failed;
46963 }
46964- atomic_set(&vport->disc_trc_cnt, 0);
46965+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
46966
46967 snprintf(name, sizeof(name), "discovery_trace");
46968 vport->debug_disc_trc =
46969diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
46970index 647f5bf..d0068b9 100644
46971--- a/drivers/scsi/lpfc/lpfc_init.c
46972+++ b/drivers/scsi/lpfc/lpfc_init.c
46973@@ -10952,8 +10952,10 @@ lpfc_init(void)
46974 "misc_register returned with status %d", error);
46975
46976 if (lpfc_enable_npiv) {
46977- lpfc_transport_functions.vport_create = lpfc_vport_create;
46978- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
46979+ pax_open_kernel();
46980+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
46981+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
46982+ pax_close_kernel();
46983 }
46984 lpfc_transport_template =
46985 fc_attach_transport(&lpfc_transport_functions);
46986diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
46987index c913e8c..d34a119 100644
46988--- a/drivers/scsi/lpfc/lpfc_scsi.c
46989+++ b/drivers/scsi/lpfc/lpfc_scsi.c
46990@@ -353,7 +353,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
46991 uint32_t evt_posted;
46992
46993 spin_lock_irqsave(&phba->hbalock, flags);
46994- atomic_inc(&phba->num_rsrc_err);
46995+ atomic_inc_unchecked(&phba->num_rsrc_err);
46996 phba->last_rsrc_error_time = jiffies;
46997
46998 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
46999@@ -394,7 +394,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
47000 unsigned long flags;
47001 struct lpfc_hba *phba = vport->phba;
47002 uint32_t evt_posted;
47003- atomic_inc(&phba->num_cmd_success);
47004+ atomic_inc_unchecked(&phba->num_cmd_success);
47005
47006 if (vport->cfg_lun_queue_depth <= queue_depth)
47007 return;
47008@@ -438,8 +438,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
47009 unsigned long num_rsrc_err, num_cmd_success;
47010 int i;
47011
47012- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
47013- num_cmd_success = atomic_read(&phba->num_cmd_success);
47014+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
47015+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
47016
47017 /*
47018 * The error and success command counters are global per
47019@@ -467,8 +467,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
47020 }
47021 }
47022 lpfc_destroy_vport_work_array(phba, vports);
47023- atomic_set(&phba->num_rsrc_err, 0);
47024- atomic_set(&phba->num_cmd_success, 0);
47025+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
47026+ atomic_set_unchecked(&phba->num_cmd_success, 0);
47027 }
47028
47029 /**
47030@@ -502,8 +502,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
47031 }
47032 }
47033 lpfc_destroy_vport_work_array(phba, vports);
47034- atomic_set(&phba->num_rsrc_err, 0);
47035- atomic_set(&phba->num_cmd_success, 0);
47036+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
47037+ atomic_set_unchecked(&phba->num_cmd_success, 0);
47038 }
47039
47040 /**
47041diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
47042index 7f0af4f..193ac3e 100644
47043--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
47044+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
47045@@ -1557,7 +1557,7 @@ _scsih_get_resync(struct device *dev)
47046 {
47047 struct scsi_device *sdev = to_scsi_device(dev);
47048 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
47049- static struct _raid_device *raid_device;
47050+ struct _raid_device *raid_device;
47051 unsigned long flags;
47052 Mpi2RaidVolPage0_t vol_pg0;
47053 Mpi2ConfigReply_t mpi_reply;
47054@@ -1609,7 +1609,7 @@ _scsih_get_state(struct device *dev)
47055 {
47056 struct scsi_device *sdev = to_scsi_device(dev);
47057 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
47058- static struct _raid_device *raid_device;
47059+ struct _raid_device *raid_device;
47060 unsigned long flags;
47061 Mpi2RaidVolPage0_t vol_pg0;
47062 Mpi2ConfigReply_t mpi_reply;
47063@@ -6637,7 +6637,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
47064 struct fw_event_work *fw_event)
47065 {
47066 Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data;
47067- static struct _raid_device *raid_device;
47068+ struct _raid_device *raid_device;
47069 unsigned long flags;
47070 u16 handle;
47071
47072@@ -7108,7 +7108,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
47073 u64 sas_address;
47074 struct _sas_device *sas_device;
47075 struct _sas_node *expander_device;
47076- static struct _raid_device *raid_device;
47077+ struct _raid_device *raid_device;
47078 u8 retry_count;
47079 unsigned long flags;
47080
47081diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
47082index a38f71b..f3bc572 100644
47083--- a/drivers/scsi/pmcraid.c
47084+++ b/drivers/scsi/pmcraid.c
47085@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
47086 res->scsi_dev = scsi_dev;
47087 scsi_dev->hostdata = res;
47088 res->change_detected = 0;
47089- atomic_set(&res->read_failures, 0);
47090- atomic_set(&res->write_failures, 0);
47091+ atomic_set_unchecked(&res->read_failures, 0);
47092+ atomic_set_unchecked(&res->write_failures, 0);
47093 rc = 0;
47094 }
47095 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
47096@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
47097
47098 /* If this was a SCSI read/write command keep count of errors */
47099 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
47100- atomic_inc(&res->read_failures);
47101+ atomic_inc_unchecked(&res->read_failures);
47102 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
47103- atomic_inc(&res->write_failures);
47104+ atomic_inc_unchecked(&res->write_failures);
47105
47106 if (!RES_IS_GSCSI(res->cfg_entry) &&
47107 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
47108@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
47109 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
47110 * hrrq_id assigned here in queuecommand
47111 */
47112- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
47113+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
47114 pinstance->num_hrrq;
47115 cmd->cmd_done = pmcraid_io_done;
47116
47117@@ -3846,7 +3846,7 @@ static long pmcraid_ioctl_passthrough(
47118 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
47119 * hrrq_id assigned here in queuecommand
47120 */
47121- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
47122+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
47123 pinstance->num_hrrq;
47124
47125 if (request_size) {
47126@@ -4484,7 +4484,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
47127
47128 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
47129 /* add resources only after host is added into system */
47130- if (!atomic_read(&pinstance->expose_resources))
47131+ if (!atomic_read_unchecked(&pinstance->expose_resources))
47132 return;
47133
47134 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
47135@@ -5311,8 +5311,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
47136 init_waitqueue_head(&pinstance->reset_wait_q);
47137
47138 atomic_set(&pinstance->outstanding_cmds, 0);
47139- atomic_set(&pinstance->last_message_id, 0);
47140- atomic_set(&pinstance->expose_resources, 0);
47141+ atomic_set_unchecked(&pinstance->last_message_id, 0);
47142+ atomic_set_unchecked(&pinstance->expose_resources, 0);
47143
47144 INIT_LIST_HEAD(&pinstance->free_res_q);
47145 INIT_LIST_HEAD(&pinstance->used_res_q);
47146@@ -6025,7 +6025,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
47147 /* Schedule worker thread to handle CCN and take care of adding and
47148 * removing devices to OS
47149 */
47150- atomic_set(&pinstance->expose_resources, 1);
47151+ atomic_set_unchecked(&pinstance->expose_resources, 1);
47152 schedule_work(&pinstance->worker_q);
47153 return rc;
47154
47155diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
47156index e1d150f..6c6df44 100644
47157--- a/drivers/scsi/pmcraid.h
47158+++ b/drivers/scsi/pmcraid.h
47159@@ -748,7 +748,7 @@ struct pmcraid_instance {
47160 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
47161
47162 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
47163- atomic_t last_message_id;
47164+ atomic_unchecked_t last_message_id;
47165
47166 /* configuration table */
47167 struct pmcraid_config_table *cfg_table;
47168@@ -777,7 +777,7 @@ struct pmcraid_instance {
47169 atomic_t outstanding_cmds;
47170
47171 /* should add/delete resources to mid-layer now ?*/
47172- atomic_t expose_resources;
47173+ atomic_unchecked_t expose_resources;
47174
47175
47176
47177@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
47178 struct pmcraid_config_table_entry_ext cfg_entry_ext;
47179 };
47180 struct scsi_device *scsi_dev; /* Link scsi_device structure */
47181- atomic_t read_failures; /* count of failed READ commands */
47182- atomic_t write_failures; /* count of failed WRITE commands */
47183+ atomic_unchecked_t read_failures; /* count of failed READ commands */
47184+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
47185
47186 /* To indicate add/delete/modify during CCN */
47187 u8 change_detected;
47188diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
47189index 5f174b8..98d32b0 100644
47190--- a/drivers/scsi/qla2xxx/qla_attr.c
47191+++ b/drivers/scsi/qla2xxx/qla_attr.c
47192@@ -2040,7 +2040,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
47193 return 0;
47194 }
47195
47196-struct fc_function_template qla2xxx_transport_functions = {
47197+fc_function_template_no_const qla2xxx_transport_functions = {
47198
47199 .show_host_node_name = 1,
47200 .show_host_port_name = 1,
47201@@ -2088,7 +2088,7 @@ struct fc_function_template qla2xxx_transport_functions = {
47202 .bsg_timeout = qla24xx_bsg_timeout,
47203 };
47204
47205-struct fc_function_template qla2xxx_transport_vport_functions = {
47206+fc_function_template_no_const qla2xxx_transport_vport_functions = {
47207
47208 .show_host_node_name = 1,
47209 .show_host_port_name = 1,
47210diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
47211index 4446bf5..9a3574d 100644
47212--- a/drivers/scsi/qla2xxx/qla_gbl.h
47213+++ b/drivers/scsi/qla2xxx/qla_gbl.h
47214@@ -538,8 +538,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
47215 struct device_attribute;
47216 extern struct device_attribute *qla2x00_host_attrs[];
47217 struct fc_function_template;
47218-extern struct fc_function_template qla2xxx_transport_functions;
47219-extern struct fc_function_template qla2xxx_transport_vport_functions;
47220+extern fc_function_template_no_const qla2xxx_transport_functions;
47221+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
47222 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
47223 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
47224 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
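fc_function_template_no_const exists because the constify gcc plugin (assumed active in this tree) turns structures made up of function pointers into read-only objects; the qla2xxx templates are legitimately modified after compile time, so they need a writable alias. A sketch of the convention as this patch appears to use it -- the attribute names are the plugin's, the struct is a stub:

    /* Sketch: structs of function pointers become const by default under
     * the constify plugin; no_const opts a particular type back out. */
    #ifdef CONSTIFY_PLUGIN
    # define __no_const __attribute__((no_const))
    #else
    # define __no_const
    #endif

    struct fc_function_template {
            void (*get_host_port_id)(void *shost);
            /* ...many more transport hooks... */
    };

    /* writable alias for the few templates modified after compile time */
    typedef struct fc_function_template __no_const fc_function_template_no_const;

    fc_function_template_no_const qla2xxx_transport_functions = {
            .get_host_port_id = 0,  /* filled in by the driver at init */
    };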
47225diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
47226index 9f01bbb..5e1dcee 100644
47227--- a/drivers/scsi/qla2xxx/qla_os.c
47228+++ b/drivers/scsi/qla2xxx/qla_os.c
47229@@ -1572,8 +1572,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
47230 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
47231 /* Ok, a 64bit DMA mask is applicable. */
47232 ha->flags.enable_64bit_addressing = 1;
47233- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
47234- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
47235+ pax_open_kernel();
47236+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
47237+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
47238+ pax_close_kernel();
47239 return;
47240 }
47241 }
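pax_open_kernel()/pax_close_kernel() bracket the only legitimate writes to memory that KERNEXEC/constification keep read-only, and the `*(void **)&ops->hook` form casts through the implied const so the store compiles. A user-space analogue built on mprotect -- the real pax_open_kernel toggles write protection on the kernel mapping, which is not shown here:

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    struct isp_ops { int (*calc_req_entries)(int); };

    static int calc_iocbs_64(int n) { return n + 1; }

    /* Analogue of pax_open_kernel()/pax_close_kernel(): make the page
     * holding the ops table temporarily writable. */
    static void set_writable(void *p, int writable)
    {
            long pagesz = sysconf(_SC_PAGESIZE);
            void *page = (void *)((uintptr_t)p & ~((uintptr_t)pagesz - 1));

            mprotect(page, pagesz,
                     writable ? PROT_READ | PROT_WRITE : PROT_READ);
    }

    int main(void)
    {
            struct isp_ops *ops = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            mprotect(ops, 4096, PROT_READ);      /* "const" ops table  */

            set_writable(ops, 1);                /* pax_open_kernel()  */
            *(void **)&ops->calc_req_entries = (void *)calc_iocbs_64;
            set_writable(ops, 0);                /* pax_close_kernel() */

            printf("%d\n", ops->calc_req_entries(41));
            return 0;
    }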
47242diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
47243index 41327d4..feb03d479 100644
47244--- a/drivers/scsi/qla4xxx/ql4_def.h
47245+++ b/drivers/scsi/qla4xxx/ql4_def.h
47246@@ -296,7 +296,7 @@ struct ddb_entry {
47247 * (4000 only) */
47248 atomic_t relogin_timer; /* Max Time to wait for
47249 * relogin to complete */
47250- atomic_t relogin_retry_count; /* Num of times relogin has been
47251+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
47252 * retried */
47253 uint32_t default_time2wait; /* Default Min time between
47254 * relogins (+aens) */
47255diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
47256index f8a0a26..ec03cee 100644
47257--- a/drivers/scsi/qla4xxx/ql4_os.c
47258+++ b/drivers/scsi/qla4xxx/ql4_os.c
47259@@ -3066,12 +3066,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
47260 */
47261 if (!iscsi_is_session_online(cls_sess)) {
47262 /* Reset retry relogin timer */
47263- atomic_inc(&ddb_entry->relogin_retry_count);
47264+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
47265 DEBUG2(ql4_printk(KERN_INFO, ha,
47266 "%s: index[%d] relogin timed out-retrying"
47267 " relogin (%d), retry (%d)\n", __func__,
47268 ddb_entry->fw_ddb_index,
47269- atomic_read(&ddb_entry->relogin_retry_count),
47270+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
47271 ddb_entry->default_time2wait + 4));
47272 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
47273 atomic_set(&ddb_entry->retry_relogin_timer,
47274@@ -5209,7 +5209,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
47275
47276 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
47277 atomic_set(&ddb_entry->relogin_timer, 0);
47278- atomic_set(&ddb_entry->relogin_retry_count, 0);
47279+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
47280 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
47281 ddb_entry->default_relogin_timeout =
47282 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
47283diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
47284index eaa808e..95f8841 100644
47285--- a/drivers/scsi/scsi.c
47286+++ b/drivers/scsi/scsi.c
47287@@ -661,7 +661,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
47288 unsigned long timeout;
47289 int rtn = 0;
47290
47291- atomic_inc(&cmd->device->iorequest_cnt);
47292+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
47293
47294 /* check if the device is still usable */
47295 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
47296diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
47297index d1549b7..2f60767 100644
47298--- a/drivers/scsi/scsi_lib.c
47299+++ b/drivers/scsi/scsi_lib.c
47300@@ -1474,7 +1474,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
47301 shost = sdev->host;
47302 scsi_init_cmd_errh(cmd);
47303 cmd->result = DID_NO_CONNECT << 16;
47304- atomic_inc(&cmd->device->iorequest_cnt);
47305+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
47306
47307 /*
47308 * SCSI request completion path will do scsi_device_unbusy(),
47309@@ -1500,9 +1500,9 @@ static void scsi_softirq_done(struct request *rq)
47310
47311 INIT_LIST_HEAD(&cmd->eh_entry);
47312
47313- atomic_inc(&cmd->device->iodone_cnt);
47314+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
47315 if (cmd->result)
47316- atomic_inc(&cmd->device->ioerr_cnt);
47317+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
47318
47319 disposition = scsi_decide_disposition(cmd);
47320 if (disposition != SUCCESS &&
47321diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
47322index 40c6394..62356c2 100644
47323--- a/drivers/scsi/scsi_sysfs.c
47324+++ b/drivers/scsi/scsi_sysfs.c
47325@@ -687,7 +687,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
47326 char *buf) \
47327 { \
47328 struct scsi_device *sdev = to_scsi_device(dev); \
47329- unsigned long long count = atomic_read(&sdev->field); \
47330+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
47331 return snprintf(buf, 20, "0x%llx\n", count); \
47332 } \
47333 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
47334diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
47335index 84a1fdf..693b0d6 100644
47336--- a/drivers/scsi/scsi_tgt_lib.c
47337+++ b/drivers/scsi/scsi_tgt_lib.c
47338@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
47339 int err;
47340
47341 dprintk("%lx %u\n", uaddr, len);
47342- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
47343+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
47344 if (err) {
47345 /*
47346 * TODO: need to fixup sg_tablesize, max_segment_size,
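The cast added above changes nothing at runtime: `__user` is a sparse annotation marking user-space pointers, and this blk_rq_map_user() path expects one, so passing a plain `void *` trips `make C=1`. How the annotation is wired up, simplified from include/linux/compiler.h:

    /* Under sparse (__CHECKER__), __user tags a separate address space
     * that cannot be mixed with kernel pointers without a cast. */
    #ifdef __CHECKER__
    # define __user __attribute__((noderef, address_space(1)))
    #else
    # define __user
    #endif

    void demo(unsigned long uaddr)
    {
            void __user *up;

            up = (void __user *)uaddr;      /* explicit, checker-clean     */
            /* void *kp = up; */            /* sparse: address space error */
            (void)up;
    }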
47347diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
47348index 4628fd5..a94a1c2 100644
47349--- a/drivers/scsi/scsi_transport_fc.c
47350+++ b/drivers/scsi/scsi_transport_fc.c
47351@@ -497,7 +497,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
47352 * Netlink Infrastructure
47353 */
47354
47355-static atomic_t fc_event_seq;
47356+static atomic_unchecked_t fc_event_seq;
47357
47358 /**
47359 * fc_get_event_number - Obtain the next sequential FC event number
47360@@ -510,7 +510,7 @@ static atomic_t fc_event_seq;
47361 u32
47362 fc_get_event_number(void)
47363 {
47364- return atomic_add_return(1, &fc_event_seq);
47365+ return atomic_add_return_unchecked(1, &fc_event_seq);
47366 }
47367 EXPORT_SYMBOL(fc_get_event_number);
47368
47369@@ -654,7 +654,7 @@ static __init int fc_transport_init(void)
47370 {
47371 int error;
47372
47373- atomic_set(&fc_event_seq, 0);
47374+ atomic_set_unchecked(&fc_event_seq, 0);
47375
47376 error = transport_class_register(&fc_host_class);
47377 if (error)
47378@@ -844,7 +844,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
47379 char *cp;
47380
47381 *val = simple_strtoul(buf, &cp, 0);
47382- if ((*cp && (*cp != '\n')) || (*val < 0))
47383+ if (*cp && (*cp != '\n'))
47384 return -EINVAL;
47385 /*
47386 * Check for overflow; dev_loss_tmo is u32
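The dev_loss_tmo hunk deletes a check that could never fire: `*val` is an `unsigned long`, so `*val < 0` is tautologically false (gcc's -Wtype-limits flags exactly this), and the u32 range check that follows in the function is the one that matters. A two-branch demonstration, meaningful on a 64-bit build:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            unsigned long val = strtoul("-1", NULL, 0); /* wraps to ULONG_MAX */

            if (val < 0)                    /* dead code: unsigned type      */
                    puts("unreachable");
            if (val > 0xffffffffUL)         /* the real u32 overflow check   */
                    puts("out of range for dev_loss_tmo");
            return 0;
    }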
47387diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
47388index e4a989f..293090c 100644
47389--- a/drivers/scsi/scsi_transport_iscsi.c
47390+++ b/drivers/scsi/scsi_transport_iscsi.c
47391@@ -79,7 +79,7 @@ struct iscsi_internal {
47392 struct transport_container session_cont;
47393 };
47394
47395-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
47396+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
47397 static struct workqueue_struct *iscsi_eh_timer_workq;
47398
47399 static DEFINE_IDA(iscsi_sess_ida);
47400@@ -1737,7 +1737,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
47401 int err;
47402
47403 ihost = shost->shost_data;
47404- session->sid = atomic_add_return(1, &iscsi_session_nr);
47405+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
47406
47407 if (target_id == ISCSI_MAX_TARGET) {
47408 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
47409@@ -4077,7 +4077,7 @@ static __init int iscsi_transport_init(void)
47410 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
47411 ISCSI_TRANSPORT_VERSION);
47412
47413- atomic_set(&iscsi_session_nr, 0);
47414+ atomic_set_unchecked(&iscsi_session_nr, 0);
47415
47416 err = class_register(&iscsi_transport_class);
47417 if (err)
47418diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
47419index f379c7f..e8fc69c 100644
47420--- a/drivers/scsi/scsi_transport_srp.c
47421+++ b/drivers/scsi/scsi_transport_srp.c
47422@@ -33,7 +33,7 @@
47423 #include "scsi_transport_srp_internal.h"
47424
47425 struct srp_host_attrs {
47426- atomic_t next_port_id;
47427+ atomic_unchecked_t next_port_id;
47428 };
47429 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
47430
47431@@ -61,7 +61,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
47432 struct Scsi_Host *shost = dev_to_shost(dev);
47433 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
47434
47435- atomic_set(&srp_host->next_port_id, 0);
47436+ atomic_set_unchecked(&srp_host->next_port_id, 0);
47437 return 0;
47438 }
47439
47440@@ -210,7 +210,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
47441 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
47442 rport->roles = ids->roles;
47443
47444- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
47445+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
47446 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
47447
47448 transport_setup_device(&rport->dev);
47449diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
47450index 2634d69..fcf7a81 100644
47451--- a/drivers/scsi/sd.c
47452+++ b/drivers/scsi/sd.c
47453@@ -2940,7 +2940,7 @@ static int sd_probe(struct device *dev)
47454 sdkp->disk = gd;
47455 sdkp->index = index;
47456 atomic_set(&sdkp->openers, 0);
47457- atomic_set(&sdkp->device->ioerr_cnt, 0);
47458+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
47459
47460 if (!sdp->request_queue->rq_timeout) {
47461 if (sdp->type != TYPE_MOD)
47462diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
47463index df5e961..df6b97f 100644
47464--- a/drivers/scsi/sg.c
47465+++ b/drivers/scsi/sg.c
47466@@ -1102,7 +1102,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
47467 sdp->disk->disk_name,
47468 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
47469 NULL,
47470- (char *)arg);
47471+ (char __user *)arg);
47472 case BLKTRACESTART:
47473 return blk_trace_startstop(sdp->device->request_queue, 1);
47474 case BLKTRACESTOP:
47475diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
47476index 9e039c6..ae9e800 100644
47477--- a/drivers/spi/spi.c
47478+++ b/drivers/spi/spi.c
47479@@ -1762,7 +1762,7 @@ int spi_bus_unlock(struct spi_master *master)
47480 EXPORT_SYMBOL_GPL(spi_bus_unlock);
47481
47482 /* portable code must never pass more than 32 bytes */
47483-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
47484+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
47485
47486 static u8 *buf;
47487
47488diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
47489index 2c61783..4d49e4e 100644
47490--- a/drivers/staging/android/timed_output.c
47491+++ b/drivers/staging/android/timed_output.c
47492@@ -25,7 +25,7 @@
47493 #include "timed_output.h"
47494
47495 static struct class *timed_output_class;
47496-static atomic_t device_count;
47497+static atomic_unchecked_t device_count;
47498
47499 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
47500 char *buf)
47501@@ -63,7 +63,7 @@ static int create_timed_output_class(void)
47502 timed_output_class = class_create(THIS_MODULE, "timed_output");
47503 if (IS_ERR(timed_output_class))
47504 return PTR_ERR(timed_output_class);
47505- atomic_set(&device_count, 0);
47506+ atomic_set_unchecked(&device_count, 0);
47507 timed_output_class->dev_groups = timed_output_groups;
47508 }
47509
47510@@ -81,7 +81,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
47511 if (ret < 0)
47512 return ret;
47513
47514- tdev->index = atomic_inc_return(&device_count);
47515+ tdev->index = atomic_inc_return_unchecked(&device_count);
47516 tdev->dev = device_create(timed_output_class, NULL,
47517 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
47518 if (IS_ERR(tdev->dev))
47519diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
47520index 0247a20..cb9595c 100644
47521--- a/drivers/staging/gdm724x/gdm_tty.c
47522+++ b/drivers/staging/gdm724x/gdm_tty.c
47523@@ -45,7 +45,7 @@
47524 #define gdm_tty_send_control(n, r, v, d, l) (\
47525 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
47526
47527-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
47528+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
47529
47530 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
47531 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
47532diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
47533index ef5064e..fce01db 100644
47534--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
47535+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
47536@@ -478,13 +478,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
47537 return 0;
47538 }
47539
47540-sfw_test_client_ops_t brw_test_client;
47541-void brw_init_test_client(void)
47542-{
47543- brw_test_client.tso_init = brw_client_init;
47544- brw_test_client.tso_fini = brw_client_fini;
47545- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
47546- brw_test_client.tso_done_rpc = brw_client_done_rpc;
47547+sfw_test_client_ops_t brw_test_client = {
47548+ .tso_init = brw_client_init,
47549+ .tso_fini = brw_client_fini,
47550+ .tso_prep_rpc = brw_client_prep_rpc,
47551+ .tso_done_rpc = brw_client_done_rpc,
47552 };
47553
47554 srpc_service_t brw_test_service;
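Replacing brw_init_test_client() with a designated initializer is a prerequisite for constification: an ops table assembled field-by-field at runtime can never be placed in read-only memory, while a compile-time initializer can. The general shape of the conversion, with stub types standing in for the selftest ones:

    #include <stdio.h>

    struct test_client_ops {
            int  (*tso_init)(void);
            void (*tso_fini)(void);
    };

    static int  brw_client_init(void) { return 0; }
    static void brw_client_fini(void) { }

    /* After: initialized at compile time, eligible for .rodata. */
    static const struct test_client_ops brw_test_client = {
            .tso_init = brw_client_init,
            .tso_fini = brw_client_fini,
    };

    int main(void)
    {
            /* Before, the driver had to call an init function first;
             * now the table is usable as soon as the object is loaded. */
            printf("%d\n", brw_test_client.tso_init());
            return 0;
    }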
47555diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
47556index 483c785..e1a2a7b 100644
47557--- a/drivers/staging/lustre/lnet/selftest/framework.c
47558+++ b/drivers/staging/lustre/lnet/selftest/framework.c
47559@@ -1635,12 +1635,10 @@ static srpc_service_t sfw_services[] =
47560
47561 extern sfw_test_client_ops_t ping_test_client;
47562 extern srpc_service_t ping_test_service;
47563-extern void ping_init_test_client(void);
47564 extern void ping_init_test_service(void);
47565
47566 extern sfw_test_client_ops_t brw_test_client;
47567 extern srpc_service_t brw_test_service;
47568-extern void brw_init_test_client(void);
47569 extern void brw_init_test_service(void);
47570
47571
47572@@ -1684,12 +1682,10 @@ sfw_startup (void)
47573 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
47574 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
47575
47576- brw_init_test_client();
47577 brw_init_test_service();
47578 rc = sfw_register_test(&brw_test_service, &brw_test_client);
47579 LASSERT (rc == 0);
47580
47581- ping_init_test_client();
47582 ping_init_test_service();
47583 rc = sfw_register_test(&ping_test_service, &ping_test_client);
47584 LASSERT (rc == 0);
47585diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
47586index f0f9194..b589047 100644
47587--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
47588+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
47589@@ -210,14 +210,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
47590 return 0;
47591 }
47592
47593-sfw_test_client_ops_t ping_test_client;
47594-void ping_init_test_client(void)
47595-{
47596- ping_test_client.tso_init = ping_client_init;
47597- ping_test_client.tso_fini = ping_client_fini;
47598- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
47599- ping_test_client.tso_done_rpc = ping_client_done_rpc;
47600-}
47601+sfw_test_client_ops_t ping_test_client = {
47602+ .tso_init = ping_client_init,
47603+ .tso_fini = ping_client_fini,
47604+ .tso_prep_rpc = ping_client_prep_rpc,
47605+ .tso_done_rpc = ping_client_done_rpc,
47606+};
47607
47608 srpc_service_t ping_test_service;
47609 void ping_init_test_service(void)
47610diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
47611index 7020d9c..0d3b580 100644
47612--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
47613+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
47614@@ -1141,7 +1141,7 @@ struct ldlm_callback_suite {
47615 ldlm_completion_callback lcs_completion;
47616 ldlm_blocking_callback lcs_blocking;
47617 ldlm_glimpse_callback lcs_glimpse;
47618-};
47619+} __no_const;
47620
47621 /* ldlm_lockd.c */
47622 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
47623diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
47624index a612255..9a9e2dd 100644
47625--- a/drivers/staging/lustre/lustre/include/obd.h
47626+++ b/drivers/staging/lustre/lustre/include/obd.h
47627@@ -1417,7 +1417,7 @@ struct md_ops {
47628 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
47629 * wrapper function in include/linux/obd_class.h.
47630 */
47631-};
47632+} __no_const;
47633
47634 struct lsm_operations {
47635 void (*lsm_free)(struct lov_stripe_md *);
47636diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
47637index fc6c977..df1f956 100644
47638--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
47639+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
47640@@ -219,7 +219,7 @@ DECLARE_PROC_HANDLER(proc_debug_mb)
47641 int LL_PROC_PROTO(proc_console_max_delay_cs)
47642 {
47643 int rc, max_delay_cs;
47644- ctl_table_t dummy = *table;
47645+ ctl_table_no_const dummy = *table;
47646 cfs_duration_t d;
47647
47648 dummy.data = &max_delay_cs;
47649@@ -250,7 +250,7 @@ int LL_PROC_PROTO(proc_console_max_delay_cs)
47650 int LL_PROC_PROTO(proc_console_min_delay_cs)
47651 {
47652 int rc, min_delay_cs;
47653- ctl_table_t dummy = *table;
47654+ ctl_table_no_const dummy = *table;
47655 cfs_duration_t d;
47656
47657 dummy.data = &min_delay_cs;
47658@@ -281,7 +281,7 @@ int LL_PROC_PROTO(proc_console_min_delay_cs)
47659 int LL_PROC_PROTO(proc_console_backoff)
47660 {
47661 int rc, backoff;
47662- ctl_table_t dummy = *table;
47663+ ctl_table_no_const dummy = *table;
47664
47665 dummy.data = &backoff;
47666 dummy.proc_handler = &proc_dointvec;
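The ctl_table_t to ctl_table_no_const swap in these proc handlers follows from the pattern they use: each one copies the sysctl table entry into a stack variable and redirects `.data` at a local before calling the generic handler. Once the real tables are const, the mutable shadow needs its own type. A sketch of the pattern with stub types, not the libcfs ones:

    struct ctl_table {
            void *data;
            int maxlen;
    };

    /* assumed writable alias generated alongside the constified type */
    typedef struct ctl_table ctl_table_no_const;

    static int proc_dointvec_stub(ctl_table_no_const *t)
    {
            return *(int *)t->data;
    }

    static int proc_console_max_delay_cs(const struct ctl_table *table)
    {
            int max_delay_cs = 42;
            ctl_table_no_const dummy = *table;   /* mutable stack copy */

            dummy.data = &max_delay_cs;          /* point at the local */
            dummy.maxlen = sizeof(int);
            return proc_dointvec_stub(&dummy);
    }

    int main(void)
    {
            struct ctl_table table = { 0, 0 };

            return proc_console_max_delay_cs(&table) == 42 ? 0 : 1;
    }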
47667diff --git a/drivers/staging/media/solo6x10/solo6x10-core.c b/drivers/staging/media/solo6x10/solo6x10-core.c
47668index 3675020..e80d92c 100644
47669--- a/drivers/staging/media/solo6x10/solo6x10-core.c
47670+++ b/drivers/staging/media/solo6x10/solo6x10-core.c
47671@@ -434,7 +434,7 @@ static void solo_device_release(struct device *dev)
47672
47673 static int solo_sysfs_init(struct solo_dev *solo_dev)
47674 {
47675- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
47676+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
47677 struct device *dev = &solo_dev->dev;
47678 const char *driver;
47679 int i;
47680diff --git a/drivers/staging/media/solo6x10/solo6x10-p2m.c b/drivers/staging/media/solo6x10/solo6x10-p2m.c
47681index 3335941..2b26186 100644
47682--- a/drivers/staging/media/solo6x10/solo6x10-p2m.c
47683+++ b/drivers/staging/media/solo6x10/solo6x10-p2m.c
47684@@ -77,7 +77,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
47685
47686 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
47687 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
47688- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
47689+ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
47690 if (p2m_id < 0)
47691 p2m_id = -p2m_id;
47692 }
47693diff --git a/drivers/staging/media/solo6x10/solo6x10.h b/drivers/staging/media/solo6x10/solo6x10.h
47694index 6f91d2e..3f011d2 100644
47695--- a/drivers/staging/media/solo6x10/solo6x10.h
47696+++ b/drivers/staging/media/solo6x10/solo6x10.h
47697@@ -238,7 +238,7 @@ struct solo_dev {
47698
47699 /* P2M DMA Engine */
47700 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
47701- atomic_t p2m_count;
47702+ atomic_unchecked_t p2m_count;
47703 int p2m_jiffies;
47704 unsigned int p2m_timeouts;
47705
47706diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
47707index e14a1bb..9cb9bbe 100644
47708--- a/drivers/staging/octeon/ethernet-rx.c
47709+++ b/drivers/staging/octeon/ethernet-rx.c
47710@@ -419,11 +419,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
47711 /* Increment RX stats for virtual ports */
47712 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
47713 #ifdef CONFIG_64BIT
47714- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
47715- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
47716+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
47717+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
47718 #else
47719- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
47720- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
47721+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
47722+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
47723 #endif
47724 }
47725 netif_receive_skb(skb);
47726@@ -434,9 +434,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
47727 dev->name);
47728 */
47729 #ifdef CONFIG_64BIT
47730- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
47731+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
47732 #else
47733- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
47734+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
47735 #endif
47736 dev_kfree_skb_irq(skb);
47737 }
47738diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
47739index c3a90e7..023619a 100644
47740--- a/drivers/staging/octeon/ethernet.c
47741+++ b/drivers/staging/octeon/ethernet.c
47742@@ -252,11 +252,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
47743 * since the RX tasklet also increments it.
47744 */
47745 #ifdef CONFIG_64BIT
47746- atomic64_add(rx_status.dropped_packets,
47747- (atomic64_t *)&priv->stats.rx_dropped);
47748+ atomic64_add_unchecked(rx_status.dropped_packets,
47749+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
47750 #else
47751- atomic_add(rx_status.dropped_packets,
47752- (atomic_t *)&priv->stats.rx_dropped);
47753+ atomic_add_unchecked(rx_status.dropped_packets,
47754+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
47755 #endif
47756 }
47757
47758diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
47759index 439c3c9..2d74293 100644
47760--- a/drivers/staging/rtl8188eu/include/hal_intf.h
47761+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
47762@@ -271,7 +271,7 @@ struct hal_ops {
47763 s32 (*c2h_handler)(struct adapter *padapter,
47764 struct c2h_evt_hdr *c2h_evt);
47765 c2h_id_filter c2h_id_filter_ccx;
47766-};
47767+} __no_const;
47768
47769 enum rt_eeprom_type {
47770 EEPROM_93C46,
47771diff --git a/drivers/staging/rtl8188eu/include/rtw_io.h b/drivers/staging/rtl8188eu/include/rtw_io.h
47772index eb6f0e5..e6a0958 100644
47773--- a/drivers/staging/rtl8188eu/include/rtw_io.h
47774+++ b/drivers/staging/rtl8188eu/include/rtw_io.h
47775@@ -126,7 +126,7 @@ struct _io_ops {
47776 u32 (*_write_scsi)(struct intf_hdl *pintfhdl,u32 cnt, u8 *pmem);
47777 void (*_read_port_cancel)(struct intf_hdl *pintfhdl);
47778 void (*_write_port_cancel)(struct intf_hdl *pintfhdl);
47779-};
47780+} __no_const;
47781
47782 struct io_req {
47783 struct list_head list;
47784diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
47785index dc23395..cf7e9b1 100644
47786--- a/drivers/staging/rtl8712/rtl871x_io.h
47787+++ b/drivers/staging/rtl8712/rtl871x_io.h
47788@@ -108,7 +108,7 @@ struct _io_ops {
47789 u8 *pmem);
47790 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
47791 u8 *pmem);
47792-};
47793+} __no_const;
47794
47795 struct io_req {
47796 struct list_head list;
47797diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
47798index 1f5088b..0e59820 100644
47799--- a/drivers/staging/sbe-2t3e3/netdev.c
47800+++ b/drivers/staging/sbe-2t3e3/netdev.c
47801@@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
47802 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
47803
47804 if (rlen)
47805- if (copy_to_user(data, &resp, rlen))
47806+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
47807 return -EFAULT;
47808
47809 return 0;
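The sbe-2t3e3 hunk is a straight infoleak fix: `rlen` comes back from the firmware/command path, and copying it unchecked would read past `resp` (a stack object) and hand kernel stack contents to user space, so the fix rejects any length beyond `sizeof resp`. A user-space analogue of the guard:

    #include <stdio.h>
    #include <string.h>

    struct resp { char payload[64]; };

    /* Analogue of the fixed ioctl path: never copy more than the
     * source object actually holds. */
    static int copy_out(char *user_buf, size_t user_len,
                        const struct resp *resp, size_t rlen)
    {
            if (rlen > sizeof(*resp))   /* the added bound: no over-read */
                    return -1;          /* the kernel returns -EFAULT    */
            if (rlen > user_len)
                    return -1;
            memcpy(user_buf, resp, rlen);
            return 0;
    }

    int main(void)
    {
            struct resp r = { "ok" };
            char buf[64];

            printf("%d\n", copy_out(buf, sizeof(buf), &r, 200)); /* rejected */
            printf("%d\n", copy_out(buf, sizeof(buf), &r, 3));   /* copied   */
            return 0;
    }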
47810diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
47811index a863a98..d272795 100644
47812--- a/drivers/staging/usbip/vhci.h
47813+++ b/drivers/staging/usbip/vhci.h
47814@@ -83,7 +83,7 @@ struct vhci_hcd {
47815 unsigned resuming:1;
47816 unsigned long re_timeout;
47817
47818- atomic_t seqnum;
47819+ atomic_unchecked_t seqnum;
47820
47821 /*
47822 * NOTE:
47823diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
47824index d7974cb..d78076b 100644
47825--- a/drivers/staging/usbip/vhci_hcd.c
47826+++ b/drivers/staging/usbip/vhci_hcd.c
47827@@ -441,7 +441,7 @@ static void vhci_tx_urb(struct urb *urb)
47828
47829 spin_lock(&vdev->priv_lock);
47830
47831- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
47832+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
47833 if (priv->seqnum == 0xffff)
47834 dev_info(&urb->dev->dev, "seqnum max\n");
47835
47836@@ -687,7 +687,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
47837 return -ENOMEM;
47838 }
47839
47840- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
47841+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
47842 if (unlink->seqnum == 0xffff)
47843 pr_info("seqnum max\n");
47844
47845@@ -891,7 +891,7 @@ static int vhci_start(struct usb_hcd *hcd)
47846 vdev->rhport = rhport;
47847 }
47848
47849- atomic_set(&vhci->seqnum, 0);
47850+ atomic_set_unchecked(&vhci->seqnum, 0);
47851 spin_lock_init(&vhci->lock);
47852
47853 hcd->power_budget = 0; /* no limit */
47854diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
47855index d07fcb5..358e1e1 100644
47856--- a/drivers/staging/usbip/vhci_rx.c
47857+++ b/drivers/staging/usbip/vhci_rx.c
47858@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
47859 if (!urb) {
47860 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
47861 pr_info("max seqnum %d\n",
47862- atomic_read(&the_controller->seqnum));
47863+ atomic_read_unchecked(&the_controller->seqnum));
47864 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
47865 return;
47866 }
47867diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
47868index 8acff44..bdb2fca 100644
47869--- a/drivers/staging/vt6655/hostap.c
47870+++ b/drivers/staging/vt6655/hostap.c
47871@@ -69,14 +69,13 @@ static int msglevel = MSG_LEVEL_INFO;
47872 *
47873 */
47874
47875+static net_device_ops_no_const apdev_netdev_ops;
47876+
47877 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
47878 {
47879 PSDevice apdev_priv;
47880 struct net_device *dev = pDevice->dev;
47881 int ret;
47882- const struct net_device_ops apdev_netdev_ops = {
47883- .ndo_start_xmit = pDevice->tx_80211,
47884- };
47885
47886 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
47887
47888@@ -88,6 +87,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
47889 *apdev_priv = *pDevice;
47890 eth_hw_addr_inherit(pDevice->apdev, dev);
47891
47892+ /* only half broken now */
47893+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
47894 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
47895
47896 pDevice->apdev->type = ARPHRD_IEEE80211;
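Besides cooperating with constification, this hunk fixes a lifetime bug: the old code handed the netdev the address of a function-local `net_device_ops`, which died when hostap_enable_hostapd() returned. The file-scope replacement stays writable (the `_no_const` variant) because one hook is only known per device at runtime -- hence the wry "only half broken now" comment. Reduced to its essentials with stub types:

    #include <stdio.h>

    struct net_device_ops { int (*ndo_start_xmit)(void); };
    struct net_device { const struct net_device_ops *netdev_ops; };

    static int tx_80211(void) { return 1; }

    /* file scope: outlives every caller, unlike the old stack local */
    static struct net_device_ops apdev_netdev_ops;

    static void hostap_enable(struct net_device *apdev, int (*tx)(void))
    {
            /* Old code took the address of an automatic const struct here,
             * leaving apdev->netdev_ops dangling after return. */
            apdev_netdev_ops.ndo_start_xmit = tx;   /* per-device, runtime */
            apdev->netdev_ops = &apdev_netdev_ops;
    }

    int main(void)
    {
            struct net_device apdev = { 0 };

            hostap_enable(&apdev, tx_80211);
            printf("%d\n", apdev.netdev_ops->ndo_start_xmit());
            return 0;
    }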
47897diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
47898index c699a30..b90a5fd 100644
47899--- a/drivers/staging/vt6656/hostap.c
47900+++ b/drivers/staging/vt6656/hostap.c
47901@@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
47902 *
47903 */
47904
47905+static net_device_ops_no_const apdev_netdev_ops;
47906+
47907 static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
47908 {
47909 struct vnt_private *apdev_priv;
47910 struct net_device *dev = pDevice->dev;
47911 int ret;
47912- const struct net_device_ops apdev_netdev_ops = {
47913- .ndo_start_xmit = pDevice->tx_80211,
47914- };
47915
47916 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
47917
47918@@ -79,6 +78,8 @@ static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
47919 *apdev_priv = *pDevice;
47920 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
47921
47922+ /* only half broken now */
47923+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
47924 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
47925
47926 pDevice->apdev->type = ARPHRD_IEEE80211;
47927diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
47928index e51b09a..5ebac31 100644
47929--- a/drivers/target/sbp/sbp_target.c
47930+++ b/drivers/target/sbp/sbp_target.c
47931@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
47932
47933 #define SESSION_MAINTENANCE_INTERVAL HZ
47934
47935-static atomic_t login_id = ATOMIC_INIT(0);
47936+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
47937
47938 static void session_maintenance_work(struct work_struct *);
47939 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
47940@@ -444,7 +444,7 @@ static void sbp_management_request_login(
47941 login->lun = se_lun;
47942 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
47943 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
47944- login->login_id = atomic_inc_return(&login_id);
47945+ login->login_id = atomic_inc_return_unchecked(&login_id);
47946
47947 login->tgt_agt = sbp_target_agent_register(login);
47948 if (IS_ERR(login->tgt_agt)) {
47949diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
47950index d90dbb0..6cbe585 100644
47951--- a/drivers/target/target_core_device.c
47952+++ b/drivers/target/target_core_device.c
47953@@ -1431,7 +1431,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
47954 spin_lock_init(&dev->se_tmr_lock);
47955 spin_lock_init(&dev->qf_cmd_lock);
47956 sema_init(&dev->caw_sem, 1);
47957- atomic_set(&dev->dev_ordered_id, 0);
47958+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
47959 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
47960 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
47961 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
47962diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
47963index 0b0009b..215e88e 100644
47964--- a/drivers/target/target_core_transport.c
47965+++ b/drivers/target/target_core_transport.c
47966@@ -1137,7 +1137,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
47967 * Used to determine when ORDERED commands should go from
47968 * Dormant to Active status.
47969 */
47970- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
47971+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
47972 smp_mb__after_atomic_inc();
47973 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
47974 cmd->se_ordered_id, cmd->sam_task_attr,
47975diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
47976index 33f83fe..d80f8e1 100644
47977--- a/drivers/tty/cyclades.c
47978+++ b/drivers/tty/cyclades.c
47979@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
47980 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
47981 info->port.count);
47982 #endif
47983- info->port.count++;
47984+ atomic_inc(&info->port.count);
47985 #ifdef CY_DEBUG_COUNT
47986 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
47987- current->pid, info->port.count);
47988+ current->pid, atomic_read(&info->port.count));
47989 #endif
47990
47991 /*
47992@@ -3972,7 +3972,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
47993 for (j = 0; j < cy_card[i].nports; j++) {
47994 info = &cy_card[i].ports[j];
47995
47996- if (info->port.count) {
47997+ if (atomic_read(&info->port.count)) {
47998 /* XXX is the ldisc num worth this? */
47999 struct tty_struct *tty;
48000 struct tty_ldisc *ld;
48001diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
48002index 9eba119..5070303 100644
48003--- a/drivers/tty/hvc/hvc_console.c
48004+++ b/drivers/tty/hvc/hvc_console.c
48005@@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
48006
48007 spin_lock_irqsave(&hp->port.lock, flags);
48008 /* Check and then increment for fast path open. */
48009- if (hp->port.count++ > 0) {
48010+ if (atomic_inc_return(&hp->port.count) > 1) {
48011 spin_unlock_irqrestore(&hp->port.lock, flags);
48012 hvc_kick();
48013 return 0;
48014@@ -393,7 +393,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
48015
48016 spin_lock_irqsave(&hp->port.lock, flags);
48017
48018- if (--hp->port.count == 0) {
48019+ if (atomic_dec_return(&hp->port.count) == 0) {
48020 spin_unlock_irqrestore(&hp->port.lock, flags);
48021 /* We are done with the tty pointer now. */
48022 tty_port_tty_set(&hp->port, NULL);
48023@@ -415,9 +415,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
48024 */
48025 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
48026 } else {
48027- if (hp->port.count < 0)
48028+ if (atomic_read(&hp->port.count) < 0)
48029 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
48030- hp->vtermno, hp->port.count);
48031+ hp->vtermno, atomic_read(&hp->port.count));
48032 spin_unlock_irqrestore(&hp->port.lock, flags);
48033 }
48034 }
48035@@ -447,12 +447,12 @@ static void hvc_hangup(struct tty_struct *tty)
48036 * open->hangup case this can be called after the final close so prevent
48037 * that from happening for now.
48038 */
48039- if (hp->port.count <= 0) {
48040+ if (atomic_read(&hp->port.count) <= 0) {
48041 spin_unlock_irqrestore(&hp->port.lock, flags);
48042 return;
48043 }
48044
48045- hp->port.count = 0;
48046+ atomic_set(&hp->port.count, 0);
48047 spin_unlock_irqrestore(&hp->port.lock, flags);
48048 tty_port_tty_set(&hp->port, NULL);
48049
48050@@ -500,7 +500,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
48051 return -EPIPE;
48052
48053 /* FIXME what's this (unprotected) check for? */
48054- if (hp->port.count <= 0)
48055+ if (atomic_read(&hp->port.count) <= 0)
48056 return -EIO;
48057
48058 spin_lock_irqsave(&hp->lock, flags);
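The hvc hunks show the care the port.count conversion needs: `hp->port.count++ > 0` tests the value before the increment, while atomic_inc_return() reports the value after it, so the comparison shifts from `> 0` to `> 1`. Getting this wrong would silently break the fast-path open. The equivalence, checked in miniature:

    #include <assert.h>
    #include <stdatomic.h>

    int main(void)
    {
            int count = 0;
            atomic_int acount = 0;

            /* old: post-increment, the OLD value is compared against 0 */
            int old_fast_path = (count++ > 0);

            /* new: inc_return yields the NEW value, so compare against 1 */
            int new_fast_path = (atomic_fetch_add(&acount, 1) + 1 > 1);

            assert(old_fast_path == new_fast_path);  /* first open: both 0 */
            assert(count == 1 && acount == 1);
            return 0;
    }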
48059diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
48060index 81e939e..95ead10 100644
48061--- a/drivers/tty/hvc/hvcs.c
48062+++ b/drivers/tty/hvc/hvcs.c
48063@@ -83,6 +83,7 @@
48064 #include <asm/hvcserver.h>
48065 #include <asm/uaccess.h>
48066 #include <asm/vio.h>
48067+#include <asm/local.h>
48068
48069 /*
48070 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
48071@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
48072
48073 spin_lock_irqsave(&hvcsd->lock, flags);
48074
48075- if (hvcsd->port.count > 0) {
48076+ if (atomic_read(&hvcsd->port.count) > 0) {
48077 spin_unlock_irqrestore(&hvcsd->lock, flags);
48078 printk(KERN_INFO "HVCS: vterm state unchanged. "
48079 "The hvcs device node is still in use.\n");
48080@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
48081 }
48082 }
48083
48084- hvcsd->port.count = 0;
48085+ atomic_set(&hvcsd->port.count, 0);
48086 hvcsd->port.tty = tty;
48087 tty->driver_data = hvcsd;
48088
48089@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
48090 unsigned long flags;
48091
48092 spin_lock_irqsave(&hvcsd->lock, flags);
48093- hvcsd->port.count++;
48094+ atomic_inc(&hvcsd->port.count);
48095 hvcsd->todo_mask |= HVCS_SCHED_READ;
48096 spin_unlock_irqrestore(&hvcsd->lock, flags);
48097
48098@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
48099 hvcsd = tty->driver_data;
48100
48101 spin_lock_irqsave(&hvcsd->lock, flags);
48102- if (--hvcsd->port.count == 0) {
48103+ if (atomic_dec_and_test(&hvcsd->port.count)) {
48104
48105 vio_disable_interrupts(hvcsd->vdev);
48106
48107@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
48108
48109 free_irq(irq, hvcsd);
48110 return;
48111- } else if (hvcsd->port.count < 0) {
48112+ } else if (atomic_read(&hvcsd->port.count) < 0) {
48113 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
48114 " is missmanaged.\n",
48115- hvcsd->vdev->unit_address, hvcsd->port.count);
48116+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
48117 }
48118
48119 spin_unlock_irqrestore(&hvcsd->lock, flags);
48120@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
48121
48122 spin_lock_irqsave(&hvcsd->lock, flags);
48123 /* Preserve this so that we know how many kref refs to put */
48124- temp_open_count = hvcsd->port.count;
48125+ temp_open_count = atomic_read(&hvcsd->port.count);
48126
48127 /*
48128 * Don't kref put inside the spinlock because the destruction
48129@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
48130 tty->driver_data = NULL;
48131 hvcsd->port.tty = NULL;
48132
48133- hvcsd->port.count = 0;
48134+ atomic_set(&hvcsd->port.count, 0);
48135
48136 /* This will drop any buffered data on the floor which is OK in a hangup
48137 * scenario. */
48138@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
48139 * the middle of a write operation? This is a crummy place to do this
48140 * but we want to keep it all in the spinlock.
48141 */
48142- if (hvcsd->port.count <= 0) {
48143+ if (atomic_read(&hvcsd->port.count) <= 0) {
48144 spin_unlock_irqrestore(&hvcsd->lock, flags);
48145 return -ENODEV;
48146 }
48147@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
48148 {
48149 struct hvcs_struct *hvcsd = tty->driver_data;
48150
48151- if (!hvcsd || hvcsd->port.count <= 0)
48152+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
48153 return 0;
48154
48155 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
48156diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
48157index 4190199..48f2920 100644
48158--- a/drivers/tty/hvc/hvsi.c
48159+++ b/drivers/tty/hvc/hvsi.c
48160@@ -85,7 +85,7 @@ struct hvsi_struct {
48161 int n_outbuf;
48162 uint32_t vtermno;
48163 uint32_t virq;
48164- atomic_t seqno; /* HVSI packet sequence number */
48165+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
48166 uint16_t mctrl;
48167 uint8_t state; /* HVSI protocol state */
48168 uint8_t flags;
48169@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
48170
48171 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
48172 packet.hdr.len = sizeof(struct hvsi_query_response);
48173- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
48174+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
48175 packet.verb = VSV_SEND_VERSION_NUMBER;
48176 packet.u.version = HVSI_VERSION;
48177 packet.query_seqno = query_seqno+1;
48178@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
48179
48180 packet.hdr.type = VS_QUERY_PACKET_HEADER;
48181 packet.hdr.len = sizeof(struct hvsi_query);
48182- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
48183+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
48184 packet.verb = verb;
48185
48186 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
48187@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
48188 int wrote;
48189
48190 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
48191- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
48192+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
48193 packet.hdr.len = sizeof(struct hvsi_control);
48194 packet.verb = VSV_SET_MODEM_CTL;
48195 packet.mask = HVSI_TSDTR;
48196@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
48197 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
48198
48199 packet.hdr.type = VS_DATA_PACKET_HEADER;
48200- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
48201+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
48202 packet.hdr.len = count + sizeof(struct hvsi_header);
48203 memcpy(&packet.data, buf, count);
48204
48205@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
48206 struct hvsi_control packet __ALIGNED__;
48207
48208 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
48209- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
48210+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
48211 packet.hdr.len = 6;
48212 packet.verb = VSV_CLOSE_PROTOCOL;
48213
48214diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
48215index ac27671..0f627ee 100644
48216--- a/drivers/tty/hvc/hvsi_lib.c
48217+++ b/drivers/tty/hvc/hvsi_lib.c
48218@@ -9,7 +9,7 @@
48219
48220 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
48221 {
48222- packet->seqno = atomic_inc_return(&pv->seqno);
48223+ packet->seqno = atomic_inc_return_unchecked(&pv->seqno);
48224
48225 /* Assumes that always succeeds, works in practice */
48226 return pv->put_chars(pv->termno, (char *)packet, packet->len);
48227@@ -21,7 +21,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
48228
48229 /* Reset state */
48230 pv->established = 0;
48231- atomic_set(&pv->seqno, 0);
48232+ atomic_set_unchecked(&pv->seqno, 0);
48233
48234 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
48235
48236@@ -265,7 +265,7 @@ int hvsilib_read_mctrl(struct hvsi_priv *pv)
48237 pv->mctrl_update = 0;
48238 q.hdr.type = VS_QUERY_PACKET_HEADER;
48239 q.hdr.len = sizeof(struct hvsi_query);
48240- q.hdr.seqno = atomic_inc_return(&pv->seqno);
48241+ q.hdr.seqno = atomic_inc_return_unchecked(&pv->seqno);
48242 q.verb = VSV_SEND_MODEM_CTL_STATUS;
48243 rc = hvsi_send_packet(pv, &q.hdr);
48244 if (rc <= 0) {
48245diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
48246index 8fd72ff..34a0bed 100644
48247--- a/drivers/tty/ipwireless/tty.c
48248+++ b/drivers/tty/ipwireless/tty.c
48249@@ -29,6 +29,7 @@
48250 #include <linux/tty_driver.h>
48251 #include <linux/tty_flip.h>
48252 #include <linux/uaccess.h>
48253+#include <asm/local.h>
48254
48255 #include "tty.h"
48256 #include "network.h"
48257@@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
48258 mutex_unlock(&tty->ipw_tty_mutex);
48259 return -ENODEV;
48260 }
48261- if (tty->port.count == 0)
48262+ if (atomic_read(&tty->port.count) == 0)
48263 tty->tx_bytes_queued = 0;
48264
48265- tty->port.count++;
48266+ atomic_inc(&tty->port.count);
48267
48268 tty->port.tty = linux_tty;
48269 linux_tty->driver_data = tty;
48270@@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
48271
48272 static void do_ipw_close(struct ipw_tty *tty)
48273 {
48274- tty->port.count--;
48275-
48276- if (tty->port.count == 0) {
48277+ if (atomic_dec_return(&tty->port.count) == 0) {
48278 struct tty_struct *linux_tty = tty->port.tty;
48279
48280 if (linux_tty != NULL) {
48281@@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
48282 return;
48283
48284 mutex_lock(&tty->ipw_tty_mutex);
48285- if (tty->port.count == 0) {
48286+ if (atomic_read(&tty->port.count) == 0) {
48287 mutex_unlock(&tty->ipw_tty_mutex);
48288 return;
48289 }
48290@@ -164,7 +163,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
48291
48292 mutex_lock(&tty->ipw_tty_mutex);
48293
48294- if (!tty->port.count) {
48295+ if (!atomic_read(&tty->port.count)) {
48296 mutex_unlock(&tty->ipw_tty_mutex);
48297 return;
48298 }
48299@@ -206,7 +205,7 @@ static int ipw_write(struct tty_struct *linux_tty,
48300 return -ENODEV;
48301
48302 mutex_lock(&tty->ipw_tty_mutex);
48303- if (!tty->port.count) {
48304+ if (!atomic_read(&tty->port.count)) {
48305 mutex_unlock(&tty->ipw_tty_mutex);
48306 return -EINVAL;
48307 }
48308@@ -246,7 +245,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
48309 if (!tty)
48310 return -ENODEV;
48311
48312- if (!tty->port.count)
48313+ if (!atomic_read(&tty->port.count))
48314 return -EINVAL;
48315
48316 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
48317@@ -288,7 +287,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
48318 if (!tty)
48319 return 0;
48320
48321- if (!tty->port.count)
48322+ if (!atomic_read(&tty->port.count))
48323 return 0;
48324
48325 return tty->tx_bytes_queued;
48326@@ -369,7 +368,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
48327 if (!tty)
48328 return -ENODEV;
48329
48330- if (!tty->port.count)
48331+ if (!atomic_read(&tty->port.count))
48332 return -EINVAL;
48333
48334 return get_control_lines(tty);
48335@@ -385,7 +384,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
48336 if (!tty)
48337 return -ENODEV;
48338
48339- if (!tty->port.count)
48340+ if (!atomic_read(&tty->port.count))
48341 return -EINVAL;
48342
48343 return set_control_lines(tty, set, clear);
48344@@ -399,7 +398,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
48345 if (!tty)
48346 return -ENODEV;
48347
48348- if (!tty->port.count)
48349+ if (!atomic_read(&tty->port.count))
48350 return -EINVAL;
48351
48352 /* FIXME: Exactly how is the tty object locked here .. */
48353@@ -555,7 +554,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
48354 * are gone */
48355 mutex_lock(&ttyj->ipw_tty_mutex);
48356 }
48357- while (ttyj->port.count)
48358+ while (atomic_read(&ttyj->port.count))
48359 do_ipw_close(ttyj);
48360 ipwireless_disassociate_network_ttys(network,
48361 ttyj->channel_idx);
48362diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
48363index 1deaca4..c8582d4 100644
48364--- a/drivers/tty/moxa.c
48365+++ b/drivers/tty/moxa.c
48366@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
48367 }
48368
48369 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
48370- ch->port.count++;
48371+ atomic_inc(&ch->port.count);
48372 tty->driver_data = ch;
48373 tty_port_tty_set(&ch->port, tty);
48374 mutex_lock(&ch->port.mutex);
48375diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
48376index c0f76da..d974c32 100644
48377--- a/drivers/tty/n_gsm.c
48378+++ b/drivers/tty/n_gsm.c
48379@@ -1632,7 +1632,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
48380 spin_lock_init(&dlci->lock);
48381 mutex_init(&dlci->mutex);
48382 dlci->fifo = &dlci->_fifo;
48383- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
48384+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
48385 kfree(dlci);
48386 return NULL;
48387 }
48388@@ -2935,7 +2935,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
48389 struct gsm_dlci *dlci = tty->driver_data;
48390 struct tty_port *port = &dlci->port;
48391
48392- port->count++;
48393+ atomic_inc(&port->count);
48394 dlci_get(dlci);
48395 dlci_get(dlci->gsm->dlci[0]);
48396 mux_get(dlci->gsm);
48397diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
48398index 4d6f430..0810fa9 100644
48399--- a/drivers/tty/n_tty.c
48400+++ b/drivers/tty/n_tty.c
48401@@ -2504,6 +2504,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
48402 {
48403 *ops = tty_ldisc_N_TTY;
48404 ops->owner = NULL;
48405- ops->refcount = ops->flags = 0;
48406+ atomic_set(&ops->refcount, 0);
48407+ ops->flags = 0;
48408 }
48409 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
48410diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
48411index 25c9bc7..24077b7 100644
48412--- a/drivers/tty/pty.c
48413+++ b/drivers/tty/pty.c
48414@@ -790,8 +790,10 @@ static void __init unix98_pty_init(void)
48415 panic("Couldn't register Unix98 pts driver");
48416
48417 /* Now create the /dev/ptmx special device */
48418+ pax_open_kernel();
48419 tty_default_fops(&ptmx_fops);
48420- ptmx_fops.open = ptmx_open;
48421+ *(void **)&ptmx_fops.open = ptmx_open;
48422+ pax_close_kernel();
48423
48424 cdev_init(&ptmx_cdev, &ptmx_fops);
48425 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
48426diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
48427index 354564e..fe50d9a 100644
48428--- a/drivers/tty/rocket.c
48429+++ b/drivers/tty/rocket.c
48430@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
48431 tty->driver_data = info;
48432 tty_port_tty_set(port, tty);
48433
48434- if (port->count++ == 0) {
48435+ if (atomic_inc_return(&port->count) == 1) {
48436 atomic_inc(&rp_num_ports_open);
48437
48438 #ifdef ROCKET_DEBUG_OPEN
48439@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
48440 #endif
48441 }
48442 #ifdef ROCKET_DEBUG_OPEN
48443- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
48444+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic-read(&info->port.count));
48445 #endif
48446
48447 /*
48448@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
48449 spin_unlock_irqrestore(&info->port.lock, flags);
48450 return;
48451 }
48452- if (info->port.count)
48453+ if (atomic_read(&info->port.count))
48454 atomic_dec(&rp_num_ports_open);
48455 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
48456 spin_unlock_irqrestore(&info->port.lock, flags);
48457diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
48458index 1274499..f541382 100644
48459--- a/drivers/tty/serial/ioc4_serial.c
48460+++ b/drivers/tty/serial/ioc4_serial.c
48461@@ -437,7 +437,7 @@ struct ioc4_soft {
48462 } is_intr_info[MAX_IOC4_INTR_ENTS];
48463
48464 /* Number of entries active in the above array */
48465- atomic_t is_num_intrs;
48466+ atomic_unchecked_t is_num_intrs;
48467 } is_intr_type[IOC4_NUM_INTR_TYPES];
48468
48469 /* is_ir_lock must be held while
48470@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
48471 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
48472 || (type == IOC4_OTHER_INTR_TYPE)));
48473
48474- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
48475+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
48476 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
48477
48478 /* Save off the lower level interrupt handler */
48479@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
48480
48481 soft = arg;
48482 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
48483- num_intrs = (int)atomic_read(
48484+ num_intrs = (int)atomic_read_unchecked(
48485 &soft->is_intr_type[intr_type].is_num_intrs);
48486
48487 this_mir = this_ir = pending_intrs(soft, intr_type);
48488diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
48489index a260cde..6b2b5ce 100644
48490--- a/drivers/tty/serial/kgdboc.c
48491+++ b/drivers/tty/serial/kgdboc.c
48492@@ -24,8 +24,9 @@
48493 #define MAX_CONFIG_LEN 40
48494
48495 static struct kgdb_io kgdboc_io_ops;
48496+static struct kgdb_io kgdboc_io_ops_console;
48497
48498-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
48499+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
48500 static int configured = -1;
48501
48502 static char config[MAX_CONFIG_LEN];
48503@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
48504 kgdboc_unregister_kbd();
48505 if (configured == 1)
48506 kgdb_unregister_io_module(&kgdboc_io_ops);
48507+ else if (configured == 2)
48508+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
48509 }
48510
48511 static int configure_kgdboc(void)
48512@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
48513 int err;
48514 char *cptr = config;
48515 struct console *cons;
48516+ int is_console = 0;
48517
48518 err = kgdboc_option_setup(config);
48519 if (err || !strlen(config) || isspace(config[0]))
48520 goto noconfig;
48521
48522 err = -ENODEV;
48523- kgdboc_io_ops.is_console = 0;
48524 kgdb_tty_driver = NULL;
48525
48526 kgdboc_use_kms = 0;
48527@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
48528 int idx;
48529 if (cons->device && cons->device(cons, &idx) == p &&
48530 idx == tty_line) {
48531- kgdboc_io_ops.is_console = 1;
48532+ is_console = 1;
48533 break;
48534 }
48535 cons = cons->next;
48536@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
48537 kgdb_tty_line = tty_line;
48538
48539 do_register:
48540- err = kgdb_register_io_module(&kgdboc_io_ops);
48541+ if (is_console) {
48542+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
48543+ configured = 2;
48544+ } else {
48545+ err = kgdb_register_io_module(&kgdboc_io_ops);
48546+ configured = 1;
48547+ }
48548 if (err)
48549 goto noconfig;
48550
48551@@ -205,8 +214,6 @@ do_register:
48552 if (err)
48553 goto nmi_con_failed;
48554
48555- configured = 1;
48556-
48557 return 0;
48558
48559 nmi_con_failed:
48560@@ -223,7 +230,7 @@ noconfig:
48561 static int __init init_kgdboc(void)
48562 {
48563 /* Already configured? */
48564- if (configured == 1)
48565+ if (configured >= 1)
48566 return 0;
48567
48568 return configure_kgdboc();
48569@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
48570 if (config[len - 1] == '\n')
48571 config[len - 1] = '\0';
48572
48573- if (configured == 1)
48574+ if (configured >= 1)
48575 cleanup_kgdboc();
48576
48577 /* Go and configure with the new params. */
48578@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
48579 .post_exception = kgdboc_post_exp_handler,
48580 };
48581
48582+static struct kgdb_io kgdboc_io_ops_console = {
48583+ .name = "kgdboc",
48584+ .read_char = kgdboc_get_char,
48585+ .write_char = kgdboc_put_char,
48586+ .pre_exception = kgdboc_pre_exp_handler,
48587+ .post_exception = kgdboc_post_exp_handler,
48588+ .is_console = 1
48589+};
48590+
48591 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
48592 /* This is only available if kgdboc is a built in for early debugging */
48593 static int __init kgdboc_early_init(char *opt)
48594diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
48595index b5d779c..3622cfe 100644
48596--- a/drivers/tty/serial/msm_serial.c
48597+++ b/drivers/tty/serial/msm_serial.c
48598@@ -897,7 +897,7 @@ static struct uart_driver msm_uart_driver = {
48599 .cons = MSM_CONSOLE,
48600 };
48601
48602-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
48603+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
48604
48605 static const struct of_device_id msm_uartdm_table[] = {
48606 { .compatible = "qcom,msm-uartdm" },
48607@@ -912,7 +912,7 @@ static int __init msm_serial_probe(struct platform_device *pdev)
48608 int irq;
48609
48610 if (pdev->id == -1)
48611- pdev->id = atomic_inc_return(&msm_uart_next_id) - 1;
48612+ pdev->id = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
48613
48614 if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
48615 return -ENXIO;
48616diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
48617index f3dfa19..342f2ff 100644
48618--- a/drivers/tty/serial/samsung.c
48619+++ b/drivers/tty/serial/samsung.c
48620@@ -456,11 +456,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
48621 }
48622 }
48623
48624+static int s3c64xx_serial_startup(struct uart_port *port);
48625 static int s3c24xx_serial_startup(struct uart_port *port)
48626 {
48627 struct s3c24xx_uart_port *ourport = to_ourport(port);
48628 int ret;
48629
48630+ /* Startup sequence is different for s3c64xx and higher SoCs */
48631+ if (s3c24xx_serial_has_interrupt_mask(port))
48632+ return s3c64xx_serial_startup(port);
48633+
48634 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
48635 port->mapbase, port->membase);
48636
48637@@ -1127,10 +1132,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
48638 /* setup info for port */
48639 port->dev = &platdev->dev;
48640
48641- /* Startup sequence is different for s3c64xx and higher SoC's */
48642- if (s3c24xx_serial_has_interrupt_mask(port))
48643- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
48644-
48645 port->uartclk = 1;
48646
48647 if (cfg->uart_flags & UPF_CONS_FLOW) {
48648diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
48649index 0f02351..07c59c5 100644
48650--- a/drivers/tty/serial/serial_core.c
48651+++ b/drivers/tty/serial/serial_core.c
48652@@ -1448,7 +1448,7 @@ static void uart_hangup(struct tty_struct *tty)
48653 uart_flush_buffer(tty);
48654 uart_shutdown(tty, state);
48655 spin_lock_irqsave(&port->lock, flags);
48656- port->count = 0;
48657+ atomic_set(&port->count, 0);
48658 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
48659 spin_unlock_irqrestore(&port->lock, flags);
48660 tty_port_tty_set(port, NULL);
48661@@ -1544,7 +1544,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
48662 goto end;
48663 }
48664
48665- port->count++;
48666+ atomic_inc(&port->count);
48667 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
48668 retval = -ENXIO;
48669 goto err_dec_count;
48670@@ -1572,7 +1572,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
48671 /*
48672 * Make sure the device is in D0 state.
48673 */
48674- if (port->count == 1)
48675+ if (atomic_read(&port->count) == 1)
48676 uart_change_pm(state, UART_PM_STATE_ON);
48677
48678 /*
48679@@ -1590,7 +1590,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
48680 end:
48681 return retval;
48682 err_dec_count:
48683- port->count--;
48684+ atomic_dec(&port->count);
48685 mutex_unlock(&port->mutex);
48686 goto end;
48687 }
48688diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
48689index e1ce141..6d4ed80 100644
48690--- a/drivers/tty/synclink.c
48691+++ b/drivers/tty/synclink.c
48692@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
48693
48694 if (debug_level >= DEBUG_LEVEL_INFO)
48695 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
48696- __FILE__,__LINE__, info->device_name, info->port.count);
48697+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
48698
48699 if (tty_port_close_start(&info->port, tty, filp) == 0)
48700 goto cleanup;
48701@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
48702 cleanup:
48703 if (debug_level >= DEBUG_LEVEL_INFO)
48704 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
48705- tty->driver->name, info->port.count);
48706+ tty->driver->name, atomic_read(&info->port.count));
48707
48708 } /* end of mgsl_close() */
48709
48710@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
48711
48712 mgsl_flush_buffer(tty);
48713 shutdown(info);
48714-
48715- info->port.count = 0;
48716+
48717+ atomic_set(&info->port.count, 0);
48718 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
48719 info->port.tty = NULL;
48720
48721@@ -3297,12 +3297,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
48722
48723 if (debug_level >= DEBUG_LEVEL_INFO)
48724 printk("%s(%d):block_til_ready before block on %s count=%d\n",
48725- __FILE__,__LINE__, tty->driver->name, port->count );
48726+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
48727
48728 spin_lock_irqsave(&info->irq_spinlock, flags);
48729 if (!tty_hung_up_p(filp)) {
48730 extra_count = true;
48731- port->count--;
48732+ atomic_dec(&port->count);
48733 }
48734 spin_unlock_irqrestore(&info->irq_spinlock, flags);
48735 port->blocked_open++;
48736@@ -3331,7 +3331,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
48737
48738 if (debug_level >= DEBUG_LEVEL_INFO)
48739 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
48740- __FILE__,__LINE__, tty->driver->name, port->count );
48741+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
48742
48743 tty_unlock(tty);
48744 schedule();
48745@@ -3343,12 +3343,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
48746
48747 /* FIXME: Racy on hangup during close wait */
48748 if (extra_count)
48749- port->count++;
48750+ atomic_inc(&port->count);
48751 port->blocked_open--;
48752
48753 if (debug_level >= DEBUG_LEVEL_INFO)
48754 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
48755- __FILE__,__LINE__, tty->driver->name, port->count );
48756+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
48757
48758 if (!retval)
48759 port->flags |= ASYNC_NORMAL_ACTIVE;
48760@@ -3400,7 +3400,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
48761
48762 if (debug_level >= DEBUG_LEVEL_INFO)
48763 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
48764- __FILE__,__LINE__,tty->driver->name, info->port.count);
48765+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
48766
48767 /* If port is closing, signal caller to try again */
48768 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
48769@@ -3419,10 +3419,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
48770 spin_unlock_irqrestore(&info->netlock, flags);
48771 goto cleanup;
48772 }
48773- info->port.count++;
48774+ atomic_inc(&info->port.count);
48775 spin_unlock_irqrestore(&info->netlock, flags);
48776
48777- if (info->port.count == 1) {
48778+ if (atomic_read(&info->port.count) == 1) {
48779 /* 1st open on this device, init hardware */
48780 retval = startup(info);
48781 if (retval < 0)
48782@@ -3446,8 +3446,8 @@ cleanup:
48783 if (retval) {
48784 if (tty->count == 1)
48785 info->port.tty = NULL; /* tty layer will release tty struct */
48786- if(info->port.count)
48787- info->port.count--;
48788+ if (atomic_read(&info->port.count))
48789+ atomic_dec(&info->port.count);
48790 }
48791
48792 return retval;
48793@@ -7665,7 +7665,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
48794 unsigned short new_crctype;
48795
48796 /* return error if TTY interface open */
48797- if (info->port.count)
48798+ if (atomic_read(&info->port.count))
48799 return -EBUSY;
48800
48801 switch (encoding)
48802@@ -7760,7 +7760,7 @@ static int hdlcdev_open(struct net_device *dev)
48803
48804 /* arbitrate between network and tty opens */
48805 spin_lock_irqsave(&info->netlock, flags);
48806- if (info->port.count != 0 || info->netcount != 0) {
48807+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
48808 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
48809 spin_unlock_irqrestore(&info->netlock, flags);
48810 return -EBUSY;
48811@@ -7846,7 +7846,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
48812 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
48813
48814 /* return error if TTY interface open */
48815- if (info->port.count)
48816+ if (atomic_read(&info->port.count))
48817 return -EBUSY;
48818
48819 if (cmd != SIOCWANDEV)
48820diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
48821index 1abf946..1ee34fc 100644
48822--- a/drivers/tty/synclink_gt.c
48823+++ b/drivers/tty/synclink_gt.c
48824@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
48825 tty->driver_data = info;
48826 info->port.tty = tty;
48827
48828- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
48829+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
48830
48831 /* If port is closing, signal caller to try again */
48832 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
48833@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
48834 mutex_unlock(&info->port.mutex);
48835 goto cleanup;
48836 }
48837- info->port.count++;
48838+ atomic_inc(&info->port.count);
48839 spin_unlock_irqrestore(&info->netlock, flags);
48840
48841- if (info->port.count == 1) {
48842+ if (atomic_read(&info->port.count) == 1) {
48843 /* 1st open on this device, init hardware */
48844 retval = startup(info);
48845 if (retval < 0) {
48846@@ -715,8 +715,8 @@ cleanup:
48847 if (retval) {
48848 if (tty->count == 1)
48849 info->port.tty = NULL; /* tty layer will release tty struct */
48850- if(info->port.count)
48851- info->port.count--;
48852+ if(atomic_read(&info->port.count))
48853+ atomic_dec(&info->port.count);
48854 }
48855
48856 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
48857@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
48858
48859 if (sanity_check(info, tty->name, "close"))
48860 return;
48861- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
48862+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
48863
48864 if (tty_port_close_start(&info->port, tty, filp) == 0)
48865 goto cleanup;
48866@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
48867 tty_port_close_end(&info->port, tty);
48868 info->port.tty = NULL;
48869 cleanup:
48870- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
48871+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
48872 }
48873
48874 static void hangup(struct tty_struct *tty)
48875@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
48876 shutdown(info);
48877
48878 spin_lock_irqsave(&info->port.lock, flags);
48879- info->port.count = 0;
48880+ atomic_set(&info->port.count, 0);
48881 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
48882 info->port.tty = NULL;
48883 spin_unlock_irqrestore(&info->port.lock, flags);
48884@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
48885 unsigned short new_crctype;
48886
48887 /* return error if TTY interface open */
48888- if (info->port.count)
48889+ if (atomic_read(&info->port.count))
48890 return -EBUSY;
48891
48892 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
48893@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
48894
48895 /* arbitrate between network and tty opens */
48896 spin_lock_irqsave(&info->netlock, flags);
48897- if (info->port.count != 0 || info->netcount != 0) {
48898+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
48899 DBGINFO(("%s hdlc_open busy\n", dev->name));
48900 spin_unlock_irqrestore(&info->netlock, flags);
48901 return -EBUSY;
48902@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
48903 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
48904
48905 /* return error if TTY interface open */
48906- if (info->port.count)
48907+ if (atomic_read(&info->port.count))
48908 return -EBUSY;
48909
48910 if (cmd != SIOCWANDEV)
48911@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
48912 if (port == NULL)
48913 continue;
48914 spin_lock(&port->lock);
48915- if ((port->port.count || port->netcount) &&
48916+ if ((atomic_read(&port->port.count) || port->netcount) &&
48917 port->pending_bh && !port->bh_running &&
48918 !port->bh_requested) {
48919 DBGISR(("%s bh queued\n", port->device_name));
48920@@ -3302,7 +3302,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
48921 spin_lock_irqsave(&info->lock, flags);
48922 if (!tty_hung_up_p(filp)) {
48923 extra_count = true;
48924- port->count--;
48925+ atomic_dec(&port->count);
48926 }
48927 spin_unlock_irqrestore(&info->lock, flags);
48928 port->blocked_open++;
48929@@ -3339,7 +3339,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
48930 remove_wait_queue(&port->open_wait, &wait);
48931
48932 if (extra_count)
48933- port->count++;
48934+ atomic_inc(&port->count);
48935 port->blocked_open--;
48936
48937 if (!retval)
48938diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
48939index dc6e969..5dc8786 100644
48940--- a/drivers/tty/synclinkmp.c
48941+++ b/drivers/tty/synclinkmp.c
48942@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
48943
48944 if (debug_level >= DEBUG_LEVEL_INFO)
48945 printk("%s(%d):%s open(), old ref count = %d\n",
48946- __FILE__,__LINE__,tty->driver->name, info->port.count);
48947+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
48948
48949 /* If port is closing, signal caller to try again */
48950 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
48951@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
48952 spin_unlock_irqrestore(&info->netlock, flags);
48953 goto cleanup;
48954 }
48955- info->port.count++;
48956+ atomic_inc(&info->port.count);
48957 spin_unlock_irqrestore(&info->netlock, flags);
48958
48959- if (info->port.count == 1) {
48960+ if (atomic_read(&info->port.count) == 1) {
48961 /* 1st open on this device, init hardware */
48962 retval = startup(info);
48963 if (retval < 0)
48964@@ -796,8 +796,8 @@ cleanup:
48965 if (retval) {
48966 if (tty->count == 1)
48967 info->port.tty = NULL; /* tty layer will release tty struct */
48968- if(info->port.count)
48969- info->port.count--;
48970+ if(atomic_read(&info->port.count))
48971+ atomic_dec(&info->port.count);
48972 }
48973
48974 return retval;
48975@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
48976
48977 if (debug_level >= DEBUG_LEVEL_INFO)
48978 printk("%s(%d):%s close() entry, count=%d\n",
48979- __FILE__,__LINE__, info->device_name, info->port.count);
48980+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
48981
48982 if (tty_port_close_start(&info->port, tty, filp) == 0)
48983 goto cleanup;
48984@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
48985 cleanup:
48986 if (debug_level >= DEBUG_LEVEL_INFO)
48987 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
48988- tty->driver->name, info->port.count);
48989+ tty->driver->name, atomic_read(&info->port.count));
48990 }
48991
48992 /* Called by tty_hangup() when a hangup is signaled.
48993@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
48994 shutdown(info);
48995
48996 spin_lock_irqsave(&info->port.lock, flags);
48997- info->port.count = 0;
48998+ atomic_set(&info->port.count, 0);
48999 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
49000 info->port.tty = NULL;
49001 spin_unlock_irqrestore(&info->port.lock, flags);
49002@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
49003 unsigned short new_crctype;
49004
49005 /* return error if TTY interface open */
49006- if (info->port.count)
49007+ if (atomic_read(&info->port.count))
49008 return -EBUSY;
49009
49010 switch (encoding)
49011@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
49012
49013 /* arbitrate between network and tty opens */
49014 spin_lock_irqsave(&info->netlock, flags);
49015- if (info->port.count != 0 || info->netcount != 0) {
49016+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
49017 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
49018 spin_unlock_irqrestore(&info->netlock, flags);
49019 return -EBUSY;
49020@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
49021 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
49022
49023 /* return error if TTY interface open */
49024- if (info->port.count)
49025+ if (atomic_read(&info->port.count))
49026 return -EBUSY;
49027
49028 if (cmd != SIOCWANDEV)
49029@@ -2620,7 +2620,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
49030 * do not request bottom half processing if the
49031 * device is not open in a normal mode.
49032 */
49033- if ( port && (port->port.count || port->netcount) &&
49034+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
49035 port->pending_bh && !port->bh_running &&
49036 !port->bh_requested ) {
49037 if ( debug_level >= DEBUG_LEVEL_ISR )
49038@@ -3318,12 +3318,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
49039
49040 if (debug_level >= DEBUG_LEVEL_INFO)
49041 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
49042- __FILE__,__LINE__, tty->driver->name, port->count );
49043+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
49044
49045 spin_lock_irqsave(&info->lock, flags);
49046 if (!tty_hung_up_p(filp)) {
49047 extra_count = true;
49048- port->count--;
49049+ atomic_dec(&port->count);
49050 }
49051 spin_unlock_irqrestore(&info->lock, flags);
49052 port->blocked_open++;
49053@@ -3352,7 +3352,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
49054
49055 if (debug_level >= DEBUG_LEVEL_INFO)
49056 printk("%s(%d):%s block_til_ready() count=%d\n",
49057- __FILE__,__LINE__, tty->driver->name, port->count );
49058+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
49059
49060 tty_unlock(tty);
49061 schedule();
49062@@ -3363,12 +3363,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
49063 remove_wait_queue(&port->open_wait, &wait);
49064
49065 if (extra_count)
49066- port->count++;
49067+ atomic_inc(&port->count);
49068 port->blocked_open--;
49069
49070 if (debug_level >= DEBUG_LEVEL_INFO)
49071 printk("%s(%d):%s block_til_ready() after, count=%d\n",
49072- __FILE__,__LINE__, tty->driver->name, port->count );
49073+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
49074
49075 if (!retval)
49076 port->flags |= ASYNC_NORMAL_ACTIVE;
49077diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
49078index 40a9fe9..a3f10cc 100644
49079--- a/drivers/tty/sysrq.c
49080+++ b/drivers/tty/sysrq.c
49081@@ -1075,7 +1075,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
49082 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
49083 size_t count, loff_t *ppos)
49084 {
49085- if (count) {
49086+ if (count && capable(CAP_SYS_ADMIN)) {
49087 char c;
49088
49089 if (get_user(c, buf))
49090diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
49091index c74a00a..02cf211a 100644
49092--- a/drivers/tty/tty_io.c
49093+++ b/drivers/tty/tty_io.c
49094@@ -3474,7 +3474,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
49095
49096 void tty_default_fops(struct file_operations *fops)
49097 {
49098- *fops = tty_fops;
49099+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
49100 }
49101
49102 /*
49103diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
49104index 6458e11..6cfc218 100644
49105--- a/drivers/tty/tty_ldisc.c
49106+++ b/drivers/tty/tty_ldisc.c
49107@@ -72,7 +72,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
49108 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
49109 tty_ldiscs[disc] = new_ldisc;
49110 new_ldisc->num = disc;
49111- new_ldisc->refcount = 0;
49112+ atomic_set(&new_ldisc->refcount, 0);
49113 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
49114
49115 return ret;
49116@@ -100,7 +100,7 @@ int tty_unregister_ldisc(int disc)
49117 return -EINVAL;
49118
49119 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
49120- if (tty_ldiscs[disc]->refcount)
49121+ if (atomic_read(&tty_ldiscs[disc]->refcount))
49122 ret = -EBUSY;
49123 else
49124 tty_ldiscs[disc] = NULL;
49125@@ -121,7 +121,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
49126 if (ldops) {
49127 ret = ERR_PTR(-EAGAIN);
49128 if (try_module_get(ldops->owner)) {
49129- ldops->refcount++;
49130+ atomic_inc(&ldops->refcount);
49131 ret = ldops;
49132 }
49133 }
49134@@ -134,7 +134,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
49135 unsigned long flags;
49136
49137 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
49138- ldops->refcount--;
49139+ atomic_dec(&ldops->refcount);
49140 module_put(ldops->owner);
49141 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
49142 }
49143diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
49144index f597e88..b7f68ed 100644
49145--- a/drivers/tty/tty_port.c
49146+++ b/drivers/tty/tty_port.c
49147@@ -232,7 +232,7 @@ void tty_port_hangup(struct tty_port *port)
49148 unsigned long flags;
49149
49150 spin_lock_irqsave(&port->lock, flags);
49151- port->count = 0;
49152+ atomic_set(&port->count, 0);
49153 port->flags &= ~ASYNC_NORMAL_ACTIVE;
49154 tty = port->tty;
49155 if (tty)
49156@@ -390,7 +390,7 @@ int tty_port_block_til_ready(struct tty_port *port,
49157 /* The port lock protects the port counts */
49158 spin_lock_irqsave(&port->lock, flags);
49159 if (!tty_hung_up_p(filp))
49160- port->count--;
49161+ atomic_dec(&port->count);
49162 port->blocked_open++;
49163 spin_unlock_irqrestore(&port->lock, flags);
49164
49165@@ -432,7 +432,7 @@ int tty_port_block_til_ready(struct tty_port *port,
49166 we must not mess that up further */
49167 spin_lock_irqsave(&port->lock, flags);
49168 if (!tty_hung_up_p(filp))
49169- port->count++;
49170+ atomic_inc(&port->count);
49171 port->blocked_open--;
49172 if (retval == 0)
49173 port->flags |= ASYNC_NORMAL_ACTIVE;
49174@@ -466,19 +466,19 @@ int tty_port_close_start(struct tty_port *port,
49175 return 0;
49176 }
49177
49178- if (tty->count == 1 && port->count != 1) {
49179+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
49180 printk(KERN_WARNING
49181 "tty_port_close_start: tty->count = 1 port count = %d.\n",
49182- port->count);
49183- port->count = 1;
49184+ atomic_read(&port->count));
49185+ atomic_set(&port->count, 1);
49186 }
49187- if (--port->count < 0) {
49188+ if (atomic_dec_return(&port->count) < 0) {
49189 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
49190- port->count);
49191- port->count = 0;
49192+ atomic_read(&port->count));
49193+ atomic_set(&port->count, 0);
49194 }
49195
49196- if (port->count) {
49197+ if (atomic_read(&port->count)) {
49198 spin_unlock_irqrestore(&port->lock, flags);
49199 if (port->ops->drop)
49200 port->ops->drop(port);
49201@@ -564,7 +564,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
49202 {
49203 spin_lock_irq(&port->lock);
49204 if (!tty_hung_up_p(filp))
49205- ++port->count;
49206+ atomic_inc(&port->count);
49207 spin_unlock_irq(&port->lock);
49208 tty_port_tty_set(port, tty);
49209
49210diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
49211index d0e3a44..5f8b754 100644
49212--- a/drivers/tty/vt/keyboard.c
49213+++ b/drivers/tty/vt/keyboard.c
49214@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
49215 kbd->kbdmode == VC_OFF) &&
49216 value != KVAL(K_SAK))
49217 return; /* SAK is allowed even in raw mode */
49218+
49219+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
49220+ {
49221+ void *func = fn_handler[value];
49222+ if (func == fn_show_state || func == fn_show_ptregs ||
49223+ func == fn_show_mem)
49224+ return;
49225+ }
49226+#endif
49227+
49228 fn_handler[value](vc);
49229 }
49230
49231@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
49232 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
49233 return -EFAULT;
49234
49235- if (!capable(CAP_SYS_TTY_CONFIG))
49236- perm = 0;
49237-
49238 switch (cmd) {
49239 case KDGKBENT:
49240 /* Ensure another thread doesn't free it under us */
49241@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
49242 spin_unlock_irqrestore(&kbd_event_lock, flags);
49243 return put_user(val, &user_kbe->kb_value);
49244 case KDSKBENT:
49245+ if (!capable(CAP_SYS_TTY_CONFIG))
49246+ perm = 0;
49247+
49248 if (!perm)
49249 return -EPERM;
49250 if (!i && v == K_NOSUCHMAP) {
49251@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
49252 int i, j, k;
49253 int ret;
49254
49255- if (!capable(CAP_SYS_TTY_CONFIG))
49256- perm = 0;
49257-
49258 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
49259 if (!kbs) {
49260 ret = -ENOMEM;
49261@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
49262 kfree(kbs);
49263 return ((p && *p) ? -EOVERFLOW : 0);
49264 case KDSKBSENT:
49265+ if (!capable(CAP_SYS_TTY_CONFIG))
49266+ perm = 0;
49267+
49268 if (!perm) {
49269 ret = -EPERM;
49270 goto reterr;
49271diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
49272index 0e808cf..d7d274b 100644
49273--- a/drivers/uio/uio.c
49274+++ b/drivers/uio/uio.c
49275@@ -25,6 +25,7 @@
49276 #include <linux/kobject.h>
49277 #include <linux/cdev.h>
49278 #include <linux/uio_driver.h>
49279+#include <asm/local.h>
49280
49281 #define UIO_MAX_DEVICES (1U << MINORBITS)
49282
49283@@ -32,7 +33,7 @@ struct uio_device {
49284 struct module *owner;
49285 struct device *dev;
49286 int minor;
49287- atomic_t event;
49288+ atomic_unchecked_t event;
49289 struct fasync_struct *async_queue;
49290 wait_queue_head_t wait;
49291 struct uio_info *info;
49292@@ -243,7 +244,7 @@ static ssize_t event_show(struct device *dev,
49293 struct device_attribute *attr, char *buf)
49294 {
49295 struct uio_device *idev = dev_get_drvdata(dev);
49296- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
49297+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
49298 }
49299 static DEVICE_ATTR_RO(event);
49300
49301@@ -401,7 +402,7 @@ void uio_event_notify(struct uio_info *info)
49302 {
49303 struct uio_device *idev = info->uio_dev;
49304
49305- atomic_inc(&idev->event);
49306+ atomic_inc_unchecked(&idev->event);
49307 wake_up_interruptible(&idev->wait);
49308 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
49309 }
49310@@ -454,7 +455,7 @@ static int uio_open(struct inode *inode, struct file *filep)
49311 }
49312
49313 listener->dev = idev;
49314- listener->event_count = atomic_read(&idev->event);
49315+ listener->event_count = atomic_read_unchecked(&idev->event);
49316 filep->private_data = listener;
49317
49318 if (idev->info->open) {
49319@@ -505,7 +506,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
49320 return -EIO;
49321
49322 poll_wait(filep, &idev->wait, wait);
49323- if (listener->event_count != atomic_read(&idev->event))
49324+ if (listener->event_count != atomic_read_unchecked(&idev->event))
49325 return POLLIN | POLLRDNORM;
49326 return 0;
49327 }
49328@@ -530,7 +531,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
49329 do {
49330 set_current_state(TASK_INTERRUPTIBLE);
49331
49332- event_count = atomic_read(&idev->event);
49333+ event_count = atomic_read_unchecked(&idev->event);
49334 if (event_count != listener->event_count) {
49335 if (copy_to_user(buf, &event_count, count))
49336 retval = -EFAULT;
49337@@ -587,9 +588,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
49338 static int uio_find_mem_index(struct vm_area_struct *vma)
49339 {
49340 struct uio_device *idev = vma->vm_private_data;
49341+ unsigned long size;
49342
49343 if (vma->vm_pgoff < MAX_UIO_MAPS) {
49344- if (idev->info->mem[vma->vm_pgoff].size == 0)
49345+ size = idev->info->mem[vma->vm_pgoff].size;
49346+ if (size == 0)
49347+ return -1;
49348+ if (vma->vm_end - vma->vm_start > size)
49349 return -1;
49350 return (int)vma->vm_pgoff;
49351 }
49352@@ -647,6 +652,8 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
49353 return -EINVAL;
49354 mem = idev->info->mem + mi;
49355
49356+ if (mem->addr & ~PAGE_MASK)
49357+ return -ENODEV;
49358 if (vma->vm_end - vma->vm_start > mem->size)
49359 return -EINVAL;
49360
49361@@ -818,7 +825,7 @@ int __uio_register_device(struct module *owner,
49362 idev->owner = owner;
49363 idev->info = info;
49364 init_waitqueue_head(&idev->wait);
49365- atomic_set(&idev->event, 0);
49366+ atomic_set_unchecked(&idev->event, 0);
49367
49368 ret = uio_get_minor(idev);
49369 if (ret)
49370diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
49371index 8a7eb77..c00402f 100644
49372--- a/drivers/usb/atm/cxacru.c
49373+++ b/drivers/usb/atm/cxacru.c
49374@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
49375 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
49376 if (ret < 2)
49377 return -EINVAL;
49378- if (index < 0 || index > 0x7f)
49379+ if (index > 0x7f)
49380 return -EINVAL;
49381 pos += tmp;
49382
49383diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
49384index 25a7bfc..57f3cf5 100644
49385--- a/drivers/usb/atm/usbatm.c
49386+++ b/drivers/usb/atm/usbatm.c
49387@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
49388 if (printk_ratelimit())
49389 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
49390 __func__, vpi, vci);
49391- atomic_inc(&vcc->stats->rx_err);
49392+ atomic_inc_unchecked(&vcc->stats->rx_err);
49393 return;
49394 }
49395
49396@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
49397 if (length > ATM_MAX_AAL5_PDU) {
49398 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
49399 __func__, length, vcc);
49400- atomic_inc(&vcc->stats->rx_err);
49401+ atomic_inc_unchecked(&vcc->stats->rx_err);
49402 goto out;
49403 }
49404
49405@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
49406 if (sarb->len < pdu_length) {
49407 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
49408 __func__, pdu_length, sarb->len, vcc);
49409- atomic_inc(&vcc->stats->rx_err);
49410+ atomic_inc_unchecked(&vcc->stats->rx_err);
49411 goto out;
49412 }
49413
49414 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
49415 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
49416 __func__, vcc);
49417- atomic_inc(&vcc->stats->rx_err);
49418+ atomic_inc_unchecked(&vcc->stats->rx_err);
49419 goto out;
49420 }
49421
49422@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
49423 if (printk_ratelimit())
49424 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
49425 __func__, length);
49426- atomic_inc(&vcc->stats->rx_drop);
49427+ atomic_inc_unchecked(&vcc->stats->rx_drop);
49428 goto out;
49429 }
49430
49431@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
49432
49433 vcc->push(vcc, skb);
49434
49435- atomic_inc(&vcc->stats->rx);
49436+ atomic_inc_unchecked(&vcc->stats->rx);
49437 out:
49438 skb_trim(sarb, 0);
49439 }
49440@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
49441 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
49442
49443 usbatm_pop(vcc, skb);
49444- atomic_inc(&vcc->stats->tx);
49445+ atomic_inc_unchecked(&vcc->stats->tx);
49446
49447 skb = skb_dequeue(&instance->sndqueue);
49448 }
49449@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
49450 if (!left--)
49451 return sprintf(page,
49452 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
49453- atomic_read(&atm_dev->stats.aal5.tx),
49454- atomic_read(&atm_dev->stats.aal5.tx_err),
49455- atomic_read(&atm_dev->stats.aal5.rx),
49456- atomic_read(&atm_dev->stats.aal5.rx_err),
49457- atomic_read(&atm_dev->stats.aal5.rx_drop));
49458+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
49459+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
49460+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
49461+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
49462+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
49463
49464 if (!left--) {
49465 if (instance->disconnected)
49466diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
49467index 2a3bbdf..91d72cf 100644
49468--- a/drivers/usb/core/devices.c
49469+++ b/drivers/usb/core/devices.c
49470@@ -126,7 +126,7 @@ static const char format_endpt[] =
49471 * time it gets called.
49472 */
49473 static struct device_connect_event {
49474- atomic_t count;
49475+ atomic_unchecked_t count;
49476 wait_queue_head_t wait;
49477 } device_event = {
49478 .count = ATOMIC_INIT(1),
49479@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
49480
49481 void usbfs_conn_disc_event(void)
49482 {
49483- atomic_add(2, &device_event.count);
49484+ atomic_add_unchecked(2, &device_event.count);
49485 wake_up(&device_event.wait);
49486 }
49487
49488@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
49489
49490 poll_wait(file, &device_event.wait, wait);
49491
49492- event_count = atomic_read(&device_event.count);
49493+ event_count = atomic_read_unchecked(&device_event.count);
49494 if (file->f_version != event_count) {
49495 file->f_version = event_count;
49496 return POLLIN | POLLRDNORM;
49497diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
49498index 71dc5d7..300db0e 100644
49499--- a/drivers/usb/core/devio.c
49500+++ b/drivers/usb/core/devio.c
49501@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
49502 struct dev_state *ps = file->private_data;
49503 struct usb_device *dev = ps->dev;
49504 ssize_t ret = 0;
49505- unsigned len;
49506+ size_t len;
49507 loff_t pos;
49508 int i;
49509
49510@@ -229,16 +229,16 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
49511 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
49512 struct usb_config_descriptor *config =
49513 (struct usb_config_descriptor *)dev->rawdescriptors[i];
49514- unsigned int length = le16_to_cpu(config->wTotalLength);
49515+ size_t length = le16_to_cpu(config->wTotalLength);
49516
49517 if (*ppos < pos + length) {
49518
49519 /* The descriptor may claim to be longer than it
49520 * really is. Here is the actual allocated length. */
49521- unsigned alloclen =
49522+ size_t alloclen =
49523 le16_to_cpu(dev->config[i].desc.wTotalLength);
49524
49525- len = length - (*ppos - pos);
49526+ len = length + pos - *ppos;
49527 if (len > nbytes)
49528 len = nbytes;
49529
49530diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
49531index f20a044..d1059aa 100644
49532--- a/drivers/usb/core/hcd.c
49533+++ b/drivers/usb/core/hcd.c
49534@@ -1552,7 +1552,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
49535 */
49536 usb_get_urb(urb);
49537 atomic_inc(&urb->use_count);
49538- atomic_inc(&urb->dev->urbnum);
49539+ atomic_inc_unchecked(&urb->dev->urbnum);
49540 usbmon_urb_submit(&hcd->self, urb);
49541
49542 /* NOTE requirements on root-hub callers (usbfs and the hub
49543@@ -1579,7 +1579,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
49544 urb->hcpriv = NULL;
49545 INIT_LIST_HEAD(&urb->urb_list);
49546 atomic_dec(&urb->use_count);
49547- atomic_dec(&urb->dev->urbnum);
49548+ atomic_dec_unchecked(&urb->dev->urbnum);
49549 if (atomic_read(&urb->reject))
49550 wake_up(&usb_kill_urb_queue);
49551 usb_put_urb(urb);
49552diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
49553index c5c3667..e54e5cd 100644
49554--- a/drivers/usb/core/hub.c
49555+++ b/drivers/usb/core/hub.c
49556@@ -27,6 +27,7 @@
49557 #include <linux/freezer.h>
49558 #include <linux/random.h>
49559 #include <linux/pm_qos.h>
49560+#include <linux/grsecurity.h>
49561
49562 #include <asm/uaccess.h>
49563 #include <asm/byteorder.h>
49564@@ -4467,6 +4468,10 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
49565 goto done;
49566 return;
49567 }
49568+
49569+ if (gr_handle_new_usb())
49570+ goto done;
49571+
49572 if (hub_is_superspeed(hub->hdev))
49573 unit_load = 150;
49574 else
49575diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
49576index 82927e1..4993dbf 100644
49577--- a/drivers/usb/core/message.c
49578+++ b/drivers/usb/core/message.c
49579@@ -129,7 +129,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
49580 * Return: If successful, the number of bytes transferred. Otherwise, a negative
49581 * error number.
49582 */
49583-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
49584+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
49585 __u8 requesttype, __u16 value, __u16 index, void *data,
49586 __u16 size, int timeout)
49587 {
49588@@ -181,7 +181,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
49589 * If successful, 0. Otherwise a negative error number. The number of actual
49590 * bytes transferred will be stored in the @actual_length parameter.
49591 */
49592-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
49593+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
49594 void *data, int len, int *actual_length, int timeout)
49595 {
49596 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
49597@@ -221,7 +221,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
49598 * bytes transferred will be stored in the @actual_length parameter.
49599 *
49600 */
49601-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
49602+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
49603 void *data, int len, int *actual_length, int timeout)
49604 {
49605 struct urb *urb;
49606diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
49607index ca516ac..6c36ee4 100644
49608--- a/drivers/usb/core/sysfs.c
49609+++ b/drivers/usb/core/sysfs.c
49610@@ -236,7 +236,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
49611 struct usb_device *udev;
49612
49613 udev = to_usb_device(dev);
49614- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
49615+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
49616 }
49617 static DEVICE_ATTR_RO(urbnum);
49618
49619diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
49620index 0a6ee2e..6f8d7e8 100644
49621--- a/drivers/usb/core/usb.c
49622+++ b/drivers/usb/core/usb.c
49623@@ -433,7 +433,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
49624 set_dev_node(&dev->dev, dev_to_node(bus->controller));
49625 dev->state = USB_STATE_ATTACHED;
49626 dev->lpm_disable_count = 1;
49627- atomic_set(&dev->urbnum, 0);
49628+ atomic_set_unchecked(&dev->urbnum, 0);
49629
49630 INIT_LIST_HEAD(&dev->ep0.urb_list);
49631 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
49632diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
49633index 02e44fc..3c4fe64 100644
49634--- a/drivers/usb/dwc3/gadget.c
49635+++ b/drivers/usb/dwc3/gadget.c
49636@@ -532,8 +532,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
49637 if (!usb_endpoint_xfer_isoc(desc))
49638 return 0;
49639
49640- memset(&trb_link, 0, sizeof(trb_link));
49641-
49642 /* Link TRB for ISOC. The HWO bit is never reset */
49643 trb_st_hw = &dep->trb_pool[0];
49644
49645diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
49646index 5e29dde..eca992f 100644
49647--- a/drivers/usb/early/ehci-dbgp.c
49648+++ b/drivers/usb/early/ehci-dbgp.c
49649@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
49650
49651 #ifdef CONFIG_KGDB
49652 static struct kgdb_io kgdbdbgp_io_ops;
49653-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
49654+static struct kgdb_io kgdbdbgp_io_ops_console;
49655+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
49656 #else
49657 #define dbgp_kgdb_mode (0)
49658 #endif
49659@@ -1047,6 +1048,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
49660 .write_char = kgdbdbgp_write_char,
49661 };
49662
49663+static struct kgdb_io kgdbdbgp_io_ops_console = {
49664+ .name = "kgdbdbgp",
49665+ .read_char = kgdbdbgp_read_char,
49666+ .write_char = kgdbdbgp_write_char,
49667+ .is_console = 1
49668+};
49669+
49670 static int kgdbdbgp_wait_time;
49671
49672 static int __init kgdbdbgp_parse_config(char *str)
49673@@ -1062,8 +1070,10 @@ static int __init kgdbdbgp_parse_config(char *str)
49674 ptr++;
49675 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
49676 }
49677- kgdb_register_io_module(&kgdbdbgp_io_ops);
49678- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
49679+ if (early_dbgp_console.index != -1)
49680+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
49681+ else
49682+ kgdb_register_io_module(&kgdbdbgp_io_ops);
49683
49684 return 0;
49685 }
49686diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
49687index b369292..9f3ba40 100644
49688--- a/drivers/usb/gadget/u_serial.c
49689+++ b/drivers/usb/gadget/u_serial.c
49690@@ -733,9 +733,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
49691 spin_lock_irq(&port->port_lock);
49692
49693 /* already open? Great. */
49694- if (port->port.count) {
49695+ if (atomic_read(&port->port.count)) {
49696 status = 0;
49697- port->port.count++;
49698+ atomic_inc(&port->port.count);
49699
49700 /* currently opening/closing? wait ... */
49701 } else if (port->openclose) {
49702@@ -794,7 +794,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
49703 tty->driver_data = port;
49704 port->port.tty = tty;
49705
49706- port->port.count = 1;
49707+ atomic_set(&port->port.count, 1);
49708 port->openclose = false;
49709
49710 /* if connected, start the I/O stream */
49711@@ -836,11 +836,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
49712
49713 spin_lock_irq(&port->port_lock);
49714
49715- if (port->port.count != 1) {
49716- if (port->port.count == 0)
49717+ if (atomic_read(&port->port.count) != 1) {
49718+ if (atomic_read(&port->port.count) == 0)
49719 WARN_ON(1);
49720 else
49721- --port->port.count;
49722+ atomic_dec(&port->port.count);
49723 goto exit;
49724 }
49725
49726@@ -850,7 +850,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
49727 * and sleep if necessary
49728 */
49729 port->openclose = true;
49730- port->port.count = 0;
49731+ atomic_set(&port->port.count, 0);
49732
49733 gser = port->port_usb;
49734 if (gser && gser->disconnect)
49735@@ -1066,7 +1066,7 @@ static int gs_closed(struct gs_port *port)
49736 int cond;
49737
49738 spin_lock_irq(&port->port_lock);
49739- cond = (port->port.count == 0) && !port->openclose;
49740+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
49741 spin_unlock_irq(&port->port_lock);
49742 return cond;
49743 }
49744@@ -1209,7 +1209,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
49745 /* if it's already open, start I/O ... and notify the serial
49746 * protocol about open/close status (connect/disconnect).
49747 */
49748- if (port->port.count) {
49749+ if (atomic_read(&port->port.count)) {
49750 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
49751 gs_start_io(port);
49752 if (gser->connect)
49753@@ -1256,7 +1256,7 @@ void gserial_disconnect(struct gserial *gser)
49754
49755 port->port_usb = NULL;
49756 gser->ioport = NULL;
49757- if (port->port.count > 0 || port->openclose) {
49758+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
49759 wake_up_interruptible(&port->drain_wait);
49760 if (port->port.tty)
49761 tty_hangup(port->port.tty);
49762@@ -1272,7 +1272,7 @@ void gserial_disconnect(struct gserial *gser)
49763
49764 /* finally, free any unused/unusable I/O buffers */
49765 spin_lock_irqsave(&port->port_lock, flags);
49766- if (port->port.count == 0 && !port->openclose)
49767+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
49768 gs_buf_free(&port->port_write_buf);
49769 gs_free_requests(gser->out, &port->read_pool, NULL);
49770 gs_free_requests(gser->out, &port->read_queue, NULL);
49771diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
49772index 835fc08..f8b22bf 100644
49773--- a/drivers/usb/host/ehci-hub.c
49774+++ b/drivers/usb/host/ehci-hub.c
49775@@ -762,7 +762,7 @@ static struct urb *request_single_step_set_feature_urb(
49776 urb->transfer_flags = URB_DIR_IN;
49777 usb_get_urb(urb);
49778 atomic_inc(&urb->use_count);
49779- atomic_inc(&urb->dev->urbnum);
49780+ atomic_inc_unchecked(&urb->dev->urbnum);
49781 urb->setup_dma = dma_map_single(
49782 hcd->self.controller,
49783 urb->setup_packet,
49784@@ -829,7 +829,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
49785 urb->status = -EINPROGRESS;
49786 usb_get_urb(urb);
49787 atomic_inc(&urb->use_count);
49788- atomic_inc(&urb->dev->urbnum);
49789+ atomic_inc_unchecked(&urb->dev->urbnum);
49790 retval = submit_single_step_set_feature(hcd, urb, 0);
49791 if (!retval && !wait_for_completion_timeout(&done,
49792 msecs_to_jiffies(2000))) {
49793diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
49794index ba6a5d6..f88f7f3 100644
49795--- a/drivers/usb/misc/appledisplay.c
49796+++ b/drivers/usb/misc/appledisplay.c
49797@@ -83,7 +83,7 @@ struct appledisplay {
49798 spinlock_t lock;
49799 };
49800
49801-static atomic_t count_displays = ATOMIC_INIT(0);
49802+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
49803 static struct workqueue_struct *wq;
49804
49805 static void appledisplay_complete(struct urb *urb)
49806@@ -281,7 +281,7 @@ static int appledisplay_probe(struct usb_interface *iface,
49807
49808 /* Register backlight device */
49809 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
49810- atomic_inc_return(&count_displays) - 1);
49811+ atomic_inc_return_unchecked(&count_displays) - 1);
49812 memset(&props, 0, sizeof(struct backlight_properties));
49813 props.type = BACKLIGHT_RAW;
49814 props.max_brightness = 0xff;
49815diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
49816index c69bb50..215ef37 100644
49817--- a/drivers/usb/serial/console.c
49818+++ b/drivers/usb/serial/console.c
49819@@ -124,7 +124,7 @@ static int usb_console_setup(struct console *co, char *options)
49820
49821 info->port = port;
49822
49823- ++port->port.count;
49824+ atomic_inc(&port->port.count);
49825 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
49826 if (serial->type->set_termios) {
49827 /*
49828@@ -170,7 +170,7 @@ static int usb_console_setup(struct console *co, char *options)
49829 }
49830 /* Now that any required fake tty operations are completed restore
49831 * the tty port count */
49832- --port->port.count;
49833+ atomic_dec(&port->port.count);
49834 /* The console is special in terms of closing the device so
49835 * indicate this port is now acting as a system console. */
49836 port->port.console = 1;
49837@@ -183,7 +183,7 @@ static int usb_console_setup(struct console *co, char *options)
49838 free_tty:
49839 kfree(tty);
49840 reset_open_count:
49841- port->port.count = 0;
49842+ atomic_set(&port->port.count, 0);
49843 usb_autopm_put_interface(serial->interface);
49844 error_get_interface:
49845 usb_serial_put(serial);
49846@@ -194,7 +194,7 @@ static int usb_console_setup(struct console *co, char *options)
49847 static void usb_console_write(struct console *co,
49848 const char *buf, unsigned count)
49849 {
49850- static struct usbcons_info *info = &usbcons_info;
49851+ struct usbcons_info *info = &usbcons_info;
49852 struct usb_serial_port *port = info->port;
49853 struct usb_serial *serial;
49854 int retval = -ENODEV;
49855diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
49856index 75f70f0..d467e1a 100644
49857--- a/drivers/usb/storage/usb.h
49858+++ b/drivers/usb/storage/usb.h
49859@@ -63,7 +63,7 @@ struct us_unusual_dev {
49860 __u8 useProtocol;
49861 __u8 useTransport;
49862 int (*initFunction)(struct us_data *);
49863-};
49864+} __do_const;
49865
49866
49867 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
49868diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
49869index cf250c2..ad9d904 100644
49870--- a/drivers/usb/wusbcore/wa-hc.h
49871+++ b/drivers/usb/wusbcore/wa-hc.h
49872@@ -199,7 +199,7 @@ struct wahc {
49873 spinlock_t xfer_list_lock;
49874 struct work_struct xfer_enqueue_work;
49875 struct work_struct xfer_error_work;
49876- atomic_t xfer_id_count;
49877+ atomic_unchecked_t xfer_id_count;
49878 };
49879
49880
49881@@ -255,7 +255,7 @@ static inline void wa_init(struct wahc *wa)
49882 spin_lock_init(&wa->xfer_list_lock);
49883 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
49884 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
49885- atomic_set(&wa->xfer_id_count, 1);
49886+ atomic_set_unchecked(&wa->xfer_id_count, 1);
49887 }
49888
49889 /**
49890diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
49891index 3dcf66f..8faaf6e 100644
49892--- a/drivers/usb/wusbcore/wa-xfer.c
49893+++ b/drivers/usb/wusbcore/wa-xfer.c
49894@@ -300,7 +300,7 @@ out:
49895 */
49896 static void wa_xfer_id_init(struct wa_xfer *xfer)
49897 {
49898- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
49899+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
49900 }
49901
49902 /*
49903diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
49904index 1eab4ac..e21efc9 100644
49905--- a/drivers/vfio/vfio.c
49906+++ b/drivers/vfio/vfio.c
49907@@ -488,7 +488,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
49908 return 0;
49909
49910 /* TODO Prevent device auto probing */
49911- WARN("Device %s added to live group %d!\n", dev_name(dev),
49912+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
49913 iommu_group_id(group->iommu_group));
49914
49915 return 0;
49916diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
49917index 5174eba..86e764a 100644
49918--- a/drivers/vhost/vringh.c
49919+++ b/drivers/vhost/vringh.c
49920@@ -800,7 +800,7 @@ static inline int getu16_kern(u16 *val, const u16 *p)
49921
49922 static inline int putu16_kern(u16 *p, u16 val)
49923 {
49924- ACCESS_ONCE(*p) = val;
49925+ ACCESS_ONCE_RW(*p) = val;
49926 return 0;
49927 }
49928
49929diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c
49930index e43401a..dd49b3f 100644
49931--- a/drivers/video/arcfb.c
49932+++ b/drivers/video/arcfb.c
49933@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
49934 return -ENOSPC;
49935
49936 err = 0;
49937- if ((count + p) > fbmemlength) {
49938+ if (count > (fbmemlength - p)) {
49939 count = fbmemlength - p;
49940 err = -ENOSPC;
49941 }
49942diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
49943index a4dfe8c..297ddd9 100644
49944--- a/drivers/video/aty/aty128fb.c
49945+++ b/drivers/video/aty/aty128fb.c
49946@@ -149,7 +149,7 @@ enum {
49947 };
49948
49949 /* Must match above enum */
49950-static char * const r128_family[] = {
49951+static const char * const r128_family[] = {
49952 "AGP",
49953 "PCI",
49954 "PRO AGP",
49955diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
49956index 9b0f12c..024673d 100644
49957--- a/drivers/video/aty/atyfb_base.c
49958+++ b/drivers/video/aty/atyfb_base.c
49959@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
49960 par->accel_flags = var->accel_flags; /* hack */
49961
49962 if (var->accel_flags) {
49963- info->fbops->fb_sync = atyfb_sync;
49964+ pax_open_kernel();
49965+ *(void **)&info->fbops->fb_sync = atyfb_sync;
49966+ pax_close_kernel();
49967 info->flags &= ~FBINFO_HWACCEL_DISABLED;
49968 } else {
49969- info->fbops->fb_sync = NULL;
49970+ pax_open_kernel();
49971+ *(void **)&info->fbops->fb_sync = NULL;
49972+ pax_close_kernel();
49973 info->flags |= FBINFO_HWACCEL_DISABLED;
49974 }
49975
49976diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
49977index 95ec042..e6affdd 100644
49978--- a/drivers/video/aty/mach64_cursor.c
49979+++ b/drivers/video/aty/mach64_cursor.c
49980@@ -7,6 +7,7 @@
49981 #include <linux/string.h>
49982
49983 #include <asm/io.h>
49984+#include <asm/pgtable.h>
49985
49986 #ifdef __sparc__
49987 #include <asm/fbio.h>
49988@@ -208,7 +209,9 @@ int aty_init_cursor(struct fb_info *info)
49989 info->sprite.buf_align = 16; /* and 64 lines tall. */
49990 info->sprite.flags = FB_PIXMAP_IO;
49991
49992- info->fbops->fb_cursor = atyfb_cursor;
49993+ pax_open_kernel();
49994+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
49995+ pax_close_kernel();
49996
49997 return 0;
49998 }
49999diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
50000index bca6ccc..252107e 100644
50001--- a/drivers/video/backlight/kb3886_bl.c
50002+++ b/drivers/video/backlight/kb3886_bl.c
50003@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
50004 static unsigned long kb3886bl_flags;
50005 #define KB3886BL_SUSPENDED 0x01
50006
50007-static struct dmi_system_id __initdata kb3886bl_device_table[] = {
50008+static const struct dmi_system_id __initconst kb3886bl_device_table[] = {
50009 {
50010 .ident = "Sahara Touch-iT",
50011 .matches = {
50012diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
50013index 900aa4e..6d49418 100644
50014--- a/drivers/video/fb_defio.c
50015+++ b/drivers/video/fb_defio.c
50016@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
50017
50018 BUG_ON(!fbdefio);
50019 mutex_init(&fbdefio->lock);
50020- info->fbops->fb_mmap = fb_deferred_io_mmap;
50021+ pax_open_kernel();
50022+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
50023+ pax_close_kernel();
50024 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
50025 INIT_LIST_HEAD(&fbdefio->pagelist);
50026 if (fbdefio->delay == 0) /* set a default of 1 s */
50027@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
50028 page->mapping = NULL;
50029 }
50030
50031- info->fbops->fb_mmap = NULL;
50032+ *(void **)&info->fbops->fb_mmap = NULL;
50033 mutex_destroy(&fbdefio->lock);
50034 }
50035 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
50036diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
50037index dacaf74..8478a46 100644
50038--- a/drivers/video/fbmem.c
50039+++ b/drivers/video/fbmem.c
50040@@ -433,7 +433,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
50041 image->dx += image->width + 8;
50042 }
50043 } else if (rotate == FB_ROTATE_UD) {
50044- for (x = 0; x < num && image->dx >= 0; x++) {
50045+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
50046 info->fbops->fb_imageblit(info, image);
50047 image->dx -= image->width + 8;
50048 }
50049@@ -445,7 +445,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
50050 image->dy += image->height + 8;
50051 }
50052 } else if (rotate == FB_ROTATE_CCW) {
50053- for (x = 0; x < num && image->dy >= 0; x++) {
50054+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
50055 info->fbops->fb_imageblit(info, image);
50056 image->dy -= image->height + 8;
50057 }
50058@@ -1175,7 +1175,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
50059 return -EFAULT;
50060 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
50061 return -EINVAL;
50062- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
50063+ if (con2fb.framebuffer >= FB_MAX)
50064 return -EINVAL;
50065 if (!registered_fb[con2fb.framebuffer])
50066 request_module("fb%d", con2fb.framebuffer);
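
The fbmem.c fixes share one theme: fb_image's dx/dy fields (and
con2fb.framebuffer) are unsigned, so the removed "< 0" and implicit ">= 0"
tests could never fire, and the rotate loops would march on through wrapped
offsets. Casting to __s32 restores a working termination test, as this
runnable sketch shows (types mirror struct fb_image; values are made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t dx = 20;            /* image->dx */
        const uint32_t step = 24;    /* image->width + 8 */

        /* Unsigned ">= 0" is always true (compilers warn about it),
         * so only the count bound ever stops this loop. */
        for (int x = 0; x < 4 && dx >= 0; x++) {
            printf("unsigned: dx=%u\n", dx);
            dx -= step;              /* wraps past zero */
        }

        dx = 20;
        for (int x = 0; x < 4 && (int32_t)dx >= 0; x++) {  /* fixed */
            printf("signed:   dx=%u\n", dx);
            dx -= step;              /* next test sees -4 and stops */
        }
        return 0;
    }
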
50067diff --git a/drivers/video/hyperv_fb.c b/drivers/video/hyperv_fb.c
50068index 8d456dc..b4fa44b 100644
50069--- a/drivers/video/hyperv_fb.c
50070+++ b/drivers/video/hyperv_fb.c
50071@@ -233,7 +233,7 @@ static uint screen_fb_size;
50072 static inline int synthvid_send(struct hv_device *hdev,
50073 struct synthvid_msg *msg)
50074 {
50075- static atomic64_t request_id = ATOMIC64_INIT(0);
50076+ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
50077 int ret;
50078
50079 msg->pipe_hdr.type = PIPE_MSG_DATA;
50080@@ -241,7 +241,7 @@ static inline int synthvid_send(struct hv_device *hdev,
50081
50082 ret = vmbus_sendpacket(hdev->channel, msg,
50083 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
50084- atomic64_inc_return(&request_id),
50085+ atomic64_inc_return_unchecked(&request_id),
50086 VM_PKT_DATA_INBAND, 0);
50087
50088 if (ret)
50089diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
50090index 7672d2e..b56437f 100644
50091--- a/drivers/video/i810/i810_accel.c
50092+++ b/drivers/video/i810/i810_accel.c
50093@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
50094 }
50095 }
50096 printk("ringbuffer lockup!!!\n");
50097+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
50098 i810_report_error(mmio);
50099 par->dev_flags |= LOCKUP;
50100 info->pixmap.scan_align = 1;
50101diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
50102index 3c14e43..2630570 100644
50103--- a/drivers/video/logo/logo_linux_clut224.ppm
50104+++ b/drivers/video/logo/logo_linux_clut224.ppm
50105@@ -2,1603 +2,1123 @@ P3
50106 # Standard 224-color Linux logo
50107 80 80
50108 255
[image data: the remainder of this hunk is raw pixel content. It deletes the
old 80x80, 224-color Linux logo as 1603 rows of PPM RGB triplets; the 1123
replacement rows follow past the end of this excerpt. Only the PPM header
context above (P3, comment, 80 80, 255) carries recoverable information.]
51529- 0 0 0 0 0 0 0 0 0 0 0 0
51530- 0 0 0 0 0 0 6 6 6 18 18 18
51531- 50 50 50 158 118 36 192 133 9 200 144 11
51532-216 158 10 219 162 10 224 166 10 226 170 11
51533-230 174 11 236 178 12 239 182 13 239 182 13
51534-242 186 14 246 186 14 246 190 14 246 190 14
51535-246 190 14 246 190 14 246 190 14 246 190 14
51536-246 186 14 230 174 11 210 150 10 163 110 8
51537-104 69 6 10 10 10 2 2 6 2 2 6
51538- 2 2 6 2 2 6 2 2 6 2 2 6
51539- 2 2 6 2 2 6 2 2 6 2 2 6
51540- 2 2 6 2 2 6 2 2 6 2 2 6
51541- 2 2 6 6 6 6 91 60 6 167 114 7
51542-206 145 10 230 174 11 242 186 14 246 190 14
51543-246 190 14 246 190 14 246 186 14 242 186 14
51544-239 182 13 230 174 11 224 166 10 213 154 11
51545-180 133 36 124 112 88 86 86 86 58 58 58
51546- 38 38 38 22 22 22 10 10 10 6 6 6
51547- 0 0 0 0 0 0 0 0 0 0 0 0
51548- 0 0 0 0 0 0 0 0 0 0 0 0
51549- 0 0 0 0 0 0 0 0 0 0 0 0
51550- 0 0 0 0 0 0 0 0 0 14 14 14
51551- 34 34 34 70 70 70 138 110 50 158 118 36
51552-167 114 7 180 123 7 192 133 9 197 138 11
51553-200 144 11 206 145 10 213 154 11 219 162 10
51554-224 166 10 230 174 11 239 182 13 242 186 14
51555-246 186 14 246 186 14 246 186 14 246 186 14
51556-239 182 13 216 158 10 185 133 11 152 99 6
51557-104 69 6 18 14 6 2 2 6 2 2 6
51558- 2 2 6 2 2 6 2 2 6 2 2 6
51559- 2 2 6 2 2 6 2 2 6 2 2 6
51560- 2 2 6 2 2 6 2 2 6 2 2 6
51561- 2 2 6 6 6 6 80 54 7 152 99 6
51562-192 133 9 219 162 10 236 178 12 239 182 13
51563-246 186 14 242 186 14 239 182 13 236 178 12
51564-224 166 10 206 145 10 192 133 9 154 121 60
51565- 94 94 94 62 62 62 42 42 42 22 22 22
51566- 14 14 14 6 6 6 0 0 0 0 0 0
51567- 0 0 0 0 0 0 0 0 0 0 0 0
51568- 0 0 0 0 0 0 0 0 0 0 0 0
51569- 0 0 0 0 0 0 0 0 0 0 0 0
51570- 0 0 0 0 0 0 0 0 0 6 6 6
51571- 18 18 18 34 34 34 58 58 58 78 78 78
51572-101 98 89 124 112 88 142 110 46 156 107 11
51573-163 110 8 167 114 7 175 118 6 180 123 7
51574-185 133 11 197 138 11 210 150 10 219 162 10
51575-226 170 11 236 178 12 236 178 12 234 174 13
51576-219 162 10 197 138 11 163 110 8 130 83 6
51577- 91 60 6 10 10 10 2 2 6 2 2 6
51578- 18 18 18 38 38 38 38 38 38 38 38 38
51579- 38 38 38 38 38 38 38 38 38 38 38 38
51580- 38 38 38 38 38 38 26 26 26 2 2 6
51581- 2 2 6 6 6 6 70 47 6 137 92 6
51582-175 118 6 200 144 11 219 162 10 230 174 11
51583-234 174 13 230 174 11 219 162 10 210 150 10
51584-192 133 9 163 110 8 124 112 88 82 82 82
51585- 50 50 50 30 30 30 14 14 14 6 6 6
51586- 0 0 0 0 0 0 0 0 0 0 0 0
51587- 0 0 0 0 0 0 0 0 0 0 0 0
51588- 0 0 0 0 0 0 0 0 0 0 0 0
51589- 0 0 0 0 0 0 0 0 0 0 0 0
51590- 0 0 0 0 0 0 0 0 0 0 0 0
51591- 6 6 6 14 14 14 22 22 22 34 34 34
51592- 42 42 42 58 58 58 74 74 74 86 86 86
51593-101 98 89 122 102 70 130 98 46 121 87 25
51594-137 92 6 152 99 6 163 110 8 180 123 7
51595-185 133 11 197 138 11 206 145 10 200 144 11
51596-180 123 7 156 107 11 130 83 6 104 69 6
51597- 50 34 6 54 54 54 110 110 110 101 98 89
51598- 86 86 86 82 82 82 78 78 78 78 78 78
51599- 78 78 78 78 78 78 78 78 78 78 78 78
51600- 78 78 78 82 82 82 86 86 86 94 94 94
51601-106 106 106 101 101 101 86 66 34 124 80 6
51602-156 107 11 180 123 7 192 133 9 200 144 11
51603-206 145 10 200 144 11 192 133 9 175 118 6
51604-139 102 15 109 106 95 70 70 70 42 42 42
51605- 22 22 22 10 10 10 0 0 0 0 0 0
51606- 0 0 0 0 0 0 0 0 0 0 0 0
51607- 0 0 0 0 0 0 0 0 0 0 0 0
51608- 0 0 0 0 0 0 0 0 0 0 0 0
51609- 0 0 0 0 0 0 0 0 0 0 0 0
51610- 0 0 0 0 0 0 0 0 0 0 0 0
51611- 0 0 0 0 0 0 6 6 6 10 10 10
51612- 14 14 14 22 22 22 30 30 30 38 38 38
51613- 50 50 50 62 62 62 74 74 74 90 90 90
51614-101 98 89 112 100 78 121 87 25 124 80 6
51615-137 92 6 152 99 6 152 99 6 152 99 6
51616-138 86 6 124 80 6 98 70 6 86 66 30
51617-101 98 89 82 82 82 58 58 58 46 46 46
51618- 38 38 38 34 34 34 34 34 34 34 34 34
51619- 34 34 34 34 34 34 34 34 34 34 34 34
51620- 34 34 34 34 34 34 38 38 38 42 42 42
51621- 54 54 54 82 82 82 94 86 76 91 60 6
51622-134 86 6 156 107 11 167 114 7 175 118 6
51623-175 118 6 167 114 7 152 99 6 121 87 25
51624-101 98 89 62 62 62 34 34 34 18 18 18
51625- 6 6 6 0 0 0 0 0 0 0 0 0
51626- 0 0 0 0 0 0 0 0 0 0 0 0
51627- 0 0 0 0 0 0 0 0 0 0 0 0
51628- 0 0 0 0 0 0 0 0 0 0 0 0
51629- 0 0 0 0 0 0 0 0 0 0 0 0
51630- 0 0 0 0 0 0 0 0 0 0 0 0
51631- 0 0 0 0 0 0 0 0 0 0 0 0
51632- 0 0 0 6 6 6 6 6 6 10 10 10
51633- 18 18 18 22 22 22 30 30 30 42 42 42
51634- 50 50 50 66 66 66 86 86 86 101 98 89
51635-106 86 58 98 70 6 104 69 6 104 69 6
51636-104 69 6 91 60 6 82 62 34 90 90 90
51637- 62 62 62 38 38 38 22 22 22 14 14 14
51638- 10 10 10 10 10 10 10 10 10 10 10 10
51639- 10 10 10 10 10 10 6 6 6 10 10 10
51640- 10 10 10 10 10 10 10 10 10 14 14 14
51641- 22 22 22 42 42 42 70 70 70 89 81 66
51642- 80 54 7 104 69 6 124 80 6 137 92 6
51643-134 86 6 116 81 8 100 82 52 86 86 86
51644- 58 58 58 30 30 30 14 14 14 6 6 6
51645- 0 0 0 0 0 0 0 0 0 0 0 0
51646- 0 0 0 0 0 0 0 0 0 0 0 0
51647- 0 0 0 0 0 0 0 0 0 0 0 0
51648- 0 0 0 0 0 0 0 0 0 0 0 0
51649- 0 0 0 0 0 0 0 0 0 0 0 0
51650- 0 0 0 0 0 0 0 0 0 0 0 0
51651- 0 0 0 0 0 0 0 0 0 0 0 0
51652- 0 0 0 0 0 0 0 0 0 0 0 0
51653- 0 0 0 6 6 6 10 10 10 14 14 14
51654- 18 18 18 26 26 26 38 38 38 54 54 54
51655- 70 70 70 86 86 86 94 86 76 89 81 66
51656- 89 81 66 86 86 86 74 74 74 50 50 50
51657- 30 30 30 14 14 14 6 6 6 0 0 0
51658- 0 0 0 0 0 0 0 0 0 0 0 0
51659- 0 0 0 0 0 0 0 0 0 0 0 0
51660- 0 0 0 0 0 0 0 0 0 0 0 0
51661- 6 6 6 18 18 18 34 34 34 58 58 58
51662- 82 82 82 89 81 66 89 81 66 89 81 66
51663- 94 86 66 94 86 76 74 74 74 50 50 50
51664- 26 26 26 14 14 14 6 6 6 0 0 0
51665- 0 0 0 0 0 0 0 0 0 0 0 0
51666- 0 0 0 0 0 0 0 0 0 0 0 0
51667- 0 0 0 0 0 0 0 0 0 0 0 0
51668- 0 0 0 0 0 0 0 0 0 0 0 0
51669- 0 0 0 0 0 0 0 0 0 0 0 0
51670- 0 0 0 0 0 0 0 0 0 0 0 0
51671- 0 0 0 0 0 0 0 0 0 0 0 0
51672- 0 0 0 0 0 0 0 0 0 0 0 0
51673- 0 0 0 0 0 0 0 0 0 0 0 0
51674- 6 6 6 6 6 6 14 14 14 18 18 18
51675- 30 30 30 38 38 38 46 46 46 54 54 54
51676- 50 50 50 42 42 42 30 30 30 18 18 18
51677- 10 10 10 0 0 0 0 0 0 0 0 0
51678- 0 0 0 0 0 0 0 0 0 0 0 0
51679- 0 0 0 0 0 0 0 0 0 0 0 0
51680- 0 0 0 0 0 0 0 0 0 0 0 0
51681- 0 0 0 6 6 6 14 14 14 26 26 26
51682- 38 38 38 50 50 50 58 58 58 58 58 58
51683- 54 54 54 42 42 42 30 30 30 18 18 18
51684- 10 10 10 0 0 0 0 0 0 0 0 0
51685- 0 0 0 0 0 0 0 0 0 0 0 0
51686- 0 0 0 0 0 0 0 0 0 0 0 0
51687- 0 0 0 0 0 0 0 0 0 0 0 0
51688- 0 0 0 0 0 0 0 0 0 0 0 0
51689- 0 0 0 0 0 0 0 0 0 0 0 0
51690- 0 0 0 0 0 0 0 0 0 0 0 0
51691- 0 0 0 0 0 0 0 0 0 0 0 0
51692- 0 0 0 0 0 0 0 0 0 0 0 0
51693- 0 0 0 0 0 0 0 0 0 0 0 0
51694- 0 0 0 0 0 0 0 0 0 6 6 6
51695- 6 6 6 10 10 10 14 14 14 18 18 18
51696- 18 18 18 14 14 14 10 10 10 6 6 6
51697- 0 0 0 0 0 0 0 0 0 0 0 0
51698- 0 0 0 0 0 0 0 0 0 0 0 0
51699- 0 0 0 0 0 0 0 0 0 0 0 0
51700- 0 0 0 0 0 0 0 0 0 0 0 0
51701- 0 0 0 0 0 0 0 0 0 6 6 6
51702- 14 14 14 18 18 18 22 22 22 22 22 22
51703- 18 18 18 14 14 14 10 10 10 6 6 6
51704- 0 0 0 0 0 0 0 0 0 0 0 0
51705- 0 0 0 0 0 0 0 0 0 0 0 0
51706- 0 0 0 0 0 0 0 0 0 0 0 0
51707- 0 0 0 0 0 0 0 0 0 0 0 0
51708- 0 0 0 0 0 0 0 0 0 0 0 0
51709+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51710+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51711+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51712+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51713+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51714+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51715+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51716+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51717+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51718+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51719+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51720+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51721+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51722+4 4 4 4 4 4
51723+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51724+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51725+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51726+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51727+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51728+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51729+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51730+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51731+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51732+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51733+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51734+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51735+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51736+4 4 4 4 4 4
51737+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51738+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51739+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51740+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51741+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51742+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51743+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51744+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51745+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51746+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51747+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51748+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51749+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51750+4 4 4 4 4 4
51751+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51752+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51753+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51754+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51755+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51756+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51757+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51758+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51759+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51760+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51761+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51762+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51763+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51764+4 4 4 4 4 4
51765+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51766+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51767+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51768+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51769+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51770+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51771+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51772+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51773+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51774+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51775+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51777+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51778+4 4 4 4 4 4
51779+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51780+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51781+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51782+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51783+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51784+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51785+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51786+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51787+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51788+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51789+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51791+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51792+4 4 4 4 4 4
51793+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51794+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51795+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51796+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51797+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
51798+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
51799+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51800+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51801+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51802+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
51803+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
51804+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
51805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51806+4 4 4 4 4 4
51807+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51808+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51809+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51810+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51811+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
51812+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
51813+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51814+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51815+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51816+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
51817+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
51818+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
51819+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51820+4 4 4 4 4 4
51821+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51822+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51823+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51824+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51825+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
51826+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
51827+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
51828+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51829+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51830+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
51831+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
51832+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
51833+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
51834+4 4 4 4 4 4
51835+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51838+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
51839+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
51840+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
51841+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
51842+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51843+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
51844+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
51845+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
51846+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
51847+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
51848+4 4 4 4 4 4
51849+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51850+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51852+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
51853+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
51854+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
51855+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
51856+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
51857+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
51858+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
51859+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
51860+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
51861+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
51862+4 4 4 4 4 4
51863+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51864+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51865+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
51866+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
51867+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
51868+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
51869+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
51870+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
51871+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
51872+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
51873+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
51874+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
51875+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
51876+4 4 4 4 4 4
51877+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51878+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51879+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
51880+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
51881+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
51882+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
51883+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
51884+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
51885+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
51886+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
51887+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
51888+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
51889+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
51890+4 4 4 4 4 4
51891+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51892+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51893+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
51894+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
51895+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
51896+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
51897+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
51898+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
51899+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
51900+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
51901+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
51902+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
51903+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
51904+4 4 4 4 4 4
51905+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51906+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51907+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
51908+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
51909+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
51910+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
51911+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
51912+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
51913+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
51914+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
51915+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
51916+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
51917+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
51918+4 4 4 4 4 4
51919+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51920+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51921+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
51922+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
51923+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
51924+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
51925+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
51926+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
51927+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
51928+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
51929+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
51930+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
51931+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
51932+4 4 4 4 4 4
51933+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51934+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
51935+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
51936+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
51937+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
51938+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
51939+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
51940+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
51941+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
51942+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
51943+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
51944+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
51945+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
51946+4 4 4 4 4 4
51947+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51948+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
51949+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
51950+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
51951+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
51952+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
51953+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
51954+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
51955+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
51956+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
51957+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
51958+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
51959+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
51960+0 0 0 4 4 4
51961+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
51962+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
51963+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
51964+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
51965+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
51966+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
51967+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
51968+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
51969+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
51970+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
51971+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
51972+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
51973+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
51974+2 0 0 0 0 0
51975+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
51976+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
51977+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
51978+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
51979+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
51980+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
51981+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
51982+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
51983+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
51984+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
51985+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
51986+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
51987+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
51988+37 38 37 0 0 0
51989+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
51990+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
51991+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
51992+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
51993+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
51994+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
51995+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
51996+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
51997+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
51998+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
51999+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
52000+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
52001+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
52002+85 115 134 4 0 0
52003+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
52004+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
52005+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
52006+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
52007+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
52008+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
52009+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
52010+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
52011+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
52012+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
52013+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
52014+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
52015+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
52016+60 73 81 4 0 0
52017+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
52018+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
52019+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
52020+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
52021+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
52022+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
52023+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
52024+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
52025+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
52026+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
52027+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
52028+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
52029+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
52030+16 19 21 4 0 0
52031+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
52032+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
52033+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
52034+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
52035+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
52036+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
52037+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
52038+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
52039+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
52040+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
52041+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
52042+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
52043+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
52044+4 0 0 4 3 3
52045+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
52046+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
52047+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
52048+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
52049+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
52050+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
52051+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
52052+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
52053+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
52054+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
52055+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
52056+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
52057+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
52058+3 2 2 4 4 4
52059+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
52060+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
52061+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
52062+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
52063+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
52064+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
52065+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
52066+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
52067+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
52068+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
52069+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
52070+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
52071+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
52072+4 4 4 4 4 4
52073+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
52074+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
52075+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
52076+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
52077+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
52078+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
52079+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
52080+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
52081+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
52082+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
52083+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
52084+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
52085+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
52086+4 4 4 4 4 4
52087+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
52088+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
52089+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
52090+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
52091+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
52092+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
52093+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
52094+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
52095+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
52096+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
52097+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
52098+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
52099+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
52100+5 5 5 5 5 5
52101+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
52102+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
52103+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
52104+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
52105+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
52106+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
52107+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
52108+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
52109+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
52110+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
52111+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
52112+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
52113+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
52114+5 5 5 4 4 4
52115+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
52116+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
52117+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
52118+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
52119+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
52120+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
52121+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
52122+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
52123+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
52124+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
52125+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
52126+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
52127+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52128+4 4 4 4 4 4
52129+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
52130+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
52131+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
52132+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
52133+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
52134+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
52135+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
52136+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
52137+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
52138+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
52139+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
52140+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
52141+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52142+4 4 4 4 4 4
52143+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
52144+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
52145+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
52146+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
52147+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
52148+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
52149+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
52150+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
52151+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
52152+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
52153+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
52154+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52155+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52156+4 4 4 4 4 4
52157+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
52158+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
52159+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
52160+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
52161+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
52162+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
52163+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
52164+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
52165+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
52166+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
52167+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
52168+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52170+4 4 4 4 4 4
52171+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
52172+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
52173+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
52174+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
52175+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
52176+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
52177+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
52178+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
52179+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
52180+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
52181+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52182+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52183+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52184+4 4 4 4 4 4
52185+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
52186+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
52187+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
52188+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
52189+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
52190+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
52191+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
52192+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
52193+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
52194+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
52195+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
52196+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52197+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52198+4 4 4 4 4 4
52199+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
52200+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
52201+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
52202+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
52203+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
52204+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
52205+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
52206+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
52207+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
52208+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
52209+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
52210+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52211+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52212+4 4 4 4 4 4
52213+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
52214+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
52215+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
52216+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
52217+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
52218+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
52219+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
52220+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
52221+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
52222+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
52223+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52224+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52226+4 4 4 4 4 4
52227+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
52228+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
52229+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
52230+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
52231+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
52232+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
52233+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
52234+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
52235+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
52236+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
52237+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52238+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52240+4 4 4 4 4 4
52241+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
52242+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
52243+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
52244+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
52245+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
52246+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
52247+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
52248+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
52249+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
52250+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
52251+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52252+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52254+4 4 4 4 4 4
52255+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
52256+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
52257+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
52258+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
52259+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
52260+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
52261+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
52262+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
52263+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
52264+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52265+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52266+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52268+4 4 4 4 4 4
52269+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
52270+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
52271+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
52272+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
52273+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
52274+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
52275+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
52276+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
52277+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
52278+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52279+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52280+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52281+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52282+4 4 4 4 4 4
52283+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
52284+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
52285+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
52286+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
52287+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
52288+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
52289+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
52290+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
52291+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
52292+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52293+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52294+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52295+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52296+4 4 4 4 4 4
52297+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
52298+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
52299+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
52300+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
52301+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
52302+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
52303+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
52304+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
52305+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
52306+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52307+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52308+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52309+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52310+4 4 4 4 4 4
52311+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
52312+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
52313+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
52314+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
52315+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
52316+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
52317+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
52318+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
52319+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
52320+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52321+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52322+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52323+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52324+4 4 4 4 4 4
52325+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
52326+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
52327+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
52328+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
52329+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
52330+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
52331+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
52332+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
52333+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
52334+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52335+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52336+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52337+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52338+4 4 4 4 4 4
52339+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
52340+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
52341+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
52342+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
52343+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
52344+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
52345+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
52346+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
52347+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
52348+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52349+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52350+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52351+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52352+4 4 4 4 4 4
52353+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
52354+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
52355+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
52356+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
52357+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
52358+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
52359+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
52360+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
52361+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
52362+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52363+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52364+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52365+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52366+4 4 4 4 4 4
52367+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
52368+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
52369+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
52370+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
52371+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
52372+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
52373+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
52374+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
52375+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
52376+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52377+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52378+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52379+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52380+4 4 4 4 4 4
52381+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
52382+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
52383+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
52384+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
52385+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
52386+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
52387+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
52388+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
52389+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
52390+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52391+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52392+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52393+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52394+4 4 4 4 4 4
52395+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
52396+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
52397+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
52398+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
52399+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
52400+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
52401+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
52402+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
52403+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
52404+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52405+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52406+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52407+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52408+4 4 4 4 4 4
52409+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
52410+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
52411+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
52412+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
52413+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
52414+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
52415+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
52416+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
52417+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
52418+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52419+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52420+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52421+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52422+4 4 4 4 4 4
52423+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
52424+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
52425+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
52426+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
52427+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
52428+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
52429+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
52430+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
52431+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
52432+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52433+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52434+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52435+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52436+4 4 4 4 4 4
52437+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
52438+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
52439+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
52440+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
52441+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
52442+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
52443+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
52444+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
52445+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
52446+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52447+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52448+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52449+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52450+4 4 4 4 4 4
52451+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
52452+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
52453+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
52454+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
52455+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
52456+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
52457+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
52458+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
52459+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
52460+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52461+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52462+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52463+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52464+4 4 4 4 4 4
52465+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
52466+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
52467+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
52468+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
52469+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
52470+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
52471+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
52472+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
52473+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
52474+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
52475+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52476+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52477+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52478+4 4 4 4 4 4
52479+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
52480+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
52481+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
52482+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
52483+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
52484+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
52485+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
52486+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
52487+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
52488+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
52489+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52490+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52491+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52492+4 4 4 4 4 4
52493+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
52494+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
52495+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
52496+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
52497+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
52498+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
52499+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52500+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
52501+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
52502+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
52503+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
52504+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52505+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52506+4 4 4 4 4 4
52507+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
52508+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
52509+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
52510+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
52511+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
52512+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
[Several hundred further rows of raw "R G B" pixel triplets from the same image-data hunk are omitted here; they appear to be the patch's replacement boot-logo bitmap (drivers/video/logo) and carry no code changes. The code diffs resume below.]
52829diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c
52830index fe92eed..106e085 100644
52831--- a/drivers/video/mb862xx/mb862xxfb_accel.c
52832+++ b/drivers/video/mb862xx/mb862xxfb_accel.c
52833@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
52834 struct mb862xxfb_par *par = info->par;
52835
52836 if (info->var.bits_per_pixel == 32) {
52837- info->fbops->fb_fillrect = cfb_fillrect;
52838- info->fbops->fb_copyarea = cfb_copyarea;
52839- info->fbops->fb_imageblit = cfb_imageblit;
52840+ pax_open_kernel();
52841+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
52842+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
52843+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
52844+ pax_close_kernel();
52845 } else {
52846 outreg(disp, GC_L0EM, 3);
52847- info->fbops->fb_fillrect = mb86290fb_fillrect;
52848- info->fbops->fb_copyarea = mb86290fb_copyarea;
52849- info->fbops->fb_imageblit = mb86290fb_imageblit;
52850+ pax_open_kernel();
52851+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
52852+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
52853+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
52854+ pax_close_kernel();
52855 }
52856 outreg(draw, GDC_REG_DRAW_BASE, 0);
52857 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
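
The hunk above, and the framebuffer-driver hunks that follow, all apply one pattern: under PaX KERNEXEC with the constify plugin, ops tables such as struct fb_ops are const-qualified and live in read-only memory, so any late reassignment of their function pointers must be bracketed by pax_open_kernel()/pax_close_kernel() and written through a *(void **)& cast that sidesteps the compile-time const. What follows is a minimal userspace analogue of the idea, not the kernel mechanism (the kernel toggles hardware write protection; mprotect() stands in for that here):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct ops { void (*render)(void); };

static void sw_render(void) { puts("software path"); }
static void hw_render(void) { puts("accelerated path"); }

static struct ops *table;   /* kept PROT_READ except while being updated */
static size_t pagesz;

static void open_table(void)  { mprotect(table, pagesz, PROT_READ | PROT_WRITE); }
static void close_table(void) { mprotect(table, pagesz, PROT_READ); }

int main(void)
{
    pagesz = (size_t)sysconf(_SC_PAGESIZE);
    table = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (table == MAP_FAILED)
        return 1;
    table->render = sw_render;
    close_table();               /* table is now effectively read-only */

    open_table();                /* analogue of pax_open_kernel() */
    table->render = hw_render;   /* the *(void **)&ops->fb_... = ... update */
    close_table();               /* analogue of pax_close_kernel() */

    table->render();
    return 0;
}
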
52858diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
52859index ff22871..b129bed 100644
52860--- a/drivers/video/nvidia/nvidia.c
52861+++ b/drivers/video/nvidia/nvidia.c
52862@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
52863 info->fix.line_length = (info->var.xres_virtual *
52864 info->var.bits_per_pixel) >> 3;
52865 if (info->var.accel_flags) {
52866- info->fbops->fb_imageblit = nvidiafb_imageblit;
52867- info->fbops->fb_fillrect = nvidiafb_fillrect;
52868- info->fbops->fb_copyarea = nvidiafb_copyarea;
52869- info->fbops->fb_sync = nvidiafb_sync;
52870+ pax_open_kernel();
52871+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
52872+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
52873+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
52874+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
52875+ pax_close_kernel();
52876 info->pixmap.scan_align = 4;
52877 info->flags &= ~FBINFO_HWACCEL_DISABLED;
52878 info->flags |= FBINFO_READS_FAST;
52879 NVResetGraphics(info);
52880 } else {
52881- info->fbops->fb_imageblit = cfb_imageblit;
52882- info->fbops->fb_fillrect = cfb_fillrect;
52883- info->fbops->fb_copyarea = cfb_copyarea;
52884- info->fbops->fb_sync = NULL;
52885+ pax_open_kernel();
52886+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
52887+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
52888+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
52889+ *(void **)&info->fbops->fb_sync = NULL;
52890+ pax_close_kernel();
52891 info->pixmap.scan_align = 1;
52892 info->flags |= FBINFO_HWACCEL_DISABLED;
52893 info->flags &= ~FBINFO_READS_FAST;
52894@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
52895 info->pixmap.size = 8 * 1024;
52896 info->pixmap.flags = FB_PIXMAP_SYSTEM;
52897
52898- if (!hwcur)
52899- info->fbops->fb_cursor = NULL;
52900+ if (!hwcur) {
52901+ pax_open_kernel();
52902+ *(void **)&info->fbops->fb_cursor = NULL;
52903+ pax_close_kernel();
52904+ }
52905
52906 info->var.accel_flags = (!noaccel);
52907
52908diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
52909index fafe7c9..93197b9 100644
52910--- a/drivers/video/omap2/dss/display.c
52911+++ b/drivers/video/omap2/dss/display.c
52912@@ -137,12 +137,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
52913 snprintf(dssdev->alias, sizeof(dssdev->alias),
52914 "display%d", disp_num_counter++);
52915
52916+ pax_open_kernel();
52917 if (drv && drv->get_resolution == NULL)
52918- drv->get_resolution = omapdss_default_get_resolution;
52919+ *(void **)&drv->get_resolution = omapdss_default_get_resolution;
52920 if (drv && drv->get_recommended_bpp == NULL)
52921- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
52922+ *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
52923 if (drv && drv->get_timings == NULL)
52924- drv->get_timings = omapdss_default_get_timings;
52925+ *(void **)&drv->get_timings = omapdss_default_get_timings;
52926+ pax_close_kernel();
52927
52928 mutex_lock(&panel_list_mutex);
52929 list_add_tail(&dssdev->panel_list, &panel_list);
52930diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
52931index 05c2dc3..ea1f391 100644
52932--- a/drivers/video/s1d13xxxfb.c
52933+++ b/drivers/video/s1d13xxxfb.c
52934@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
52935
52936 switch(prod_id) {
52937 case S1D13506_PROD_ID: /* activate acceleration */
52938- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
52939- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
52940+ pax_open_kernel();
52941+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
52942+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
52943+ pax_close_kernel();
52944 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
52945 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
52946 break;
52947diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
52948index e188ada..aac63c8 100644
52949--- a/drivers/video/smscufx.c
52950+++ b/drivers/video/smscufx.c
52951@@ -1175,7 +1175,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
52952 fb_deferred_io_cleanup(info);
52953 kfree(info->fbdefio);
52954 info->fbdefio = NULL;
52955- info->fbops->fb_mmap = ufx_ops_mmap;
52956+ pax_open_kernel();
52957+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
52958+ pax_close_kernel();
52959 }
52960
52961 pr_debug("released /dev/fb%d user=%d count=%d",
52962diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
52963index d2e5bc3..4cb05d1 100644
52964--- a/drivers/video/udlfb.c
52965+++ b/drivers/video/udlfb.c
52966@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
52967 dlfb_urb_completion(urb);
52968
52969 error:
52970- atomic_add(bytes_sent, &dev->bytes_sent);
52971- atomic_add(bytes_identical, &dev->bytes_identical);
52972- atomic_add(width*height*2, &dev->bytes_rendered);
52973+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
52974+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
52975+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
52976 end_cycles = get_cycles();
52977- atomic_add(((unsigned int) ((end_cycles - start_cycles)
52978+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
52979 >> 10)), /* Kcycles */
52980 &dev->cpu_kcycles_used);
52981
52982@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
52983 dlfb_urb_completion(urb);
52984
52985 error:
52986- atomic_add(bytes_sent, &dev->bytes_sent);
52987- atomic_add(bytes_identical, &dev->bytes_identical);
52988- atomic_add(bytes_rendered, &dev->bytes_rendered);
52989+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
52990+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
52991+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
52992 end_cycles = get_cycles();
52993- atomic_add(((unsigned int) ((end_cycles - start_cycles)
52994+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
52995 >> 10)), /* Kcycles */
52996 &dev->cpu_kcycles_used);
52997 }
52998@@ -993,7 +993,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
52999 fb_deferred_io_cleanup(info);
53000 kfree(info->fbdefio);
53001 info->fbdefio = NULL;
53002- info->fbops->fb_mmap = dlfb_ops_mmap;
53003+ pax_open_kernel();
53004+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
53005+ pax_close_kernel();
53006 }
53007
53008 pr_warn("released /dev/fb%d user=%d count=%d\n",
53009@@ -1376,7 +1378,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
53010 struct fb_info *fb_info = dev_get_drvdata(fbdev);
53011 struct dlfb_data *dev = fb_info->par;
53012 return snprintf(buf, PAGE_SIZE, "%u\n",
53013- atomic_read(&dev->bytes_rendered));
53014+ atomic_read_unchecked(&dev->bytes_rendered));
53015 }
53016
53017 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
53018@@ -1384,7 +1386,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
53019 struct fb_info *fb_info = dev_get_drvdata(fbdev);
53020 struct dlfb_data *dev = fb_info->par;
53021 return snprintf(buf, PAGE_SIZE, "%u\n",
53022- atomic_read(&dev->bytes_identical));
53023+ atomic_read_unchecked(&dev->bytes_identical));
53024 }
53025
53026 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
53027@@ -1392,7 +1394,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
53028 struct fb_info *fb_info = dev_get_drvdata(fbdev);
53029 struct dlfb_data *dev = fb_info->par;
53030 return snprintf(buf, PAGE_SIZE, "%u\n",
53031- atomic_read(&dev->bytes_sent));
53032+ atomic_read_unchecked(&dev->bytes_sent));
53033 }
53034
53035 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
53036@@ -1400,7 +1402,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
53037 struct fb_info *fb_info = dev_get_drvdata(fbdev);
53038 struct dlfb_data *dev = fb_info->par;
53039 return snprintf(buf, PAGE_SIZE, "%u\n",
53040- atomic_read(&dev->cpu_kcycles_used));
53041+ atomic_read_unchecked(&dev->cpu_kcycles_used));
53042 }
53043
53044 static ssize_t edid_show(
53045@@ -1460,10 +1462,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
53046 struct fb_info *fb_info = dev_get_drvdata(fbdev);
53047 struct dlfb_data *dev = fb_info->par;
53048
53049- atomic_set(&dev->bytes_rendered, 0);
53050- atomic_set(&dev->bytes_identical, 0);
53051- atomic_set(&dev->bytes_sent, 0);
53052- atomic_set(&dev->cpu_kcycles_used, 0);
53053+ atomic_set_unchecked(&dev->bytes_rendered, 0);
53054+ atomic_set_unchecked(&dev->bytes_identical, 0);
53055+ atomic_set_unchecked(&dev->bytes_sent, 0);
53056+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
53057
53058 return count;
53059 }
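
The udlfb changes above show PaX's REFCOUNT split: plain atomic_t arithmetic traps on signed overflow so reference-count wraps cannot be exploited, while counters that may legitimately wrap, such as these rendering statistics, move to the atomic_*_unchecked variants and keep ordinary modular arithmetic. A rough userspace model of the two flavours follows; the in-kernel versions are per-arch assembly, and the overflow check below is simplified and not atomic with the add:

#include <stdio.h>
#include <stdlib.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

/* checked flavour: detect signed overflow, as REFCOUNT would */
static void atomic_add_model(int i, atomic_t *v)
{
    int old = __atomic_fetch_add(&v->counter, i, __ATOMIC_RELAXED);
    int sum;
    if (__builtin_add_overflow(old, i, &sum))
        abort();   /* the real code undoes the add and reports instead */
}

/* unchecked flavour: plain wraparound, no detection */
static void atomic_add_unchecked_model(int i, atomic_unchecked_t *v)
{
    __atomic_fetch_add(&v->counter, i, __ATOMIC_RELAXED);
}

int main(void)
{
    atomic_unchecked_t bytes_sent = { 0 };
    atomic_add_unchecked_model(4096, &bytes_sent);
    printf("bytes_sent=%d\n", bytes_sent.counter);

    atomic_t refs = { 0 };
    atomic_add_model(1, &refs);
    return 0;
}
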
53060diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
53061index 7aec6f3..e3b2d55 100644
53062--- a/drivers/video/uvesafb.c
53063+++ b/drivers/video/uvesafb.c
53064@@ -19,6 +19,7 @@
53065 #include <linux/io.h>
53066 #include <linux/mutex.h>
53067 #include <linux/slab.h>
53068+#include <linux/moduleloader.h>
53069 #include <video/edid.h>
53070 #include <video/uvesafb.h>
53071 #ifdef CONFIG_X86
53072@@ -566,10 +567,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
53073 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
53074 par->pmi_setpal = par->ypan = 0;
53075 } else {
53076+
53077+#ifdef CONFIG_PAX_KERNEXEC
53078+#ifdef CONFIG_MODULES
53079+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
53080+#endif
53081+ if (!par->pmi_code) {
53082+ par->pmi_setpal = par->ypan = 0;
53083+ return 0;
53084+ }
53085+#endif
53086+
53087 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
53088 + task->t.regs.edi);
53089+
53090+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
53091+ pax_open_kernel();
53092+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
53093+ pax_close_kernel();
53094+
53095+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
53096+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
53097+#else
53098 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
53099 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
53100+#endif
53101+
53102 printk(KERN_INFO "uvesafb: protected mode interface info at "
53103 "%04x:%04x\n",
53104 (u16)task->t.regs.es, (u16)task->t.regs.edi);
53105@@ -814,13 +837,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
53106 par->ypan = ypan;
53107
53108 if (par->pmi_setpal || par->ypan) {
53109+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
53110 if (__supported_pte_mask & _PAGE_NX) {
53111 par->pmi_setpal = par->ypan = 0;
53112 printk(KERN_WARNING "uvesafb: NX protection is active, "
53113 "better not use the PMI.\n");
53114- } else {
53115+ } else
53116+#endif
53117 uvesafb_vbe_getpmi(task, par);
53118- }
53119 }
53120 #else
53121 /* The protected mode interface is not available on non-x86. */
53122@@ -1454,8 +1478,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
53123 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
53124
53125 /* Disable blanking if the user requested so. */
53126- if (!blank)
53127- info->fbops->fb_blank = NULL;
53128+ if (!blank) {
53129+ pax_open_kernel();
53130+ *(void **)&info->fbops->fb_blank = NULL;
53131+ pax_close_kernel();
53132+ }
53133
53134 /*
53135 * Find out how much IO memory is required for the mode with
53136@@ -1531,8 +1558,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
53137 info->flags = FBINFO_FLAG_DEFAULT |
53138 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
53139
53140- if (!par->ypan)
53141- info->fbops->fb_pan_display = NULL;
53142+ if (!par->ypan) {
53143+ pax_open_kernel();
53144+ *(void **)&info->fbops->fb_pan_display = NULL;
53145+ pax_close_kernel();
53146+ }
53147 }
53148
53149 static void uvesafb_init_mtrr(struct fb_info *info)
53150@@ -1796,6 +1826,11 @@ out:
53151 if (par->vbe_modes)
53152 kfree(par->vbe_modes);
53153
53154+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
53155+ if (par->pmi_code)
53156+ module_free_exec(NULL, par->pmi_code);
53157+#endif
53158+
53159 framebuffer_release(info);
53160 return err;
53161 }
53162@@ -1823,6 +1858,12 @@ static int uvesafb_remove(struct platform_device *dev)
53163 kfree(par->vbe_state_orig);
53164 if (par->vbe_state_saved)
53165 kfree(par->vbe_state_saved);
53166+
53167+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
53168+ if (par->pmi_code)
53169+ module_free_exec(NULL, par->pmi_code);
53170+#endif
53171+
53172 }
53173
53174 framebuffer_release(info);
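
Under KERNEXEC the uvesafb code above cannot call the VESA protected-mode interface where the BIOS left it: the PMI bytes are copied into an executable allocation from module_alloc_exec() and the entry pointers are recomputed against the copy (via ktva_ktla()). The same copy-then-remap-executable discipline applies in any W^X environment; here is a userspace sketch, assuming x86-64 Linux, where the copied "code" is just mov eax,42; ret:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    /* x86-64 machine code: mov eax, 42 ; ret */
    static const unsigned char blob[] = { 0xb8, 0x2a, 0x00, 0x00, 0x00, 0xc3 };
    size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);

    unsigned char *code = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (code == MAP_FAILED)
        return 1;

    memcpy(code, blob, sizeof(blob));               /* analogue of the pmi_code memcpy */
    mprotect(code, pagesz, PROT_READ | PROT_EXEC);  /* never writable and executable at once */

    int (*pmi)(void) = (int (*)(void))code;
    printf("%d\n", pmi());                          /* prints 42 */
    return 0;
}
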
53175diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
53176index bd83233..7d8a5aa 100644
53177--- a/drivers/video/vesafb.c
53178+++ b/drivers/video/vesafb.c
53179@@ -9,6 +9,7 @@
53180 */
53181
53182 #include <linux/module.h>
53183+#include <linux/moduleloader.h>
53184 #include <linux/kernel.h>
53185 #include <linux/errno.h>
53186 #include <linux/string.h>
53187@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
53188 static int vram_total; /* Set total amount of memory */
53189 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
53190 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
53191-static void (*pmi_start)(void) __read_mostly;
53192-static void (*pmi_pal) (void) __read_mostly;
53193+static void (*pmi_start)(void) __read_only;
53194+static void (*pmi_pal) (void) __read_only;
53195 static int depth __read_mostly;
53196 static int vga_compat __read_mostly;
53197 /* --------------------------------------------------------------------- */
53198@@ -234,6 +235,7 @@ static int vesafb_probe(struct platform_device *dev)
53199 unsigned int size_remap;
53200 unsigned int size_total;
53201 char *option = NULL;
53202+ void *pmi_code = NULL;
53203
53204 /* ignore error return of fb_get_options */
53205 fb_get_options("vesafb", &option);
53206@@ -280,10 +282,6 @@ static int vesafb_probe(struct platform_device *dev)
53207 size_remap = size_total;
53208 vesafb_fix.smem_len = size_remap;
53209
53210-#ifndef __i386__
53211- screen_info.vesapm_seg = 0;
53212-#endif
53213-
53214 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
53215 printk(KERN_WARNING
53216 "vesafb: cannot reserve video memory at 0x%lx\n",
53217@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
53218 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
53219 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
53220
53221+#ifdef __i386__
53222+
53223+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
53224+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
53225+ if (!pmi_code)
53226+#elif !defined(CONFIG_PAX_KERNEXEC)
53227+ if (0)
53228+#endif
53229+
53230+#endif
53231+ screen_info.vesapm_seg = 0;
53232+
53233 if (screen_info.vesapm_seg) {
53234- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
53235- screen_info.vesapm_seg,screen_info.vesapm_off);
53236+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
53237+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
53238 }
53239
53240 if (screen_info.vesapm_seg < 0xc000)
53241@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
53242
53243 if (ypan || pmi_setpal) {
53244 unsigned short *pmi_base;
53245+
53246 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
53247- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
53248- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
53249+
53250+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
53251+ pax_open_kernel();
53252+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
53253+#else
53254+ pmi_code = pmi_base;
53255+#endif
53256+
53257+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
53258+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
53259+
53260+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
53261+ pmi_start = ktva_ktla(pmi_start);
53262+ pmi_pal = ktva_ktla(pmi_pal);
53263+ pax_close_kernel();
53264+#endif
53265+
53266 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
53267 if (pmi_base[3]) {
53268 printk(KERN_INFO "vesafb: pmi: ports = ");
53269@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
53270 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
53271 (ypan ? FBINFO_HWACCEL_YPAN : 0);
53272
53273- if (!ypan)
53274- info->fbops->fb_pan_display = NULL;
53275+ if (!ypan) {
53276+ pax_open_kernel();
53277+ *(void **)&info->fbops->fb_pan_display = NULL;
53278+ pax_close_kernel();
53279+ }
53280
53281 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
53282 err = -ENOMEM;
53283@@ -493,6 +522,11 @@ static int vesafb_probe(struct platform_device *dev)
53284 info->node, info->fix.id);
53285 return 0;
53286 err:
53287+
53288+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
53289+ module_free_exec(NULL, pmi_code);
53290+#endif
53291+
53292 if (info->screen_base)
53293 iounmap(info->screen_base);
53294 framebuffer_release(info);
53295diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
53296index 88714ae..16c2e11 100644
53297--- a/drivers/video/via/via_clock.h
53298+++ b/drivers/video/via/via_clock.h
53299@@ -56,7 +56,7 @@ struct via_clock {
53300
53301 void (*set_engine_pll_state)(u8 state);
53302 void (*set_engine_pll)(struct via_pll_config config);
53303-};
53304+} __no_const;
53305
53306
53307 static inline u32 get_pll_internal_frequency(u32 ref_freq,
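
struct via_clock gains __no_const above: the constify plugin would otherwise turn a struct consisting only of function pointers into a read-only object, but this one is filled in at probe time per chipset and must stay writable. The annotation has to compile away when the plugin is not in use; a plausible fallback shape follows (the attribute spelling belongs to the plugin and is an assumption here):

/* only the constify plugin interprets the attribute */
#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))
#else
#define __no_const
#endif

/* stays writable so probe code can install chip-specific callbacks */
struct via_clock_like {
    void (*set_primary_pll_state)(unsigned char state);
    void (*set_engine_pll_state)(unsigned char state);
} __no_const;

int main(void)
{
    struct via_clock_like clk = { 0 };
    (void)clk;
    return 0;
}
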
53308diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
53309index fef20db..d28b1ab 100644
53310--- a/drivers/xen/xenfs/xenstored.c
53311+++ b/drivers/xen/xenfs/xenstored.c
53312@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
53313 static int xsd_kva_open(struct inode *inode, struct file *file)
53314 {
53315 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
53316+#ifdef CONFIG_GRKERNSEC_HIDESYM
53317+ NULL);
53318+#else
53319 xen_store_interface);
53320+#endif
53321+
53322 if (!file->private_data)
53323 return -ENOMEM;
53324 return 0;
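
xsd_kva_open() above hands the address of the xenstore interface page to userspace as a string; with GRKERNSEC_HIDESYM enabled the patch formats a NULL pointer instead, since any kernel address visible to unprivileged readers helps defeat address-space secrecy. A minimal model of the gate, with illustrative names rather than the kernel API:

#include <stdio.h>

static int hide_kernel_pointers = 1;   /* stand-in for CONFIG_GRKERNSEC_HIDESYM */

static void format_iface(char *buf, size_t len, void *kptr)
{
    /* print a constant instead of the real address when hiding is on */
    snprintf(buf, len, "0x%p", hide_kernel_pointers ? NULL : kptr);
}

int main(void)
{
    char buf[32];
    int page;
    format_iface(buf, sizeof(buf), &page);
    puts(buf);   /* "0x(nil)" on glibc with hiding enabled */
    return 0;
}
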
53325diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
53326index 9ff073f..05cef23 100644
53327--- a/fs/9p/vfs_addr.c
53328+++ b/fs/9p/vfs_addr.c
53329@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
53330
53331 retval = v9fs_file_write_internal(inode,
53332 v9inode->writeback_fid,
53333- (__force const char __user *)buffer,
53334+ (const char __force_user *)buffer,
53335 len, &offset, 0);
53336 if (retval > 0)
53337 retval = 0;
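
The 9p write above pushes a kernel buffer through a path typed for user pointers; grsecurity's stricter checker distinguishes a plain __force cast from __force_user, which explicitly blesses the kernel-to-__user reinterpretation. A sketch of how such sparse-style address-space annotations are declared (the spelling of the grsec __force_user variant is an assumption; the macros are no-ops outside a checker run):

/* sparse-style address-space annotations; empty for a normal compile */
#ifdef __CHECKER__
#define __user        __attribute__((noderef, address_space(1)))
#define __force       __attribute__((force))
#define __force_user  __force __user
#else
#define __user
#define __force
#define __force_user
#endif

static long write_to_user_api(const char __user *buf, unsigned long len)
{
    (void)buf;
    return (long)len;   /* stand-in for a file op expecting a user pointer */
}

int main(void)
{
    char kbuf[8] = "payload";
    /* deliberate reinterpretation, made explicit for the checker */
    return (int)write_to_user_api((const char __force_user *)kbuf, sizeof(kbuf)) == 8 ? 0 : 1;
}
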
53338diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
53339index 94de6d1..8d81256 100644
53340--- a/fs/9p/vfs_inode.c
53341+++ b/fs/9p/vfs_inode.c
53342@@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
53343 void
53344 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
53345 {
53346- char *s = nd_get_link(nd);
53347+ const char *s = nd_get_link(nd);
53348
53349 p9_debug(P9_DEBUG_VFS, " %s %s\n",
53350 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
53351diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
53352index 370b24c..ff0be7b 100644
53353--- a/fs/Kconfig.binfmt
53354+++ b/fs/Kconfig.binfmt
53355@@ -103,7 +103,7 @@ config HAVE_AOUT
53356
53357 config BINFMT_AOUT
53358 tristate "Kernel support for a.out and ECOFF binaries"
53359- depends on HAVE_AOUT
53360+ depends on HAVE_AOUT && BROKEN
53361 ---help---
53362 A.out (Assembler.OUTput) is a set of formats for libraries and
53363 executables used in the earliest versions of UNIX. Linux used
53364diff --git a/fs/afs/inode.c b/fs/afs/inode.c
53365index 789bc25..fafaeea 100644
53366--- a/fs/afs/inode.c
53367+++ b/fs/afs/inode.c
53368@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
53369 struct afs_vnode *vnode;
53370 struct super_block *sb;
53371 struct inode *inode;
53372- static atomic_t afs_autocell_ino;
53373+ static atomic_unchecked_t afs_autocell_ino;
53374
53375 _enter("{%x:%u},%*.*s,",
53376 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
53377@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
53378 data.fid.unique = 0;
53379 data.fid.vnode = 0;
53380
53381- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
53382+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
53383 afs_iget5_autocell_test, afs_iget5_set,
53384 &data);
53385 if (!inode) {
53386diff --git a/fs/aio.c b/fs/aio.c
53387index 6efb7f6..ec354de 100644
53388--- a/fs/aio.c
53389+++ b/fs/aio.c
53390@@ -338,7 +338,7 @@ static int aio_setup_ring(struct kioctx *ctx)
53391 size += sizeof(struct io_event) * nr_events;
53392
53393 nr_pages = PFN_UP(size);
53394- if (nr_pages < 0)
53395+ if (nr_pages <= 0)
53396 return -EINVAL;
53397
53398 file = aio_private_file(ctx, nr_pages);
53399@@ -652,7 +652,8 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
53400 aio_nr += ctx->max_reqs;
53401 spin_unlock(&aio_nr_lock);
53402
53403- percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */
53404+ percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */
53405+ percpu_ref_get(&ctx->reqs); /* free_ioctx_users() will drop this */
53406
53407 err = ioctx_add_table(ctx, mm);
53408 if (err)
53409diff --git a/fs/attr.c b/fs/attr.c
53410index 1449adb..a2038c2 100644
53411--- a/fs/attr.c
53412+++ b/fs/attr.c
53413@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
53414 unsigned long limit;
53415
53416 limit = rlimit(RLIMIT_FSIZE);
53417+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
53418 if (limit != RLIM_INFINITY && offset > limit)
53419 goto out_sig;
53420 if (offset > inode->i_sb->s_maxbytes)
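
gr_learn_resource() above is grsecurity's RBAC learning hook: call sites report how much of a resource a task asked for, so that learning mode can later emit a policy with per-subject limits matching observed behaviour. A rough userspace model of the hook's shape follows; the real signature and logging live in grsecurity proper, and the details here are approximate:

#include <stdio.h>
#include <sys/resource.h>

/* record that a task wanted `wanted` of resource `res`; `gt` says the
 * caller compares with '>' rather than '>=' against the limit */
static void gr_learn_resource_model(const char *comm, int res,
                                    unsigned long wanted, int gt)
{
    struct rlimit rl;

    if (getrlimit(res, &rl) != 0 || rl.rlim_cur == RLIM_INFINITY)
        return;
    if (gt ? wanted > rl.rlim_cur : wanted >= rl.rlim_cur)
        fprintf(stderr, "learn: %s res=%d wanted=%lu\n", comm, res, wanted);
}

int main(void)
{
    gr_learn_resource_model("demo", RLIMIT_FSIZE, 1UL << 40, 1);
    return 0;
}
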
53421diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
53422index 689e40d..515cac5 100644
53423--- a/fs/autofs4/waitq.c
53424+++ b/fs/autofs4/waitq.c
53425@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
53426 {
53427 unsigned long sigpipe, flags;
53428 mm_segment_t fs;
53429- const char *data = (const char *)addr;
53430+ const char __user *data = (const char __force_user *)addr;
53431 ssize_t wr = 0;
53432
53433 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
53434@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
53435 return 1;
53436 }
53437
53438+#ifdef CONFIG_GRKERNSEC_HIDESYM
53439+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
53440+#endif
53441+
53442 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
53443 enum autofs_notify notify)
53444 {
53445@@ -373,7 +377,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
53446
53447 /* If this is a direct mount request create a dummy name */
53448 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
53449+#ifdef CONFIG_GRKERNSEC_HIDESYM
53450+ /* this name does get written to userland via autofs4_write() */
53451+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
53452+#else
53453 qstr.len = sprintf(name, "%p", dentry);
53454+#endif
53455 else {
53456 qstr.len = autofs4_getpath(sbi, dentry, &name);
53457 if (!qstr.len) {
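
For direct mounts autofs4 built the wait-queue name from the dentry pointer itself, and that string travels to the daemon through autofs4_write(); under HIDESYM the patch substitutes a monotonically increasing counter so no kernel heap address crosses the boundary. The substitution is small enough to model whole (sketch):

#include <stdio.h>

/* a global, atomically bumped id replaces "%p" on the dentry */
static unsigned int autofs_dummy_name_id;

static int make_dummy_name(char *name, size_t len)
{
    unsigned int id = __atomic_add_fetch(&autofs_dummy_name_id, 1,
                                         __ATOMIC_RELAXED);
    return snprintf(name, len, "%08x", id);
}

int main(void)
{
    char name[16];
    make_dummy_name(name, sizeof(name));
    puts(name);   /* "00000001" on the first call */
    return 0;
}
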
53458diff --git a/fs/befs/endian.h b/fs/befs/endian.h
53459index 2722387..56059b5 100644
53460--- a/fs/befs/endian.h
53461+++ b/fs/befs/endian.h
53462@@ -11,7 +11,7 @@
53463
53464 #include <asm/byteorder.h>
53465
53466-static inline u64
53467+static inline u64 __intentional_overflow(-1)
53468 fs64_to_cpu(const struct super_block *sb, fs64 n)
53469 {
53470 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
53471@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
53472 return (__force fs64)cpu_to_be64(n);
53473 }
53474
53475-static inline u32
53476+static inline u32 __intentional_overflow(-1)
53477 fs32_to_cpu(const struct super_block *sb, fs32 n)
53478 {
53479 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
53480@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
53481 return (__force fs32)cpu_to_be32(n);
53482 }
53483
53484-static inline u16
53485+static inline u16 __intentional_overflow(-1)
53486 fs16_to_cpu(const struct super_block *sb, fs16 n)
53487 {
53488 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
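
The three befs endian helpers above gain __intentional_overflow(-1): the size_overflow plugin instruments integer arithmetic and rejects unexpected truncation or wraparound, and byte-order conversion legitimately shuffles every bit of the value, so these functions are whitelisted. Outside a plugin build the marker must expand to nothing; a plausible fallback plus a usage sketch (the attribute spelling is the plugin's, assumed here):

/* only the size_overflow plugin interprets the attribute */
#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)
#endif

#include <stdint.h>

/* whitelisted like fs16_to_cpu(): a byte swap uses the full value range */
static inline uint16_t __intentional_overflow(-1) swab16(uint16_t x)
{
    return (uint16_t)((x << 8) | (x >> 8));
}

int main(void)
{
    return swab16(0x1234) == 0x3412 ? 0 : 1;
}
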
53489diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
53490index e9c75e2..1baece1 100644
53491--- a/fs/befs/linuxvfs.c
53492+++ b/fs/befs/linuxvfs.c
53493@@ -514,7 +514,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
53494 {
53495 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
53496 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
53497- char *link = nd_get_link(nd);
53498+ const char *link = nd_get_link(nd);
53499 if (!IS_ERR(link))
53500 kfree(link);
53501 }
53502diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
53503index 89dec7f..361b0d75 100644
53504--- a/fs/binfmt_aout.c
53505+++ b/fs/binfmt_aout.c
53506@@ -16,6 +16,7 @@
53507 #include <linux/string.h>
53508 #include <linux/fs.h>
53509 #include <linux/file.h>
53510+#include <linux/security.h>
53511 #include <linux/stat.h>
53512 #include <linux/fcntl.h>
53513 #include <linux/ptrace.h>
53514@@ -59,6 +60,8 @@ static int aout_core_dump(struct coredump_params *cprm)
53515 #endif
53516 # define START_STACK(u) ((void __user *)u.start_stack)
53517
53518+ memset(&dump, 0, sizeof(dump));
53519+
53520 fs = get_fs();
53521 set_fs(KERNEL_DS);
53522 has_dumped = 1;
53523@@ -69,10 +72,12 @@ static int aout_core_dump(struct coredump_params *cprm)
53524
53525 /* If the size of the dump file exceeds the rlimit, then see what would happen
53526 if we wrote the stack, but not the data area. */
53527+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
53528 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
53529 dump.u_dsize = 0;
53530
53531 /* Make sure we have enough room to write the stack and data areas. */
53532+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
53533 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
53534 dump.u_ssize = 0;
53535
53536@@ -233,6 +238,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
53537 rlim = rlimit(RLIMIT_DATA);
53538 if (rlim >= RLIM_INFINITY)
53539 rlim = ~0;
53540+
53541+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
53542 if (ex.a_data + ex.a_bss > rlim)
53543 return -ENOMEM;
53544
53545@@ -265,6 +272,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
53546
53547 install_exec_creds(bprm);
53548
53549+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53550+ current->mm->pax_flags = 0UL;
53551+#endif
53552+
53553+#ifdef CONFIG_PAX_PAGEEXEC
53554+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
53555+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
53556+
53557+#ifdef CONFIG_PAX_EMUTRAMP
53558+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
53559+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
53560+#endif
53561+
53562+#ifdef CONFIG_PAX_MPROTECT
53563+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
53564+ current->mm->pax_flags |= MF_PAX_MPROTECT;
53565+#endif
53566+
53567+ }
53568+#endif
53569+
53570 if (N_MAGIC(ex) == OMAGIC) {
53571 unsigned long text_addr, map_size;
53572 loff_t pos;
53573@@ -322,7 +350,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
53574 }
53575
53576 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
53577- PROT_READ | PROT_WRITE | PROT_EXEC,
53578+ PROT_READ | PROT_WRITE,
53579 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
53580 fd_offset + ex.a_text);
53581 if (error != N_DATADDR(ex)) {
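
The binfmt_elf hunks that follow wire up per-binary PaX flags from three sources (legacy EI_PAX header bits, a PT_PAX_FLAGS program header, and a PaX extended attribute) and cross-check them against each other. The xattr value is a string over "pemrs", one letter per feature (PAGEEXEC, EMUTRAMP, MPROTECT, RANDMMAP, SEGMEXEC): a lowercase letter contributes to the hard-mode mask, an uppercase one to the soft-mode mask, and claiming both for the same feature is rejected; how each mask is then applied depends on pax_softmode, as in the patch code below. A userspace model of that letter parser:

#include <stdio.h>

enum {
    MF_PAX_PAGEEXEC = 1 << 0,
    MF_PAX_EMUTRAMP = 1 << 1,
    MF_PAX_MPROTECT = 1 << 2,
    MF_PAX_RANDMMAP = 1 << 3,
    MF_PAX_SEGMEXEC = 1 << 4,
};

static int parse_pax_letters(const char *s, unsigned long *hard, unsigned long *soft)
{
    static const struct { char lo, hi; unsigned long flag; } map[] = {
        { 'p', 'P', MF_PAX_PAGEEXEC }, { 'e', 'E', MF_PAX_EMUTRAMP },
        { 'm', 'M', MF_PAX_MPROTECT }, { 'r', 'R', MF_PAX_RANDMMAP },
        { 's', 'S', MF_PAX_SEGMEXEC },
    };
    size_t i;

    *hard = *soft = 0;
    for (; *s; s++) {
        for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
            if (*s == map[i].lo) {
                if (*hard & map[i].flag)
                    return -1;        /* duplicate letter */
                *hard |= map[i].flag;
                break;
            }
            if (*s == map[i].hi) {
                if (*soft & map[i].flag)
                    return -1;
                *soft |= map[i].flag;
                break;
            }
        }
        if (i == sizeof(map) / sizeof(map[0]))
            return -1;                /* unknown letter */
    }
    return (*hard & *soft) ? -1 : 0;  /* both modes for one flag: invalid */
}

int main(void)
{
    unsigned long hard, soft;
    if (parse_pax_letters("pM", &hard, &soft) == 0)
        printf("hard=%#lx soft=%#lx\n", hard, soft);
    return 0;
}
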
53582diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
53583index 4c94a79..228e9da 100644
53584--- a/fs/binfmt_elf.c
53585+++ b/fs/binfmt_elf.c
53586@@ -34,6 +34,7 @@
53587 #include <linux/utsname.h>
53588 #include <linux/coredump.h>
53589 #include <linux/sched.h>
53590+#include <linux/xattr.h>
53591 #include <asm/uaccess.h>
53592 #include <asm/param.h>
53593 #include <asm/page.h>
53594@@ -60,6 +61,14 @@ static int elf_core_dump(struct coredump_params *cprm);
53595 #define elf_core_dump NULL
53596 #endif
53597
53598+#ifdef CONFIG_PAX_MPROTECT
53599+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
53600+#endif
53601+
53602+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
53603+static void elf_handle_mmap(struct file *file);
53604+#endif
53605+
53606 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
53607 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
53608 #else
53609@@ -79,6 +88,15 @@ static struct linux_binfmt elf_format = {
53610 .load_binary = load_elf_binary,
53611 .load_shlib = load_elf_library,
53612 .core_dump = elf_core_dump,
53613+
53614+#ifdef CONFIG_PAX_MPROTECT
53615+ .handle_mprotect= elf_handle_mprotect,
53616+#endif
53617+
53618+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
53619+ .handle_mmap = elf_handle_mmap,
53620+#endif
53621+
53622 .min_coredump = ELF_EXEC_PAGESIZE,
53623 };
53624
53625@@ -86,6 +104,8 @@ static struct linux_binfmt elf_format = {
53626
53627 static int set_brk(unsigned long start, unsigned long end)
53628 {
53629+ unsigned long e = end;
53630+
53631 start = ELF_PAGEALIGN(start);
53632 end = ELF_PAGEALIGN(end);
53633 if (end > start) {
53634@@ -94,7 +114,7 @@ static int set_brk(unsigned long start, unsigned long end)
53635 if (BAD_ADDR(addr))
53636 return addr;
53637 }
53638- current->mm->start_brk = current->mm->brk = end;
53639+ current->mm->start_brk = current->mm->brk = e;
53640 return 0;
53641 }
53642
53643@@ -155,12 +175,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
53644 elf_addr_t __user *u_rand_bytes;
53645 const char *k_platform = ELF_PLATFORM;
53646 const char *k_base_platform = ELF_BASE_PLATFORM;
53647- unsigned char k_rand_bytes[16];
53648+ u32 k_rand_bytes[4];
53649 int items;
53650 elf_addr_t *elf_info;
53651 int ei_index = 0;
53652 const struct cred *cred = current_cred();
53653 struct vm_area_struct *vma;
53654+ unsigned long saved_auxv[AT_VECTOR_SIZE];
53655
53656 /*
53657 * In some cases (e.g. Hyper-Threading), we want to avoid L1
53658@@ -202,8 +223,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
53659 * Generate 16 random bytes for userspace PRNG seeding.
53660 */
53661 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
53662- u_rand_bytes = (elf_addr_t __user *)
53663- STACK_ALLOC(p, sizeof(k_rand_bytes));
53664+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
53665+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
53666+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
53667+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
53668+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
53669+ u_rand_bytes = (elf_addr_t __user *) p;
53670 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
53671 return -EFAULT;
53672
53673@@ -318,9 +343,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
53674 return -EFAULT;
53675 current->mm->env_end = p;
53676
53677+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
53678+
53679 /* Put the elf_info on the stack in the right place. */
53680 sp = (elf_addr_t __user *)envp + 1;
53681- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
53682+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
53683 return -EFAULT;
53684 return 0;
53685 }
53686@@ -388,15 +415,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
53687 an ELF header */
53688
53689 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
53690- struct file *interpreter, unsigned long *interp_map_addr,
53691- unsigned long no_base)
53692+ struct file *interpreter, unsigned long no_base)
53693 {
53694 struct elf_phdr *elf_phdata;
53695 struct elf_phdr *eppnt;
53696- unsigned long load_addr = 0;
53697+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
53698 int load_addr_set = 0;
53699 unsigned long last_bss = 0, elf_bss = 0;
53700- unsigned long error = ~0UL;
53701+ unsigned long error = -EINVAL;
53702 unsigned long total_size;
53703 int retval, i, size;
53704
53705@@ -442,6 +468,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
53706 goto out_close;
53707 }
53708
53709+#ifdef CONFIG_PAX_SEGMEXEC
53710+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
53711+ pax_task_size = SEGMEXEC_TASK_SIZE;
53712+#endif
53713+
53714 eppnt = elf_phdata;
53715 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
53716 if (eppnt->p_type == PT_LOAD) {
53717@@ -465,8 +496,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
53718 map_addr = elf_map(interpreter, load_addr + vaddr,
53719 eppnt, elf_prot, elf_type, total_size);
53720 total_size = 0;
53721- if (!*interp_map_addr)
53722- *interp_map_addr = map_addr;
53723 error = map_addr;
53724 if (BAD_ADDR(map_addr))
53725 goto out_close;
53726@@ -485,8 +514,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
53727 k = load_addr + eppnt->p_vaddr;
53728 if (BAD_ADDR(k) ||
53729 eppnt->p_filesz > eppnt->p_memsz ||
53730- eppnt->p_memsz > TASK_SIZE ||
53731- TASK_SIZE - eppnt->p_memsz < k) {
53732+ eppnt->p_memsz > pax_task_size ||
53733+ pax_task_size - eppnt->p_memsz < k) {
53734 error = -ENOMEM;
53735 goto out_close;
53736 }
53737@@ -525,9 +554,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
53738 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
53739
53740 /* Map the last of the bss segment */
53741- error = vm_brk(elf_bss, last_bss - elf_bss);
53742- if (BAD_ADDR(error))
53743- goto out_close;
53744+ if (last_bss > elf_bss) {
53745+ error = vm_brk(elf_bss, last_bss - elf_bss);
53746+ if (BAD_ADDR(error))
53747+ goto out_close;
53748+ }
53749 }
53750
53751 error = load_addr;
53752@@ -538,6 +569,322 @@ out:
53753 return error;
53754 }
53755
53756+#ifdef CONFIG_PAX_PT_PAX_FLAGS
53757+#ifdef CONFIG_PAX_SOFTMODE
53758+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
53759+{
53760+ unsigned long pax_flags = 0UL;
53761+
53762+#ifdef CONFIG_PAX_PAGEEXEC
53763+ if (elf_phdata->p_flags & PF_PAGEEXEC)
53764+ pax_flags |= MF_PAX_PAGEEXEC;
53765+#endif
53766+
53767+#ifdef CONFIG_PAX_SEGMEXEC
53768+ if (elf_phdata->p_flags & PF_SEGMEXEC)
53769+ pax_flags |= MF_PAX_SEGMEXEC;
53770+#endif
53771+
53772+#ifdef CONFIG_PAX_EMUTRAMP
53773+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
53774+ pax_flags |= MF_PAX_EMUTRAMP;
53775+#endif
53776+
53777+#ifdef CONFIG_PAX_MPROTECT
53778+ if (elf_phdata->p_flags & PF_MPROTECT)
53779+ pax_flags |= MF_PAX_MPROTECT;
53780+#endif
53781+
53782+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
53783+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
53784+ pax_flags |= MF_PAX_RANDMMAP;
53785+#endif
53786+
53787+ return pax_flags;
53788+}
53789+#endif
53790+
53791+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
53792+{
53793+ unsigned long pax_flags = 0UL;
53794+
53795+#ifdef CONFIG_PAX_PAGEEXEC
53796+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
53797+ pax_flags |= MF_PAX_PAGEEXEC;
53798+#endif
53799+
53800+#ifdef CONFIG_PAX_SEGMEXEC
53801+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
53802+ pax_flags |= MF_PAX_SEGMEXEC;
53803+#endif
53804+
53805+#ifdef CONFIG_PAX_EMUTRAMP
53806+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
53807+ pax_flags |= MF_PAX_EMUTRAMP;
53808+#endif
53809+
53810+#ifdef CONFIG_PAX_MPROTECT
53811+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
53812+ pax_flags |= MF_PAX_MPROTECT;
53813+#endif
53814+
53815+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
53816+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
53817+ pax_flags |= MF_PAX_RANDMMAP;
53818+#endif
53819+
53820+ return pax_flags;
53821+}
53822+#endif
53823+
53824+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
53825+#ifdef CONFIG_PAX_SOFTMODE
53826+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
53827+{
53828+ unsigned long pax_flags = 0UL;
53829+
53830+#ifdef CONFIG_PAX_PAGEEXEC
53831+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
53832+ pax_flags |= MF_PAX_PAGEEXEC;
53833+#endif
53834+
53835+#ifdef CONFIG_PAX_SEGMEXEC
53836+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
53837+ pax_flags |= MF_PAX_SEGMEXEC;
53838+#endif
53839+
53840+#ifdef CONFIG_PAX_EMUTRAMP
53841+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
53842+ pax_flags |= MF_PAX_EMUTRAMP;
53843+#endif
53844+
53845+#ifdef CONFIG_PAX_MPROTECT
53846+ if (pax_flags_softmode & MF_PAX_MPROTECT)
53847+ pax_flags |= MF_PAX_MPROTECT;
53848+#endif
53849+
53850+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
53851+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
53852+ pax_flags |= MF_PAX_RANDMMAP;
53853+#endif
53854+
53855+ return pax_flags;
53856+}
53857+#endif
53858+
53859+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
53860+{
53861+ unsigned long pax_flags = 0UL;
53862+
53863+#ifdef CONFIG_PAX_PAGEEXEC
53864+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
53865+ pax_flags |= MF_PAX_PAGEEXEC;
53866+#endif
53867+
53868+#ifdef CONFIG_PAX_SEGMEXEC
53869+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
53870+ pax_flags |= MF_PAX_SEGMEXEC;
53871+#endif
53872+
53873+#ifdef CONFIG_PAX_EMUTRAMP
53874+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
53875+ pax_flags |= MF_PAX_EMUTRAMP;
53876+#endif
53877+
53878+#ifdef CONFIG_PAX_MPROTECT
53879+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
53880+ pax_flags |= MF_PAX_MPROTECT;
53881+#endif
53882+
53883+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
53884+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
53885+ pax_flags |= MF_PAX_RANDMMAP;
53886+#endif
53887+
53888+ return pax_flags;
53889+}
53890+#endif
53891+
53892+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53893+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
53894+{
53895+ unsigned long pax_flags = 0UL;
53896+
53897+#ifdef CONFIG_PAX_EI_PAX
53898+
53899+#ifdef CONFIG_PAX_PAGEEXEC
53900+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
53901+ pax_flags |= MF_PAX_PAGEEXEC;
53902+#endif
53903+
53904+#ifdef CONFIG_PAX_SEGMEXEC
53905+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
53906+ pax_flags |= MF_PAX_SEGMEXEC;
53907+#endif
53908+
53909+#ifdef CONFIG_PAX_EMUTRAMP
53910+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
53911+ pax_flags |= MF_PAX_EMUTRAMP;
53912+#endif
53913+
53914+#ifdef CONFIG_PAX_MPROTECT
53915+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
53916+ pax_flags |= MF_PAX_MPROTECT;
53917+#endif
53918+
53919+#ifdef CONFIG_PAX_ASLR
53920+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
53921+ pax_flags |= MF_PAX_RANDMMAP;
53922+#endif
53923+
53924+#else
53925+
53926+#ifdef CONFIG_PAX_PAGEEXEC
53927+ pax_flags |= MF_PAX_PAGEEXEC;
53928+#endif
53929+
53930+#ifdef CONFIG_PAX_SEGMEXEC
53931+ pax_flags |= MF_PAX_SEGMEXEC;
53932+#endif
53933+
53934+#ifdef CONFIG_PAX_MPROTECT
53935+ pax_flags |= MF_PAX_MPROTECT;
53936+#endif
53937+
53938+#ifdef CONFIG_PAX_RANDMMAP
53939+ if (randomize_va_space)
53940+ pax_flags |= MF_PAX_RANDMMAP;
53941+#endif
53942+
53943+#endif
53944+
53945+ return pax_flags;
53946+}
53947+
53948+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
53949+{
53950+
53951+#ifdef CONFIG_PAX_PT_PAX_FLAGS
53952+ unsigned long i;
53953+
53954+ for (i = 0UL; i < elf_ex->e_phnum; i++)
53955+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
53956+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
53957+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
53958+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
53959+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
53960+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
53961+ return ~0UL;
53962+
53963+#ifdef CONFIG_PAX_SOFTMODE
53964+ if (pax_softmode)
53965+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
53966+ else
53967+#endif
53968+
53969+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
53970+ break;
53971+ }
53972+#endif
53973+
53974+ return ~0UL;
53975+}
53976+
53977+static unsigned long pax_parse_xattr_pax(struct file * const file)
53978+{
53979+
53980+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
53981+ ssize_t xattr_size, i;
53982+ unsigned char xattr_value[sizeof("pemrs") - 1];
53983+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
53984+
53985+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
53986+ switch (xattr_size) {
53987+ default:
53988+ return ~0UL;
53989+
53990+ case -ENODATA:
53991+ break;
53992+
53993+ case 0 ... sizeof xattr_value:
53994+ for (i = 0; i < xattr_size; i++)
53995+ switch (xattr_value[i]) {
53996+ default:
53997+ return ~0UL;
53998+
53999+#define parse_flag(option1, option2, flag) \
54000+ case option1: \
54001+ if (pax_flags_hardmode & MF_PAX_##flag) \
54002+ return ~0UL; \
54003+ pax_flags_hardmode |= MF_PAX_##flag; \
54004+ break; \
54005+ case option2: \
54006+ if (pax_flags_softmode & MF_PAX_##flag) \
54007+ return ~0UL; \
54008+ pax_flags_softmode |= MF_PAX_##flag; \
54009+ break;
54010+
54011+ parse_flag('p', 'P', PAGEEXEC);
54012+ parse_flag('e', 'E', EMUTRAMP);
54013+ parse_flag('m', 'M', MPROTECT);
54014+ parse_flag('r', 'R', RANDMMAP);
54015+ parse_flag('s', 'S', SEGMEXEC);
54016+
54017+#undef parse_flag
54018+ }
54019+ break;
54020+ }
54021+
54022+ if (pax_flags_hardmode & pax_flags_softmode)
54023+ return ~0UL;
54024+
54025+#ifdef CONFIG_PAX_SOFTMODE
54026+ if (pax_softmode)
54027+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
54028+ else
54029+#endif
54030+
54031+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
54032+#else
54033+ return ~0UL;
54034+#endif
54035+
54036+}
54037+
54038+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
54039+{
54040+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
54041+
54042+ pax_flags = pax_parse_ei_pax(elf_ex);
54043+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
54044+ xattr_pax_flags = pax_parse_xattr_pax(file);
54045+
54046+ if (pt_pax_flags == ~0UL)
54047+ pt_pax_flags = xattr_pax_flags;
54048+ else if (xattr_pax_flags == ~0UL)
54049+ xattr_pax_flags = pt_pax_flags;
54050+ if (pt_pax_flags != xattr_pax_flags)
54051+ return -EINVAL;
54052+ if (pt_pax_flags != ~0UL)
54053+ pax_flags = pt_pax_flags;
54054+
54055+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
54056+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
54057+ if ((__supported_pte_mask & _PAGE_NX))
54058+ pax_flags &= ~MF_PAX_SEGMEXEC;
54059+ else
54060+ pax_flags &= ~MF_PAX_PAGEEXEC;
54061+ }
54062+#endif
54063+
54064+ if (0 > pax_check_flags(&pax_flags))
54065+ return -EINVAL;
54066+
54067+ current->mm->pax_flags = pax_flags;
54068+ return 0;
54069+}
54070+#endif
54071+
54072 /*
54073 * These are the functions used to load ELF style executables and shared
54074 * libraries. There is no binary dependent code anywhere else.
54075@@ -554,6 +901,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
54076 {
54077 unsigned int random_variable = 0;
54078
54079+#ifdef CONFIG_PAX_RANDUSTACK
54080+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
54081+ return stack_top - current->mm->delta_stack;
54082+#endif
54083+
54084 if ((current->flags & PF_RANDOMIZE) &&
54085 !(current->personality & ADDR_NO_RANDOMIZE)) {
54086 random_variable = get_random_int() & STACK_RND_MASK;
54087@@ -572,7 +924,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
54088 unsigned long load_addr = 0, load_bias = 0;
54089 int load_addr_set = 0;
54090 char * elf_interpreter = NULL;
54091- unsigned long error;
54092+ unsigned long error = 0;
54093 struct elf_phdr *elf_ppnt, *elf_phdata;
54094 unsigned long elf_bss, elf_brk;
54095 int retval, i;
54096@@ -582,12 +934,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
54097 unsigned long start_code, end_code, start_data, end_data;
54098 unsigned long reloc_func_desc __maybe_unused = 0;
54099 int executable_stack = EXSTACK_DEFAULT;
54100- unsigned long def_flags = 0;
54101 struct pt_regs *regs = current_pt_regs();
54102 struct {
54103 struct elfhdr elf_ex;
54104 struct elfhdr interp_elf_ex;
54105 } *loc;
54106+ unsigned long pax_task_size;
54107
54108 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
54109 if (!loc) {
54110@@ -723,11 +1075,82 @@ static int load_elf_binary(struct linux_binprm *bprm)
54111 goto out_free_dentry;
54112
54113 /* OK, This is the point of no return */
54114- current->mm->def_flags = def_flags;
54115+ current->mm->def_flags = 0;
54116
54117 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
54118 may depend on the personality. */
54119 SET_PERSONALITY(loc->elf_ex);
54120+
54121+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
54122+ current->mm->pax_flags = 0UL;
54123+#endif
54124+
54125+#ifdef CONFIG_PAX_DLRESOLVE
54126+ current->mm->call_dl_resolve = 0UL;
54127+#endif
54128+
54129+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
54130+ current->mm->call_syscall = 0UL;
54131+#endif
54132+
54133+#ifdef CONFIG_PAX_ASLR
54134+ current->mm->delta_mmap = 0UL;
54135+ current->mm->delta_stack = 0UL;
54136+#endif
54137+
54138+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
54139+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
54140+ send_sig(SIGKILL, current, 0);
54141+ goto out_free_dentry;
54142+ }
54143+#endif
54144+
54145+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54146+ pax_set_initial_flags(bprm);
54147+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
54148+ if (pax_set_initial_flags_func)
54149+ (pax_set_initial_flags_func)(bprm);
54150+#endif
54151+
54152+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
54153+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
54154+ current->mm->context.user_cs_limit = PAGE_SIZE;
54155+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
54156+ }
54157+#endif
54158+
54159+#ifdef CONFIG_PAX_SEGMEXEC
54160+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
54161+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
54162+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
54163+ pax_task_size = SEGMEXEC_TASK_SIZE;
54164+ current->mm->def_flags |= VM_NOHUGEPAGE;
54165+ } else
54166+#endif
54167+
54168+ pax_task_size = TASK_SIZE;
54169+
54170+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
54171+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
54172+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
54173+ put_cpu();
54174+ }
54175+#endif
54176+
54177+#ifdef CONFIG_PAX_ASLR
54178+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
54179+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
54180+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
54181+ }
54182+#endif
54183+
54184+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
54185+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
54186+ executable_stack = EXSTACK_DISABLE_X;
54187+ current->personality &= ~READ_IMPLIES_EXEC;
54188+ } else
54189+#endif
54190+
54191 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
54192 current->personality |= READ_IMPLIES_EXEC;
54193
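
The CONFIG_PAX_ASLR hunk above draws delta_mmap and delta_stack from pax_get_random_long(), masking to PAX_DELTA_MMAP_LEN/PAX_DELTA_STACK_LEN bits and shifting by PAGE_SHIFT, so each delta is a page-aligned offset with 2^LEN equally likely values. A standalone sketch of that computation, with assumed constants (the real PAGE_SHIFT and delta lengths are per-architecture, and rand() merely stands in for the kernel RNG):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT          12      /* assumed: 4 KiB pages */
    #define PAX_DELTA_MMAP_LEN  16      /* assumed: real value is per-arch */

    static unsigned long pax_get_random_long(void)
    {
        /* stand-in for the kernel RNG, good enough to illustrate the masking */
        return ((unsigned long)rand() << 16) ^ (unsigned long)rand();
    }

    int main(void)
    {
        unsigned long delta_mmap =
            (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;

        /* page aligned, strictly below 2^(LEN + PAGE_SHIFT) */
        printf("delta_mmap = %#lx (< %#lx)\n",
               delta_mmap, 1UL << (PAX_DELTA_MMAP_LEN + PAGE_SHIFT));
        return 0;
    }
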
54194@@ -817,6 +1240,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
54195 #else
54196 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
54197 #endif
54198+
54199+#ifdef CONFIG_PAX_RANDMMAP
54200+ /* PaX: randomize base address at the default exe base if requested */
54201+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
54202+#ifdef CONFIG_SPARC64
54203+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
54204+#else
54205+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
54206+#endif
54207+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
54208+ elf_flags |= MAP_FIXED;
54209+ }
54210+#endif
54211+
54212 }
54213
54214 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
54215@@ -849,9 +1286,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
54216 * allowed task size. Note that p_filesz must always be
54217 * <= p_memsz so it is only necessary to check p_memsz.
54218 */
54219- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
54220- elf_ppnt->p_memsz > TASK_SIZE ||
54221- TASK_SIZE - elf_ppnt->p_memsz < k) {
54222+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
54223+ elf_ppnt->p_memsz > pax_task_size ||
54224+ pax_task_size - elf_ppnt->p_memsz < k) {
54225 /* set_brk can never work. Avoid overflows. */
54226 send_sig(SIGKILL, current, 0);
54227 retval = -EINVAL;
54228@@ -890,17 +1327,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
54229 goto out_free_dentry;
54230 }
54231 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
54232- send_sig(SIGSEGV, current, 0);
54233- retval = -EFAULT; /* Nobody gets to see this, but.. */
54234- goto out_free_dentry;
54235+ /*
54236+ * This bss-zeroing can fail if the ELF
54237+ * file specifies odd protections. So
54238+			 * we don't check the return value.
54239+ */
54240 }
54241
54242+#ifdef CONFIG_PAX_RANDMMAP
54243+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
54244+ unsigned long start, size, flags;
54245+ vm_flags_t vm_flags;
54246+
54247+ start = ELF_PAGEALIGN(elf_brk);
54248+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
54249+ flags = MAP_FIXED | MAP_PRIVATE;
54250+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
54251+
54252+ down_write(&current->mm->mmap_sem);
54253+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
54254+ retval = -ENOMEM;
54255+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
54256+// if (current->personality & ADDR_NO_RANDOMIZE)
54257+// vm_flags |= VM_READ | VM_MAYREAD;
54258+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
54259+ retval = IS_ERR_VALUE(start) ? start : 0;
54260+ }
54261+ up_write(&current->mm->mmap_sem);
54262+ if (retval == 0)
54263+ retval = set_brk(start + size, start + size + PAGE_SIZE);
54264+ if (retval < 0) {
54265+ send_sig(SIGKILL, current, 0);
54266+ goto out_free_dentry;
54267+ }
54268+ }
54269+#endif
54270+
54271 if (elf_interpreter) {
54272- unsigned long interp_map_addr = 0;
54273-
54274 elf_entry = load_elf_interp(&loc->interp_elf_ex,
54275 interpreter,
54276- &interp_map_addr,
54277 load_bias);
54278 if (!IS_ERR((void *)elf_entry)) {
54279 /*
54280@@ -1122,7 +1587,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
54281 * Decide what to dump of a segment, part, all or none.
54282 */
54283 static unsigned long vma_dump_size(struct vm_area_struct *vma,
54284- unsigned long mm_flags)
54285+ unsigned long mm_flags, long signr)
54286 {
54287 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
54288
54289@@ -1160,7 +1625,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
54290 if (vma->vm_file == NULL)
54291 return 0;
54292
54293- if (FILTER(MAPPED_PRIVATE))
54294+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
54295 goto whole;
54296
54297 /*
54298@@ -1385,9 +1850,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
54299 {
54300 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
54301 int i = 0;
54302- do
54303+ do {
54304 i += 2;
54305- while (auxv[i - 2] != AT_NULL);
54306+ } while (auxv[i - 2] != AT_NULL);
54307 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
54308 }
54309
54310@@ -1396,7 +1861,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
54311 {
54312 mm_segment_t old_fs = get_fs();
54313 set_fs(KERNEL_DS);
54314- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
54315+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
54316 set_fs(old_fs);
54317 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
54318 }
54319@@ -2023,14 +2488,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
54320 }
54321
54322 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
54323- unsigned long mm_flags)
54324+ struct coredump_params *cprm)
54325 {
54326 struct vm_area_struct *vma;
54327 size_t size = 0;
54328
54329 for (vma = first_vma(current, gate_vma); vma != NULL;
54330 vma = next_vma(vma, gate_vma))
54331- size += vma_dump_size(vma, mm_flags);
54332+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
54333 return size;
54334 }
54335
54336@@ -2123,7 +2588,7 @@ static int elf_core_dump(struct coredump_params *cprm)
54337
54338 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
54339
54340- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
54341+ offset += elf_core_vma_data_size(gate_vma, cprm);
54342 offset += elf_core_extra_data_size();
54343 e_shoff = offset;
54344
54345@@ -2137,10 +2602,12 @@ static int elf_core_dump(struct coredump_params *cprm)
54346 offset = dataoff;
54347
54348 size += sizeof(*elf);
54349+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
54350 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
54351 goto end_coredump;
54352
54353 size += sizeof(*phdr4note);
54354+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
54355 if (size > cprm->limit
54356 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
54357 goto end_coredump;
54358@@ -2154,7 +2621,7 @@ static int elf_core_dump(struct coredump_params *cprm)
54359 phdr.p_offset = offset;
54360 phdr.p_vaddr = vma->vm_start;
54361 phdr.p_paddr = 0;
54362- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
54363+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
54364 phdr.p_memsz = vma->vm_end - vma->vm_start;
54365 offset += phdr.p_filesz;
54366 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
54367@@ -2165,6 +2632,7 @@ static int elf_core_dump(struct coredump_params *cprm)
54368 phdr.p_align = ELF_EXEC_PAGESIZE;
54369
54370 size += sizeof(phdr);
54371+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
54372 if (size > cprm->limit
54373 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
54374 goto end_coredump;
54375@@ -2189,7 +2657,7 @@ static int elf_core_dump(struct coredump_params *cprm)
54376 unsigned long addr;
54377 unsigned long end;
54378
54379- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
54380+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
54381
54382 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
54383 struct page *page;
54384@@ -2198,6 +2666,7 @@ static int elf_core_dump(struct coredump_params *cprm)
54385 page = get_dump_page(addr);
54386 if (page) {
54387 void *kaddr = kmap(page);
54388+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
54389 stop = ((size += PAGE_SIZE) > cprm->limit) ||
54390 !dump_write(cprm->file, kaddr,
54391 PAGE_SIZE);
54392@@ -2215,6 +2684,7 @@ static int elf_core_dump(struct coredump_params *cprm)
54393
54394 if (e_phnum == PN_XNUM) {
54395 size += sizeof(*shdr4extnum);
54396+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
54397 if (size > cprm->limit
54398 || !dump_write(cprm->file, shdr4extnum,
54399 sizeof(*shdr4extnum)))
54400@@ -2235,6 +2705,167 @@ out:
54401
54402 #endif /* CONFIG_ELF_CORE */
54403
54404+#ifdef CONFIG_PAX_MPROTECT
54405+/* PaX: non-PIC ELF libraries need relocations on their executable segments,
54406+ * therefore we'll grant them VM_MAYWRITE once during their lifetime. Similarly,
54407+ * we'll remove VM_MAYWRITE for good on RELRO segments.
54408+ *
54409+ * The checks favour ld-linux.so behaviour, which operates on a per-ELF-segment
54410+ * basis, because we want to allow the common case and not the special ones.
54411+ */
54412+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
54413+{
54414+ struct elfhdr elf_h;
54415+ struct elf_phdr elf_p;
54416+ unsigned long i;
54417+ unsigned long oldflags;
54418+ bool is_textrel_rw, is_textrel_rx, is_relro;
54419+
54420+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
54421+ return;
54422+
54423+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
54424+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
54425+
54426+#ifdef CONFIG_PAX_ELFRELOCS
54427+ /* possible TEXTREL */
54428+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
54429+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
54430+#else
54431+ is_textrel_rw = false;
54432+ is_textrel_rx = false;
54433+#endif
54434+
54435+ /* possible RELRO */
54436+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
54437+
54438+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
54439+ return;
54440+
54441+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
54442+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
54443+
54444+#ifdef CONFIG_PAX_ETEXECRELOCS
54445+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
54446+#else
54447+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
54448+#endif
54449+
54450+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
54451+ !elf_check_arch(&elf_h) ||
54452+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
54453+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
54454+ return;
54455+
54456+ for (i = 0UL; i < elf_h.e_phnum; i++) {
54457+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
54458+ return;
54459+ switch (elf_p.p_type) {
54460+ case PT_DYNAMIC:
54461+ if (!is_textrel_rw && !is_textrel_rx)
54462+ continue;
54463+ i = 0UL;
54464+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
54465+ elf_dyn dyn;
54466+
54467+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
54468+ break;
54469+ if (dyn.d_tag == DT_NULL)
54470+ break;
54471+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
54472+ gr_log_textrel(vma);
54473+ if (is_textrel_rw)
54474+ vma->vm_flags |= VM_MAYWRITE;
54475+ else
54476+				/* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
54477+ vma->vm_flags &= ~VM_MAYWRITE;
54478+ break;
54479+ }
54480+ i++;
54481+ }
54482+ is_textrel_rw = false;
54483+ is_textrel_rx = false;
54484+ continue;
54485+
54486+ case PT_GNU_RELRO:
54487+ if (!is_relro)
54488+ continue;
54489+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
54490+ vma->vm_flags &= ~VM_MAYWRITE;
54491+ is_relro = false;
54492+ continue;
54493+
54494+#ifdef CONFIG_PAX_PT_PAX_FLAGS
54495+ case PT_PAX_FLAGS: {
54496+ const char *msg_mprotect = "", *msg_emutramp = "";
54497+ char *buffer_lib, *buffer_exe;
54498+
54499+ if (elf_p.p_flags & PF_NOMPROTECT)
54500+ msg_mprotect = "MPROTECT disabled";
54501+
54502+#ifdef CONFIG_PAX_EMUTRAMP
54503+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
54504+ msg_emutramp = "EMUTRAMP enabled";
54505+#endif
54506+
54507+ if (!msg_mprotect[0] && !msg_emutramp[0])
54508+ continue;
54509+
54510+ if (!printk_ratelimit())
54511+ continue;
54512+
54513+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
54514+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
54515+ if (buffer_lib && buffer_exe) {
54516+ char *path_lib, *path_exe;
54517+
54518+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
54519+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
54520+
54521+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
54522+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
54523+
54524+ }
54525+ free_page((unsigned long)buffer_exe);
54526+ free_page((unsigned long)buffer_lib);
54527+ continue;
54528+ }
54529+#endif
54530+
54531+ }
54532+ }
54533+}
54534+#endif
54535+
54536+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54537+
54538+extern int grsec_enable_log_rwxmaps;
54539+
54540+static void elf_handle_mmap(struct file *file)
54541+{
54542+ struct elfhdr elf_h;
54543+ struct elf_phdr elf_p;
54544+ unsigned long i;
54545+
54546+ if (!grsec_enable_log_rwxmaps)
54547+ return;
54548+
54549+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
54550+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
54551+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
54552+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
54553+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
54554+ return;
54555+
54556+ for (i = 0UL; i < elf_h.e_phnum; i++) {
54557+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
54558+ return;
54559+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
54560+ gr_log_ptgnustack(file);
54561+ }
54562+}
54563+#endif
54564+
54565 static int __init init_elf_binfmt(void)
54566 {
54567 register_binfmt(&elf_format);
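
The pax_parse_pax_flags() helper added above merges three marking sources — the legacy EI_PAX byte, the PT_PAX_FLAGS program header, and the user.pax.flags xattr parsed by pax_parse_xattr_pax() — with ~0UL standing for "no marking from this source". A minimal userspace sketch of just that precedence rule (helper names follow the patch; all surrounding kernel context is assumed away):

    #include <stdio.h>

    #define NO_MARK (~0UL)

    static long merge_pax_flags(unsigned long ei, unsigned long pt,
                                unsigned long xattr)
    {
        unsigned long flags = ei;       /* legacy EI_PAX default */

        if (pt == NO_MARK)
            pt = xattr;
        else if (xattr == NO_MARK)
            xattr = pt;
        if (pt != xattr)
            return -1;                  /* conflicting markings: -EINVAL */
        if (pt != NO_MARK)
            flags = pt;
        return (long)flags;
    }

    int main(void)
    {
        printf("%ld\n", merge_pax_flags(0x5UL, NO_MARK, 0x3UL)); /* 3: xattr overrides EI_PAX */
        printf("%ld\n", merge_pax_flags(0x5UL, 0x3UL, 0x7UL));   /* -1: PT and xattr disagree */
        return 0;
    }
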
54568diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
54569index d50bbe5..af3b649 100644
54570--- a/fs/binfmt_flat.c
54571+++ b/fs/binfmt_flat.c
54572@@ -566,7 +566,9 @@ static int load_flat_file(struct linux_binprm * bprm,
54573 realdatastart = (unsigned long) -ENOMEM;
54574 printk("Unable to allocate RAM for process data, errno %d\n",
54575 (int)-realdatastart);
54576+ down_write(&current->mm->mmap_sem);
54577 vm_munmap(textpos, text_len);
54578+ up_write(&current->mm->mmap_sem);
54579 ret = realdatastart;
54580 goto err;
54581 }
54582@@ -590,8 +592,10 @@ static int load_flat_file(struct linux_binprm * bprm,
54583 }
54584 if (IS_ERR_VALUE(result)) {
54585 printk("Unable to read data+bss, errno %d\n", (int)-result);
54586+ down_write(&current->mm->mmap_sem);
54587 vm_munmap(textpos, text_len);
54588 vm_munmap(realdatastart, len);
54589+ up_write(&current->mm->mmap_sem);
54590 ret = result;
54591 goto err;
54592 }
54593@@ -653,8 +657,10 @@ static int load_flat_file(struct linux_binprm * bprm,
54594 }
54595 if (IS_ERR_VALUE(result)) {
54596 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
54597+ down_write(&current->mm->mmap_sem);
54598 vm_munmap(textpos, text_len + data_len + extra +
54599 MAX_SHARED_LIBS * sizeof(unsigned long));
54600+ up_write(&current->mm->mmap_sem);
54601 ret = result;
54602 goto err;
54603 }
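
The three binfmt_flat hunks wrap the error-path vm_munmap() calls in down_write()/up_write() on mmap_sem, reflecting this tree's rule that the caller serializes these unmaps. As a locking-discipline analogue only (not the kernel API), the same pattern in userspace pthreads — every mutation of the shared mapping state happens under the writer lock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;
    static int nr_regions = 2;

    static void unmap_region(void)
    {
        nr_regions--;   /* caller must hold the writer lock */
    }

    static void error_path_cleanup(void)
    {
        pthread_rwlock_wrlock(&mmap_sem);
        unmap_region();         /* text */
        unmap_region();         /* data+bss */
        pthread_rwlock_unlock(&mmap_sem);
    }

    int main(void)
    {
        error_path_cleanup();
        printf("%d regions left\n", nr_regions);    /* 0 */
        return 0;
    }
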
54604diff --git a/fs/bio.c b/fs/bio.c
54605index ea5035d..a2932eb 100644
54606--- a/fs/bio.c
54607+++ b/fs/bio.c
54608@@ -1106,7 +1106,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
54609 /*
54610 * Overflow, abort
54611 */
54612- if (end < start)
54613+ if (end < start || end - start > INT_MAX - nr_pages)
54614 return ERR_PTR(-EINVAL);
54615
54616 nr_pages += end - start;
54617@@ -1240,7 +1240,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
54618 /*
54619 * Overflow, abort
54620 */
54621- if (end < start)
54622+ if (end < start || end - start > INT_MAX - nr_pages)
54623 return ERR_PTR(-EINVAL);
54624
54625 nr_pages += end - start;
54626@@ -1502,7 +1502,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
54627 const int read = bio_data_dir(bio) == READ;
54628 struct bio_map_data *bmd = bio->bi_private;
54629 int i;
54630- char *p = bmd->sgvecs[0].iov_base;
54631+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
54632
54633 bio_for_each_segment_all(bvec, bio, i) {
54634 char *addr = page_address(bvec->bv_page);
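
The two fs/bio.c hunks extend the wrap-around test so the page count can also never overflow the signed nr_pages accumulator. A worked userspace version of the check (same operand types assumed as in the kernel code: unsigned long start/end, int nr_pages):

    #include <limits.h>
    #include <stdio.h>

    static int add_pages(int nr_pages, unsigned long start, unsigned long end)
    {
        /* reject both wrap-around and anything that would overflow the int */
        if (end < start || end - start > (unsigned long)(INT_MAX - nr_pages))
            return -1;          /* -EINVAL in the kernel */
        return nr_pages + (int)(end - start);
    }

    int main(void)
    {
        printf("%d\n", add_pages(10, 0, 5));                       /* 15 */
        printf("%d\n", add_pages(10, 0, (unsigned long)INT_MAX));  /* -1: rejected */
        return 0;
    }
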
54635diff --git a/fs/block_dev.c b/fs/block_dev.c
54636index 1e86823..8e34695 100644
54637--- a/fs/block_dev.c
54638+++ b/fs/block_dev.c
54639@@ -637,7 +637,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
54640 else if (bdev->bd_contains == bdev)
54641 return true; /* is a whole device which isn't held */
54642
54643- else if (whole->bd_holder == bd_may_claim)
54644+ else if (whole->bd_holder == (void *)bd_may_claim)
54645 return true; /* is a partition of a device that is being partitioned */
54646 else if (whole->bd_holder != NULL)
54647 return false; /* is a partition of a held device */
54648diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
54649index b544a44..f3fb987 100644
54650--- a/fs/btrfs/ctree.c
54651+++ b/fs/btrfs/ctree.c
54652@@ -1028,9 +1028,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
54653 free_extent_buffer(buf);
54654 add_root_to_dirty_list(root);
54655 } else {
54656- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
54657- parent_start = parent->start;
54658- else
54659+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
54660+ if (parent)
54661+ parent_start = parent->start;
54662+ else
54663+ parent_start = 0;
54664+ } else
54665 parent_start = 0;
54666
54667 WARN_ON(trans->transid != btrfs_header_generation(parent));
54668diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
54669index cbd9523..5cca781 100644
54670--- a/fs/btrfs/delayed-inode.c
54671+++ b/fs/btrfs/delayed-inode.c
54672@@ -459,7 +459,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
54673
54674 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
54675 {
54676- int seq = atomic_inc_return(&delayed_root->items_seq);
54677+ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
54678 if ((atomic_dec_return(&delayed_root->items) <
54679 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
54680 waitqueue_active(&delayed_root->wait))
54681@@ -1378,7 +1378,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
54682 static int refs_newer(struct btrfs_delayed_root *delayed_root,
54683 int seq, int count)
54684 {
54685- int val = atomic_read(&delayed_root->items_seq);
54686+ int val = atomic_read_unchecked(&delayed_root->items_seq);
54687
54688 if (val < seq || val >= seq + count)
54689 return 1;
54690@@ -1395,7 +1395,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
54691 if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
54692 return;
54693
54694- seq = atomic_read(&delayed_root->items_seq);
54695+ seq = atomic_read_unchecked(&delayed_root->items_seq);
54696
54697 if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
54698 int ret;
54699diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
54700index a4b38f9..f86a509 100644
54701--- a/fs/btrfs/delayed-inode.h
54702+++ b/fs/btrfs/delayed-inode.h
54703@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
54704 */
54705 struct list_head prepare_list;
54706 atomic_t items; /* for delayed items */
54707- atomic_t items_seq; /* for delayed items */
54708+ atomic_unchecked_t items_seq; /* for delayed items */
54709 int nodes; /* for delayed nodes */
54710 wait_queue_head_t wait;
54711 };
54712@@ -87,7 +87,7 @@ static inline void btrfs_init_delayed_root(
54713 struct btrfs_delayed_root *delayed_root)
54714 {
54715 atomic_set(&delayed_root->items, 0);
54716- atomic_set(&delayed_root->items_seq, 0);
54717+ atomic_set_unchecked(&delayed_root->items_seq, 0);
54718 delayed_root->nodes = 0;
54719 spin_lock_init(&delayed_root->lock);
54720 init_waitqueue_head(&delayed_root->wait);
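
The delayed-inode conversions swap items_seq to atomic_unchecked_t because, under PaX's REFCOUNT hardening, a plain atomic_inc() traps on overflow, while a sequence counter is allowed — indeed expected — to wrap. A userspace model of the distinction (assumption: C11 atomics stand in for the kernel primitives, and this is illustrative rather than race-free):

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static int checked_inc(atomic_int *v)       /* refcount-style: refuse to wrap */
    {
        int old = atomic_load(v);
        if (old == INT_MAX)
            return -1;              /* the hardened kernel would trap here */
        atomic_store(v, old + 1);   /* sketch only: not race-free */
        return 0;
    }

    static void unchecked_inc(atomic_uint *v)   /* sequence-style: wrap is fine */
    {
        atomic_fetch_add(v, 1u);
    }

    int main(void)
    {
        atomic_int ref = INT_MAX;
        atomic_uint seq = UINT_MAX;

        unchecked_inc(&seq);        /* wraps to 0, by design */
        printf("ref inc: %d, seq: %u\n", checked_inc(&ref), atomic_load(&seq));
        return 0;
    }
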
54721diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
54722index 8747feb..ad1655c 100644
54723--- a/fs/btrfs/ioctl.c
54724+++ b/fs/btrfs/ioctl.c
54725@@ -3465,9 +3465,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
54726 for (i = 0; i < num_types; i++) {
54727 struct btrfs_space_info *tmp;
54728
54729+ /* Don't copy in more than we allocated */
54730 if (!slot_count)
54731 break;
54732
54733+ slot_count--;
54734+
54735 info = NULL;
54736 rcu_read_lock();
54737 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
54738@@ -3489,10 +3492,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
54739 memcpy(dest, &space, sizeof(space));
54740 dest++;
54741 space_args.total_spaces++;
54742- slot_count--;
54743 }
54744- if (!slot_count)
54745- break;
54746 }
54747 up_read(&info->groups_sem);
54748 }
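
The btrfs_ioctl_space_info() reordering reserves a slot (slot_count--) before the copy rather than after it, so the nested loops can never emit more entries than were allocated. The shape of the fix, as a sketch:

    #include <stdio.h>

    #define NSLOTS 3

    int main(void)
    {
        int dest[NSLOTS], slot_count = NSLOTS, produced = 0;

        for (int type = 0; type < 10; type++) {
            if (!slot_count)
                break;          /* don't copy in more than we allocated */
            slot_count--;       /* reserve the slot up front */
            dest[produced++] = type;
        }
        printf("%d entries\n", produced);   /* 3 */
        return 0;
    }
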
54749diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
54750index e913328..a34fb36 100644
54751--- a/fs/btrfs/super.c
54752+++ b/fs/btrfs/super.c
54753@@ -266,7 +266,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
54754 function, line, errstr);
54755 return;
54756 }
54757- ACCESS_ONCE(trans->transaction->aborted) = errno;
54758+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
54759 /* Wake up anybody who may be waiting on this transaction */
54760 wake_up(&root->fs_info->transaction_wait);
54761 wake_up(&root->fs_info->transaction_blocked_wait);
54762diff --git a/fs/buffer.c b/fs/buffer.c
54763index 6024877..7bd000a 100644
54764--- a/fs/buffer.c
54765+++ b/fs/buffer.c
54766@@ -3426,7 +3426,7 @@ void __init buffer_init(void)
54767 bh_cachep = kmem_cache_create("buffer_head",
54768 sizeof(struct buffer_head), 0,
54769 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
54770- SLAB_MEM_SPREAD),
54771+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
54772 NULL);
54773
54774 /*
54775diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
54776index 622f469..e8d2d55 100644
54777--- a/fs/cachefiles/bind.c
54778+++ b/fs/cachefiles/bind.c
54779@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
54780 args);
54781
54782 /* start by checking things over */
54783- ASSERT(cache->fstop_percent >= 0 &&
54784- cache->fstop_percent < cache->fcull_percent &&
54785+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
54786 cache->fcull_percent < cache->frun_percent &&
54787 cache->frun_percent < 100);
54788
54789- ASSERT(cache->bstop_percent >= 0 &&
54790- cache->bstop_percent < cache->bcull_percent &&
54791+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
54792 cache->bcull_percent < cache->brun_percent &&
54793 cache->brun_percent < 100);
54794
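
The dropped halves of the cachefiles ASSERTs were tautologies: fstop_percent and friends are unsigned, so "x >= 0" always holds (gcc's -Wtype-limits warns about exactly this). Minimal reproduction:

    #include <stdio.h>

    int main(void)
    {
        unsigned fstop_percent = 0;

        if (fstop_percent >= 0)     /* always true for an unsigned type */
            puts("always taken");
        return 0;
    }
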
54795diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
54796index 0a1467b..6a53245 100644
54797--- a/fs/cachefiles/daemon.c
54798+++ b/fs/cachefiles/daemon.c
54799@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
54800 if (n > buflen)
54801 return -EMSGSIZE;
54802
54803- if (copy_to_user(_buffer, buffer, n) != 0)
54804+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
54805 return -EFAULT;
54806
54807 return n;
54808@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
54809 if (test_bit(CACHEFILES_DEAD, &cache->flags))
54810 return -EIO;
54811
54812- if (datalen < 0 || datalen > PAGE_SIZE - 1)
54813+ if (datalen > PAGE_SIZE - 1)
54814 return -EOPNOTSUPP;
54815
54816 /* drag the command string into the kernel so we can parse it */
54817@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
54818 if (args[0] != '%' || args[1] != '\0')
54819 return -EINVAL;
54820
54821- if (fstop < 0 || fstop >= cache->fcull_percent)
54822+ if (fstop >= cache->fcull_percent)
54823 return cachefiles_daemon_range_error(cache, args);
54824
54825 cache->fstop_percent = fstop;
54826@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
54827 if (args[0] != '%' || args[1] != '\0')
54828 return -EINVAL;
54829
54830- if (bstop < 0 || bstop >= cache->bcull_percent)
54831+ if (bstop >= cache->bcull_percent)
54832 return cachefiles_daemon_range_error(cache, args);
54833
54834 cache->bstop_percent = bstop;
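
Alongside removing the same always-true "datalen < 0" test, the daemon read path gains an "n > sizeof(buffer)" guard so a miscomputed length can never copy stack memory beyond the scratch buffer out to userspace. A simplified sketch of that belt-and-braces bound:

    #include <stdio.h>
    #include <string.h>

    static long daemon_read(char *dst, size_t buflen)
    {
        char buffer[64];
        size_t n = (size_t)snprintf(buffer, sizeof(buffer), "cull=%d", 7);

        if (n > buflen)
            return -1;              /* -EMSGSIZE */
        if (n > sizeof(buffer))
            return -2;              /* -EFAULT: never read past the buffer */
        memcpy(dst, buffer, n);
        return (long)n;
    }

    int main(void)
    {
        char out[64];
        long n = daemon_read(out, sizeof(out));

        printf("%ld %.*s\n", n, (int)n, out);
        return 0;
    }
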
54835diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
54836index 5349473..d6c0b93 100644
54837--- a/fs/cachefiles/internal.h
54838+++ b/fs/cachefiles/internal.h
54839@@ -59,7 +59,7 @@ struct cachefiles_cache {
54840 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
54841 struct rb_root active_nodes; /* active nodes (can't be culled) */
54842 rwlock_t active_lock; /* lock for active_nodes */
54843- atomic_t gravecounter; /* graveyard uniquifier */
54844+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
54845 unsigned frun_percent; /* when to stop culling (% files) */
54846 unsigned fcull_percent; /* when to start culling (% files) */
54847 unsigned fstop_percent; /* when to stop allocating (% files) */
54848@@ -171,19 +171,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
54849 * proc.c
54850 */
54851 #ifdef CONFIG_CACHEFILES_HISTOGRAM
54852-extern atomic_t cachefiles_lookup_histogram[HZ];
54853-extern atomic_t cachefiles_mkdir_histogram[HZ];
54854-extern atomic_t cachefiles_create_histogram[HZ];
54855+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
54856+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
54857+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
54858
54859 extern int __init cachefiles_proc_init(void);
54860 extern void cachefiles_proc_cleanup(void);
54861 static inline
54862-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
54863+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
54864 {
54865 unsigned long jif = jiffies - start_jif;
54866 if (jif >= HZ)
54867 jif = HZ - 1;
54868- atomic_inc(&histogram[jif]);
54869+ atomic_inc_unchecked(&histogram[jif]);
54870 }
54871
54872 #else
54873diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
54874index f4a08d7..5aa4599 100644
54875--- a/fs/cachefiles/namei.c
54876+++ b/fs/cachefiles/namei.c
54877@@ -317,7 +317,7 @@ try_again:
54878 /* first step is to make up a grave dentry in the graveyard */
54879 sprintf(nbuffer, "%08x%08x",
54880 (uint32_t) get_seconds(),
54881- (uint32_t) atomic_inc_return(&cache->gravecounter));
54882+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
54883
54884 /* do the multiway lock magic */
54885 trap = lock_rename(cache->graveyard, dir);
54886diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
54887index eccd339..4c1d995 100644
54888--- a/fs/cachefiles/proc.c
54889+++ b/fs/cachefiles/proc.c
54890@@ -14,9 +14,9 @@
54891 #include <linux/seq_file.h>
54892 #include "internal.h"
54893
54894-atomic_t cachefiles_lookup_histogram[HZ];
54895-atomic_t cachefiles_mkdir_histogram[HZ];
54896-atomic_t cachefiles_create_histogram[HZ];
54897+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
54898+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
54899+atomic_unchecked_t cachefiles_create_histogram[HZ];
54900
54901 /*
54902 * display the latency histogram
54903@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
54904 return 0;
54905 default:
54906 index = (unsigned long) v - 3;
54907- x = atomic_read(&cachefiles_lookup_histogram[index]);
54908- y = atomic_read(&cachefiles_mkdir_histogram[index]);
54909- z = atomic_read(&cachefiles_create_histogram[index]);
54910+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
54911+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
54912+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
54913 if (x == 0 && y == 0 && z == 0)
54914 return 0;
54915
54916diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
54917index ebaff36..7e3ea26 100644
54918--- a/fs/cachefiles/rdwr.c
54919+++ b/fs/cachefiles/rdwr.c
54920@@ -950,7 +950,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
54921 old_fs = get_fs();
54922 set_fs(KERNEL_DS);
54923 ret = file->f_op->write(
54924- file, (const void __user *) data, len, &pos);
54925+ file, (const void __force_user *) data, len, &pos);
54926 set_fs(old_fs);
54927 kunmap(page);
54928 file_end_write(file);
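
The rdwr.c hunk is a sparse-annotation fix: with set_fs(KERNEL_DS) in force, a kernel buffer is deliberately passed where a __user pointer is expected, and this tree's __force_user marks that cast as intentional. A toy model of the annotations (assumption: simplified versions of the kernel's compiler.h definitions, checkable with sparse but inert under plain gcc):

    #ifdef __CHECKER__
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force))
    #else
    # define __user
    # define __force
    #endif
    #define __force_user __force __user

    static long fake_write(const char __user *buf, unsigned long len)
    {
        (void)buf;
        return (long)len;
    }

    int main(void)
    {
        static const char data[4] = "abc";

        /* after set_fs(KERNEL_DS) the kernel performs exactly this cast */
        return (int)fake_write((const char __force_user *)data, sizeof(data));
    }
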
54929diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
54930index 868b61d..58835a5 100644
54931--- a/fs/ceph/dir.c
54932+++ b/fs/ceph/dir.c
54933@@ -240,7 +240,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
54934 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
54935 struct ceph_mds_client *mdsc = fsc->mdsc;
54936 unsigned frag = fpos_frag(ctx->pos);
54937- int off = fpos_off(ctx->pos);
54938+ unsigned int off = fpos_off(ctx->pos);
54939 int err;
54940 u32 ftype;
54941 struct ceph_mds_reply_info_parsed *rinfo;
54942diff --git a/fs/ceph/super.c b/fs/ceph/super.c
54943index 6a0951e..03fac6d 100644
54944--- a/fs/ceph/super.c
54945+++ b/fs/ceph/super.c
54946@@ -870,7 +870,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
54947 /*
54948 * construct our own bdi so we can control readahead, etc.
54949 */
54950-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
54951+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
54952
54953 static int ceph_register_bdi(struct super_block *sb,
54954 struct ceph_fs_client *fsc)
54955@@ -887,7 +887,7 @@ static int ceph_register_bdi(struct super_block *sb,
54956 default_backing_dev_info.ra_pages;
54957
54958 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
54959- atomic_long_inc_return(&bdi_seq));
54960+ atomic_long_inc_return_unchecked(&bdi_seq));
54961 if (!err)
54962 sb->s_bdi = &fsc->backing_dev_info;
54963 return err;
54964diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
54965index f3ac415..3d2420c 100644
54966--- a/fs/cifs/cifs_debug.c
54967+++ b/fs/cifs/cifs_debug.c
54968@@ -286,8 +286,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
54969
54970 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
54971 #ifdef CONFIG_CIFS_STATS2
54972- atomic_set(&totBufAllocCount, 0);
54973- atomic_set(&totSmBufAllocCount, 0);
54974+ atomic_set_unchecked(&totBufAllocCount, 0);
54975+ atomic_set_unchecked(&totSmBufAllocCount, 0);
54976 #endif /* CONFIG_CIFS_STATS2 */
54977 spin_lock(&cifs_tcp_ses_lock);
54978 list_for_each(tmp1, &cifs_tcp_ses_list) {
54979@@ -300,7 +300,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
54980 tcon = list_entry(tmp3,
54981 struct cifs_tcon,
54982 tcon_list);
54983- atomic_set(&tcon->num_smbs_sent, 0);
54984+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
54985 if (server->ops->clear_stats)
54986 server->ops->clear_stats(tcon);
54987 }
54988@@ -332,8 +332,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
54989 smBufAllocCount.counter, cifs_min_small);
54990 #ifdef CONFIG_CIFS_STATS2
54991 seq_printf(m, "Total Large %d Small %d Allocations\n",
54992- atomic_read(&totBufAllocCount),
54993- atomic_read(&totSmBufAllocCount));
54994+ atomic_read_unchecked(&totBufAllocCount),
54995+ atomic_read_unchecked(&totSmBufAllocCount));
54996 #endif /* CONFIG_CIFS_STATS2 */
54997
54998 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
54999@@ -362,7 +362,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
55000 if (tcon->need_reconnect)
55001 seq_puts(m, "\tDISCONNECTED ");
55002 seq_printf(m, "\nSMBs: %d",
55003- atomic_read(&tcon->num_smbs_sent));
55004+ atomic_read_unchecked(&tcon->num_smbs_sent));
55005 if (server->ops->print_stats)
55006 server->ops->print_stats(m, tcon);
55007 }
55008diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
55009index 77fc5e1..e3d13e6 100644
55010--- a/fs/cifs/cifsfs.c
55011+++ b/fs/cifs/cifsfs.c
55012@@ -1056,7 +1056,7 @@ cifs_init_request_bufs(void)
55013 */
55014 cifs_req_cachep = kmem_cache_create("cifs_request",
55015 CIFSMaxBufSize + max_hdr_size, 0,
55016- SLAB_HWCACHE_ALIGN, NULL);
55017+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
55018 if (cifs_req_cachep == NULL)
55019 return -ENOMEM;
55020
55021@@ -1083,7 +1083,7 @@ cifs_init_request_bufs(void)
55022 efficient to alloc 1 per page off the slab compared to 17K (5page)
55023 alloc of large cifs buffers even when page debugging is on */
55024 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
55025- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
55026+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
55027 NULL);
55028 if (cifs_sm_req_cachep == NULL) {
55029 mempool_destroy(cifs_req_poolp);
55030@@ -1168,8 +1168,8 @@ init_cifs(void)
55031 atomic_set(&bufAllocCount, 0);
55032 atomic_set(&smBufAllocCount, 0);
55033 #ifdef CONFIG_CIFS_STATS2
55034- atomic_set(&totBufAllocCount, 0);
55035- atomic_set(&totSmBufAllocCount, 0);
55036+ atomic_set_unchecked(&totBufAllocCount, 0);
55037+ atomic_set_unchecked(&totSmBufAllocCount, 0);
55038 #endif /* CONFIG_CIFS_STATS2 */
55039
55040 atomic_set(&midCount, 0);
55041diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
55042index c8e03f8..75362f6 100644
55043--- a/fs/cifs/cifsglob.h
55044+++ b/fs/cifs/cifsglob.h
55045@@ -758,35 +758,35 @@ struct cifs_tcon {
55046 __u16 Flags; /* optional support bits */
55047 enum statusEnum tidStatus;
55048 #ifdef CONFIG_CIFS_STATS
55049- atomic_t num_smbs_sent;
55050+ atomic_unchecked_t num_smbs_sent;
55051 union {
55052 struct {
55053- atomic_t num_writes;
55054- atomic_t num_reads;
55055- atomic_t num_flushes;
55056- atomic_t num_oplock_brks;
55057- atomic_t num_opens;
55058- atomic_t num_closes;
55059- atomic_t num_deletes;
55060- atomic_t num_mkdirs;
55061- atomic_t num_posixopens;
55062- atomic_t num_posixmkdirs;
55063- atomic_t num_rmdirs;
55064- atomic_t num_renames;
55065- atomic_t num_t2renames;
55066- atomic_t num_ffirst;
55067- atomic_t num_fnext;
55068- atomic_t num_fclose;
55069- atomic_t num_hardlinks;
55070- atomic_t num_symlinks;
55071- atomic_t num_locks;
55072- atomic_t num_acl_get;
55073- atomic_t num_acl_set;
55074+ atomic_unchecked_t num_writes;
55075+ atomic_unchecked_t num_reads;
55076+ atomic_unchecked_t num_flushes;
55077+ atomic_unchecked_t num_oplock_brks;
55078+ atomic_unchecked_t num_opens;
55079+ atomic_unchecked_t num_closes;
55080+ atomic_unchecked_t num_deletes;
55081+ atomic_unchecked_t num_mkdirs;
55082+ atomic_unchecked_t num_posixopens;
55083+ atomic_unchecked_t num_posixmkdirs;
55084+ atomic_unchecked_t num_rmdirs;
55085+ atomic_unchecked_t num_renames;
55086+ atomic_unchecked_t num_t2renames;
55087+ atomic_unchecked_t num_ffirst;
55088+ atomic_unchecked_t num_fnext;
55089+ atomic_unchecked_t num_fclose;
55090+ atomic_unchecked_t num_hardlinks;
55091+ atomic_unchecked_t num_symlinks;
55092+ atomic_unchecked_t num_locks;
55093+ atomic_unchecked_t num_acl_get;
55094+ atomic_unchecked_t num_acl_set;
55095 } cifs_stats;
55096 #ifdef CONFIG_CIFS_SMB2
55097 struct {
55098- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
55099- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
55100+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
55101+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
55102 } smb2_stats;
55103 #endif /* CONFIG_CIFS_SMB2 */
55104 } stats;
55105@@ -1111,7 +1111,7 @@ convert_delimiter(char *path, char delim)
55106 }
55107
55108 #ifdef CONFIG_CIFS_STATS
55109-#define cifs_stats_inc atomic_inc
55110+#define cifs_stats_inc atomic_inc_unchecked
55111
55112 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
55113 unsigned int bytes)
55114@@ -1477,8 +1477,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
55115 /* Various Debug counters */
55116 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
55117 #ifdef CONFIG_CIFS_STATS2
55118-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
55119-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
55120+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
55121+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
55122 #endif
55123 GLOBAL_EXTERN atomic_t smBufAllocCount;
55124 GLOBAL_EXTERN atomic_t midCount;
55125diff --git a/fs/cifs/file.c b/fs/cifs/file.c
55126index 7ddddf2..2e12dbc 100644
55127--- a/fs/cifs/file.c
55128+++ b/fs/cifs/file.c
55129@@ -1900,10 +1900,14 @@ static int cifs_writepages(struct address_space *mapping,
55130 index = mapping->writeback_index; /* Start from prev offset */
55131 end = -1;
55132 } else {
55133- index = wbc->range_start >> PAGE_CACHE_SHIFT;
55134- end = wbc->range_end >> PAGE_CACHE_SHIFT;
55135- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
55136+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
55137 range_whole = true;
55138+ index = 0;
55139+ end = ULONG_MAX;
55140+ } else {
55141+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
55142+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
55143+ }
55144 scanned = true;
55145 }
55146 retry:
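
The cifs_writepages() change computes the page range only for a bounded request and spells a whole-file request as the canonical [0, ULONG_MAX], rather than shifting LLONG_MAX. A standalone rendering of the corrected setup (PAGE_CACHE_SHIFT assumed to be 12 here):

    #include <limits.h>
    #include <stdio.h>

    #define PAGE_CACHE_SHIFT 12

    int main(void)
    {
        long long range_start = 0, range_end = LLONG_MAX;
        unsigned long index, end;
        int range_whole = 0;

        if (range_start == 0 && range_end == LLONG_MAX) {
            range_whole = 1;
            index = 0;
            end = ULONG_MAX;    /* canonical "to the end of the file" */
        } else {
            index = (unsigned long)(range_start >> PAGE_CACHE_SHIFT);
            end = (unsigned long)(range_end >> PAGE_CACHE_SHIFT);
        }
        printf("whole=%d index=%lu end=%#lx\n", range_whole, index, end);
        return 0;
    }
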
55147diff --git a/fs/cifs/link.c b/fs/cifs/link.c
55148index 7e36ceb..109252f 100644
55149--- a/fs/cifs/link.c
55150+++ b/fs/cifs/link.c
55151@@ -624,7 +624,7 @@ symlink_exit:
55152
55153 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
55154 {
55155- char *p = nd_get_link(nd);
55156+ const char *p = nd_get_link(nd);
55157 if (!IS_ERR(p))
55158 kfree(p);
55159 }
55160diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
55161index 138a011..cf9e13a 100644
55162--- a/fs/cifs/misc.c
55163+++ b/fs/cifs/misc.c
55164@@ -170,7 +170,7 @@ cifs_buf_get(void)
55165 memset(ret_buf, 0, buf_size + 3);
55166 atomic_inc(&bufAllocCount);
55167 #ifdef CONFIG_CIFS_STATS2
55168- atomic_inc(&totBufAllocCount);
55169+ atomic_inc_unchecked(&totBufAllocCount);
55170 #endif /* CONFIG_CIFS_STATS2 */
55171 }
55172
55173@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
55174 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
55175 atomic_inc(&smBufAllocCount);
55176 #ifdef CONFIG_CIFS_STATS2
55177- atomic_inc(&totSmBufAllocCount);
55178+ atomic_inc_unchecked(&totSmBufAllocCount);
55179 #endif /* CONFIG_CIFS_STATS2 */
55180
55181 }
55182diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
55183index e50554b..c011413 100644
55184--- a/fs/cifs/smb1ops.c
55185+++ b/fs/cifs/smb1ops.c
55186@@ -609,27 +609,27 @@ static void
55187 cifs_clear_stats(struct cifs_tcon *tcon)
55188 {
55189 #ifdef CONFIG_CIFS_STATS
55190- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
55191- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
55192- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
55193- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
55194- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
55195- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
55196- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
55197- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
55198- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
55199- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
55200- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
55201- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
55202- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
55203- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
55204- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
55205- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
55206- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
55207- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
55208- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
55209- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
55210- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
55211+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
55212+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
55213+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
55214+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
55215+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
55216+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
55217+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
55218+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
55219+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
55220+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
55221+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
55222+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
55223+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
55224+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
55225+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
55226+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
55227+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
55228+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
55229+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
55230+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
55231+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
55232 #endif
55233 }
55234
55235@@ -638,36 +638,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
55236 {
55237 #ifdef CONFIG_CIFS_STATS
55238 seq_printf(m, " Oplocks breaks: %d",
55239- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
55240+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
55241 seq_printf(m, "\nReads: %d Bytes: %llu",
55242- atomic_read(&tcon->stats.cifs_stats.num_reads),
55243+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
55244 (long long)(tcon->bytes_read));
55245 seq_printf(m, "\nWrites: %d Bytes: %llu",
55246- atomic_read(&tcon->stats.cifs_stats.num_writes),
55247+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
55248 (long long)(tcon->bytes_written));
55249 seq_printf(m, "\nFlushes: %d",
55250- atomic_read(&tcon->stats.cifs_stats.num_flushes));
55251+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
55252 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
55253- atomic_read(&tcon->stats.cifs_stats.num_locks),
55254- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
55255- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
55256+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
55257+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
55258+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
55259 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
55260- atomic_read(&tcon->stats.cifs_stats.num_opens),
55261- atomic_read(&tcon->stats.cifs_stats.num_closes),
55262- atomic_read(&tcon->stats.cifs_stats.num_deletes));
55263+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
55264+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
55265+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
55266 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
55267- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
55268- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
55269+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
55270+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
55271 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
55272- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
55273- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
55274+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
55275+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
55276 seq_printf(m, "\nRenames: %d T2 Renames %d",
55277- atomic_read(&tcon->stats.cifs_stats.num_renames),
55278- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
55279+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
55280+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
55281 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
55282- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
55283- atomic_read(&tcon->stats.cifs_stats.num_fnext),
55284- atomic_read(&tcon->stats.cifs_stats.num_fclose));
55285+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
55286+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
55287+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
55288 #endif
55289 }
55290
55291diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
55292index 861b332..5506392 100644
55293--- a/fs/cifs/smb2ops.c
55294+++ b/fs/cifs/smb2ops.c
55295@@ -282,8 +282,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
55296 #ifdef CONFIG_CIFS_STATS
55297 int i;
55298 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
55299- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
55300- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
55301+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
55302+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
55303 }
55304 #endif
55305 }
55306@@ -311,65 +311,65 @@ static void
55307 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
55308 {
55309 #ifdef CONFIG_CIFS_STATS
55310- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
55311- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
55312+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
55313+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
55314 seq_printf(m, "\nNegotiates: %d sent %d failed",
55315- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
55316- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
55317+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
55318+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
55319 seq_printf(m, "\nSessionSetups: %d sent %d failed",
55320- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
55321- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
55322+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
55323+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
55324 seq_printf(m, "\nLogoffs: %d sent %d failed",
55325- atomic_read(&sent[SMB2_LOGOFF_HE]),
55326- atomic_read(&failed[SMB2_LOGOFF_HE]));
55327+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
55328+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
55329 seq_printf(m, "\nTreeConnects: %d sent %d failed",
55330- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
55331- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
55332+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
55333+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
55334 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
55335- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
55336- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
55337+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
55338+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
55339 seq_printf(m, "\nCreates: %d sent %d failed",
55340- atomic_read(&sent[SMB2_CREATE_HE]),
55341- atomic_read(&failed[SMB2_CREATE_HE]));
55342+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
55343+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
55344 seq_printf(m, "\nCloses: %d sent %d failed",
55345- atomic_read(&sent[SMB2_CLOSE_HE]),
55346- atomic_read(&failed[SMB2_CLOSE_HE]));
55347+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
55348+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
55349 seq_printf(m, "\nFlushes: %d sent %d failed",
55350- atomic_read(&sent[SMB2_FLUSH_HE]),
55351- atomic_read(&failed[SMB2_FLUSH_HE]));
55352+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
55353+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
55354 seq_printf(m, "\nReads: %d sent %d failed",
55355- atomic_read(&sent[SMB2_READ_HE]),
55356- atomic_read(&failed[SMB2_READ_HE]));
55357+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
55358+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
55359 seq_printf(m, "\nWrites: %d sent %d failed",
55360- atomic_read(&sent[SMB2_WRITE_HE]),
55361- atomic_read(&failed[SMB2_WRITE_HE]));
55362+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
55363+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
55364 seq_printf(m, "\nLocks: %d sent %d failed",
55365- atomic_read(&sent[SMB2_LOCK_HE]),
55366- atomic_read(&failed[SMB2_LOCK_HE]));
55367+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
55368+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
55369 seq_printf(m, "\nIOCTLs: %d sent %d failed",
55370- atomic_read(&sent[SMB2_IOCTL_HE]),
55371- atomic_read(&failed[SMB2_IOCTL_HE]));
55372+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
55373+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
55374 seq_printf(m, "\nCancels: %d sent %d failed",
55375- atomic_read(&sent[SMB2_CANCEL_HE]),
55376- atomic_read(&failed[SMB2_CANCEL_HE]));
55377+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
55378+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
55379 seq_printf(m, "\nEchos: %d sent %d failed",
55380- atomic_read(&sent[SMB2_ECHO_HE]),
55381- atomic_read(&failed[SMB2_ECHO_HE]));
55382+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
55383+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
55384 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
55385- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
55386- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
55387+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
55388+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
55389 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
55390- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
55391- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
55392+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
55393+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
55394 seq_printf(m, "\nQueryInfos: %d sent %d failed",
55395- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
55396- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
55397+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
55398+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
55399 seq_printf(m, "\nSetInfos: %d sent %d failed",
55400- atomic_read(&sent[SMB2_SET_INFO_HE]),
55401- atomic_read(&failed[SMB2_SET_INFO_HE]));
55402+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
55403+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
55404 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
55405- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
55406- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
55407+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
55408+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
55409 #endif
55410 }
55411
55412diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
55413index edccb52..16bc6db 100644
55414--- a/fs/cifs/smb2pdu.c
55415+++ b/fs/cifs/smb2pdu.c
55416@@ -1957,8 +1957,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
55417 default:
55418 cifs_dbg(VFS, "info level %u isn't supported\n",
55419 srch_inf->info_level);
55420- rc = -EINVAL;
55421- goto qdir_exit;
55422+ return -EINVAL;
55423 }
55424
55425 req->FileIndex = cpu_to_le32(index);
55426diff --git a/fs/coda/cache.c b/fs/coda/cache.c
55427index 1da168c..8bc7ff6 100644
55428--- a/fs/coda/cache.c
55429+++ b/fs/coda/cache.c
55430@@ -24,7 +24,7 @@
55431 #include "coda_linux.h"
55432 #include "coda_cache.h"
55433
55434-static atomic_t permission_epoch = ATOMIC_INIT(0);
55435+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
55436
55437 /* replace or extend an acl cache hit */
55438 void coda_cache_enter(struct inode *inode, int mask)
55439@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
55440 struct coda_inode_info *cii = ITOC(inode);
55441
55442 spin_lock(&cii->c_lock);
55443- cii->c_cached_epoch = atomic_read(&permission_epoch);
55444+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
55445 if (!uid_eq(cii->c_uid, current_fsuid())) {
55446 cii->c_uid = current_fsuid();
55447 cii->c_cached_perm = mask;
55448@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
55449 {
55450 struct coda_inode_info *cii = ITOC(inode);
55451 spin_lock(&cii->c_lock);
55452- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
55453+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
55454 spin_unlock(&cii->c_lock);
55455 }
55456
55457 /* remove all acl caches */
55458 void coda_cache_clear_all(struct super_block *sb)
55459 {
55460- atomic_inc(&permission_epoch);
55461+ atomic_inc_unchecked(&permission_epoch);
55462 }
55463
55464
55465@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
55466 spin_lock(&cii->c_lock);
55467 hit = (mask & cii->c_cached_perm) == mask &&
55468 uid_eq(cii->c_uid, current_fsuid()) &&
55469- cii->c_cached_epoch == atomic_read(&permission_epoch);
55470+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
55471 spin_unlock(&cii->c_lock);
55472
55473 return hit;
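
coda's permission cache is invalidated wholesale by bumping a global epoch; an entry is trusted only while its recorded epoch matches. The patch merely makes the epoch counter wrap-tolerant (atomic_unchecked_t), but the underlying pattern is worth a sketch (userspace model, not the kernel code):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_uint permission_epoch;

    struct cache_entry { unsigned epoch; int mask; };

    static void cache_enter(struct cache_entry *e, int mask)
    {
        e->epoch = atomic_load(&permission_epoch);
        e->mask = mask;
    }

    static bool cache_check(const struct cache_entry *e, int mask)
    {
        return (mask & e->mask) == mask &&
               e->epoch == atomic_load(&permission_epoch);
    }

    static void cache_clear_all(void)
    {
        atomic_fetch_add(&permission_epoch, 1); /* invalidates every entry */
    }

    int main(void)
    {
        struct cache_entry e;

        cache_enter(&e, 0x4);
        printf("%d\n", cache_check(&e, 0x4));   /* 1 */
        cache_clear_all();
        printf("%d\n", cache_check(&e, 0x4));   /* 0 */
        return 0;
    }
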
55474diff --git a/fs/compat.c b/fs/compat.c
55475index 6af20de..fec3fbb 100644
55476--- a/fs/compat.c
55477+++ b/fs/compat.c
55478@@ -54,7 +54,7 @@
55479 #include <asm/ioctls.h>
55480 #include "internal.h"
55481
55482-int compat_log = 1;
55483+int compat_log = 0;
55484
55485 int compat_printk(const char *fmt, ...)
55486 {
55487@@ -488,7 +488,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
55488
55489 set_fs(KERNEL_DS);
55490 /* The __user pointer cast is valid because of the set_fs() */
55491- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
55492+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
55493 set_fs(oldfs);
55494 /* truncating is ok because it's a user address */
55495 if (!ret)
55496@@ -546,7 +546,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
55497 goto out;
55498
55499 ret = -EINVAL;
55500- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
55501+ if (nr_segs > UIO_MAXIOV)
55502 goto out;
55503 if (nr_segs > fast_segs) {
55504 ret = -ENOMEM;
55505@@ -834,6 +834,7 @@ struct compat_old_linux_dirent {
55506 struct compat_readdir_callback {
55507 struct dir_context ctx;
55508 struct compat_old_linux_dirent __user *dirent;
55509+ struct file * file;
55510 int result;
55511 };
55512
55513@@ -851,6 +852,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
55514 buf->result = -EOVERFLOW;
55515 return -EOVERFLOW;
55516 }
55517+
55518+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
55519+ return 0;
55520+
55521 buf->result++;
55522 dirent = buf->dirent;
55523 if (!access_ok(VERIFY_WRITE, dirent,
55524@@ -882,6 +887,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
55525 if (!f.file)
55526 return -EBADF;
55527
55528+ buf.file = f.file;
55529 error = iterate_dir(f.file, &buf.ctx);
55530 if (buf.result)
55531 error = buf.result;
55532@@ -901,6 +907,7 @@ struct compat_getdents_callback {
55533 struct dir_context ctx;
55534 struct compat_linux_dirent __user *current_dir;
55535 struct compat_linux_dirent __user *previous;
55536+ struct file * file;
55537 int count;
55538 int error;
55539 };
55540@@ -922,6 +929,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
55541 buf->error = -EOVERFLOW;
55542 return -EOVERFLOW;
55543 }
55544+
55545+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
55546+ return 0;
55547+
55548 dirent = buf->previous;
55549 if (dirent) {
55550 if (__put_user(offset, &dirent->d_off))
55551@@ -967,6 +978,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
55552 if (!f.file)
55553 return -EBADF;
55554
55555+ buf.file = f.file;
55556 error = iterate_dir(f.file, &buf.ctx);
55557 if (error >= 0)
55558 error = buf.error;
55559@@ -987,6 +999,7 @@ struct compat_getdents_callback64 {
55560 struct dir_context ctx;
55561 struct linux_dirent64 __user *current_dir;
55562 struct linux_dirent64 __user *previous;
55563+ struct file * file;
55564 int count;
55565 int error;
55566 };
55567@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
55568 buf->error = -EINVAL; /* only used if we fail.. */
55569 if (reclen > buf->count)
55570 return -EINVAL;
55571+
55572+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
55573+ return 0;
55574+
55575 dirent = buf->previous;
55576
55577 if (dirent) {
55578@@ -1052,6 +1069,7 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
55579 if (!f.file)
55580 return -EBADF;
55581
55582+ buf.file = f.file;
55583 error = iterate_dir(f.file, &buf.ctx);
55584 if (error >= 0)
55585 error = buf.error;
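
The fs/compat.c changes thread the open directory's struct file into each getdents callback context so gr_acl_handle_filldir() can veto individual entries against policy before they are copied out. A sketch of the same filtering shape, with a hypothetical allow_entry() in place of the grsecurity hook:

#include <stdbool.h>
#include <stdio.h>

struct dir_handle { const char *path; };    /* stand-in for struct file */

struct filldir_ctx {
	struct dir_handle *dir;   /* threaded through, as buf->file is above */
	int emitted;
};

/* hypothetical stand-in for gr_acl_handle_filldir() */
static bool allow_entry(struct dir_handle *dir, const char *name)
{
	(void)dir;
	return name[0] != '.';    /* demo policy: hide dotfiles */
}

static int filldir(struct filldir_ctx *ctx, const char *name)
{
	if (!allow_entry(ctx->dir, name))
		return 0;             /* skip silently, exactly as the hook does */
	printf("%s\n", name);
	ctx->emitted++;
	return 0;
}

int main(void)
{
	struct dir_handle d = { "/tmp" };
	struct filldir_ctx ctx = { &d, 0 };
	const char *names[] = { "a.txt", ".hidden", "b.txt" };
	for (int i = 0; i < 3; i++)
		filldir(&ctx, names[i]);
	return 0;
}
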
55586diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
55587index a81147e..20bf2b5 100644
55588--- a/fs/compat_binfmt_elf.c
55589+++ b/fs/compat_binfmt_elf.c
55590@@ -30,11 +30,13 @@
55591 #undef elf_phdr
55592 #undef elf_shdr
55593 #undef elf_note
55594+#undef elf_dyn
55595 #undef elf_addr_t
55596 #define elfhdr elf32_hdr
55597 #define elf_phdr elf32_phdr
55598 #define elf_shdr elf32_shdr
55599 #define elf_note elf32_note
55600+#define elf_dyn Elf32_Dyn
55601 #define elf_addr_t Elf32_Addr
55602
55603 /*
55604diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
55605index 5d19acf..9ab093b 100644
55606--- a/fs/compat_ioctl.c
55607+++ b/fs/compat_ioctl.c
55608@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
55609 return -EFAULT;
55610 if (__get_user(udata, &ss32->iomem_base))
55611 return -EFAULT;
55612- ss.iomem_base = compat_ptr(udata);
55613+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
55614 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
55615 __get_user(ss.port_high, &ss32->port_high))
55616 return -EFAULT;
55617@@ -702,8 +702,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
55618 for (i = 0; i < nmsgs; i++) {
55619 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
55620 return -EFAULT;
55621- if (get_user(datap, &umsgs[i].buf) ||
55622- put_user(compat_ptr(datap), &tmsgs[i].buf))
55623+ if (get_user(datap, (u8 __user * __user *)&umsgs[i].buf) ||
55624+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
55625 return -EFAULT;
55626 }
55627 return sys_ioctl(fd, cmd, (unsigned long)tdata);
55628@@ -796,7 +796,7 @@ static int compat_ioctl_preallocate(struct file *file,
55629 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
55630 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
55631 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
55632- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
55633+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
55634 return -EFAULT;
55635
55636 return ioctl_preallocate(file, p);
55637@@ -1616,8 +1616,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
55638 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
55639 {
55640 unsigned int a, b;
55641- a = *(unsigned int *)p;
55642- b = *(unsigned int *)q;
55643+ a = *(const unsigned int *)p;
55644+ b = *(const unsigned int *)q;
55645 if (a > b)
55646 return 1;
55647 if (a < b)
55648diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
55649index 511d415..319d0e5 100644
55650--- a/fs/configfs/dir.c
55651+++ b/fs/configfs/dir.c
55652@@ -1558,7 +1558,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
55653 }
55654 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
55655 struct configfs_dirent *next;
55656- const char *name;
55657+ const unsigned char * name;
55658+ char d_name[sizeof(next->s_dentry->d_iname)];
55659 int len;
55660 struct inode *inode = NULL;
55661
55662@@ -1567,7 +1568,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
55663 continue;
55664
55665 name = configfs_get_name(next);
55666- len = strlen(name);
55667+ if (next->s_dentry && name == next->s_dentry->d_iname) {
55668+ len = next->s_dentry->d_name.len;
55669+ memcpy(d_name, name, len);
55670+ name = d_name;
55671+ } else
55672+ len = strlen(name);
55673
55674 /*
55675 * We'll have a dentry and an inode for
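
The configfs change snapshots short inline dentry names into a stack buffer: d_iname can be rewritten by a concurrent rename, so strlen() on the live buffer may race, while copying the published d_name.len bytes yields a self-consistent view. A userspace sketch of the snapshot idiom (struct dentry_sketch is a stand-in, and the writer-side ordering the dcache provides is assumed):

#include <stdio.h>
#include <string.h>

#define DNAME_INLINE_LEN 32

struct dentry_sketch {
	unsigned int len;               /* published name length */
	char iname[DNAME_INLINE_LEN];   /* may be rewritten by a rename */
};

/* Copy the published length's worth of bytes instead of strlen()ing a
 * buffer that can change underneath us. */
static unsigned int name_snapshot(const struct dentry_sketch *d,
				  char out[DNAME_INLINE_LEN])
{
	unsigned int len = d->len;

	if (len >= DNAME_INLINE_LEN)    /* defensive clamp for the sketch */
		len = DNAME_INLINE_LEN - 1;
	memcpy(out, d->iname, len);
	out[len] = '\0';
	return len;
}

int main(void)
{
	struct dentry_sketch d = { 5, "hello" };
	char buf[DNAME_INLINE_LEN];
	printf("%u bytes: %s\n", name_snapshot(&d, buf), buf);
	return 0;
}
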
55676diff --git a/fs/coredump.c b/fs/coredump.c
55677index 9bdeca1..2a9b08d 100644
55678--- a/fs/coredump.c
55679+++ b/fs/coredump.c
55680@@ -438,8 +438,8 @@ static void wait_for_dump_helpers(struct file *file)
55681 struct pipe_inode_info *pipe = file->private_data;
55682
55683 pipe_lock(pipe);
55684- pipe->readers++;
55685- pipe->writers--;
55686+ atomic_inc(&pipe->readers);
55687+ atomic_dec(&pipe->writers);
55688 wake_up_interruptible_sync(&pipe->wait);
55689 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
55690 pipe_unlock(pipe);
55691@@ -448,11 +448,11 @@ static void wait_for_dump_helpers(struct file *file)
55692 * We actually want wait_event_freezable() but then we need
55693 * to clear TIF_SIGPENDING and improve dump_interrupted().
55694 */
55695- wait_event_interruptible(pipe->wait, pipe->readers == 1);
55696+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
55697
55698 pipe_lock(pipe);
55699- pipe->readers--;
55700- pipe->writers++;
55701+ atomic_dec(&pipe->readers);
55702+ atomic_inc(&pipe->writers);
55703 pipe_unlock(pipe);
55704 }
55705
55706@@ -499,7 +499,9 @@ void do_coredump(siginfo_t *siginfo)
55707 struct files_struct *displaced;
55708 bool need_nonrelative = false;
55709 bool core_dumped = false;
55710- static atomic_t core_dump_count = ATOMIC_INIT(0);
55711+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
55712+ long signr = siginfo->si_signo;
55713+ int dumpable;
55714 struct coredump_params cprm = {
55715 .siginfo = siginfo,
55716 .regs = signal_pt_regs(),
55717@@ -512,12 +514,17 @@ void do_coredump(siginfo_t *siginfo)
55718 .mm_flags = mm->flags,
55719 };
55720
55721- audit_core_dumps(siginfo->si_signo);
55722+ audit_core_dumps(signr);
55723+
55724+ dumpable = __get_dumpable(cprm.mm_flags);
55725+
55726+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
55727+ gr_handle_brute_attach(dumpable);
55728
55729 binfmt = mm->binfmt;
55730 if (!binfmt || !binfmt->core_dump)
55731 goto fail;
55732- if (!__get_dumpable(cprm.mm_flags))
55733+ if (!dumpable)
55734 goto fail;
55735
55736 cred = prepare_creds();
55737@@ -536,7 +543,7 @@ void do_coredump(siginfo_t *siginfo)
55738 need_nonrelative = true;
55739 }
55740
55741- retval = coredump_wait(siginfo->si_signo, &core_state);
55742+ retval = coredump_wait(signr, &core_state);
55743 if (retval < 0)
55744 goto fail_creds;
55745
55746@@ -579,7 +586,7 @@ void do_coredump(siginfo_t *siginfo)
55747 }
55748 cprm.limit = RLIM_INFINITY;
55749
55750- dump_count = atomic_inc_return(&core_dump_count);
55751+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
55752 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
55753 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
55754 task_tgid_vnr(current), current->comm);
55755@@ -611,6 +618,8 @@ void do_coredump(siginfo_t *siginfo)
55756 } else {
55757 struct inode *inode;
55758
55759+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
55760+
55761 if (cprm.limit < binfmt->min_coredump)
55762 goto fail_unlock;
55763
55764@@ -669,7 +678,7 @@ close_fail:
55765 filp_close(cprm.file, NULL);
55766 fail_dropcount:
55767 if (ispipe)
55768- atomic_dec(&core_dump_count);
55769+ atomic_dec_unchecked(&core_dump_count);
55770 fail_unlock:
55771 kfree(cn.corename);
55772 coredump_finish(mm, core_dumped);
55773@@ -689,7 +698,7 @@ int dump_write(struct file *file, const void *addr, int nr)
55774 {
55775 return !dump_interrupted() &&
55776 access_ok(VERIFY_READ, addr, nr) &&
55777- file->f_op->write(file, addr, nr, &file->f_pos) == nr;
55778+ file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
55779 }
55780 EXPORT_SYMBOL(dump_write);
55781
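
In fs/coredump.c the pipe reader/writer counts follow the tree-wide atomic_t conversion, and core_dump_count keeps its inc-return/dec pairing so concurrent pipe dumps are throttled against core_pipe_limit without extra locking. A sketch of that limiter pattern:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int dump_count;       /* stand-in for core_dump_count */
static const int pipe_limit = 2;    /* stand-in for core_pipe_limit */

static int try_start_dump(void)
{
	/* atomic_inc_return_unchecked() in the hunk above */
	int n = atomic_fetch_add(&dump_count, 1) + 1;

	if (pipe_limit && n > pipe_limit) {
		atomic_fetch_sub(&dump_count, 1);   /* the fail_dropcount: path */
		return -1;
	}
	return 0;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("dump %d: %s\n", i,
		       try_start_dump() ? "rejected" : "started");
	return 0;
}
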
55782diff --git a/fs/dcache.c b/fs/dcache.c
55783index 89f9671..d2dce57 100644
55784--- a/fs/dcache.c
55785+++ b/fs/dcache.c
55786@@ -1570,7 +1570,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
55787 */
55788 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
55789 if (name->len > DNAME_INLINE_LEN-1) {
55790- dname = kmalloc(name->len + 1, GFP_KERNEL);
55791+ dname = kmalloc(round_up(name->len + 1, sizeof(unsigned long)), GFP_KERNEL);
55792 if (!dname) {
55793 kmem_cache_free(dentry_cache, dentry);
55794 return NULL;
55795@@ -2893,6 +2893,7 @@ static int prepend_path(const struct path *path,
55796 restart:
55797 bptr = *buffer;
55798 blen = *buflen;
55799+ error = 0;
55800 dentry = path->dentry;
55801 vfsmnt = path->mnt;
55802 mnt = real_mount(vfsmnt);
55803@@ -3432,7 +3433,8 @@ void __init vfs_caches_init(unsigned long mempages)
55804 mempages -= reserve;
55805
55806 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
55807- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
55808+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
55809+ SLAB_NO_SANITIZE, NULL);
55810
55811 dcache_init();
55812 inode_init();
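
The dcache hunk pads external name allocations to a multiple of sizeof(unsigned long); the dcache compares names word-at-a-time, so whole-word allocations guarantee those word loads never run past the buffer. A sketch using the kernel's round_up() definition (GNU C __typeof__ assumed):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* the kernel's round_up() for power-of-two alignment */
#define round_up(x, y) ((((x) - 1) | ((__typeof__(x))(y) - 1)) + 1)

static char *alloc_name(const char *name)
{
	size_t len = strlen(name) + 1;
	/* pad to whole words so word-at-a-time readers stay in bounds */
	char *p = malloc(round_up(len, sizeof(unsigned long)));

	if (p)
		memcpy(p, name, len);
	return p;
}

int main(void)
{
	char *n = alloc_name("a-long-dentry-name");
	if (n)
		printf("%s (allocation rounded to %zu-byte words)\n",
		       n, sizeof(unsigned long));
	free(n);
	return 0;
}
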
55813diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
55814index c7c83ff..bda9461 100644
55815--- a/fs/debugfs/inode.c
55816+++ b/fs/debugfs/inode.c
55817@@ -415,7 +415,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
55818 */
55819 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
55820 {
55821+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
55822+ return __create_file(name, S_IFDIR | S_IRWXU,
55823+#else
55824 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
55825+#endif
55826 parent, NULL, NULL);
55827 }
55828 EXPORT_SYMBOL_GPL(debugfs_create_dir);
55829diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
55830index 67e9b63..a9adb68 100644
55831--- a/fs/ecryptfs/inode.c
55832+++ b/fs/ecryptfs/inode.c
55833@@ -674,7 +674,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
55834 old_fs = get_fs();
55835 set_fs(get_ds());
55836 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
55837- (char __user *)lower_buf,
55838+ (char __force_user *)lower_buf,
55839 PATH_MAX);
55840 set_fs(old_fs);
55841 if (rc < 0)
55842@@ -706,7 +706,7 @@ out:
55843 static void
55844 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
55845 {
55846- char *buf = nd_get_link(nd);
55847+ const char *buf = nd_get_link(nd);
55848 if (!IS_ERR(buf)) {
55849 /* Free the char* */
55850 kfree(buf);
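
The ecryptfs change replaces a bare (char __user *) cast with grsecurity's __force_user: inside a set_fs(get_ds()) window a kernel buffer is legitimately passed to a __user-typed interface, and the annotation tells sparse the address-space crossing is intentional rather than merely hiding it. A sketch with the annotations reduced to no-ops, as they are in non-sparse builds:

#include <stdio.h>
#include <string.h>

/* Under sparse, __user tags a pointer's address space and __force allows
 * crossing it; grsecurity's __force_user combines the two.  In a normal
 * build they expand to nothing, as here: */
#define __user
#define __force_user

/* an interface declared to take a user pointer */
static long fake_readlink(char __user *buf, size_t n)
{
	strncpy((char *)buf, "/lower/target", n);
	return 13;
}

int main(void)
{
	char kbuf[64];
	/* a kernel buffer handed over inside an (elided) set_fs() window;
	 * the cast documents the deliberate address-space override */
	long rc = fake_readlink((char __force_user *)kbuf, sizeof(kbuf));
	printf("rc=%ld buf=%s\n", rc, kbuf);
	return 0;
}
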
55851diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
55852index e4141f2..d8263e8 100644
55853--- a/fs/ecryptfs/miscdev.c
55854+++ b/fs/ecryptfs/miscdev.c
55855@@ -304,7 +304,7 @@ check_list:
55856 goto out_unlock_msg_ctx;
55857 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
55858 if (msg_ctx->msg) {
55859- if (copy_to_user(&buf[i], packet_length, packet_length_size))
55860+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
55861 goto out_unlock_msg_ctx;
55862 i += packet_length_size;
55863 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
55864diff --git a/fs/exec.c b/fs/exec.c
55865index bb8afc1..2f5087e 100644
55866--- a/fs/exec.c
55867+++ b/fs/exec.c
55868@@ -55,8 +55,20 @@
55869 #include <linux/pipe_fs_i.h>
55870 #include <linux/oom.h>
55871 #include <linux/compat.h>
55872+#include <linux/random.h>
55873+#include <linux/seq_file.h>
55874+#include <linux/coredump.h>
55875+#include <linux/mman.h>
55876+
55877+#ifdef CONFIG_PAX_REFCOUNT
55878+#include <linux/kallsyms.h>
55879+#include <linux/kdebug.h>
55880+#endif
55881+
55882+#include <trace/events/fs.h>
55883
55884 #include <asm/uaccess.h>
55885+#include <asm/sections.h>
55886 #include <asm/mmu_context.h>
55887 #include <asm/tlb.h>
55888
55889@@ -66,19 +78,34 @@
55890
55891 #include <trace/events/sched.h>
55892
55893+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
55894+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
55895+{
55896+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
55897+}
55898+#endif
55899+
55900+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
55901+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
55902+EXPORT_SYMBOL(pax_set_initial_flags_func);
55903+#endif
55904+
55905 int suid_dumpable = 0;
55906
55907 static LIST_HEAD(formats);
55908 static DEFINE_RWLOCK(binfmt_lock);
55909
55910+extern int gr_process_kernel_exec_ban(void);
55911+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
55912+
55913 void __register_binfmt(struct linux_binfmt * fmt, int insert)
55914 {
55915 BUG_ON(!fmt);
55916 if (WARN_ON(!fmt->load_binary))
55917 return;
55918 write_lock(&binfmt_lock);
55919- insert ? list_add(&fmt->lh, &formats) :
55920- list_add_tail(&fmt->lh, &formats);
55921+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
55922+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
55923 write_unlock(&binfmt_lock);
55924 }
55925
55926@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
55927 void unregister_binfmt(struct linux_binfmt * fmt)
55928 {
55929 write_lock(&binfmt_lock);
55930- list_del(&fmt->lh);
55931+ pax_list_del((struct list_head *)&fmt->lh);
55932 write_unlock(&binfmt_lock);
55933 }
55934
55935@@ -183,18 +210,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
55936 int write)
55937 {
55938 struct page *page;
55939- int ret;
55940
55941-#ifdef CONFIG_STACK_GROWSUP
55942- if (write) {
55943- ret = expand_downwards(bprm->vma, pos);
55944- if (ret < 0)
55945- return NULL;
55946- }
55947-#endif
55948- ret = get_user_pages(current, bprm->mm, pos,
55949- 1, write, 1, &page, NULL);
55950- if (ret <= 0)
55951+ if (0 > expand_downwards(bprm->vma, pos))
55952+ return NULL;
55953+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
55954 return NULL;
55955
55956 if (write) {
55957@@ -210,6 +229,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
55958 if (size <= ARG_MAX)
55959 return page;
55960
55961+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55962+ // only allow 512KB for argv+env on suid/sgid binaries
55963+ // to prevent easy ASLR exhaustion
55964+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
55965+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
55966+ (size > (512 * 1024))) {
55967+ put_page(page);
55968+ return NULL;
55969+ }
55970+#endif
55971+
55972 /*
55973 * Limit to 1/4-th the stack size for the argv+env strings.
55974 * This ensures that:
55975@@ -269,6 +299,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
55976 vma->vm_end = STACK_TOP_MAX;
55977 vma->vm_start = vma->vm_end - PAGE_SIZE;
55978 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
55979+
55980+#ifdef CONFIG_PAX_SEGMEXEC
55981+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
55982+#endif
55983+
55984 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
55985 INIT_LIST_HEAD(&vma->anon_vma_chain);
55986
55987@@ -279,6 +314,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
55988 mm->stack_vm = mm->total_vm = 1;
55989 up_write(&mm->mmap_sem);
55990 bprm->p = vma->vm_end - sizeof(void *);
55991+
55992+#ifdef CONFIG_PAX_RANDUSTACK
55993+ if (randomize_va_space)
55994+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
55995+#endif
55996+
55997 return 0;
55998 err:
55999 up_write(&mm->mmap_sem);
56000@@ -399,7 +440,7 @@ struct user_arg_ptr {
56001 } ptr;
56002 };
56003
56004-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
56005+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
56006 {
56007 const char __user *native;
56008
56009@@ -408,14 +449,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
56010 compat_uptr_t compat;
56011
56012 if (get_user(compat, argv.ptr.compat + nr))
56013- return ERR_PTR(-EFAULT);
56014+ return (const char __force_user *)ERR_PTR(-EFAULT);
56015
56016 return compat_ptr(compat);
56017 }
56018 #endif
56019
56020 if (get_user(native, argv.ptr.native + nr))
56021- return ERR_PTR(-EFAULT);
56022+ return (const char __force_user *)ERR_PTR(-EFAULT);
56023
56024 return native;
56025 }
56026@@ -434,7 +475,7 @@ static int count(struct user_arg_ptr argv, int max)
56027 if (!p)
56028 break;
56029
56030- if (IS_ERR(p))
56031+ if (IS_ERR((const char __force_kernel *)p))
56032 return -EFAULT;
56033
56034 if (i >= max)
56035@@ -469,7 +510,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
56036
56037 ret = -EFAULT;
56038 str = get_user_arg_ptr(argv, argc);
56039- if (IS_ERR(str))
56040+ if (IS_ERR((const char __force_kernel *)str))
56041 goto out;
56042
56043 len = strnlen_user(str, MAX_ARG_STRLEN);
56044@@ -551,7 +592,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
56045 int r;
56046 mm_segment_t oldfs = get_fs();
56047 struct user_arg_ptr argv = {
56048- .ptr.native = (const char __user *const __user *)__argv,
56049+ .ptr.native = (const char __force_user * const __force_user *)__argv,
56050 };
56051
56052 set_fs(KERNEL_DS);
56053@@ -586,7 +627,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
56054 unsigned long new_end = old_end - shift;
56055 struct mmu_gather tlb;
56056
56057- BUG_ON(new_start > new_end);
56058+ if (new_start >= new_end || new_start < mmap_min_addr)
56059+ return -ENOMEM;
56060
56061 /*
56062 * ensure there are no vmas between where we want to go
56063@@ -595,6 +637,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
56064 if (vma != find_vma(mm, new_start))
56065 return -EFAULT;
56066
56067+#ifdef CONFIG_PAX_SEGMEXEC
56068+ BUG_ON(pax_find_mirror_vma(vma));
56069+#endif
56070+
56071 /*
56072 * cover the whole range: [new_start, old_end)
56073 */
56074@@ -675,10 +721,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
56075 stack_top = arch_align_stack(stack_top);
56076 stack_top = PAGE_ALIGN(stack_top);
56077
56078- if (unlikely(stack_top < mmap_min_addr) ||
56079- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
56080- return -ENOMEM;
56081-
56082 stack_shift = vma->vm_end - stack_top;
56083
56084 bprm->p -= stack_shift;
56085@@ -690,8 +732,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
56086 bprm->exec -= stack_shift;
56087
56088 down_write(&mm->mmap_sem);
56089+
56090+ /* Move stack pages down in memory. */
56091+ if (stack_shift) {
56092+ ret = shift_arg_pages(vma, stack_shift);
56093+ if (ret)
56094+ goto out_unlock;
56095+ }
56096+
56097 vm_flags = VM_STACK_FLAGS;
56098
56099+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
56100+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
56101+ vm_flags &= ~VM_EXEC;
56102+
56103+#ifdef CONFIG_PAX_MPROTECT
56104+ if (mm->pax_flags & MF_PAX_MPROTECT)
56105+ vm_flags &= ~VM_MAYEXEC;
56106+#endif
56107+
56108+ }
56109+#endif
56110+
56111 /*
56112 * Adjust stack execute permissions; explicitly enable for
56113 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
56114@@ -710,13 +772,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
56115 goto out_unlock;
56116 BUG_ON(prev != vma);
56117
56118- /* Move stack pages down in memory. */
56119- if (stack_shift) {
56120- ret = shift_arg_pages(vma, stack_shift);
56121- if (ret)
56122- goto out_unlock;
56123- }
56124-
56125 /* mprotect_fixup is overkill to remove the temporary stack flags */
56126 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
56127
56128@@ -740,6 +795,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
56129 #endif
56130 current->mm->start_stack = bprm->p;
56131 ret = expand_stack(vma, stack_base);
56132+
56133+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
56134+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
56135+ unsigned long size;
56136+ vm_flags_t vm_flags;
56137+
56138+ size = STACK_TOP - vma->vm_end;
56139+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
56140+
56141+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
56142+
56143+#ifdef CONFIG_X86
56144+ if (!ret) {
56145+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
56146+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
56147+ }
56148+#endif
56149+
56150+ }
56151+#endif
56152+
56153 if (ret)
56154 ret = -EFAULT;
56155
56156@@ -776,6 +852,8 @@ struct file *open_exec(const char *name)
56157
56158 fsnotify_open(file);
56159
56160+ trace_open_exec(name);
56161+
56162 err = deny_write_access(file);
56163 if (err)
56164 goto exit;
56165@@ -799,7 +877,7 @@ int kernel_read(struct file *file, loff_t offset,
56166 old_fs = get_fs();
56167 set_fs(get_ds());
56168 /* The cast to a user pointer is valid due to the set_fs() */
56169- result = vfs_read(file, (void __user *)addr, count, &pos);
56170+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
56171 set_fs(old_fs);
56172 return result;
56173 }
56174@@ -1255,7 +1333,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
56175 }
56176 rcu_read_unlock();
56177
56178- if (p->fs->users > n_fs) {
56179+ if (atomic_read(&p->fs->users) > n_fs) {
56180 bprm->unsafe |= LSM_UNSAFE_SHARE;
56181 } else {
56182 res = -EAGAIN;
56183@@ -1451,6 +1529,31 @@ static int exec_binprm(struct linux_binprm *bprm)
56184 return ret;
56185 }
56186
56187+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56188+static DEFINE_PER_CPU(u64, exec_counter);
56189+static int __init init_exec_counters(void)
56190+{
56191+ unsigned int cpu;
56192+
56193+ for_each_possible_cpu(cpu) {
56194+ per_cpu(exec_counter, cpu) = (u64)cpu;
56195+ }
56196+
56197+ return 0;
56198+}
56199+early_initcall(init_exec_counters);
56200+static inline void increment_exec_counter(void)
56201+{
56202+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
56203+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
56204+}
56205+#else
56206+static inline void increment_exec_counter(void) {}
56207+#endif
56208+
56209+extern void gr_handle_exec_args(struct linux_binprm *bprm,
56210+ struct user_arg_ptr argv);
56211+
56212 /*
56213 * sys_execve() executes a new program.
56214 */
56215@@ -1458,12 +1561,19 @@ static int do_execve_common(const char *filename,
56216 struct user_arg_ptr argv,
56217 struct user_arg_ptr envp)
56218 {
56219+#ifdef CONFIG_GRKERNSEC
56220+ struct file *old_exec_file;
56221+ struct acl_subject_label *old_acl;
56222+ struct rlimit old_rlim[RLIM_NLIMITS];
56223+#endif
56224 struct linux_binprm *bprm;
56225 struct file *file;
56226 struct files_struct *displaced;
56227 bool clear_in_exec;
56228 int retval;
56229
56230+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
56231+
56232 /*
56233 * We move the actual failure in case of RLIMIT_NPROC excess from
56234 * set*uid() to execve() because too many poorly written programs
56235@@ -1504,12 +1614,22 @@ static int do_execve_common(const char *filename,
56236 if (IS_ERR(file))
56237 goto out_unmark;
56238
56239+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
56240+ retval = -EPERM;
56241+ goto out_file;
56242+ }
56243+
56244 sched_exec();
56245
56246 bprm->file = file;
56247 bprm->filename = filename;
56248 bprm->interp = filename;
56249
56250+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
56251+ retval = -EACCES;
56252+ goto out_file;
56253+ }
56254+
56255 retval = bprm_mm_init(bprm);
56256 if (retval)
56257 goto out_file;
56258@@ -1526,24 +1646,70 @@ static int do_execve_common(const char *filename,
56259 if (retval < 0)
56260 goto out;
56261
56262+#ifdef CONFIG_GRKERNSEC
56263+ old_acl = current->acl;
56264+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
56265+ old_exec_file = current->exec_file;
56266+ get_file(file);
56267+ current->exec_file = file;
56268+#endif
56269+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56270+ /* limit suid stack to 8MB
56271+ * we saved the old limits above and will restore them if this exec fails
56272+ */
56273+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
56274+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
56275+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
56276+#endif
56277+
56278+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
56279+ retval = -EPERM;
56280+ goto out_fail;
56281+ }
56282+
56283+ if (!gr_tpe_allow(file)) {
56284+ retval = -EACCES;
56285+ goto out_fail;
56286+ }
56287+
56288+ if (gr_check_crash_exec(file)) {
56289+ retval = -EACCES;
56290+ goto out_fail;
56291+ }
56292+
56293+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
56294+ bprm->unsafe);
56295+ if (retval < 0)
56296+ goto out_fail;
56297+
56298 retval = copy_strings_kernel(1, &bprm->filename, bprm);
56299 if (retval < 0)
56300- goto out;
56301+ goto out_fail;
56302
56303 bprm->exec = bprm->p;
56304 retval = copy_strings(bprm->envc, envp, bprm);
56305 if (retval < 0)
56306- goto out;
56307+ goto out_fail;
56308
56309 retval = copy_strings(bprm->argc, argv, bprm);
56310 if (retval < 0)
56311- goto out;
56312+ goto out_fail;
56313+
56314+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
56315+
56316+ gr_handle_exec_args(bprm, argv);
56317
56318 retval = exec_binprm(bprm);
56319 if (retval < 0)
56320- goto out;
56321+ goto out_fail;
56322+#ifdef CONFIG_GRKERNSEC
56323+ if (old_exec_file)
56324+ fput(old_exec_file);
56325+#endif
56326
56327 /* execve succeeded */
56328+
56329+ increment_exec_counter();
56330 current->fs->in_exec = 0;
56331 current->in_execve = 0;
56332 acct_update_integrals(current);
56333@@ -1552,6 +1718,14 @@ static int do_execve_common(const char *filename,
56334 put_files_struct(displaced);
56335 return retval;
56336
56337+out_fail:
56338+#ifdef CONFIG_GRKERNSEC
56339+ current->acl = old_acl;
56340+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
56341+ fput(current->exec_file);
56342+ current->exec_file = old_exec_file;
56343+#endif
56344+
56345 out:
56346 if (bprm->mm) {
56347 acct_arg_size(bprm, 0);
56348@@ -1706,3 +1880,295 @@ asmlinkage long compat_sys_execve(const char __user * filename,
56349 return error;
56350 }
56351 #endif
56352+
56353+int pax_check_flags(unsigned long *flags)
56354+{
56355+ int retval = 0;
56356+
56357+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
56358+ if (*flags & MF_PAX_SEGMEXEC)
56359+ {
56360+ *flags &= ~MF_PAX_SEGMEXEC;
56361+ retval = -EINVAL;
56362+ }
56363+#endif
56364+
56365+ if ((*flags & MF_PAX_PAGEEXEC)
56366+
56367+#ifdef CONFIG_PAX_PAGEEXEC
56368+ && (*flags & MF_PAX_SEGMEXEC)
56369+#endif
56370+
56371+ )
56372+ {
56373+ *flags &= ~MF_PAX_PAGEEXEC;
56374+ retval = -EINVAL;
56375+ }
56376+
56377+ if ((*flags & MF_PAX_MPROTECT)
56378+
56379+#ifdef CONFIG_PAX_MPROTECT
56380+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
56381+#endif
56382+
56383+ )
56384+ {
56385+ *flags &= ~MF_PAX_MPROTECT;
56386+ retval = -EINVAL;
56387+ }
56388+
56389+ if ((*flags & MF_PAX_EMUTRAMP)
56390+
56391+#ifdef CONFIG_PAX_EMUTRAMP
56392+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
56393+#endif
56394+
56395+ )
56396+ {
56397+ *flags &= ~MF_PAX_EMUTRAMP;
56398+ retval = -EINVAL;
56399+ }
56400+
56401+ return retval;
56402+}
56403+
56404+EXPORT_SYMBOL(pax_check_flags);
56405+
56406+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
56407+char *pax_get_path(const struct path *path, char *buf, int buflen)
56408+{
56409+ char *pathname = d_path(path, buf, buflen);
56410+
56411+ if (IS_ERR(pathname))
56412+ goto toolong;
56413+
56414+ pathname = mangle_path(buf, pathname, "\t\n\\");
56415+ if (!pathname)
56416+ goto toolong;
56417+
56418+ *pathname = 0;
56419+ return buf;
56420+
56421+toolong:
56422+ return "<path too long>";
56423+}
56424+EXPORT_SYMBOL(pax_get_path);
56425+
56426+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
56427+{
56428+ struct task_struct *tsk = current;
56429+ struct mm_struct *mm = current->mm;
56430+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
56431+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
56432+ char *path_exec = NULL;
56433+ char *path_fault = NULL;
56434+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
56435+ siginfo_t info = { };
56436+
56437+ if (buffer_exec && buffer_fault) {
56438+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
56439+
56440+ down_read(&mm->mmap_sem);
56441+ vma = mm->mmap;
56442+ while (vma && (!vma_exec || !vma_fault)) {
56443+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
56444+ vma_exec = vma;
56445+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
56446+ vma_fault = vma;
56447+ vma = vma->vm_next;
56448+ }
56449+ if (vma_exec)
56450+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
56451+ if (vma_fault) {
56452+ start = vma_fault->vm_start;
56453+ end = vma_fault->vm_end;
56454+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
56455+ if (vma_fault->vm_file)
56456+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
56457+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
56458+ path_fault = "<heap>";
56459+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
56460+ path_fault = "<stack>";
56461+ else
56462+ path_fault = "<anonymous mapping>";
56463+ }
56464+ up_read(&mm->mmap_sem);
56465+ }
56466+ if (tsk->signal->curr_ip)
56467+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
56468+ else
56469+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
56470+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
56471+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
56472+ free_page((unsigned long)buffer_exec);
56473+ free_page((unsigned long)buffer_fault);
56474+ pax_report_insns(regs, pc, sp);
56475+ info.si_signo = SIGKILL;
56476+ info.si_errno = 0;
56477+ info.si_code = SI_KERNEL;
56478+ info.si_pid = 0;
56479+ info.si_uid = 0;
56480+ do_coredump(&info);
56481+}
56482+#endif
56483+
56484+#ifdef CONFIG_PAX_REFCOUNT
56485+void pax_report_refcount_overflow(struct pt_regs *regs)
56486+{
56487+ if (current->signal->curr_ip)
56488+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
56489+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
56490+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
56491+ else
56492+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
56493+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
56494+	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
56495+ preempt_disable();
56496+ show_regs(regs);
56497+ preempt_enable();
56498+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
56499+}
56500+#endif
56501+
56502+#ifdef CONFIG_PAX_USERCOPY
56503+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
56504+static noinline int check_stack_object(const void *obj, unsigned long len)
56505+{
56506+ const void * const stack = task_stack_page(current);
56507+ const void * const stackend = stack + THREAD_SIZE;
56508+
56509+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
56510+ const void *frame = NULL;
56511+ const void *oldframe;
56512+#endif
56513+
56514+ if (obj + len < obj)
56515+ return -1;
56516+
56517+ if (obj + len <= stack || stackend <= obj)
56518+ return 0;
56519+
56520+ if (obj < stack || stackend < obj + len)
56521+ return -1;
56522+
56523+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
56524+ oldframe = __builtin_frame_address(1);
56525+ if (oldframe)
56526+ frame = __builtin_frame_address(2);
56527+ /*
56528+ low ----------------------------------------------> high
56529+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
56530+ ^----------------^
56531+ allow copies only within here
56532+ */
56533+ while (stack <= frame && frame < stackend) {
56534+ /* if obj + len extends past the last frame, this
56535+ check won't pass and the next frame will be 0,
56536+ causing us to bail out and correctly report
56537+ the copy as invalid
56538+ */
56539+ if (obj + len <= frame)
56540+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
56541+ oldframe = frame;
56542+ frame = *(const void * const *)frame;
56543+ }
56544+ return -1;
56545+#else
56546+ return 1;
56547+#endif
56548+}
56549+
56550+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
56551+{
56552+ if (current->signal->curr_ip)
56553+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
56554+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
56555+ else
56556+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
56557+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
56558+ dump_stack();
56559+ gr_handle_kernel_exploit();
56560+ do_group_exit(SIGKILL);
56561+}
56562+#endif
56563+
56564+#ifdef CONFIG_PAX_USERCOPY
56565+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
56566+{
56567+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
56568+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
56569+#ifdef CONFIG_MODULES
56570+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
56571+#else
56572+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
56573+#endif
56574+
56575+#else
56576+ unsigned long textlow = (unsigned long)_stext;
56577+ unsigned long texthigh = (unsigned long)_etext;
56578+
56579+#ifdef CONFIG_X86_64
56580+ /* check against linear mapping as well */
56581+ if (high > (unsigned long)__va(__pa(textlow)) &&
56582+ low <= (unsigned long)__va(__pa(texthigh)))
56583+ return true;
56584+#endif
56585+
56586+#endif
56587+
56588+ if (high <= textlow || low > texthigh)
56589+ return false;
56590+ else
56591+ return true;
56592+}
56593+#endif
56594+
56595+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
56596+{
56597+
56598+#ifdef CONFIG_PAX_USERCOPY
56599+ const char *type;
56600+
56601+ if (!n)
56602+ return;
56603+
56604+ type = check_heap_object(ptr, n);
56605+ if (!type) {
56606+ int ret = check_stack_object(ptr, n);
56607+ if (ret == 1 || ret == 2)
56608+ return;
56609+ if (ret == 0) {
56610+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
56611+ type = "<kernel text>";
56612+ else
56613+ return;
56614+ } else
56615+ type = "<process stack>";
56616+ }
56617+
56618+ pax_report_usercopy(ptr, n, to_user, type);
56619+#endif
56620+
56621+}
56622+EXPORT_SYMBOL(__check_object_size);
56623+
56624+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
56625+void pax_track_stack(void)
56626+{
56627+ unsigned long sp = (unsigned long)&sp;
56628+ if (sp < current_thread_info()->lowest_stack &&
56629+ sp > (unsigned long)task_stack_page(current))
56630+ current_thread_info()->lowest_stack = sp;
56631+}
56632+EXPORT_SYMBOL(pax_track_stack);
56633+#endif
56634+
56635+#ifdef CONFIG_PAX_SIZE_OVERFLOW
56636+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
56637+{
56638+	printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s\n", func, file, line, ssa_name);
56639+ dump_stack();
56640+ do_group_exit(SIGKILL);
56641+}
56642+EXPORT_SYMBOL(report_size_overflow);
56643+#endif
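
check_stack_object(), added above, classifies a usercopy span against the task stack as outside (0), inside (1 or 2), or straddling a boundary (-1); on x86 with frame pointers it further walks saved frames and only admits copies that sit between them. A userspace sketch of the coarse range classification (frame walk omitted, THREAD_SIZE assumed):

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 16384   /* assumed stack size for the sketch */

/* 0: not on this stack, 1: fully inside, -1: partially (an error) */
static int classify(const void *stack, const void *obj, unsigned long len)
{
	uintptr_t s = (uintptr_t)stack, send = s + THREAD_SIZE;
	uintptr_t o = (uintptr_t)obj, oend = o + len;

	if (oend < o)                /* length wrapped around the address space */
		return -1;
	if (oend <= s || send <= o)  /* entirely elsewhere: not our problem */
		return 0;
	if (o < s || send < oend)    /* straddles a stack boundary: reject */
		return -1;
	return 1;
}

int main(void)
{
	static char stack[THREAD_SIZE];
	printf("%d\n", classify(stack, stack + 128, 64));              /* 1 */
	printf("%d\n", classify(stack, stack + THREAD_SIZE - 8, 64));  /* -1 */
	printf("%d\n", classify(stack, "rodata", 6));                  /* 0 */
	return 0;
}
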
56644diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
56645index 9f9992b..8b59411 100644
56646--- a/fs/ext2/balloc.c
56647+++ b/fs/ext2/balloc.c
56648@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
56649
56650 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
56651 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
56652- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
56653+ if (free_blocks < root_blocks + 1 &&
56654 !uid_eq(sbi->s_resuid, current_fsuid()) &&
56655 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
56656- !in_group_p (sbi->s_resgid))) {
56657+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
56658 return 0;
56659 }
56660 return 1;
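
The ext2 (and matching ext3/ext4) reservation checks are reordered so the capability test comes last and uses the non-logging capable_nolog(): && short-circuits left to right, so the audit-visible check now fires only when no cheaper condition has already decided the answer. A sketch of why operand order matters when one operand has side effects:

#include <stdbool.h>
#include <stdio.h>

static bool capable_logged(void)
{
	puts("  capability consulted (audit record emitted)");  /* side effect */
	return true;   /* pretend the caller is privileged */
}

/* deny reserved blocks unless some condition exempts the caller */
static bool deny_old(bool low, bool uid_ok)
{
	return low && !capable_logged() && !uid_ok;  /* logs before uid is read */
}

static bool deny_new(bool low, bool uid_ok)
{
	return low && !uid_ok && !capable_logged();  /* short-circuits at !uid_ok */
}

int main(void)
{
	puts("old order:");
	deny_old(true, true);
	puts("new order:");
	deny_new(true, true);   /* no audit record: uid already exempted us */
	return 0;
}
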
56661diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
56662index 2d7557d..14e38f94 100644
56663--- a/fs/ext2/xattr.c
56664+++ b/fs/ext2/xattr.c
56665@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
56666 struct buffer_head *bh = NULL;
56667 struct ext2_xattr_entry *entry;
56668 char *end;
56669- size_t rest = buffer_size;
56670+ size_t rest = buffer_size, total_size = 0;
56671 int error;
56672
56673 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
56674@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
56675 buffer += size;
56676 }
56677 rest -= size;
56678+ total_size += size;
56679 }
56680 }
56681- error = buffer_size - rest; /* total size */
56682+ error = total_size;
56683
56684 cleanup:
56685 brelse(bh);
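
The xattr listing fixes here and in the ext3/ext4 hunks below keep an explicit total_size instead of computing buffer_size - rest: in the size-probe case (NULL buffer, buffer_size of 0) the running rest -= size wraps the unsigned counter, which the size_overflow instrumentation treats as an error; the accumulator form never wraps. A sketch of the two idioms, with list_names() as a hypothetical stand-in:

#include <stdio.h>
#include <string.h>

/* Size-probe convention: with buf == NULL, return how many bytes the
 * listing would need, as listxattr(2) does. */
static size_t list_names(const char *const names[], size_t n,
			 char *buf, size_t buf_size)
{
	size_t rest = buf_size, total_size = 0;

	for (size_t i = 0; i < n; i++) {
		size_t size = strlen(names[i]) + 1;

		if (buf) {
			if (size > rest)
				return (size_t)-1;  /* -ERANGE in the kernel */
			memcpy(buf, names[i], size);
			buf += size;
		}
		rest -= size;        /* wraps when probing with buf == NULL... */
		total_size += size;  /* ...so report this, not buf_size - rest */
	}
	return total_size;
}

int main(void)
{
	const char *names[] = { "user.a", "user.b" };
	printf("need %zu bytes\n", list_names(names, 2, NULL, 0));
	return 0;
}
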
56686diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
56687index 22548f5..41521d8 100644
56688--- a/fs/ext3/balloc.c
56689+++ b/fs/ext3/balloc.c
56690@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
56691
56692 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
56693 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
56694- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
56695+ if (free_blocks < root_blocks + 1 &&
56696 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
56697 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
56698- !in_group_p (sbi->s_resgid))) {
56699+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
56700 return 0;
56701 }
56702 return 1;
56703diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
56704index b1fc963..881228c 100644
56705--- a/fs/ext3/xattr.c
56706+++ b/fs/ext3/xattr.c
56707@@ -330,7 +330,7 @@ static int
56708 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
56709 char *buffer, size_t buffer_size)
56710 {
56711- size_t rest = buffer_size;
56712+ size_t rest = buffer_size, total_size = 0;
56713
56714 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
56715 const struct xattr_handler *handler =
56716@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
56717 buffer += size;
56718 }
56719 rest -= size;
56720+ total_size += size;
56721 }
56722 }
56723- return buffer_size - rest;
56724+ return total_size;
56725 }
56726
56727 static int
56728diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
56729index dc5d572..4c21f8e 100644
56730--- a/fs/ext4/balloc.c
56731+++ b/fs/ext4/balloc.c
56732@@ -534,8 +534,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
56733 /* Hm, nope. Are (enough) root reserved clusters available? */
56734 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
56735 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
56736- capable(CAP_SYS_RESOURCE) ||
56737- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
56738+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
56739+ capable_nolog(CAP_SYS_RESOURCE)) {
56740
56741 if (free_clusters >= (nclusters + dirty_clusters +
56742 resv_clusters))
56743diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
56744index af815ea..99294a6 100644
56745--- a/fs/ext4/ext4.h
56746+++ b/fs/ext4/ext4.h
56747@@ -1256,19 +1256,19 @@ struct ext4_sb_info {
56748 unsigned long s_mb_last_start;
56749
56750 /* stats for buddy allocator */
56751- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
56752- atomic_t s_bal_success; /* we found long enough chunks */
56753- atomic_t s_bal_allocated; /* in blocks */
56754- atomic_t s_bal_ex_scanned; /* total extents scanned */
56755- atomic_t s_bal_goals; /* goal hits */
56756- atomic_t s_bal_breaks; /* too long searches */
56757- atomic_t s_bal_2orders; /* 2^order hits */
56758+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
56759+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
56760+ atomic_unchecked_t s_bal_allocated; /* in blocks */
56761+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
56762+ atomic_unchecked_t s_bal_goals; /* goal hits */
56763+ atomic_unchecked_t s_bal_breaks; /* too long searches */
56764+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
56765 spinlock_t s_bal_lock;
56766 unsigned long s_mb_buddies_generated;
56767 unsigned long long s_mb_generation_time;
56768- atomic_t s_mb_lost_chunks;
56769- atomic_t s_mb_preallocated;
56770- atomic_t s_mb_discarded;
56771+ atomic_unchecked_t s_mb_lost_chunks;
56772+ atomic_unchecked_t s_mb_preallocated;
56773+ atomic_unchecked_t s_mb_discarded;
56774 atomic_t s_lock_busy;
56775
56776 /* locality groups */
56777diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
56778index a41e3ba..e574a00 100644
56779--- a/fs/ext4/mballoc.c
56780+++ b/fs/ext4/mballoc.c
56781@@ -1880,7 +1880,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
56782 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
56783
56784 if (EXT4_SB(sb)->s_mb_stats)
56785- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
56786+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
56787
56788 break;
56789 }
56790@@ -2189,7 +2189,7 @@ repeat:
56791 ac->ac_status = AC_STATUS_CONTINUE;
56792 ac->ac_flags |= EXT4_MB_HINT_FIRST;
56793 cr = 3;
56794- atomic_inc(&sbi->s_mb_lost_chunks);
56795+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
56796 goto repeat;
56797 }
56798 }
56799@@ -2697,25 +2697,25 @@ int ext4_mb_release(struct super_block *sb)
56800 if (sbi->s_mb_stats) {
56801 ext4_msg(sb, KERN_INFO,
56802 "mballoc: %u blocks %u reqs (%u success)",
56803- atomic_read(&sbi->s_bal_allocated),
56804- atomic_read(&sbi->s_bal_reqs),
56805- atomic_read(&sbi->s_bal_success));
56806+ atomic_read_unchecked(&sbi->s_bal_allocated),
56807+ atomic_read_unchecked(&sbi->s_bal_reqs),
56808+ atomic_read_unchecked(&sbi->s_bal_success));
56809 ext4_msg(sb, KERN_INFO,
56810 "mballoc: %u extents scanned, %u goal hits, "
56811 "%u 2^N hits, %u breaks, %u lost",
56812- atomic_read(&sbi->s_bal_ex_scanned),
56813- atomic_read(&sbi->s_bal_goals),
56814- atomic_read(&sbi->s_bal_2orders),
56815- atomic_read(&sbi->s_bal_breaks),
56816- atomic_read(&sbi->s_mb_lost_chunks));
56817+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
56818+ atomic_read_unchecked(&sbi->s_bal_goals),
56819+ atomic_read_unchecked(&sbi->s_bal_2orders),
56820+ atomic_read_unchecked(&sbi->s_bal_breaks),
56821+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
56822 ext4_msg(sb, KERN_INFO,
56823 "mballoc: %lu generated and it took %Lu",
56824 sbi->s_mb_buddies_generated,
56825 sbi->s_mb_generation_time);
56826 ext4_msg(sb, KERN_INFO,
56827 "mballoc: %u preallocated, %u discarded",
56828- atomic_read(&sbi->s_mb_preallocated),
56829- atomic_read(&sbi->s_mb_discarded));
56830+ atomic_read_unchecked(&sbi->s_mb_preallocated),
56831+ atomic_read_unchecked(&sbi->s_mb_discarded));
56832 }
56833
56834 free_percpu(sbi->s_locality_groups);
56835@@ -3169,16 +3169,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
56836 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
56837
56838 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
56839- atomic_inc(&sbi->s_bal_reqs);
56840- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
56841+ atomic_inc_unchecked(&sbi->s_bal_reqs);
56842+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
56843 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
56844- atomic_inc(&sbi->s_bal_success);
56845- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
56846+ atomic_inc_unchecked(&sbi->s_bal_success);
56847+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
56848 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
56849 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
56850- atomic_inc(&sbi->s_bal_goals);
56851+ atomic_inc_unchecked(&sbi->s_bal_goals);
56852 if (ac->ac_found > sbi->s_mb_max_to_scan)
56853- atomic_inc(&sbi->s_bal_breaks);
56854+ atomic_inc_unchecked(&sbi->s_bal_breaks);
56855 }
56856
56857 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
56858@@ -3578,7 +3578,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
56859 trace_ext4_mb_new_inode_pa(ac, pa);
56860
56861 ext4_mb_use_inode_pa(ac, pa);
56862- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
56863+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
56864
56865 ei = EXT4_I(ac->ac_inode);
56866 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
56867@@ -3638,7 +3638,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
56868 trace_ext4_mb_new_group_pa(ac, pa);
56869
56870 ext4_mb_use_group_pa(ac, pa);
56871- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
56872+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
56873
56874 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
56875 lg = ac->ac_lg;
56876@@ -3727,7 +3727,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
56877 * from the bitmap and continue.
56878 */
56879 }
56880- atomic_add(free, &sbi->s_mb_discarded);
56881+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
56882
56883 return err;
56884 }
56885@@ -3745,7 +3745,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
56886 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
56887 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
56888 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
56889- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
56890+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
56891 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
56892
56893 return 0;
56894diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
56895index 214461e..3614c89 100644
56896--- a/fs/ext4/mmp.c
56897+++ b/fs/ext4/mmp.c
56898@@ -113,7 +113,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
56899 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
56900 const char *function, unsigned int line, const char *msg)
56901 {
56902- __ext4_warning(sb, function, line, msg);
56903+ __ext4_warning(sb, function, line, "%s", msg);
56904 __ext4_warning(sb, function, line,
56905 "MMP failure info: last update time: %llu, last update "
56906 "node: %s, last update device: %s\n",
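
The ext4/mmp.c change is a textbook format-string fix: msg can carry text influenced by on-disk state, and feeding it to __ext4_warning() as the format would let any embedded % sequences drive conversions; routing it through a constant "%s" makes it pure data. Sketch:

#include <stdio.h>

static void warn_bad(const char *msg)
{
	printf(msg);         /* '%' sequences in msg are interpreted */
}

static void warn_good(const char *msg)
{
	printf("%s", msg);   /* msg is data, never a format */
}

int main(void)
{
	const char *tainted = "volume 100%s mounted";  /* hostile '%s' inside */
	warn_good(tainted);                 /* prints the text verbatim */
	putchar('\n');
	warn_bad("plain text is safe\n");   /* only safe because it has no '%' */
	/* warn_bad(tainted) would read a stray char * off the stack */
	return 0;
}
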
56907diff --git a/fs/ext4/super.c b/fs/ext4/super.c
56908index 2c2e6cb..7c3ee62 100644
56909--- a/fs/ext4/super.c
56910+++ b/fs/ext4/super.c
56911@@ -1251,7 +1251,7 @@ static ext4_fsblk_t get_sb_block(void **data)
56912 }
56913
56914 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
56915-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
56916+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
56917 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
56918
56919 #ifdef CONFIG_QUOTA
56920@@ -2431,7 +2431,7 @@ struct ext4_attr {
56921 int offset;
56922 int deprecated_val;
56923 } u;
56924-};
56925+} __do_const;
56926
56927 static int parse_strtoull(const char *buf,
56928 unsigned long long max, unsigned long long *value)
56929diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
56930index 1423c48..9c0c6dc 100644
56931--- a/fs/ext4/xattr.c
56932+++ b/fs/ext4/xattr.c
56933@@ -381,7 +381,7 @@ static int
56934 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
56935 char *buffer, size_t buffer_size)
56936 {
56937- size_t rest = buffer_size;
56938+ size_t rest = buffer_size, total_size = 0;
56939
56940 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
56941 const struct xattr_handler *handler =
56942@@ -398,9 +398,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
56943 buffer += size;
56944 }
56945 rest -= size;
56946+ total_size += size;
56947 }
56948 }
56949- return buffer_size - rest;
56950+ return total_size;
56951 }
56952
56953 static int
56954diff --git a/fs/fcntl.c b/fs/fcntl.c
56955index 65343c3..9969dcf 100644
56956--- a/fs/fcntl.c
56957+++ b/fs/fcntl.c
56958@@ -107,6 +107,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
56959 if (err)
56960 return err;
56961
56962+ if (gr_handle_chroot_fowner(pid, type))
56963+ return -ENOENT;
56964+ if (gr_check_protected_task_fowner(pid, type))
56965+ return -EACCES;
56966+
56967 f_modown(filp, pid, type, force);
56968 return 0;
56969 }
56970diff --git a/fs/fhandle.c b/fs/fhandle.c
56971index 999ff5c..41f4109 100644
56972--- a/fs/fhandle.c
56973+++ b/fs/fhandle.c
56974@@ -67,8 +67,7 @@ static long do_sys_name_to_handle(struct path *path,
56975 } else
56976 retval = 0;
56977 /* copy the mount id */
56978- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
56979- sizeof(*mnt_id)) ||
56980+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
56981 copy_to_user(ufh, handle,
56982 sizeof(struct file_handle) + handle_bytes))
56983 retval = -EFAULT;
56984diff --git a/fs/file.c b/fs/file.c
56985index 4a78f98..f9a6d25 100644
56986--- a/fs/file.c
56987+++ b/fs/file.c
56988@@ -16,6 +16,7 @@
56989 #include <linux/slab.h>
56990 #include <linux/vmalloc.h>
56991 #include <linux/file.h>
56992+#include <linux/security.h>
56993 #include <linux/fdtable.h>
56994 #include <linux/bitops.h>
56995 #include <linux/interrupt.h>
56996@@ -141,7 +142,7 @@ out:
56997 * Return <0 error code on error; 1 on successful completion.
56998 * The files->file_lock should be held on entry, and will be held on exit.
56999 */
57000-static int expand_fdtable(struct files_struct *files, int nr)
57001+static int expand_fdtable(struct files_struct *files, unsigned int nr)
57002 __releases(files->file_lock)
57003 __acquires(files->file_lock)
57004 {
57005@@ -186,7 +187,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
57006 * expanded and execution may have blocked.
57007 * The files->file_lock should be held on entry, and will be held on exit.
57008 */
57009-static int expand_files(struct files_struct *files, int nr)
57010+static int expand_files(struct files_struct *files, unsigned int nr)
57011 {
57012 struct fdtable *fdt;
57013
57014@@ -828,6 +829,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
57015 if (!file)
57016 return __close_fd(files, fd);
57017
57018+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
57019 if (fd >= rlimit(RLIMIT_NOFILE))
57020 return -EBADF;
57021
57022@@ -854,6 +856,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
57023 if (unlikely(oldfd == newfd))
57024 return -EINVAL;
57025
57026+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
57027 if (newfd >= rlimit(RLIMIT_NOFILE))
57028 return -EBADF;
57029
57030@@ -909,6 +912,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
57031 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
57032 {
57033 int err;
57034+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
57035 if (from >= rlimit(RLIMIT_NOFILE))
57036 return -EINVAL;
57037 err = alloc_fd(from, flags);
57038diff --git a/fs/filesystems.c b/fs/filesystems.c
57039index 92567d9..fcd8cbf 100644
57040--- a/fs/filesystems.c
57041+++ b/fs/filesystems.c
57042@@ -273,7 +273,11 @@ struct file_system_type *get_fs_type(const char *name)
57043 int len = dot ? dot - name : strlen(name);
57044
57045 fs = __get_fs_type(name, len);
57046+#ifdef CONFIG_GRKERNSEC_MODHARDEN
57047+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
57048+#else
57049 if (!fs && (request_module("fs-%.*s", len, name) == 0))
57050+#endif
57051 fs = __get_fs_type(name, len);
57052
57053 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
57054diff --git a/fs/fs_struct.c b/fs/fs_struct.c
57055index d8ac61d..79a36f0 100644
57056--- a/fs/fs_struct.c
57057+++ b/fs/fs_struct.c
57058@@ -4,6 +4,7 @@
57059 #include <linux/path.h>
57060 #include <linux/slab.h>
57061 #include <linux/fs_struct.h>
57062+#include <linux/grsecurity.h>
57063 #include "internal.h"
57064
57065 /*
57066@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
57067 write_seqcount_begin(&fs->seq);
57068 old_root = fs->root;
57069 fs->root = *path;
57070+ gr_set_chroot_entries(current, path);
57071 write_seqcount_end(&fs->seq);
57072 spin_unlock(&fs->lock);
57073 if (old_root.dentry)
57074@@ -67,6 +69,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
57075 int hits = 0;
57076 spin_lock(&fs->lock);
57077 write_seqcount_begin(&fs->seq);
57078+ /* this root replacement is only done by pivot_root,
57079+ leave grsec's chroot tagging alone for this task
57080+ so that a pivoted root isn't treated as a chroot
57081+ */
57082 hits += replace_path(&fs->root, old_root, new_root);
57083 hits += replace_path(&fs->pwd, old_root, new_root);
57084 write_seqcount_end(&fs->seq);
57085@@ -99,7 +105,8 @@ void exit_fs(struct task_struct *tsk)
57086 task_lock(tsk);
57087 spin_lock(&fs->lock);
57088 tsk->fs = NULL;
57089- kill = !--fs->users;
57090+ gr_clear_chroot_entries(tsk);
57091+ kill = !atomic_dec_return(&fs->users);
57092 spin_unlock(&fs->lock);
57093 task_unlock(tsk);
57094 if (kill)
57095@@ -112,7 +119,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
57096 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
57097 /* We don't need to lock fs - think why ;-) */
57098 if (fs) {
57099- fs->users = 1;
57100+ atomic_set(&fs->users, 1);
57101 fs->in_exec = 0;
57102 spin_lock_init(&fs->lock);
57103 seqcount_init(&fs->seq);
57104@@ -121,6 +128,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
57105 spin_lock(&old->lock);
57106 fs->root = old->root;
57107 path_get(&fs->root);
57108+ /* instead of calling gr_set_chroot_entries here,
57109+ we call it from every caller of this function
57110+ */
57111 fs->pwd = old->pwd;
57112 path_get(&fs->pwd);
57113 spin_unlock(&old->lock);
57114@@ -139,8 +149,9 @@ int unshare_fs_struct(void)
57115
57116 task_lock(current);
57117 spin_lock(&fs->lock);
57118- kill = !--fs->users;
57119+ kill = !atomic_dec_return(&fs->users);
57120 current->fs = new_fs;
57121+ gr_set_chroot_entries(current, &new_fs->root);
57122 spin_unlock(&fs->lock);
57123 task_unlock(current);
57124
57125@@ -153,13 +164,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
57126
57127 int current_umask(void)
57128 {
57129- return current->fs->umask;
57130+ return current->fs->umask | gr_acl_umask();
57131 }
57132 EXPORT_SYMBOL(current_umask);
57133
57134 /* to be mentioned only in INIT_TASK */
57135 struct fs_struct init_fs = {
57136- .users = 1,
57137+ .users = ATOMIC_INIT(1),
57138 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
57139 .seq = SEQCNT_ZERO,
57140 .umask = 0022,
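/*
 * Sketch of the chroot-tagging contract established in fs_struct.c
 * above: every path that installs a new fs->root (set_fs_root,
 * unshare_fs_struct) tags the task via gr_set_chroot_entries(),
 * exit_fs() clears the tag, and pivot_root (chroot_fs_refs)
 * deliberately leaves it alone so a pivoted root is not mistaken for
 * a chroot; fs->users also becomes atomic so the grsecurity hooks can
 * observe it safely. A reduced model of the tag itself (field names
 * are assumptions for illustration):
 */
struct gr_chroot_tag {
	struct path entry_root;	/* root recorded when the task chrooted */
	bool	    in_chroot;	/* set on chroot(), cleared on exit_fs() */
};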
57141diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
57142index b2a86e3..37f425a 100644
57143--- a/fs/fscache/cookie.c
57144+++ b/fs/fscache/cookie.c
57145@@ -19,7 +19,7 @@
57146
57147 struct kmem_cache *fscache_cookie_jar;
57148
57149-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
57150+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
57151
57152 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
57153 static int fscache_alloc_object(struct fscache_cache *cache,
57154@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
57155 parent ? (char *) parent->def->name : "<no-parent>",
57156 def->name, netfs_data);
57157
57158- fscache_stat(&fscache_n_acquires);
57159+ fscache_stat_unchecked(&fscache_n_acquires);
57160
57161 /* if there's no parent cookie, then we don't create one here either */
57162 if (!parent) {
57163- fscache_stat(&fscache_n_acquires_null);
57164+ fscache_stat_unchecked(&fscache_n_acquires_null);
57165 _leave(" [no parent]");
57166 return NULL;
57167 }
57168@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
57169 /* allocate and initialise a cookie */
57170 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
57171 if (!cookie) {
57172- fscache_stat(&fscache_n_acquires_oom);
57173+ fscache_stat_unchecked(&fscache_n_acquires_oom);
57174 _leave(" [ENOMEM]");
57175 return NULL;
57176 }
57177@@ -114,13 +114,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
57178
57179 switch (cookie->def->type) {
57180 case FSCACHE_COOKIE_TYPE_INDEX:
57181- fscache_stat(&fscache_n_cookie_index);
57182+ fscache_stat_unchecked(&fscache_n_cookie_index);
57183 break;
57184 case FSCACHE_COOKIE_TYPE_DATAFILE:
57185- fscache_stat(&fscache_n_cookie_data);
57186+ fscache_stat_unchecked(&fscache_n_cookie_data);
57187 break;
57188 default:
57189- fscache_stat(&fscache_n_cookie_special);
57190+ fscache_stat_unchecked(&fscache_n_cookie_special);
57191 break;
57192 }
57193
57194@@ -131,13 +131,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
57195 if (fscache_acquire_non_index_cookie(cookie) < 0) {
57196 atomic_dec(&parent->n_children);
57197 __fscache_cookie_put(cookie);
57198- fscache_stat(&fscache_n_acquires_nobufs);
57199+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
57200 _leave(" = NULL");
57201 return NULL;
57202 }
57203 }
57204
57205- fscache_stat(&fscache_n_acquires_ok);
57206+ fscache_stat_unchecked(&fscache_n_acquires_ok);
57207 _leave(" = %p", cookie);
57208 return cookie;
57209 }
57210@@ -173,7 +173,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
57211 cache = fscache_select_cache_for_object(cookie->parent);
57212 if (!cache) {
57213 up_read(&fscache_addremove_sem);
57214- fscache_stat(&fscache_n_acquires_no_cache);
57215+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
57216 _leave(" = -ENOMEDIUM [no cache]");
57217 return -ENOMEDIUM;
57218 }
57219@@ -259,14 +259,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
57220 object = cache->ops->alloc_object(cache, cookie);
57221 fscache_stat_d(&fscache_n_cop_alloc_object);
57222 if (IS_ERR(object)) {
57223- fscache_stat(&fscache_n_object_no_alloc);
57224+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
57225 ret = PTR_ERR(object);
57226 goto error;
57227 }
57228
57229- fscache_stat(&fscache_n_object_alloc);
57230+ fscache_stat_unchecked(&fscache_n_object_alloc);
57231
57232- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
57233+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
57234
57235 _debug("ALLOC OBJ%x: %s {%lx}",
57236 object->debug_id, cookie->def->name, object->events);
57237@@ -380,7 +380,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
57238
57239 _enter("{%s}", cookie->def->name);
57240
57241- fscache_stat(&fscache_n_invalidates);
57242+ fscache_stat_unchecked(&fscache_n_invalidates);
57243
57244 /* Only permit invalidation of data files. Invalidating an index will
57245 * require the caller to release all its attachments to the tree rooted
57246@@ -438,10 +438,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
57247 {
57248 struct fscache_object *object;
57249
57250- fscache_stat(&fscache_n_updates);
57251+ fscache_stat_unchecked(&fscache_n_updates);
57252
57253 if (!cookie) {
57254- fscache_stat(&fscache_n_updates_null);
57255+ fscache_stat_unchecked(&fscache_n_updates_null);
57256 _leave(" [no cookie]");
57257 return;
57258 }
57259@@ -473,12 +473,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
57260 {
57261 struct fscache_object *object;
57262
57263- fscache_stat(&fscache_n_relinquishes);
57264+ fscache_stat_unchecked(&fscache_n_relinquishes);
57265 if (retire)
57266- fscache_stat(&fscache_n_relinquishes_retire);
57267+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
57268
57269 if (!cookie) {
57270- fscache_stat(&fscache_n_relinquishes_null);
57271+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
57272 _leave(" [no cookie]");
57273 return;
57274 }
57275@@ -598,7 +598,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
57276 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
57277 goto inconsistent;
57278
57279- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
57280+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
57281
57282 atomic_inc(&cookie->n_active);
57283 if (fscache_submit_op(object, op) < 0)
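/*
 * The fscache hunks from here through fs/fscache/stats.c apply one
 * technique: under PaX's REFCOUNT hardening, plain atomic_t operations
 * trap on overflow to stop reference-count exploits, so counters that
 * are pure statistics and may legitimately wrap -- the fscache_n_*
 * tallies and the debug ids -- are switched to atomic_unchecked_t,
 * which keeps the old non-trapping semantics. When the hardening is
 * compiled out, the unchecked variants collapse back to the ordinary
 * ones, roughly like this (a simplified sketch, not the exact PaX
 * definitions):
 */
#ifndef CONFIG_PAX_REFCOUNT
typedef atomic_t atomic_unchecked_t;
#define atomic_inc_unchecked(v)		atomic_inc(v)
#define atomic_dec_unchecked(v)		atomic_dec(v)
#define atomic_read_unchecked(v)	atomic_read(v)
#define atomic_inc_return_unchecked(v)	atomic_inc_return(v)
#endif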
57284diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
57285index 4226f66..0fb3f45 100644
57286--- a/fs/fscache/internal.h
57287+++ b/fs/fscache/internal.h
57288@@ -133,8 +133,8 @@ extern void fscache_operation_gc(struct work_struct *);
57289 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
57290 extern int fscache_wait_for_operation_activation(struct fscache_object *,
57291 struct fscache_operation *,
57292- atomic_t *,
57293- atomic_t *,
57294+ atomic_unchecked_t *,
57295+ atomic_unchecked_t *,
57296 void (*)(struct fscache_operation *));
57297 extern void fscache_invalidate_writes(struct fscache_cookie *);
57298
57299@@ -153,101 +153,101 @@ extern void fscache_proc_cleanup(void);
57300 * stats.c
57301 */
57302 #ifdef CONFIG_FSCACHE_STATS
57303-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
57304-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
57305+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
57306+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
57307
57308-extern atomic_t fscache_n_op_pend;
57309-extern atomic_t fscache_n_op_run;
57310-extern atomic_t fscache_n_op_enqueue;
57311-extern atomic_t fscache_n_op_deferred_release;
57312-extern atomic_t fscache_n_op_release;
57313-extern atomic_t fscache_n_op_gc;
57314-extern atomic_t fscache_n_op_cancelled;
57315-extern atomic_t fscache_n_op_rejected;
57316+extern atomic_unchecked_t fscache_n_op_pend;
57317+extern atomic_unchecked_t fscache_n_op_run;
57318+extern atomic_unchecked_t fscache_n_op_enqueue;
57319+extern atomic_unchecked_t fscache_n_op_deferred_release;
57320+extern atomic_unchecked_t fscache_n_op_release;
57321+extern atomic_unchecked_t fscache_n_op_gc;
57322+extern atomic_unchecked_t fscache_n_op_cancelled;
57323+extern atomic_unchecked_t fscache_n_op_rejected;
57324
57325-extern atomic_t fscache_n_attr_changed;
57326-extern atomic_t fscache_n_attr_changed_ok;
57327-extern atomic_t fscache_n_attr_changed_nobufs;
57328-extern atomic_t fscache_n_attr_changed_nomem;
57329-extern atomic_t fscache_n_attr_changed_calls;
57330+extern atomic_unchecked_t fscache_n_attr_changed;
57331+extern atomic_unchecked_t fscache_n_attr_changed_ok;
57332+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
57333+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
57334+extern atomic_unchecked_t fscache_n_attr_changed_calls;
57335
57336-extern atomic_t fscache_n_allocs;
57337-extern atomic_t fscache_n_allocs_ok;
57338-extern atomic_t fscache_n_allocs_wait;
57339-extern atomic_t fscache_n_allocs_nobufs;
57340-extern atomic_t fscache_n_allocs_intr;
57341-extern atomic_t fscache_n_allocs_object_dead;
57342-extern atomic_t fscache_n_alloc_ops;
57343-extern atomic_t fscache_n_alloc_op_waits;
57344+extern atomic_unchecked_t fscache_n_allocs;
57345+extern atomic_unchecked_t fscache_n_allocs_ok;
57346+extern atomic_unchecked_t fscache_n_allocs_wait;
57347+extern atomic_unchecked_t fscache_n_allocs_nobufs;
57348+extern atomic_unchecked_t fscache_n_allocs_intr;
57349+extern atomic_unchecked_t fscache_n_allocs_object_dead;
57350+extern atomic_unchecked_t fscache_n_alloc_ops;
57351+extern atomic_unchecked_t fscache_n_alloc_op_waits;
57352
57353-extern atomic_t fscache_n_retrievals;
57354-extern atomic_t fscache_n_retrievals_ok;
57355-extern atomic_t fscache_n_retrievals_wait;
57356-extern atomic_t fscache_n_retrievals_nodata;
57357-extern atomic_t fscache_n_retrievals_nobufs;
57358-extern atomic_t fscache_n_retrievals_intr;
57359-extern atomic_t fscache_n_retrievals_nomem;
57360-extern atomic_t fscache_n_retrievals_object_dead;
57361-extern atomic_t fscache_n_retrieval_ops;
57362-extern atomic_t fscache_n_retrieval_op_waits;
57363+extern atomic_unchecked_t fscache_n_retrievals;
57364+extern atomic_unchecked_t fscache_n_retrievals_ok;
57365+extern atomic_unchecked_t fscache_n_retrievals_wait;
57366+extern atomic_unchecked_t fscache_n_retrievals_nodata;
57367+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
57368+extern atomic_unchecked_t fscache_n_retrievals_intr;
57369+extern atomic_unchecked_t fscache_n_retrievals_nomem;
57370+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
57371+extern atomic_unchecked_t fscache_n_retrieval_ops;
57372+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
57373
57374-extern atomic_t fscache_n_stores;
57375-extern atomic_t fscache_n_stores_ok;
57376-extern atomic_t fscache_n_stores_again;
57377-extern atomic_t fscache_n_stores_nobufs;
57378-extern atomic_t fscache_n_stores_oom;
57379-extern atomic_t fscache_n_store_ops;
57380-extern atomic_t fscache_n_store_calls;
57381-extern atomic_t fscache_n_store_pages;
57382-extern atomic_t fscache_n_store_radix_deletes;
57383-extern atomic_t fscache_n_store_pages_over_limit;
57384+extern atomic_unchecked_t fscache_n_stores;
57385+extern atomic_unchecked_t fscache_n_stores_ok;
57386+extern atomic_unchecked_t fscache_n_stores_again;
57387+extern atomic_unchecked_t fscache_n_stores_nobufs;
57388+extern atomic_unchecked_t fscache_n_stores_oom;
57389+extern atomic_unchecked_t fscache_n_store_ops;
57390+extern atomic_unchecked_t fscache_n_store_calls;
57391+extern atomic_unchecked_t fscache_n_store_pages;
57392+extern atomic_unchecked_t fscache_n_store_radix_deletes;
57393+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
57394
57395-extern atomic_t fscache_n_store_vmscan_not_storing;
57396-extern atomic_t fscache_n_store_vmscan_gone;
57397-extern atomic_t fscache_n_store_vmscan_busy;
57398-extern atomic_t fscache_n_store_vmscan_cancelled;
57399-extern atomic_t fscache_n_store_vmscan_wait;
57400+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
57401+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
57402+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
57403+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
57404+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
57405
57406-extern atomic_t fscache_n_marks;
57407-extern atomic_t fscache_n_uncaches;
57408+extern atomic_unchecked_t fscache_n_marks;
57409+extern atomic_unchecked_t fscache_n_uncaches;
57410
57411-extern atomic_t fscache_n_acquires;
57412-extern atomic_t fscache_n_acquires_null;
57413-extern atomic_t fscache_n_acquires_no_cache;
57414-extern atomic_t fscache_n_acquires_ok;
57415-extern atomic_t fscache_n_acquires_nobufs;
57416-extern atomic_t fscache_n_acquires_oom;
57417+extern atomic_unchecked_t fscache_n_acquires;
57418+extern atomic_unchecked_t fscache_n_acquires_null;
57419+extern atomic_unchecked_t fscache_n_acquires_no_cache;
57420+extern atomic_unchecked_t fscache_n_acquires_ok;
57421+extern atomic_unchecked_t fscache_n_acquires_nobufs;
57422+extern atomic_unchecked_t fscache_n_acquires_oom;
57423
57424-extern atomic_t fscache_n_invalidates;
57425-extern atomic_t fscache_n_invalidates_run;
57426+extern atomic_unchecked_t fscache_n_invalidates;
57427+extern atomic_unchecked_t fscache_n_invalidates_run;
57428
57429-extern atomic_t fscache_n_updates;
57430-extern atomic_t fscache_n_updates_null;
57431-extern atomic_t fscache_n_updates_run;
57432+extern atomic_unchecked_t fscache_n_updates;
57433+extern atomic_unchecked_t fscache_n_updates_null;
57434+extern atomic_unchecked_t fscache_n_updates_run;
57435
57436-extern atomic_t fscache_n_relinquishes;
57437-extern atomic_t fscache_n_relinquishes_null;
57438-extern atomic_t fscache_n_relinquishes_waitcrt;
57439-extern atomic_t fscache_n_relinquishes_retire;
57440+extern atomic_unchecked_t fscache_n_relinquishes;
57441+extern atomic_unchecked_t fscache_n_relinquishes_null;
57442+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
57443+extern atomic_unchecked_t fscache_n_relinquishes_retire;
57444
57445-extern atomic_t fscache_n_cookie_index;
57446-extern atomic_t fscache_n_cookie_data;
57447-extern atomic_t fscache_n_cookie_special;
57448+extern atomic_unchecked_t fscache_n_cookie_index;
57449+extern atomic_unchecked_t fscache_n_cookie_data;
57450+extern atomic_unchecked_t fscache_n_cookie_special;
57451
57452-extern atomic_t fscache_n_object_alloc;
57453-extern atomic_t fscache_n_object_no_alloc;
57454-extern atomic_t fscache_n_object_lookups;
57455-extern atomic_t fscache_n_object_lookups_negative;
57456-extern atomic_t fscache_n_object_lookups_positive;
57457-extern atomic_t fscache_n_object_lookups_timed_out;
57458-extern atomic_t fscache_n_object_created;
57459-extern atomic_t fscache_n_object_avail;
57460-extern atomic_t fscache_n_object_dead;
57461+extern atomic_unchecked_t fscache_n_object_alloc;
57462+extern atomic_unchecked_t fscache_n_object_no_alloc;
57463+extern atomic_unchecked_t fscache_n_object_lookups;
57464+extern atomic_unchecked_t fscache_n_object_lookups_negative;
57465+extern atomic_unchecked_t fscache_n_object_lookups_positive;
57466+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
57467+extern atomic_unchecked_t fscache_n_object_created;
57468+extern atomic_unchecked_t fscache_n_object_avail;
57469+extern atomic_unchecked_t fscache_n_object_dead;
57470
57471-extern atomic_t fscache_n_checkaux_none;
57472-extern atomic_t fscache_n_checkaux_okay;
57473-extern atomic_t fscache_n_checkaux_update;
57474-extern atomic_t fscache_n_checkaux_obsolete;
57475+extern atomic_unchecked_t fscache_n_checkaux_none;
57476+extern atomic_unchecked_t fscache_n_checkaux_okay;
57477+extern atomic_unchecked_t fscache_n_checkaux_update;
57478+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
57479
57480 extern atomic_t fscache_n_cop_alloc_object;
57481 extern atomic_t fscache_n_cop_lookup_object;
57482@@ -272,6 +272,11 @@ static inline void fscache_stat(atomic_t *stat)
57483 atomic_inc(stat);
57484 }
57485
57486+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
57487+{
57488+ atomic_inc_unchecked(stat);
57489+}
57490+
57491 static inline void fscache_stat_d(atomic_t *stat)
57492 {
57493 atomic_dec(stat);
57494@@ -284,6 +289,7 @@ extern const struct file_operations fscache_stats_fops;
57495
57496 #define __fscache_stat(stat) (NULL)
57497 #define fscache_stat(stat) do {} while (0)
57498+#define fscache_stat_unchecked(stat) do {} while (0)
57499 #define fscache_stat_d(stat) do {} while (0)
57500 #endif
57501
57502diff --git a/fs/fscache/object.c b/fs/fscache/object.c
57503index 86d75a6..5f3d7a0 100644
57504--- a/fs/fscache/object.c
57505+++ b/fs/fscache/object.c
57506@@ -451,7 +451,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
57507 _debug("LOOKUP \"%s\" in \"%s\"",
57508 cookie->def->name, object->cache->tag->name);
57509
57510- fscache_stat(&fscache_n_object_lookups);
57511+ fscache_stat_unchecked(&fscache_n_object_lookups);
57512 fscache_stat(&fscache_n_cop_lookup_object);
57513 ret = object->cache->ops->lookup_object(object);
57514 fscache_stat_d(&fscache_n_cop_lookup_object);
57515@@ -461,7 +461,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
57516 if (ret == -ETIMEDOUT) {
57517 /* probably stuck behind another object, so move this one to
57518 * the back of the queue */
57519- fscache_stat(&fscache_n_object_lookups_timed_out);
57520+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
57521 _leave(" [timeout]");
57522 return NO_TRANSIT;
57523 }
57524@@ -489,7 +489,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
57525 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
57526
57527 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
57528- fscache_stat(&fscache_n_object_lookups_negative);
57529+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
57530
57531 /* Allow write requests to begin stacking up and read requests to begin
57532 * returning ENODATA.
57533@@ -523,7 +523,7 @@ void fscache_obtained_object(struct fscache_object *object)
57534 /* if we were still looking up, then we must have a positive lookup
57535 * result, in which case there may be data available */
57536 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
57537- fscache_stat(&fscache_n_object_lookups_positive);
57538+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
57539
57540 /* We do (presumably) have data */
57541 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
57542@@ -534,7 +534,7 @@ void fscache_obtained_object(struct fscache_object *object)
57543 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
57544 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
57545 } else {
57546- fscache_stat(&fscache_n_object_created);
57547+ fscache_stat_unchecked(&fscache_n_object_created);
57548 }
57549
57550 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
57551@@ -570,7 +570,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
57552 fscache_stat_d(&fscache_n_cop_lookup_complete);
57553
57554 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
57555- fscache_stat(&fscache_n_object_avail);
57556+ fscache_stat_unchecked(&fscache_n_object_avail);
57557
57558 _leave("");
57559 return transit_to(JUMPSTART_DEPS);
57560@@ -716,7 +716,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
57561
57562 /* this just shifts the object release to the work processor */
57563 fscache_put_object(object);
57564- fscache_stat(&fscache_n_object_dead);
57565+ fscache_stat_unchecked(&fscache_n_object_dead);
57566
57567 _leave("");
57568 return transit_to(OBJECT_DEAD);
57569@@ -881,7 +881,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
57570 enum fscache_checkaux result;
57571
57572 if (!object->cookie->def->check_aux) {
57573- fscache_stat(&fscache_n_checkaux_none);
57574+ fscache_stat_unchecked(&fscache_n_checkaux_none);
57575 return FSCACHE_CHECKAUX_OKAY;
57576 }
57577
57578@@ -890,17 +890,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
57579 switch (result) {
57580 /* entry okay as is */
57581 case FSCACHE_CHECKAUX_OKAY:
57582- fscache_stat(&fscache_n_checkaux_okay);
57583+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
57584 break;
57585
57586 /* entry requires update */
57587 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
57588- fscache_stat(&fscache_n_checkaux_update);
57589+ fscache_stat_unchecked(&fscache_n_checkaux_update);
57590 break;
57591
57592 /* entry requires deletion */
57593 case FSCACHE_CHECKAUX_OBSOLETE:
57594- fscache_stat(&fscache_n_checkaux_obsolete);
57595+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
57596 break;
57597
57598 default:
57599@@ -986,7 +986,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
57600 {
57601 const struct fscache_state *s;
57602
57603- fscache_stat(&fscache_n_invalidates_run);
57604+ fscache_stat_unchecked(&fscache_n_invalidates_run);
57605 fscache_stat(&fscache_n_cop_invalidate_object);
57606 s = _fscache_invalidate_object(object, event);
57607 fscache_stat_d(&fscache_n_cop_invalidate_object);
57608@@ -1001,7 +1001,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
57609 {
57610 _enter("{OBJ%x},%d", object->debug_id, event);
57611
57612- fscache_stat(&fscache_n_updates_run);
57613+ fscache_stat_unchecked(&fscache_n_updates_run);
57614 fscache_stat(&fscache_n_cop_update_object);
57615 object->cache->ops->update_object(object);
57616 fscache_stat_d(&fscache_n_cop_update_object);
57617diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
57618index 318071a..379938b 100644
57619--- a/fs/fscache/operation.c
57620+++ b/fs/fscache/operation.c
57621@@ -17,7 +17,7 @@
57622 #include <linux/slab.h>
57623 #include "internal.h"
57624
57625-atomic_t fscache_op_debug_id;
57626+atomic_unchecked_t fscache_op_debug_id;
57627 EXPORT_SYMBOL(fscache_op_debug_id);
57628
57629 /**
57630@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
57631 ASSERTCMP(atomic_read(&op->usage), >, 0);
57632 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
57633
57634- fscache_stat(&fscache_n_op_enqueue);
57635+ fscache_stat_unchecked(&fscache_n_op_enqueue);
57636 switch (op->flags & FSCACHE_OP_TYPE) {
57637 case FSCACHE_OP_ASYNC:
57638 _debug("queue async");
57639@@ -73,7 +73,7 @@ static void fscache_run_op(struct fscache_object *object,
57640 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
57641 if (op->processor)
57642 fscache_enqueue_operation(op);
57643- fscache_stat(&fscache_n_op_run);
57644+ fscache_stat_unchecked(&fscache_n_op_run);
57645 }
57646
57647 /*
57648@@ -105,11 +105,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
57649 if (object->n_in_progress > 0) {
57650 atomic_inc(&op->usage);
57651 list_add_tail(&op->pend_link, &object->pending_ops);
57652- fscache_stat(&fscache_n_op_pend);
57653+ fscache_stat_unchecked(&fscache_n_op_pend);
57654 } else if (!list_empty(&object->pending_ops)) {
57655 atomic_inc(&op->usage);
57656 list_add_tail(&op->pend_link, &object->pending_ops);
57657- fscache_stat(&fscache_n_op_pend);
57658+ fscache_stat_unchecked(&fscache_n_op_pend);
57659 fscache_start_operations(object);
57660 } else {
57661 ASSERTCMP(object->n_in_progress, ==, 0);
57662@@ -125,7 +125,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
57663 object->n_exclusive++; /* reads and writes must wait */
57664 atomic_inc(&op->usage);
57665 list_add_tail(&op->pend_link, &object->pending_ops);
57666- fscache_stat(&fscache_n_op_pend);
57667+ fscache_stat_unchecked(&fscache_n_op_pend);
57668 ret = 0;
57669 } else {
57670 /* If we're in any other state, there must have been an I/O
57671@@ -212,11 +212,11 @@ int fscache_submit_op(struct fscache_object *object,
57672 if (object->n_exclusive > 0) {
57673 atomic_inc(&op->usage);
57674 list_add_tail(&op->pend_link, &object->pending_ops);
57675- fscache_stat(&fscache_n_op_pend);
57676+ fscache_stat_unchecked(&fscache_n_op_pend);
57677 } else if (!list_empty(&object->pending_ops)) {
57678 atomic_inc(&op->usage);
57679 list_add_tail(&op->pend_link, &object->pending_ops);
57680- fscache_stat(&fscache_n_op_pend);
57681+ fscache_stat_unchecked(&fscache_n_op_pend);
57682 fscache_start_operations(object);
57683 } else {
57684 ASSERTCMP(object->n_exclusive, ==, 0);
57685@@ -228,10 +228,10 @@ int fscache_submit_op(struct fscache_object *object,
57686 object->n_ops++;
57687 atomic_inc(&op->usage);
57688 list_add_tail(&op->pend_link, &object->pending_ops);
57689- fscache_stat(&fscache_n_op_pend);
57690+ fscache_stat_unchecked(&fscache_n_op_pend);
57691 ret = 0;
57692 } else if (fscache_object_is_dying(object)) {
57693- fscache_stat(&fscache_n_op_rejected);
57694+ fscache_stat_unchecked(&fscache_n_op_rejected);
57695 op->state = FSCACHE_OP_ST_CANCELLED;
57696 ret = -ENOBUFS;
57697 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
57698@@ -310,7 +310,7 @@ int fscache_cancel_op(struct fscache_operation *op,
57699 ret = -EBUSY;
57700 if (op->state == FSCACHE_OP_ST_PENDING) {
57701 ASSERT(!list_empty(&op->pend_link));
57702- fscache_stat(&fscache_n_op_cancelled);
57703+ fscache_stat_unchecked(&fscache_n_op_cancelled);
57704 list_del_init(&op->pend_link);
57705 if (do_cancel)
57706 do_cancel(op);
57707@@ -342,7 +342,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
57708 while (!list_empty(&object->pending_ops)) {
57709 op = list_entry(object->pending_ops.next,
57710 struct fscache_operation, pend_link);
57711- fscache_stat(&fscache_n_op_cancelled);
57712+ fscache_stat_unchecked(&fscache_n_op_cancelled);
57713 list_del_init(&op->pend_link);
57714
57715 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
57716@@ -414,7 +414,7 @@ void fscache_put_operation(struct fscache_operation *op)
57717 op->state, ==, FSCACHE_OP_ST_CANCELLED);
57718 op->state = FSCACHE_OP_ST_DEAD;
57719
57720- fscache_stat(&fscache_n_op_release);
57721+ fscache_stat_unchecked(&fscache_n_op_release);
57722
57723 if (op->release) {
57724 op->release(op);
57725@@ -433,7 +433,7 @@ void fscache_put_operation(struct fscache_operation *op)
57726 * lock, and defer it otherwise */
57727 if (!spin_trylock(&object->lock)) {
57728 _debug("defer put");
57729- fscache_stat(&fscache_n_op_deferred_release);
57730+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
57731
57732 cache = object->cache;
57733 spin_lock(&cache->op_gc_list_lock);
57734@@ -486,7 +486,7 @@ void fscache_operation_gc(struct work_struct *work)
57735
57736 _debug("GC DEFERRED REL OBJ%x OP%x",
57737 object->debug_id, op->debug_id);
57738- fscache_stat(&fscache_n_op_gc);
57739+ fscache_stat_unchecked(&fscache_n_op_gc);
57740
57741 ASSERTCMP(atomic_read(&op->usage), ==, 0);
57742 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
57743diff --git a/fs/fscache/page.c b/fs/fscache/page.c
57744index 73899c1..ae40c58 100644
57745--- a/fs/fscache/page.c
57746+++ b/fs/fscache/page.c
57747@@ -61,7 +61,7 @@ try_again:
57748 val = radix_tree_lookup(&cookie->stores, page->index);
57749 if (!val) {
57750 rcu_read_unlock();
57751- fscache_stat(&fscache_n_store_vmscan_not_storing);
57752+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
57753 __fscache_uncache_page(cookie, page);
57754 return true;
57755 }
57756@@ -91,11 +91,11 @@ try_again:
57757 spin_unlock(&cookie->stores_lock);
57758
57759 if (xpage) {
57760- fscache_stat(&fscache_n_store_vmscan_cancelled);
57761- fscache_stat(&fscache_n_store_radix_deletes);
57762+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
57763+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
57764 ASSERTCMP(xpage, ==, page);
57765 } else {
57766- fscache_stat(&fscache_n_store_vmscan_gone);
57767+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
57768 }
57769
57770 wake_up_bit(&cookie->flags, 0);
57771@@ -110,11 +110,11 @@ page_busy:
57772 * sleeping on memory allocation, so we may need to impose a timeout
57773 * too. */
57774 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
57775- fscache_stat(&fscache_n_store_vmscan_busy);
57776+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
57777 return false;
57778 }
57779
57780- fscache_stat(&fscache_n_store_vmscan_wait);
57781+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
57782 __fscache_wait_on_page_write(cookie, page);
57783 gfp &= ~__GFP_WAIT;
57784 goto try_again;
57785@@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
57786 FSCACHE_COOKIE_STORING_TAG);
57787 if (!radix_tree_tag_get(&cookie->stores, page->index,
57788 FSCACHE_COOKIE_PENDING_TAG)) {
57789- fscache_stat(&fscache_n_store_radix_deletes);
57790+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
57791 xpage = radix_tree_delete(&cookie->stores, page->index);
57792 }
57793 spin_unlock(&cookie->stores_lock);
57794@@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
57795
57796 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
57797
57798- fscache_stat(&fscache_n_attr_changed_calls);
57799+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
57800
57801 if (fscache_object_is_active(object) &&
57802 fscache_use_cookie(object)) {
57803@@ -189,11 +189,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
57804
57805 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
57806
57807- fscache_stat(&fscache_n_attr_changed);
57808+ fscache_stat_unchecked(&fscache_n_attr_changed);
57809
57810 op = kzalloc(sizeof(*op), GFP_KERNEL);
57811 if (!op) {
57812- fscache_stat(&fscache_n_attr_changed_nomem);
57813+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
57814 _leave(" = -ENOMEM");
57815 return -ENOMEM;
57816 }
57817@@ -211,7 +211,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
57818 if (fscache_submit_exclusive_op(object, op) < 0)
57819 goto nobufs;
57820 spin_unlock(&cookie->lock);
57821- fscache_stat(&fscache_n_attr_changed_ok);
57822+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
57823 fscache_put_operation(op);
57824 _leave(" = 0");
57825 return 0;
57826@@ -219,7 +219,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
57827 nobufs:
57828 spin_unlock(&cookie->lock);
57829 kfree(op);
57830- fscache_stat(&fscache_n_attr_changed_nobufs);
57831+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
57832 _leave(" = %d", -ENOBUFS);
57833 return -ENOBUFS;
57834 }
57835@@ -258,7 +258,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
57836 /* allocate a retrieval operation and attempt to submit it */
57837 op = kzalloc(sizeof(*op), GFP_NOIO);
57838 if (!op) {
57839- fscache_stat(&fscache_n_retrievals_nomem);
57840+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
57841 return NULL;
57842 }
57843
57844@@ -289,13 +289,13 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
57845 return 0;
57846 }
57847
57848- fscache_stat(&fscache_n_retrievals_wait);
57849+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
57850
57851 jif = jiffies;
57852 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
57853 fscache_wait_bit_interruptible,
57854 TASK_INTERRUPTIBLE) != 0) {
57855- fscache_stat(&fscache_n_retrievals_intr);
57856+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
57857 _leave(" = -ERESTARTSYS");
57858 return -ERESTARTSYS;
57859 }
57860@@ -324,8 +324,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
57861 */
57862 int fscache_wait_for_operation_activation(struct fscache_object *object,
57863 struct fscache_operation *op,
57864- atomic_t *stat_op_waits,
57865- atomic_t *stat_object_dead,
57866+ atomic_unchecked_t *stat_op_waits,
57867+ atomic_unchecked_t *stat_object_dead,
57868 void (*do_cancel)(struct fscache_operation *))
57869 {
57870 int ret;
57871@@ -335,7 +335,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
57872
57873 _debug(">>> WT");
57874 if (stat_op_waits)
57875- fscache_stat(stat_op_waits);
57876+ fscache_stat_unchecked(stat_op_waits);
57877 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
57878 fscache_wait_bit_interruptible,
57879 TASK_INTERRUPTIBLE) != 0) {
57880@@ -353,7 +353,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
57881 check_if_dead:
57882 if (op->state == FSCACHE_OP_ST_CANCELLED) {
57883 if (stat_object_dead)
57884- fscache_stat(stat_object_dead);
57885+ fscache_stat_unchecked(stat_object_dead);
57886 _leave(" = -ENOBUFS [cancelled]");
57887 return -ENOBUFS;
57888 }
57889@@ -361,7 +361,7 @@ check_if_dead:
57890 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
57891 fscache_cancel_op(op, do_cancel);
57892 if (stat_object_dead)
57893- fscache_stat(stat_object_dead);
57894+ fscache_stat_unchecked(stat_object_dead);
57895 return -ENOBUFS;
57896 }
57897 return 0;
57898@@ -388,7 +388,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
57899
57900 _enter("%p,%p,,,", cookie, page);
57901
57902- fscache_stat(&fscache_n_retrievals);
57903+ fscache_stat_unchecked(&fscache_n_retrievals);
57904
57905 if (hlist_empty(&cookie->backing_objects))
57906 goto nobufs;
57907@@ -428,7 +428,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
57908 goto nobufs_unlock_dec;
57909 spin_unlock(&cookie->lock);
57910
57911- fscache_stat(&fscache_n_retrieval_ops);
57912+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
57913
57914 /* pin the netfs read context in case we need to do the actual netfs
57915 * read because we've encountered a cache read failure */
57916@@ -459,15 +459,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
57917
57918 error:
57919 if (ret == -ENOMEM)
57920- fscache_stat(&fscache_n_retrievals_nomem);
57921+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
57922 else if (ret == -ERESTARTSYS)
57923- fscache_stat(&fscache_n_retrievals_intr);
57924+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
57925 else if (ret == -ENODATA)
57926- fscache_stat(&fscache_n_retrievals_nodata);
57927+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
57928 else if (ret < 0)
57929- fscache_stat(&fscache_n_retrievals_nobufs);
57930+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
57931 else
57932- fscache_stat(&fscache_n_retrievals_ok);
57933+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
57934
57935 fscache_put_retrieval(op);
57936 _leave(" = %d", ret);
57937@@ -480,7 +480,7 @@ nobufs_unlock:
57938 atomic_dec(&cookie->n_active);
57939 kfree(op);
57940 nobufs:
57941- fscache_stat(&fscache_n_retrievals_nobufs);
57942+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
57943 _leave(" = -ENOBUFS");
57944 return -ENOBUFS;
57945 }
57946@@ -518,7 +518,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
57947
57948 _enter("%p,,%d,,,", cookie, *nr_pages);
57949
57950- fscache_stat(&fscache_n_retrievals);
57951+ fscache_stat_unchecked(&fscache_n_retrievals);
57952
57953 if (hlist_empty(&cookie->backing_objects))
57954 goto nobufs;
57955@@ -554,7 +554,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
57956 goto nobufs_unlock_dec;
57957 spin_unlock(&cookie->lock);
57958
57959- fscache_stat(&fscache_n_retrieval_ops);
57960+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
57961
57962 /* pin the netfs read context in case we need to do the actual netfs
57963 * read because we've encountered a cache read failure */
57964@@ -585,15 +585,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
57965
57966 error:
57967 if (ret == -ENOMEM)
57968- fscache_stat(&fscache_n_retrievals_nomem);
57969+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
57970 else if (ret == -ERESTARTSYS)
57971- fscache_stat(&fscache_n_retrievals_intr);
57972+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
57973 else if (ret == -ENODATA)
57974- fscache_stat(&fscache_n_retrievals_nodata);
57975+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
57976 else if (ret < 0)
57977- fscache_stat(&fscache_n_retrievals_nobufs);
57978+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
57979 else
57980- fscache_stat(&fscache_n_retrievals_ok);
57981+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
57982
57983 fscache_put_retrieval(op);
57984 _leave(" = %d", ret);
57985@@ -606,7 +606,7 @@ nobufs_unlock:
57986 atomic_dec(&cookie->n_active);
57987 kfree(op);
57988 nobufs:
57989- fscache_stat(&fscache_n_retrievals_nobufs);
57990+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
57991 _leave(" = -ENOBUFS");
57992 return -ENOBUFS;
57993 }
57994@@ -630,7 +630,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
57995
57996 _enter("%p,%p,,,", cookie, page);
57997
57998- fscache_stat(&fscache_n_allocs);
57999+ fscache_stat_unchecked(&fscache_n_allocs);
58000
58001 if (hlist_empty(&cookie->backing_objects))
58002 goto nobufs;
58003@@ -662,7 +662,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
58004 goto nobufs_unlock;
58005 spin_unlock(&cookie->lock);
58006
58007- fscache_stat(&fscache_n_alloc_ops);
58008+ fscache_stat_unchecked(&fscache_n_alloc_ops);
58009
58010 ret = fscache_wait_for_operation_activation(
58011 object, &op->op,
58012@@ -679,11 +679,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
58013
58014 error:
58015 if (ret == -ERESTARTSYS)
58016- fscache_stat(&fscache_n_allocs_intr);
58017+ fscache_stat_unchecked(&fscache_n_allocs_intr);
58018 else if (ret < 0)
58019- fscache_stat(&fscache_n_allocs_nobufs);
58020+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
58021 else
58022- fscache_stat(&fscache_n_allocs_ok);
58023+ fscache_stat_unchecked(&fscache_n_allocs_ok);
58024
58025 fscache_put_retrieval(op);
58026 _leave(" = %d", ret);
58027@@ -694,7 +694,7 @@ nobufs_unlock:
58028 atomic_dec(&cookie->n_active);
58029 kfree(op);
58030 nobufs:
58031- fscache_stat(&fscache_n_allocs_nobufs);
58032+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
58033 _leave(" = -ENOBUFS");
58034 return -ENOBUFS;
58035 }
58036@@ -770,7 +770,7 @@ static void fscache_write_op(struct fscache_operation *_op)
58037
58038 spin_lock(&cookie->stores_lock);
58039
58040- fscache_stat(&fscache_n_store_calls);
58041+ fscache_stat_unchecked(&fscache_n_store_calls);
58042
58043 /* find a page to store */
58044 page = NULL;
58045@@ -781,7 +781,7 @@ static void fscache_write_op(struct fscache_operation *_op)
58046 page = results[0];
58047 _debug("gang %d [%lx]", n, page->index);
58048 if (page->index > op->store_limit) {
58049- fscache_stat(&fscache_n_store_pages_over_limit);
58050+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
58051 goto superseded;
58052 }
58053
58054@@ -793,7 +793,7 @@ static void fscache_write_op(struct fscache_operation *_op)
58055 spin_unlock(&cookie->stores_lock);
58056 spin_unlock(&object->lock);
58057
58058- fscache_stat(&fscache_n_store_pages);
58059+ fscache_stat_unchecked(&fscache_n_store_pages);
58060 fscache_stat(&fscache_n_cop_write_page);
58061 ret = object->cache->ops->write_page(op, page);
58062 fscache_stat_d(&fscache_n_cop_write_page);
58063@@ -896,7 +896,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
58064 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
58065 ASSERT(PageFsCache(page));
58066
58067- fscache_stat(&fscache_n_stores);
58068+ fscache_stat_unchecked(&fscache_n_stores);
58069
58070 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
58071 _leave(" = -ENOBUFS [invalidating]");
58072@@ -954,7 +954,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
58073 spin_unlock(&cookie->stores_lock);
58074 spin_unlock(&object->lock);
58075
58076- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
58077+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
58078 op->store_limit = object->store_limit;
58079
58080 atomic_inc(&cookie->n_active);
58081@@ -963,8 +963,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
58082
58083 spin_unlock(&cookie->lock);
58084 radix_tree_preload_end();
58085- fscache_stat(&fscache_n_store_ops);
58086- fscache_stat(&fscache_n_stores_ok);
58087+ fscache_stat_unchecked(&fscache_n_store_ops);
58088+ fscache_stat_unchecked(&fscache_n_stores_ok);
58089
58090 /* the work queue now carries its own ref on the object */
58091 fscache_put_operation(&op->op);
58092@@ -972,14 +972,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
58093 return 0;
58094
58095 already_queued:
58096- fscache_stat(&fscache_n_stores_again);
58097+ fscache_stat_unchecked(&fscache_n_stores_again);
58098 already_pending:
58099 spin_unlock(&cookie->stores_lock);
58100 spin_unlock(&object->lock);
58101 spin_unlock(&cookie->lock);
58102 radix_tree_preload_end();
58103 kfree(op);
58104- fscache_stat(&fscache_n_stores_ok);
58105+ fscache_stat_unchecked(&fscache_n_stores_ok);
58106 _leave(" = 0");
58107 return 0;
58108
58109@@ -999,14 +999,14 @@ nobufs:
58110 spin_unlock(&cookie->lock);
58111 radix_tree_preload_end();
58112 kfree(op);
58113- fscache_stat(&fscache_n_stores_nobufs);
58114+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
58115 _leave(" = -ENOBUFS");
58116 return -ENOBUFS;
58117
58118 nomem_free:
58119 kfree(op);
58120 nomem:
58121- fscache_stat(&fscache_n_stores_oom);
58122+ fscache_stat_unchecked(&fscache_n_stores_oom);
58123 _leave(" = -ENOMEM");
58124 return -ENOMEM;
58125 }
58126@@ -1024,7 +1024,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
58127 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
58128 ASSERTCMP(page, !=, NULL);
58129
58130- fscache_stat(&fscache_n_uncaches);
58131+ fscache_stat_unchecked(&fscache_n_uncaches);
58132
58133 /* cache withdrawal may beat us to it */
58134 if (!PageFsCache(page))
58135@@ -1075,7 +1075,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
58136 struct fscache_cookie *cookie = op->op.object->cookie;
58137
58138 #ifdef CONFIG_FSCACHE_STATS
58139- atomic_inc(&fscache_n_marks);
58140+ atomic_inc_unchecked(&fscache_n_marks);
58141 #endif
58142
58143 _debug("- mark %p{%lx}", page, page->index);
58144diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
58145index 40d13c7..ddf52b9 100644
58146--- a/fs/fscache/stats.c
58147+++ b/fs/fscache/stats.c
58148@@ -18,99 +18,99 @@
58149 /*
58150 * operation counters
58151 */
58152-atomic_t fscache_n_op_pend;
58153-atomic_t fscache_n_op_run;
58154-atomic_t fscache_n_op_enqueue;
58155-atomic_t fscache_n_op_requeue;
58156-atomic_t fscache_n_op_deferred_release;
58157-atomic_t fscache_n_op_release;
58158-atomic_t fscache_n_op_gc;
58159-atomic_t fscache_n_op_cancelled;
58160-atomic_t fscache_n_op_rejected;
58161+atomic_unchecked_t fscache_n_op_pend;
58162+atomic_unchecked_t fscache_n_op_run;
58163+atomic_unchecked_t fscache_n_op_enqueue;
58164+atomic_unchecked_t fscache_n_op_requeue;
58165+atomic_unchecked_t fscache_n_op_deferred_release;
58166+atomic_unchecked_t fscache_n_op_release;
58167+atomic_unchecked_t fscache_n_op_gc;
58168+atomic_unchecked_t fscache_n_op_cancelled;
58169+atomic_unchecked_t fscache_n_op_rejected;
58170
58171-atomic_t fscache_n_attr_changed;
58172-atomic_t fscache_n_attr_changed_ok;
58173-atomic_t fscache_n_attr_changed_nobufs;
58174-atomic_t fscache_n_attr_changed_nomem;
58175-atomic_t fscache_n_attr_changed_calls;
58176+atomic_unchecked_t fscache_n_attr_changed;
58177+atomic_unchecked_t fscache_n_attr_changed_ok;
58178+atomic_unchecked_t fscache_n_attr_changed_nobufs;
58179+atomic_unchecked_t fscache_n_attr_changed_nomem;
58180+atomic_unchecked_t fscache_n_attr_changed_calls;
58181
58182-atomic_t fscache_n_allocs;
58183-atomic_t fscache_n_allocs_ok;
58184-atomic_t fscache_n_allocs_wait;
58185-atomic_t fscache_n_allocs_nobufs;
58186-atomic_t fscache_n_allocs_intr;
58187-atomic_t fscache_n_allocs_object_dead;
58188-atomic_t fscache_n_alloc_ops;
58189-atomic_t fscache_n_alloc_op_waits;
58190+atomic_unchecked_t fscache_n_allocs;
58191+atomic_unchecked_t fscache_n_allocs_ok;
58192+atomic_unchecked_t fscache_n_allocs_wait;
58193+atomic_unchecked_t fscache_n_allocs_nobufs;
58194+atomic_unchecked_t fscache_n_allocs_intr;
58195+atomic_unchecked_t fscache_n_allocs_object_dead;
58196+atomic_unchecked_t fscache_n_alloc_ops;
58197+atomic_unchecked_t fscache_n_alloc_op_waits;
58198
58199-atomic_t fscache_n_retrievals;
58200-atomic_t fscache_n_retrievals_ok;
58201-atomic_t fscache_n_retrievals_wait;
58202-atomic_t fscache_n_retrievals_nodata;
58203-atomic_t fscache_n_retrievals_nobufs;
58204-atomic_t fscache_n_retrievals_intr;
58205-atomic_t fscache_n_retrievals_nomem;
58206-atomic_t fscache_n_retrievals_object_dead;
58207-atomic_t fscache_n_retrieval_ops;
58208-atomic_t fscache_n_retrieval_op_waits;
58209+atomic_unchecked_t fscache_n_retrievals;
58210+atomic_unchecked_t fscache_n_retrievals_ok;
58211+atomic_unchecked_t fscache_n_retrievals_wait;
58212+atomic_unchecked_t fscache_n_retrievals_nodata;
58213+atomic_unchecked_t fscache_n_retrievals_nobufs;
58214+atomic_unchecked_t fscache_n_retrievals_intr;
58215+atomic_unchecked_t fscache_n_retrievals_nomem;
58216+atomic_unchecked_t fscache_n_retrievals_object_dead;
58217+atomic_unchecked_t fscache_n_retrieval_ops;
58218+atomic_unchecked_t fscache_n_retrieval_op_waits;
58219
58220-atomic_t fscache_n_stores;
58221-atomic_t fscache_n_stores_ok;
58222-atomic_t fscache_n_stores_again;
58223-atomic_t fscache_n_stores_nobufs;
58224-atomic_t fscache_n_stores_oom;
58225-atomic_t fscache_n_store_ops;
58226-atomic_t fscache_n_store_calls;
58227-atomic_t fscache_n_store_pages;
58228-atomic_t fscache_n_store_radix_deletes;
58229-atomic_t fscache_n_store_pages_over_limit;
58230+atomic_unchecked_t fscache_n_stores;
58231+atomic_unchecked_t fscache_n_stores_ok;
58232+atomic_unchecked_t fscache_n_stores_again;
58233+atomic_unchecked_t fscache_n_stores_nobufs;
58234+atomic_unchecked_t fscache_n_stores_oom;
58235+atomic_unchecked_t fscache_n_store_ops;
58236+atomic_unchecked_t fscache_n_store_calls;
58237+atomic_unchecked_t fscache_n_store_pages;
58238+atomic_unchecked_t fscache_n_store_radix_deletes;
58239+atomic_unchecked_t fscache_n_store_pages_over_limit;
58240
58241-atomic_t fscache_n_store_vmscan_not_storing;
58242-atomic_t fscache_n_store_vmscan_gone;
58243-atomic_t fscache_n_store_vmscan_busy;
58244-atomic_t fscache_n_store_vmscan_cancelled;
58245-atomic_t fscache_n_store_vmscan_wait;
58246+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
58247+atomic_unchecked_t fscache_n_store_vmscan_gone;
58248+atomic_unchecked_t fscache_n_store_vmscan_busy;
58249+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
58250+atomic_unchecked_t fscache_n_store_vmscan_wait;
58251
58252-atomic_t fscache_n_marks;
58253-atomic_t fscache_n_uncaches;
58254+atomic_unchecked_t fscache_n_marks;
58255+atomic_unchecked_t fscache_n_uncaches;
58256
58257-atomic_t fscache_n_acquires;
58258-atomic_t fscache_n_acquires_null;
58259-atomic_t fscache_n_acquires_no_cache;
58260-atomic_t fscache_n_acquires_ok;
58261-atomic_t fscache_n_acquires_nobufs;
58262-atomic_t fscache_n_acquires_oom;
58263+atomic_unchecked_t fscache_n_acquires;
58264+atomic_unchecked_t fscache_n_acquires_null;
58265+atomic_unchecked_t fscache_n_acquires_no_cache;
58266+atomic_unchecked_t fscache_n_acquires_ok;
58267+atomic_unchecked_t fscache_n_acquires_nobufs;
58268+atomic_unchecked_t fscache_n_acquires_oom;
58269
58270-atomic_t fscache_n_invalidates;
58271-atomic_t fscache_n_invalidates_run;
58272+atomic_unchecked_t fscache_n_invalidates;
58273+atomic_unchecked_t fscache_n_invalidates_run;
58274
58275-atomic_t fscache_n_updates;
58276-atomic_t fscache_n_updates_null;
58277-atomic_t fscache_n_updates_run;
58278+atomic_unchecked_t fscache_n_updates;
58279+atomic_unchecked_t fscache_n_updates_null;
58280+atomic_unchecked_t fscache_n_updates_run;
58281
58282-atomic_t fscache_n_relinquishes;
58283-atomic_t fscache_n_relinquishes_null;
58284-atomic_t fscache_n_relinquishes_waitcrt;
58285-atomic_t fscache_n_relinquishes_retire;
58286+atomic_unchecked_t fscache_n_relinquishes;
58287+atomic_unchecked_t fscache_n_relinquishes_null;
58288+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
58289+atomic_unchecked_t fscache_n_relinquishes_retire;
58290
58291-atomic_t fscache_n_cookie_index;
58292-atomic_t fscache_n_cookie_data;
58293-atomic_t fscache_n_cookie_special;
58294+atomic_unchecked_t fscache_n_cookie_index;
58295+atomic_unchecked_t fscache_n_cookie_data;
58296+atomic_unchecked_t fscache_n_cookie_special;
58297
58298-atomic_t fscache_n_object_alloc;
58299-atomic_t fscache_n_object_no_alloc;
58300-atomic_t fscache_n_object_lookups;
58301-atomic_t fscache_n_object_lookups_negative;
58302-atomic_t fscache_n_object_lookups_positive;
58303-atomic_t fscache_n_object_lookups_timed_out;
58304-atomic_t fscache_n_object_created;
58305-atomic_t fscache_n_object_avail;
58306-atomic_t fscache_n_object_dead;
58307+atomic_unchecked_t fscache_n_object_alloc;
58308+atomic_unchecked_t fscache_n_object_no_alloc;
58309+atomic_unchecked_t fscache_n_object_lookups;
58310+atomic_unchecked_t fscache_n_object_lookups_negative;
58311+atomic_unchecked_t fscache_n_object_lookups_positive;
58312+atomic_unchecked_t fscache_n_object_lookups_timed_out;
58313+atomic_unchecked_t fscache_n_object_created;
58314+atomic_unchecked_t fscache_n_object_avail;
58315+atomic_unchecked_t fscache_n_object_dead;
58316
58317-atomic_t fscache_n_checkaux_none;
58318-atomic_t fscache_n_checkaux_okay;
58319-atomic_t fscache_n_checkaux_update;
58320-atomic_t fscache_n_checkaux_obsolete;
58321+atomic_unchecked_t fscache_n_checkaux_none;
58322+atomic_unchecked_t fscache_n_checkaux_okay;
58323+atomic_unchecked_t fscache_n_checkaux_update;
58324+atomic_unchecked_t fscache_n_checkaux_obsolete;
58325
58326 atomic_t fscache_n_cop_alloc_object;
58327 atomic_t fscache_n_cop_lookup_object;
58328@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
58329 seq_puts(m, "FS-Cache statistics\n");
58330
58331 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
58332- atomic_read(&fscache_n_cookie_index),
58333- atomic_read(&fscache_n_cookie_data),
58334- atomic_read(&fscache_n_cookie_special));
58335+ atomic_read_unchecked(&fscache_n_cookie_index),
58336+ atomic_read_unchecked(&fscache_n_cookie_data),
58337+ atomic_read_unchecked(&fscache_n_cookie_special));
58338
58339 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
58340- atomic_read(&fscache_n_object_alloc),
58341- atomic_read(&fscache_n_object_no_alloc),
58342- atomic_read(&fscache_n_object_avail),
58343- atomic_read(&fscache_n_object_dead));
58344+ atomic_read_unchecked(&fscache_n_object_alloc),
58345+ atomic_read_unchecked(&fscache_n_object_no_alloc),
58346+ atomic_read_unchecked(&fscache_n_object_avail),
58347+ atomic_read_unchecked(&fscache_n_object_dead));
58348 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
58349- atomic_read(&fscache_n_checkaux_none),
58350- atomic_read(&fscache_n_checkaux_okay),
58351- atomic_read(&fscache_n_checkaux_update),
58352- atomic_read(&fscache_n_checkaux_obsolete));
58353+ atomic_read_unchecked(&fscache_n_checkaux_none),
58354+ atomic_read_unchecked(&fscache_n_checkaux_okay),
58355+ atomic_read_unchecked(&fscache_n_checkaux_update),
58356+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
58357
58358 seq_printf(m, "Pages : mrk=%u unc=%u\n",
58359- atomic_read(&fscache_n_marks),
58360- atomic_read(&fscache_n_uncaches));
58361+ atomic_read_unchecked(&fscache_n_marks),
58362+ atomic_read_unchecked(&fscache_n_uncaches));
58363
58364 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
58365 " oom=%u\n",
58366- atomic_read(&fscache_n_acquires),
58367- atomic_read(&fscache_n_acquires_null),
58368- atomic_read(&fscache_n_acquires_no_cache),
58369- atomic_read(&fscache_n_acquires_ok),
58370- atomic_read(&fscache_n_acquires_nobufs),
58371- atomic_read(&fscache_n_acquires_oom));
58372+ atomic_read_unchecked(&fscache_n_acquires),
58373+ atomic_read_unchecked(&fscache_n_acquires_null),
58374+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
58375+ atomic_read_unchecked(&fscache_n_acquires_ok),
58376+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
58377+ atomic_read_unchecked(&fscache_n_acquires_oom));
58378
58379 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
58380- atomic_read(&fscache_n_object_lookups),
58381- atomic_read(&fscache_n_object_lookups_negative),
58382- atomic_read(&fscache_n_object_lookups_positive),
58383- atomic_read(&fscache_n_object_created),
58384- atomic_read(&fscache_n_object_lookups_timed_out));
58385+ atomic_read_unchecked(&fscache_n_object_lookups),
58386+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
58387+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
58388+ atomic_read_unchecked(&fscache_n_object_created),
58389+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
58390
58391 seq_printf(m, "Invals : n=%u run=%u\n",
58392- atomic_read(&fscache_n_invalidates),
58393- atomic_read(&fscache_n_invalidates_run));
58394+ atomic_read_unchecked(&fscache_n_invalidates),
58395+ atomic_read_unchecked(&fscache_n_invalidates_run));
58396
58397 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
58398- atomic_read(&fscache_n_updates),
58399- atomic_read(&fscache_n_updates_null),
58400- atomic_read(&fscache_n_updates_run));
58401+ atomic_read_unchecked(&fscache_n_updates),
58402+ atomic_read_unchecked(&fscache_n_updates_null),
58403+ atomic_read_unchecked(&fscache_n_updates_run));
58404
58405 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
58406- atomic_read(&fscache_n_relinquishes),
58407- atomic_read(&fscache_n_relinquishes_null),
58408- atomic_read(&fscache_n_relinquishes_waitcrt),
58409- atomic_read(&fscache_n_relinquishes_retire));
58410+ atomic_read_unchecked(&fscache_n_relinquishes),
58411+ atomic_read_unchecked(&fscache_n_relinquishes_null),
58412+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
58413+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
58414
58415 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
58416- atomic_read(&fscache_n_attr_changed),
58417- atomic_read(&fscache_n_attr_changed_ok),
58418- atomic_read(&fscache_n_attr_changed_nobufs),
58419- atomic_read(&fscache_n_attr_changed_nomem),
58420- atomic_read(&fscache_n_attr_changed_calls));
58421+ atomic_read_unchecked(&fscache_n_attr_changed),
58422+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
58423+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
58424+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
58425+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
58426
58427 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
58428- atomic_read(&fscache_n_allocs),
58429- atomic_read(&fscache_n_allocs_ok),
58430- atomic_read(&fscache_n_allocs_wait),
58431- atomic_read(&fscache_n_allocs_nobufs),
58432- atomic_read(&fscache_n_allocs_intr));
58433+ atomic_read_unchecked(&fscache_n_allocs),
58434+ atomic_read_unchecked(&fscache_n_allocs_ok),
58435+ atomic_read_unchecked(&fscache_n_allocs_wait),
58436+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
58437+ atomic_read_unchecked(&fscache_n_allocs_intr));
58438 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
58439- atomic_read(&fscache_n_alloc_ops),
58440- atomic_read(&fscache_n_alloc_op_waits),
58441- atomic_read(&fscache_n_allocs_object_dead));
58442+ atomic_read_unchecked(&fscache_n_alloc_ops),
58443+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
58444+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
58445
58446 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
58447 " int=%u oom=%u\n",
58448- atomic_read(&fscache_n_retrievals),
58449- atomic_read(&fscache_n_retrievals_ok),
58450- atomic_read(&fscache_n_retrievals_wait),
58451- atomic_read(&fscache_n_retrievals_nodata),
58452- atomic_read(&fscache_n_retrievals_nobufs),
58453- atomic_read(&fscache_n_retrievals_intr),
58454- atomic_read(&fscache_n_retrievals_nomem));
58455+ atomic_read_unchecked(&fscache_n_retrievals),
58456+ atomic_read_unchecked(&fscache_n_retrievals_ok),
58457+ atomic_read_unchecked(&fscache_n_retrievals_wait),
58458+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
58459+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
58460+ atomic_read_unchecked(&fscache_n_retrievals_intr),
58461+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
58462 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
58463- atomic_read(&fscache_n_retrieval_ops),
58464- atomic_read(&fscache_n_retrieval_op_waits),
58465- atomic_read(&fscache_n_retrievals_object_dead));
58466+ atomic_read_unchecked(&fscache_n_retrieval_ops),
58467+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
58468+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
58469
58470 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
58471- atomic_read(&fscache_n_stores),
58472- atomic_read(&fscache_n_stores_ok),
58473- atomic_read(&fscache_n_stores_again),
58474- atomic_read(&fscache_n_stores_nobufs),
58475- atomic_read(&fscache_n_stores_oom));
58476+ atomic_read_unchecked(&fscache_n_stores),
58477+ atomic_read_unchecked(&fscache_n_stores_ok),
58478+ atomic_read_unchecked(&fscache_n_stores_again),
58479+ atomic_read_unchecked(&fscache_n_stores_nobufs),
58480+ atomic_read_unchecked(&fscache_n_stores_oom));
58481 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
58482- atomic_read(&fscache_n_store_ops),
58483- atomic_read(&fscache_n_store_calls),
58484- atomic_read(&fscache_n_store_pages),
58485- atomic_read(&fscache_n_store_radix_deletes),
58486- atomic_read(&fscache_n_store_pages_over_limit));
58487+ atomic_read_unchecked(&fscache_n_store_ops),
58488+ atomic_read_unchecked(&fscache_n_store_calls),
58489+ atomic_read_unchecked(&fscache_n_store_pages),
58490+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
58491+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
58492
58493 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
58494- atomic_read(&fscache_n_store_vmscan_not_storing),
58495- atomic_read(&fscache_n_store_vmscan_gone),
58496- atomic_read(&fscache_n_store_vmscan_busy),
58497- atomic_read(&fscache_n_store_vmscan_cancelled),
58498- atomic_read(&fscache_n_store_vmscan_wait));
58499+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
58500+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
58501+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
58502+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
58503+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
58504
58505 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
58506- atomic_read(&fscache_n_op_pend),
58507- atomic_read(&fscache_n_op_run),
58508- atomic_read(&fscache_n_op_enqueue),
58509- atomic_read(&fscache_n_op_cancelled),
58510- atomic_read(&fscache_n_op_rejected));
58511+ atomic_read_unchecked(&fscache_n_op_pend),
58512+ atomic_read_unchecked(&fscache_n_op_run),
58513+ atomic_read_unchecked(&fscache_n_op_enqueue),
58514+ atomic_read_unchecked(&fscache_n_op_cancelled),
58515+ atomic_read_unchecked(&fscache_n_op_rejected));
58516 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
58517- atomic_read(&fscache_n_op_deferred_release),
58518- atomic_read(&fscache_n_op_release),
58519- atomic_read(&fscache_n_op_gc));
58520+ atomic_read_unchecked(&fscache_n_op_deferred_release),
58521+ atomic_read_unchecked(&fscache_n_op_release),
58522+ atomic_read_unchecked(&fscache_n_op_gc));
58523
58524 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
58525 atomic_read(&fscache_n_cop_alloc_object),
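
The hunk above is one instance of a conversion repeated throughout this patch: under PAX_REFCOUNT, plain atomic_t arithmetic traps on overflow to catch reference-count bugs, so pure statistics counters that may legitimately wrap are moved to the *_unchecked variants, which keep the stock semantics. A minimal sketch of how those variants presumably collapse back to the ordinary operations when the feature is compiled out (hypothetical excerpt, not the exact header text):

        #ifndef CONFIG_PAX_REFCOUNT
        typedef atomic_t atomic_unchecked_t;
        #define atomic_read_unchecked(v)                atomic_read(v)
        #define atomic_set_unchecked(v, i)              atomic_set((v), (i))
        #define atomic_inc_unchecked(v)                 atomic_inc(v)
        #define atomic_dec_unchecked(v)                 atomic_dec(v)
        #define atomic_inc_return_unchecked(v)          atomic_inc_return(v)
        #define atomic_add_return_unchecked(i, v)       atomic_add_return((i), (v))
        #endif

Note that the fscache CacheOp counters just below are left as plain atomic_read(): they track balanced get/put pairs, so overflow detection is wanted there.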
58526diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
58527index adbfd66..4b25822 100644
58528--- a/fs/fuse/cuse.c
58529+++ b/fs/fuse/cuse.c
58530@@ -603,10 +603,12 @@ static int __init cuse_init(void)
58531 INIT_LIST_HEAD(&cuse_conntbl[i]);
58532
58533 /* inherit and extend fuse_dev_operations */
58534- cuse_channel_fops = fuse_dev_operations;
58535- cuse_channel_fops.owner = THIS_MODULE;
58536- cuse_channel_fops.open = cuse_channel_open;
58537- cuse_channel_fops.release = cuse_channel_release;
58538+ pax_open_kernel();
58539+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
58540+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
58541+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
58542+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
58543+ pax_close_kernel();
58544
58545 cuse_class = class_create(THIS_MODULE, "cuse");
58546 if (IS_ERR(cuse_class))
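
cuse_channel_fops is constified elsewhere in this patch, so it can no longer be assigned to directly at init time; pax_open_kernel()/pax_close_kernel() briefly lift the write protection on kernel read-only data, and the individual members are patched through casts that strip the const. The same idiom, reduced to a skeleton (my_fops and my_open are hypothetical, and __read_only is assumed to be the grsecurity attribute that places an object in read-only data):

        static struct file_operations my_fops __read_only;      /* hypothetical */

        static int my_open(struct inode *inode, struct file *file)
        {
                return 0;                               /* hypothetical stub */
        }

        static int __init my_init(void)
        {
                pax_open_kernel();                      /* unprotect rodata */
                *(void **)&my_fops.owner = THIS_MODULE;
                *(void **)&my_fops.open  = my_open;
                pax_close_kernel();                     /* reseal */
                return 0;
        }

The casts through void ** are what defeat the compile-time const check; the window between open and close is kept as short as possible.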
58547diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
58548index ef74ad5..c9ac759e 100644
58549--- a/fs/fuse/dev.c
58550+++ b/fs/fuse/dev.c
58551@@ -1339,7 +1339,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
58552 ret = 0;
58553 pipe_lock(pipe);
58554
58555- if (!pipe->readers) {
58556+ if (!atomic_read(&pipe->readers)) {
58557 send_sig(SIGPIPE, current, 0);
58558 if (!ret)
58559 ret = -EPIPE;
58560@@ -1364,7 +1364,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
58561 page_nr++;
58562 ret += buf->len;
58563
58564- if (pipe->files)
58565+ if (atomic_read(&pipe->files))
58566 do_wakeup = 1;
58567 }
58568
58569diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
58570index b7989f2..1f72ec4 100644
58571--- a/fs/fuse/dir.c
58572+++ b/fs/fuse/dir.c
58573@@ -1438,7 +1438,7 @@ static char *read_link(struct dentry *dentry)
58574 return link;
58575 }
58576
58577-static void free_link(char *link)
58578+static void free_link(const char *link)
58579 {
58580 if (!IS_ERR(link))
58581 free_page((unsigned long) link);
58582diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
58583index 1298766..c964c60 100644
58584--- a/fs/gfs2/inode.c
58585+++ b/fs/gfs2/inode.c
58586@@ -1515,7 +1515,7 @@ out:
58587
58588 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
58589 {
58590- char *s = nd_get_link(nd);
58591+ const char *s = nd_get_link(nd);
58592 if (!IS_ERR(s))
58593 kfree(s);
58594 }
58595diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
58596index 2543728..14d7bd4 100644
58597--- a/fs/hostfs/hostfs_kern.c
58598+++ b/fs/hostfs/hostfs_kern.c
58599@@ -904,7 +904,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
58600
58601 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
58602 {
58603- char *s = nd_get_link(nd);
58604+ const char *s = nd_get_link(nd);
58605 if (!IS_ERR(s))
58606 __putname(s);
58607 }
58608diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
58609index d19b30a..ef89c36 100644
58610--- a/fs/hugetlbfs/inode.c
58611+++ b/fs/hugetlbfs/inode.c
58612@@ -152,6 +152,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
58613 struct mm_struct *mm = current->mm;
58614 struct vm_area_struct *vma;
58615 struct hstate *h = hstate_file(file);
58616+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
58617 struct vm_unmapped_area_info info;
58618
58619 if (len & ~huge_page_mask(h))
58620@@ -165,17 +166,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
58621 return addr;
58622 }
58623
58624+#ifdef CONFIG_PAX_RANDMMAP
58625+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
58626+#endif
58627+
58628 if (addr) {
58629 addr = ALIGN(addr, huge_page_size(h));
58630 vma = find_vma(mm, addr);
58631- if (TASK_SIZE - len >= addr &&
58632- (!vma || addr + len <= vma->vm_start))
58633+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
58634 return addr;
58635 }
58636
58637 info.flags = 0;
58638 info.length = len;
58639 info.low_limit = TASK_UNMAPPED_BASE;
58640+
58641+#ifdef CONFIG_PAX_RANDMMAP
58642+ if (mm->pax_flags & MF_PAX_RANDMMAP)
58643+ info.low_limit += mm->delta_mmap;
58644+#endif
58645+
58646 info.high_limit = TASK_SIZE;
58647 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
58648 info.align_offset = 0;
58649@@ -908,7 +918,7 @@ static struct file_system_type hugetlbfs_fs_type = {
58650 };
58651 MODULE_ALIAS_FS("hugetlbfs");
58652
58653-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
58654+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
58655
58656 static int can_do_hugetlb_shm(void)
58657 {
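
Two things happen in the hugetlbfs hunk, and both recur in every get_unmapped_area implementation this patch touches: the caller's address hint is honoured only when the mm is not randomized, and the search floor is shifted by the per-mm delta_mmap so the base of the mapping range itself is randomized. The hint guard is easy to misread because the #ifdef'd condition deliberately dangles over the following if block; schematically:

        #ifdef CONFIG_PAX_RANDMMAP
        if (!(mm->pax_flags & MF_PAX_RANDMMAP))         /* guards the whole block below */
        #endif
        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                /* ... gap check via check_heap_stack_gap(), as above ... */
        }

With randomization enabled the hint is skipped entirely, which is what keeps attacker-chosen addresses from defeating the layout randomization.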
58658diff --git a/fs/inode.c b/fs/inode.c
58659index b33ba8e..3c79a47 100644
58660--- a/fs/inode.c
58661+++ b/fs/inode.c
58662@@ -849,8 +849,8 @@ unsigned int get_next_ino(void)
58663
58664 #ifdef CONFIG_SMP
58665 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
58666- static atomic_t shared_last_ino;
58667- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
58668+ static atomic_unchecked_t shared_last_ino;
58669+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
58670
58671 res = next - LAST_INO_BATCH;
58672 }
58673diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
58674index 4a6cf28..d3a29d3 100644
58675--- a/fs/jffs2/erase.c
58676+++ b/fs/jffs2/erase.c
58677@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
58678 struct jffs2_unknown_node marker = {
58679 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
58680 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
58681- .totlen = cpu_to_je32(c->cleanmarker_size)
58682+ .totlen = cpu_to_je32(c->cleanmarker_size),
58683+ .hdr_crc = cpu_to_je32(0)
58684 };
58685
58686 jffs2_prealloc_raw_node_refs(c, jeb, 1);
58687diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
58688index a6597d6..41b30ec 100644
58689--- a/fs/jffs2/wbuf.c
58690+++ b/fs/jffs2/wbuf.c
58691@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
58692 {
58693 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
58694 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
58695- .totlen = constant_cpu_to_je32(8)
58696+ .totlen = constant_cpu_to_je32(8),
58697+ .hdr_crc = constant_cpu_to_je32(0)
58698 };
58699
58700 /*
58701diff --git a/fs/jfs/super.c b/fs/jfs/super.c
58702index 6669aa2..36b033d 100644
58703--- a/fs/jfs/super.c
58704+++ b/fs/jfs/super.c
58705@@ -882,7 +882,7 @@ static int __init init_jfs_fs(void)
58706
58707 jfs_inode_cachep =
58708 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
58709- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
58710+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
58711 init_once);
58712 if (jfs_inode_cachep == NULL)
58713 return -ENOMEM;
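
SLAB_USERCOPY is a grsecurity-specific cache flag: under PAX_USERCOPY, copy_to_user()/copy_from_user() refuse to touch slab objects unless their cache is explicitly whitelisted with it. jfs_ip needs the flag because struct jfs_inode_info embeds data (inline symlink bodies, for example) that is copied straight to user space. The allocator-side check, boiled down to its core (a simplified sketch, not the exact grsecurity code, which also validates the offset and length against the object):

        static const char *check_heap_object(const void *ptr, unsigned long n)
        {
                struct page *page = virt_to_head_page(ptr);
                struct kmem_cache *s;

                if (!PageSlab(page))
                        return NULL;            /* not slab memory: other checks apply */

                s = page->slab_cache;
                if (!(s->flags & SLAB_USERCOPY))
                        return s->name;         /* reject: cache not whitelisted */

                return NULL;                    /* whitelisted cache: allow */
        }

Returning the cache name lets the caller report exactly which cache an exploit attempt (or a missing whitelist entry) touched.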
58714diff --git a/fs/libfs.c b/fs/libfs.c
58715index 193e0c2..7404665 100644
58716--- a/fs/libfs.c
58717+++ b/fs/libfs.c
58718@@ -150,6 +150,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
58719
58720 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
58721 struct dentry *next = list_entry(p, struct dentry, d_u.d_child);
58722+ char d_name[sizeof(next->d_iname)];
58723+ const unsigned char *name;
58724+
58725 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
58726 if (!simple_positive(next)) {
58727 spin_unlock(&next->d_lock);
58728@@ -158,7 +161,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
58729
58730 spin_unlock(&next->d_lock);
58731 spin_unlock(&dentry->d_lock);
58732- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
58733+ name = next->d_name.name;
58734+ if (name == next->d_iname) {
58735+ memcpy(d_name, name, next->d_name.len);
58736+ name = d_name;
58737+ }
58738+ if (!dir_emit(ctx, name, next->d_name.len,
58739 next->d_inode->i_ino, dt_type(next->d_inode)))
58740 return 0;
58741 spin_lock(&dentry->d_lock);
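
The dcache_readdir change closes a subtle race rather than adding a policy hook: for short names the bytes live inline in the dentry (d_iname), and once d_lock and the parent's lock are dropped a concurrent rename can rewrite those bytes in place while dir_emit() is still copying them to user space. Snapshotting the inline name into a stack buffer first makes the emitted name stable. Restating the new logic with the reasoning as comments:

        char d_name[sizeof(next->d_iname)];     /* stack copy, same capacity */
        const unsigned char *name = next->d_name.name;

        if (name == next->d_iname) {            /* short name stored inline */
                memcpy(d_name, name, next->d_name.len);
                name = d_name;                  /* emit from the stable copy */
        }
        /* long names are allocated separately, so the pointer read under
         * the lock stays usable here; the inline case is the one a rename
         * can scribble over in place */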
58742diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
58743index acd3947..1f896e2 100644
58744--- a/fs/lockd/clntproc.c
58745+++ b/fs/lockd/clntproc.c
58746@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
58747 /*
58748 * Cookie counter for NLM requests
58749 */
58750-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
58751+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
58752
58753 void nlmclnt_next_cookie(struct nlm_cookie *c)
58754 {
58755- u32 cookie = atomic_inc_return(&nlm_cookie);
58756+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
58757
58758 memcpy(c->data, &cookie, 4);
58759 c->len=4;
58760diff --git a/fs/locks.c b/fs/locks.c
58761index b27a300..4156d0b 100644
58762--- a/fs/locks.c
58763+++ b/fs/locks.c
58764@@ -2183,16 +2183,16 @@ void locks_remove_flock(struct file *filp)
58765 return;
58766
58767 if (filp->f_op && filp->f_op->flock) {
58768- struct file_lock fl = {
58769+ struct file_lock flock = {
58770 .fl_pid = current->tgid,
58771 .fl_file = filp,
58772 .fl_flags = FL_FLOCK,
58773 .fl_type = F_UNLCK,
58774 .fl_end = OFFSET_MAX,
58775 };
58776- filp->f_op->flock(filp, F_SETLKW, &fl);
58777- if (fl.fl_ops && fl.fl_ops->fl_release_private)
58778- fl.fl_ops->fl_release_private(&fl);
58779+ filp->f_op->flock(filp, F_SETLKW, &flock);
58780+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
58781+ flock.fl_ops->fl_release_private(&flock);
58782 }
58783
58784 spin_lock(&inode->i_lock);
58785diff --git a/fs/namei.c b/fs/namei.c
58786index 23ac50f..c6757a5 100644
58787--- a/fs/namei.c
58788+++ b/fs/namei.c
58789@@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
58790 if (ret != -EACCES)
58791 return ret;
58792
58793+#ifdef CONFIG_GRKERNSEC
58794+ /* we'll block if we have to log due to a denied capability use */
58795+ if (mask & MAY_NOT_BLOCK)
58796+ return -ECHILD;
58797+#endif
58798+
58799 if (S_ISDIR(inode->i_mode)) {
58800 /* DACs are overridable for directories */
58801- if (inode_capable(inode, CAP_DAC_OVERRIDE))
58802- return 0;
58803 if (!(mask & MAY_WRITE))
58804- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
58805+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
58806+ inode_capable(inode, CAP_DAC_READ_SEARCH))
58807 return 0;
58808+ if (inode_capable(inode, CAP_DAC_OVERRIDE))
58809+ return 0;
58810 return -EACCES;
58811 }
58812 /*
58813+ * Searching includes executable on directories, else just read.
58814+ */
58815+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
58816+ if (mask == MAY_READ)
58817+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
58818+ inode_capable(inode, CAP_DAC_READ_SEARCH))
58819+ return 0;
58820+
58821+ /*
58822 * Read/write DACs are always overridable.
58823 * Executable DACs are overridable when there is
58824 * at least one exec bit set.
58825@@ -337,14 +353,6 @@ int generic_permission(struct inode *inode, int mask)
58826 if (inode_capable(inode, CAP_DAC_OVERRIDE))
58827 return 0;
58828
58829- /*
58830- * Searching includes executable on directories, else just read.
58831- */
58832- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
58833- if (mask == MAY_READ)
58834- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
58835- return 0;
58836-
58837 return -EACCES;
58838 }
58839
58840@@ -821,7 +829,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
58841 {
58842 struct dentry *dentry = link->dentry;
58843 int error;
58844- char *s;
58845+ const char *s;
58846
58847 BUG_ON(nd->flags & LOOKUP_RCU);
58848
58849@@ -842,6 +850,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
58850 if (error)
58851 goto out_put_nd_path;
58852
58853+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
58854+ dentry->d_inode, dentry, nd->path.mnt)) {
58855+ error = -EACCES;
58856+ goto out_put_nd_path;
58857+ }
58858+
58859 nd->last_type = LAST_BIND;
58860 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
58861 error = PTR_ERR(*p);
58862@@ -1602,6 +1616,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
58863 if (res)
58864 break;
58865 res = walk_component(nd, path, LOOKUP_FOLLOW);
58866+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
58867+ res = -EACCES;
58868 put_link(nd, &link, cookie);
58869 } while (res > 0);
58870
58871@@ -1700,7 +1716,7 @@ EXPORT_SYMBOL(full_name_hash);
58872 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
58873 {
58874 unsigned long a, b, adata, bdata, mask, hash, len;
58875- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
58876+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
58877
58878 hash = a = 0;
58879 len = -sizeof(unsigned long);
58880@@ -1981,6 +1997,8 @@ static int path_lookupat(int dfd, const char *name,
58881 if (err)
58882 break;
58883 err = lookup_last(nd, &path);
58884+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
58885+ err = -EACCES;
58886 put_link(nd, &link, cookie);
58887 }
58888 }
58889@@ -1988,6 +2006,13 @@ static int path_lookupat(int dfd, const char *name,
58890 if (!err)
58891 err = complete_walk(nd);
58892
58893+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
58894+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
58895+ path_put(&nd->path);
58896+ err = -ENOENT;
58897+ }
58898+ }
58899+
58900 if (!err && nd->flags & LOOKUP_DIRECTORY) {
58901 if (!can_lookup(nd->inode)) {
58902 path_put(&nd->path);
58903@@ -2015,8 +2040,15 @@ static int filename_lookup(int dfd, struct filename *name,
58904 retval = path_lookupat(dfd, name->name,
58905 flags | LOOKUP_REVAL, nd);
58906
58907- if (likely(!retval))
58908+ if (likely(!retval)) {
58909 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
58910+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
58911+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
58912+ path_put(&nd->path);
58913+ return -ENOENT;
58914+ }
58915+ }
58916+ }
58917 return retval;
58918 }
58919
58920@@ -2587,6 +2619,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
58921 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
58922 return -EPERM;
58923
58924+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
58925+ return -EPERM;
58926+ if (gr_handle_rawio(inode))
58927+ return -EPERM;
58928+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
58929+ return -EACCES;
58930+
58931 return 0;
58932 }
58933
58934@@ -2818,7 +2857,7 @@ looked_up:
58935 * cleared otherwise prior to returning.
58936 */
58937 static int lookup_open(struct nameidata *nd, struct path *path,
58938- struct file *file,
58939+ struct path *link, struct file *file,
58940 const struct open_flags *op,
58941 bool got_write, int *opened)
58942 {
58943@@ -2853,6 +2892,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
58944 /* Negative dentry, just create the file */
58945 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
58946 umode_t mode = op->mode;
58947+
58948+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
58949+ error = -EACCES;
58950+ goto out_dput;
58951+ }
58952+
58953+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
58954+ error = -EACCES;
58955+ goto out_dput;
58956+ }
58957+
58958 if (!IS_POSIXACL(dir->d_inode))
58959 mode &= ~current_umask();
58960 /*
58961@@ -2874,6 +2924,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
58962 nd->flags & LOOKUP_EXCL);
58963 if (error)
58964 goto out_dput;
58965+ else
58966+ gr_handle_create(dentry, nd->path.mnt);
58967 }
58968 out_no_open:
58969 path->dentry = dentry;
58970@@ -2888,7 +2940,7 @@ out_dput:
58971 /*
58972 * Handle the last step of open()
58973 */
58974-static int do_last(struct nameidata *nd, struct path *path,
58975+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
58976 struct file *file, const struct open_flags *op,
58977 int *opened, struct filename *name)
58978 {
58979@@ -2938,6 +2990,15 @@ static int do_last(struct nameidata *nd, struct path *path,
58980 if (error)
58981 return error;
58982
58983+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
58984+ error = -ENOENT;
58985+ goto out;
58986+ }
58987+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
58988+ error = -EACCES;
58989+ goto out;
58990+ }
58991+
58992 audit_inode(name, dir, LOOKUP_PARENT);
58993 error = -EISDIR;
58994 /* trailing slashes? */
58995@@ -2957,7 +3018,7 @@ retry_lookup:
58996 */
58997 }
58998 mutex_lock(&dir->d_inode->i_mutex);
58999- error = lookup_open(nd, path, file, op, got_write, opened);
59000+ error = lookup_open(nd, path, link, file, op, got_write, opened);
59001 mutex_unlock(&dir->d_inode->i_mutex);
59002
59003 if (error <= 0) {
59004@@ -2981,11 +3042,28 @@ retry_lookup:
59005 goto finish_open_created;
59006 }
59007
59008+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
59009+ error = -ENOENT;
59010+ goto exit_dput;
59011+ }
59012+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
59013+ error = -EACCES;
59014+ goto exit_dput;
59015+ }
59016+
59017 /*
59018 * create/update audit record if it already exists.
59019 */
59020- if (path->dentry->d_inode)
59021+ if (path->dentry->d_inode) {
59022+ /* only check if O_CREAT is specified; all other checks need to
59023+    go into may_open */
59024+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
59025+ error = -EACCES;
59026+ goto exit_dput;
59027+ }
59028+
59029 audit_inode(name, path->dentry, 0);
59030+ }
59031
59032 /*
59033 * If atomic_open() acquired write access it is dropped now due to
59034@@ -3026,6 +3104,11 @@ finish_lookup:
59035 }
59036 }
59037 BUG_ON(inode != path->dentry->d_inode);
59038+ /* if we're resolving a symlink to another symlink */
59039+ if (link && gr_handle_symlink_owner(link, inode)) {
59040+ error = -EACCES;
59041+ goto out;
59042+ }
59043 return 1;
59044 }
59045
59046@@ -3035,7 +3118,6 @@ finish_lookup:
59047 save_parent.dentry = nd->path.dentry;
59048 save_parent.mnt = mntget(path->mnt);
59049 nd->path.dentry = path->dentry;
59050-
59051 }
59052 nd->inode = inode;
59053 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
59054@@ -3045,7 +3127,18 @@ finish_open:
59055 path_put(&save_parent);
59056 return error;
59057 }
59058+
59059+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
59060+ error = -ENOENT;
59061+ goto out;
59062+ }
59063+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
59064+ error = -EACCES;
59065+ goto out;
59066+ }
59067+
59068 audit_inode(name, nd->path.dentry, 0);
59069+
59070 error = -EISDIR;
59071 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
59072 goto out;
59073@@ -3208,7 +3301,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
59074 if (unlikely(error))
59075 goto out;
59076
59077- error = do_last(nd, &path, file, op, &opened, pathname);
59078+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
59079 while (unlikely(error > 0)) { /* trailing symlink */
59080 struct path link = path;
59081 void *cookie;
59082@@ -3226,7 +3319,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
59083 error = follow_link(&link, nd, &cookie);
59084 if (unlikely(error))
59085 break;
59086- error = do_last(nd, &path, file, op, &opened, pathname);
59087+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
59088 put_link(nd, &link, cookie);
59089 }
59090 out:
59091@@ -3326,8 +3419,12 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
59092 goto unlock;
59093
59094 error = -EEXIST;
59095- if (dentry->d_inode)
59096+ if (dentry->d_inode) {
59097+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
59098+ error = -ENOENT;
59099+ }
59100 goto fail;
59101+ }
59102 /*
59103 * Special case - lookup gave negative, but... we had foo/bar/
59104 * From the vfs_mknod() POV we just have a negative dentry -
59105@@ -3379,6 +3476,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
59106 }
59107 EXPORT_SYMBOL(user_path_create);
59108
59109+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
59110+{
59111+ struct filename *tmp = getname(pathname);
59112+ struct dentry *res;
59113+ if (IS_ERR(tmp))
59114+ return ERR_CAST(tmp);
59115+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
59116+ if (IS_ERR(res))
59117+ putname(tmp);
59118+ else
59119+ *to = tmp;
59120+ return res;
59121+}
59122+
59123 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
59124 {
59125 int error = may_create(dir, dentry);
59126@@ -3441,6 +3552,17 @@ retry:
59127
59128 if (!IS_POSIXACL(path.dentry->d_inode))
59129 mode &= ~current_umask();
59130+
59131+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
59132+ error = -EPERM;
59133+ goto out;
59134+ }
59135+
59136+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
59137+ error = -EACCES;
59138+ goto out;
59139+ }
59140+
59141 error = security_path_mknod(&path, dentry, mode, dev);
59142 if (error)
59143 goto out;
59144@@ -3457,6 +3579,8 @@ retry:
59145 break;
59146 }
59147 out:
59148+ if (!error)
59149+ gr_handle_create(dentry, path.mnt);
59150 done_path_create(&path, dentry);
59151 if (retry_estale(error, lookup_flags)) {
59152 lookup_flags |= LOOKUP_REVAL;
59153@@ -3509,9 +3633,16 @@ retry:
59154
59155 if (!IS_POSIXACL(path.dentry->d_inode))
59156 mode &= ~current_umask();
59157+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
59158+ error = -EACCES;
59159+ goto out;
59160+ }
59161 error = security_path_mkdir(&path, dentry, mode);
59162 if (!error)
59163 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
59164+ if (!error)
59165+ gr_handle_create(dentry, path.mnt);
59166+out:
59167 done_path_create(&path, dentry);
59168 if (retry_estale(error, lookup_flags)) {
59169 lookup_flags |= LOOKUP_REVAL;
59170@@ -3592,6 +3723,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
59171 struct filename *name;
59172 struct dentry *dentry;
59173 struct nameidata nd;
59174+ ino_t saved_ino = 0;
59175+ dev_t saved_dev = 0;
59176 unsigned int lookup_flags = 0;
59177 retry:
59178 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
59179@@ -3624,10 +3757,21 @@ retry:
59180 error = -ENOENT;
59181 goto exit3;
59182 }
59183+
59184+ saved_ino = dentry->d_inode->i_ino;
59185+ saved_dev = gr_get_dev_from_dentry(dentry);
59186+
59187+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
59188+ error = -EACCES;
59189+ goto exit3;
59190+ }
59191+
59192 error = security_path_rmdir(&nd.path, dentry);
59193 if (error)
59194 goto exit3;
59195 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
59196+ if (!error && (saved_dev || saved_ino))
59197+ gr_handle_delete(saved_ino, saved_dev);
59198 exit3:
59199 dput(dentry);
59200 exit2:
59201@@ -3693,6 +3837,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
59202 struct dentry *dentry;
59203 struct nameidata nd;
59204 struct inode *inode = NULL;
59205+ ino_t saved_ino = 0;
59206+ dev_t saved_dev = 0;
59207 unsigned int lookup_flags = 0;
59208 retry:
59209 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
59210@@ -3719,10 +3865,22 @@ retry:
59211 if (!inode)
59212 goto slashes;
59213 ihold(inode);
59214+
59215+ if (inode->i_nlink <= 1) {
59216+ saved_ino = inode->i_ino;
59217+ saved_dev = gr_get_dev_from_dentry(dentry);
59218+ }
59219+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
59220+ error = -EACCES;
59221+ goto exit2;
59222+ }
59223+
59224 error = security_path_unlink(&nd.path, dentry);
59225 if (error)
59226 goto exit2;
59227 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
59228+ if (!error && (saved_ino || saved_dev))
59229+ gr_handle_delete(saved_ino, saved_dev);
59230 exit2:
59231 dput(dentry);
59232 }
59233@@ -3800,9 +3958,17 @@ retry:
59234 if (IS_ERR(dentry))
59235 goto out_putname;
59236
59237+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
59238+ error = -EACCES;
59239+ goto out;
59240+ }
59241+
59242 error = security_path_symlink(&path, dentry, from->name);
59243 if (!error)
59244 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
59245+ if (!error)
59246+ gr_handle_create(dentry, path.mnt);
59247+out:
59248 done_path_create(&path, dentry);
59249 if (retry_estale(error, lookup_flags)) {
59250 lookup_flags |= LOOKUP_REVAL;
59251@@ -3882,6 +4048,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
59252 {
59253 struct dentry *new_dentry;
59254 struct path old_path, new_path;
59255+ struct filename *to = NULL;
59256 int how = 0;
59257 int error;
59258
59259@@ -3905,7 +4072,7 @@ retry:
59260 if (error)
59261 return error;
59262
59263- new_dentry = user_path_create(newdfd, newname, &new_path,
59264+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
59265 (how & LOOKUP_REVAL));
59266 error = PTR_ERR(new_dentry);
59267 if (IS_ERR(new_dentry))
59268@@ -3917,11 +4084,28 @@ retry:
59269 error = may_linkat(&old_path);
59270 if (unlikely(error))
59271 goto out_dput;
59272+
59273+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
59274+ old_path.dentry->d_inode,
59275+ old_path.dentry->d_inode->i_mode, to)) {
59276+ error = -EACCES;
59277+ goto out_dput;
59278+ }
59279+
59280+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
59281+ old_path.dentry, old_path.mnt, to)) {
59282+ error = -EACCES;
59283+ goto out_dput;
59284+ }
59285+
59286 error = security_path_link(old_path.dentry, &new_path, new_dentry);
59287 if (error)
59288 goto out_dput;
59289 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
59290+ if (!error)
59291+ gr_handle_create(new_dentry, new_path.mnt);
59292 out_dput:
59293+ putname(to);
59294 done_path_create(&new_path, new_dentry);
59295 if (retry_estale(error, how)) {
59296 how |= LOOKUP_REVAL;
59297@@ -4167,12 +4351,21 @@ retry:
59298 if (new_dentry == trap)
59299 goto exit5;
59300
59301+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
59302+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
59303+ to);
59304+ if (error)
59305+ goto exit5;
59306+
59307 error = security_path_rename(&oldnd.path, old_dentry,
59308 &newnd.path, new_dentry);
59309 if (error)
59310 goto exit5;
59311 error = vfs_rename(old_dir->d_inode, old_dentry,
59312 new_dir->d_inode, new_dentry);
59313+ if (!error)
59314+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
59315+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
59316 exit5:
59317 dput(new_dentry);
59318 exit4:
59319@@ -4204,6 +4397,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
59320
59321 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
59322 {
59323+ char tmpbuf[64];
59324+ const char *newlink;
59325 int len;
59326
59327 len = PTR_ERR(link);
59328@@ -4213,7 +4408,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
59329 len = strlen(link);
59330 if (len > (unsigned) buflen)
59331 len = buflen;
59332- if (copy_to_user(buffer, link, len))
59333+
59334+ if (len < sizeof(tmpbuf)) {
59335+ memcpy(tmpbuf, link, len);
59336+ newlink = tmpbuf;
59337+ } else
59338+ newlink = link;
59339+
59340+ if (copy_to_user(buffer, newlink, len))
59341 len = -EFAULT;
59342 out:
59343 return len;
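
The vfs_readlink() tail is worth pausing on: the link body frequently lives inline in a filesystem's inode structure, i.e. in a slab cache that PAX_USERCOPY has no reason to whitelist, so short targets (under 64 bytes) are staged through a stack buffer before copy_to_user(). That is a plausible reading of the intent; the mechanical pattern is just a bounce buffer (copy_out_bounced is a hypothetical name for illustration):

        /* Bounce-buffer copy-out, mirroring the vfs_readlink hunk above:
         * short payloads are staged on the stack so the user copy never
         * reads directly from a non-whitelisted kernel heap object. */
        static long copy_out_bounced(char __user *ubuf, const char *src,
                                     size_t len)
        {
                char tmp[64];

                if (len < sizeof(tmp)) {
                        memcpy(tmp, src, len);
                        src = tmp;
                }
                return copy_to_user(ubuf, src, len) ? -EFAULT : (long)len;
        }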
59344diff --git a/fs/namespace.c b/fs/namespace.c
59345index da5c494..a755a54 100644
59346--- a/fs/namespace.c
59347+++ b/fs/namespace.c
59348@@ -1268,6 +1268,9 @@ static int do_umount(struct mount *mnt, int flags)
59349 if (!(sb->s_flags & MS_RDONLY))
59350 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
59351 up_write(&sb->s_umount);
59352+
59353+ gr_log_remount(mnt->mnt_devname, retval);
59354+
59355 return retval;
59356 }
59357
59358@@ -1286,6 +1289,9 @@ static int do_umount(struct mount *mnt, int flags)
59359 }
59360 br_write_unlock(&vfsmount_lock);
59361 namespace_unlock();
59362+
59363+ gr_log_unmount(mnt->mnt_devname, retval);
59364+
59365 return retval;
59366 }
59367
59368@@ -1305,7 +1311,7 @@ static inline bool may_mount(void)
59369 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
59370 */
59371
59372-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
59373+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
59374 {
59375 struct path path;
59376 struct mount *mnt;
59377@@ -1347,7 +1353,7 @@ out:
59378 /*
59379 * The 2.0 compatible umount. No flags.
59380 */
59381-SYSCALL_DEFINE1(oldumount, char __user *, name)
59382+SYSCALL_DEFINE1(oldumount, const char __user *, name)
59383 {
59384 return sys_umount(name, 0);
59385 }
59386@@ -2358,6 +2364,16 @@ long do_mount(const char *dev_name, const char *dir_name,
59387 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
59388 MS_STRICTATIME);
59389
59390+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
59391+ retval = -EPERM;
59392+ goto dput_out;
59393+ }
59394+
59395+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
59396+ retval = -EPERM;
59397+ goto dput_out;
59398+ }
59399+
59400 if (flags & MS_REMOUNT)
59401 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
59402 data_page);
59403@@ -2372,6 +2388,9 @@ long do_mount(const char *dev_name, const char *dir_name,
59404 dev_name, data_page);
59405 dput_out:
59406 path_put(&path);
59407+
59408+ gr_log_mount(dev_name, dir_name, retval);
59409+
59410 return retval;
59411 }
59412
59413@@ -2389,7 +2408,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
59414 * number incrementing at 10Ghz will take 12,427 years to wrap which
59415 * is effectively never, so we can ignore the possibility.
59416 */
59417-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
59418+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
59419
59420 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
59421 {
59422@@ -2404,7 +2423,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
59423 kfree(new_ns);
59424 return ERR_PTR(ret);
59425 }
59426- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
59427+ new_ns->seq = atomic64_inc_return_unchecked(&mnt_ns_seq);
59428 atomic_set(&new_ns->count, 1);
59429 new_ns->root = NULL;
59430 INIT_LIST_HEAD(&new_ns->list);
59431@@ -2418,7 +2437,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
59432 * Allocate a new namespace structure and populate it with contents
59433 * copied from the namespace of the passed in task structure.
59434 */
59435-static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
59436+static __latent_entropy struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
59437 struct user_namespace *user_ns, struct fs_struct *fs)
59438 {
59439 struct mnt_namespace *new_ns;
59440@@ -2549,8 +2568,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
59441 }
59442 EXPORT_SYMBOL(mount_subtree);
59443
59444-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
59445- char __user *, type, unsigned long, flags, void __user *, data)
59446+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
59447+ const char __user *, type, unsigned long, flags, void __user *, data)
59448 {
59449 int ret;
59450 char *kernel_type;
59451@@ -2663,6 +2682,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
59452 if (error)
59453 goto out2;
59454
59455+ if (gr_handle_chroot_pivot()) {
59456+ error = -EPERM;
59457+ goto out2;
59458+ }
59459+
59460 get_fs_root(current->fs, &root);
59461 old_mp = lock_mount(&old);
59462 error = PTR_ERR(old_mp);
59463@@ -2932,7 +2956,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
59464 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
59465 return -EPERM;
59466
59467- if (fs->users != 1)
59468+ if (atomic_read(&fs->users) != 1)
59469 return -EINVAL;
59470
59471 get_mnt_ns(mnt_ns);
59472diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
59473index f4ccfe6..a5cf064 100644
59474--- a/fs/nfs/callback_xdr.c
59475+++ b/fs/nfs/callback_xdr.c
59476@@ -51,7 +51,7 @@ struct callback_op {
59477 callback_decode_arg_t decode_args;
59478 callback_encode_res_t encode_res;
59479 long res_maxsize;
59480-};
59481+} __do_const;
59482
59483 static struct callback_op callback_ops[];
59484
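
__do_const is an annotation consumed by the PaX constify gcc plugin: a structure type so marked is treated as if every instance were const, which pushes tables of function pointers such as callback_ops into read-only memory without editing every declaration. When building without the plugin the annotation has to vanish; a plausible fallback (the real definition is supplied by the patched compiler headers):

        #ifndef __do_const
        #define __do_const              /* no-op without the constify plugin */
        #endif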
59485diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
59486index eda8879..bfc6837 100644
59487--- a/fs/nfs/inode.c
59488+++ b/fs/nfs/inode.c
59489@@ -1150,16 +1150,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
59490 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
59491 }
59492
59493-static atomic_long_t nfs_attr_generation_counter;
59494+static atomic_long_unchecked_t nfs_attr_generation_counter;
59495
59496 static unsigned long nfs_read_attr_generation_counter(void)
59497 {
59498- return atomic_long_read(&nfs_attr_generation_counter);
59499+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
59500 }
59501
59502 unsigned long nfs_inc_attr_generation_counter(void)
59503 {
59504- return atomic_long_inc_return(&nfs_attr_generation_counter);
59505+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
59506 }
59507
59508 void nfs_fattr_init(struct nfs_fattr *fattr)
59509diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
59510index 419572f..5414a23 100644
59511--- a/fs/nfsd/nfs4proc.c
59512+++ b/fs/nfsd/nfs4proc.c
59513@@ -1168,7 +1168,7 @@ struct nfsd4_operation {
59514 nfsd4op_rsize op_rsize_bop;
59515 stateid_getter op_get_currentstateid;
59516 stateid_setter op_set_currentstateid;
59517-};
59518+} __do_const;
59519
59520 static struct nfsd4_operation nfsd4_ops[];
59521
59522diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
59523index ecc735e..79b2d31 100644
59524--- a/fs/nfsd/nfs4xdr.c
59525+++ b/fs/nfsd/nfs4xdr.c
59526@@ -1500,7 +1500,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
59527
59528 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
59529
59530-static nfsd4_dec nfsd4_dec_ops[] = {
59531+static const nfsd4_dec nfsd4_dec_ops[] = {
59532 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
59533 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
59534 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
59535@@ -1540,7 +1540,7 @@ static nfsd4_dec nfsd4_dec_ops[] = {
59536 [OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_release_lockowner,
59537 };
59538
59539-static nfsd4_dec nfsd41_dec_ops[] = {
59540+static const nfsd4_dec nfsd41_dec_ops[] = {
59541 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
59542 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
59543 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
59544@@ -1602,7 +1602,7 @@ static nfsd4_dec nfsd41_dec_ops[] = {
59545 };
59546
59547 struct nfsd4_minorversion_ops {
59548- nfsd4_dec *decoders;
59549+ const nfsd4_dec *decoders;
59550 int nops;
59551 };
59552
59553diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
59554index b6af150..f6ec5e3 100644
59555--- a/fs/nfsd/nfscache.c
59556+++ b/fs/nfsd/nfscache.c
59557@@ -547,14 +547,17 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
59558 {
59559 struct svc_cacherep *rp = rqstp->rq_cacherep;
59560 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
59561- int len;
59562+ long len;
59563 size_t bufsize = 0;
59564
59565 if (!rp)
59566 return;
59567
59568- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
59569- len >>= 2;
59570+ if (statp) {
59571+ len = (char*)statp - (char*)resv->iov_base;
59572+ len = resv->iov_len - len;
59573+ len >>= 2;
59574+ }
59575
59576 /* Don't cache excessive amounts of data and XDR failures */
59577 if (!statp || len > (256 >> 2)) {
59578diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
59579index 72cb28e..5b5f87d 100644
59580--- a/fs/nfsd/vfs.c
59581+++ b/fs/nfsd/vfs.c
59582@@ -993,7 +993,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
59583 } else {
59584 oldfs = get_fs();
59585 set_fs(KERNEL_DS);
59586- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
59587+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
59588 set_fs(oldfs);
59589 }
59590
59591@@ -1080,7 +1080,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
59592
59593 /* Write the data. */
59594 oldfs = get_fs(); set_fs(KERNEL_DS);
59595- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
59596+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
59597 set_fs(oldfs);
59598 if (host_err < 0)
59599 goto out_nfserr;
59600@@ -1626,7 +1626,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
59601 */
59602
59603 oldfs = get_fs(); set_fs(KERNEL_DS);
59604- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
59605+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
59606 set_fs(oldfs);
59607
59608 if (host_err < 0)
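
The (struct iovec __force_user *) casts serve the sparse checker. nfsd deliberately feeds kernel buffers to __user-typed APIs under set_fs(KERNEL_DS); once the patch's UDEREF-aware annotations are in place, a bare cast to __user would be flagged, so __force_user documents and silences the intentional address-space crossing. A plausible definition (hypothetical; the real one lives in the patched compiler headers):

        #ifdef __CHECKER__
        # define __force_user   __force __user
        # define __force_kernel __force __kernel
        #else
        # define __force_user
        # define __force_kernel
        #endif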
59609diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
59610index fea6bd5..8ee9d81 100644
59611--- a/fs/nls/nls_base.c
59612+++ b/fs/nls/nls_base.c
59613@@ -234,20 +234,22 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
59614
59615 int register_nls(struct nls_table * nls)
59616 {
59617- struct nls_table ** tmp = &tables;
59618+ struct nls_table *tmp = tables;
59619
59620 if (nls->next)
59621 return -EBUSY;
59622
59623 spin_lock(&nls_lock);
59624- while (*tmp) {
59625- if (nls == *tmp) {
59626+ while (tmp) {
59627+ if (nls == tmp) {
59628 spin_unlock(&nls_lock);
59629 return -EBUSY;
59630 }
59631- tmp = &(*tmp)->next;
59632+ tmp = tmp->next;
59633 }
59634- nls->next = tables;
59635+ pax_open_kernel();
59636+ *(struct nls_table **)&nls->next = tables;
59637+ pax_close_kernel();
59638 tables = nls;
59639 spin_unlock(&nls_lock);
59640 return 0;
59641@@ -255,12 +257,14 @@ int register_nls(struct nls_table * nls)
59642
59643 int unregister_nls(struct nls_table * nls)
59644 {
59645- struct nls_table ** tmp = &tables;
59646+ struct nls_table * const * tmp = &tables;
59647
59648 spin_lock(&nls_lock);
59649 while (*tmp) {
59650 if (nls == *tmp) {
59651- *tmp = nls->next;
59652+ pax_open_kernel();
59653+ *(struct nls_table **)tmp = nls->next;
59654+ pax_close_kernel();
59655 spin_unlock(&nls_lock);
59656 return 0;
59657 }
59658diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
59659index 7424929..35f6be5 100644
59660--- a/fs/nls/nls_euc-jp.c
59661+++ b/fs/nls/nls_euc-jp.c
59662@@ -561,8 +561,10 @@ static int __init init_nls_euc_jp(void)
59663 p_nls = load_nls("cp932");
59664
59665 if (p_nls) {
59666- table.charset2upper = p_nls->charset2upper;
59667- table.charset2lower = p_nls->charset2lower;
59668+ pax_open_kernel();
59669+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
59670+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
59671+ pax_close_kernel();
59672 return register_nls(&table);
59673 }
59674
59675diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
59676index e7bc1d7..06bd4bb 100644
59677--- a/fs/nls/nls_koi8-ru.c
59678+++ b/fs/nls/nls_koi8-ru.c
59679@@ -63,8 +63,10 @@ static int __init init_nls_koi8_ru(void)
59680 p_nls = load_nls("koi8-u");
59681
59682 if (p_nls) {
59683- table.charset2upper = p_nls->charset2upper;
59684- table.charset2lower = p_nls->charset2lower;
59685+ pax_open_kernel();
59686+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
59687+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
59688+ pax_close_kernel();
59689 return register_nls(&table);
59690 }
59691
59692diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
59693index e44cb64..4807084 100644
59694--- a/fs/notify/fanotify/fanotify_user.c
59695+++ b/fs/notify/fanotify/fanotify_user.c
59696@@ -253,8 +253,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
59697
59698 fd = fanotify_event_metadata.fd;
59699 ret = -EFAULT;
59700- if (copy_to_user(buf, &fanotify_event_metadata,
59701- fanotify_event_metadata.event_len))
59702+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
59703+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
59704 goto out_close_fd;
59705
59706 ret = prepare_for_access_response(group, event, fd);
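
The fanotify change bounds a user copy by the size of the kernel object backing it: event_len is kernel-produced metadata, but clamping it against sizeof(fanotify_event_metadata) guarantees that a corrupted or future-grown length field can never leak adjacent stack memory through copy_to_user(). The defensive shape, isolated (copy_event_bounded is a hypothetical name):

        /* Never let a length field drive copy_to_user() past the object
         * that actually backs it; sketch of the check added above. */
        static ssize_t copy_event_bounded(char __user *buf,
                        const struct fanotify_event_metadata *meta)
        {
                if (meta->event_len > sizeof(*meta))
                        return -EFAULT;         /* length exceeds the object */
                if (copy_to_user(buf, meta, meta->event_len))
                        return -EFAULT;
                return meta->event_len;
        }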
59707diff --git a/fs/notify/notification.c b/fs/notify/notification.c
59708index 7b51b05..5ea5ef6 100644
59709--- a/fs/notify/notification.c
59710+++ b/fs/notify/notification.c
59711@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
59712 * get set to 0 so it will never get 'freed'
59713 */
59714 static struct fsnotify_event *q_overflow_event;
59715-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
59716+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
59717
59718 /**
59719 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
59720@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
59721 */
59722 u32 fsnotify_get_cookie(void)
59723 {
59724- return atomic_inc_return(&fsnotify_sync_cookie);
59725+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
59726 }
59727 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
59728
59729diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
59730index 9e38daf..5727cae 100644
59731--- a/fs/ntfs/dir.c
59732+++ b/fs/ntfs/dir.c
59733@@ -1310,7 +1310,7 @@ find_next_index_buffer:
59734 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
59735 ~(s64)(ndir->itype.index.block_size - 1)));
59736 /* Bounds checks. */
59737- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
59738+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
59739 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
59740 "inode 0x%lx or driver bug.", vdir->i_ino);
59741 goto err_out;
59742diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
59743index ea4ba9d..1e13d34 100644
59744--- a/fs/ntfs/file.c
59745+++ b/fs/ntfs/file.c
59746@@ -1282,7 +1282,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
59747 char *addr;
59748 size_t total = 0;
59749 unsigned len;
59750- int left;
59751+ unsigned left;
59752
59753 do {
59754 len = PAGE_CACHE_SIZE - ofs;
59755diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
59756index 82650d5..db37dcf 100644
59757--- a/fs/ntfs/super.c
59758+++ b/fs/ntfs/super.c
59759@@ -685,7 +685,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
59760 if (!silent)
59761 ntfs_error(sb, "Primary boot sector is invalid.");
59762 } else if (!silent)
59763- ntfs_error(sb, read_err_str, "primary");
59764+ ntfs_error(sb, read_err_str, "%s", "primary");
59765 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
59766 if (bh_primary)
59767 brelse(bh_primary);
59768@@ -701,7 +701,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
59769 goto hotfix_primary_boot_sector;
59770 brelse(bh_backup);
59771 } else if (!silent)
59772- ntfs_error(sb, read_err_str, "backup");
59773+ ntfs_error(sb, read_err_str, "%s", "backup");
59774 /* Try to read NT3.51- backup boot sector. */
59775 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
59776 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
59777@@ -712,7 +712,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
59778 "sector.");
59779 brelse(bh_backup);
59780 } else if (!silent)
59781- ntfs_error(sb, read_err_str, "backup");
59782+ ntfs_error(sb, read_err_str, "%s", "backup");
59783 /* We failed. Cleanup and return. */
59784 if (bh_primary)
59785 brelse(bh_primary);
59786diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
59787index cd5496b..26a1055 100644
59788--- a/fs/ocfs2/localalloc.c
59789+++ b/fs/ocfs2/localalloc.c
59790@@ -1278,7 +1278,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
59791 goto bail;
59792 }
59793
59794- atomic_inc(&osb->alloc_stats.moves);
59795+ atomic_inc_unchecked(&osb->alloc_stats.moves);
59796
59797 bail:
59798 if (handle)
59799diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
59800index 3a90347..c40bef8 100644
59801--- a/fs/ocfs2/ocfs2.h
59802+++ b/fs/ocfs2/ocfs2.h
59803@@ -235,11 +235,11 @@ enum ocfs2_vol_state
59804
59805 struct ocfs2_alloc_stats
59806 {
59807- atomic_t moves;
59808- atomic_t local_data;
59809- atomic_t bitmap_data;
59810- atomic_t bg_allocs;
59811- atomic_t bg_extends;
59812+ atomic_unchecked_t moves;
59813+ atomic_unchecked_t local_data;
59814+ atomic_unchecked_t bitmap_data;
59815+ atomic_unchecked_t bg_allocs;
59816+ atomic_unchecked_t bg_extends;
59817 };
59818
59819 enum ocfs2_local_alloc_state
59820diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
59821index 5397c07..54afc55 100644
59822--- a/fs/ocfs2/suballoc.c
59823+++ b/fs/ocfs2/suballoc.c
59824@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
59825 mlog_errno(status);
59826 goto bail;
59827 }
59828- atomic_inc(&osb->alloc_stats.bg_extends);
59829+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
59830
59831 /* You should never ask for this much metadata */
59832 BUG_ON(bits_wanted >
59833@@ -2000,7 +2000,7 @@ int ocfs2_claim_metadata(handle_t *handle,
59834 mlog_errno(status);
59835 goto bail;
59836 }
59837- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
59838+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
59839
59840 *suballoc_loc = res.sr_bg_blkno;
59841 *suballoc_bit_start = res.sr_bit_offset;
59842@@ -2164,7 +2164,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
59843 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
59844 res->sr_bits);
59845
59846- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
59847+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
59848
59849 BUG_ON(res->sr_bits != 1);
59850
59851@@ -2206,7 +2206,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
59852 mlog_errno(status);
59853 goto bail;
59854 }
59855- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
59856+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
59857
59858 BUG_ON(res.sr_bits != 1);
59859
59860@@ -2310,7 +2310,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
59861 cluster_start,
59862 num_clusters);
59863 if (!status)
59864- atomic_inc(&osb->alloc_stats.local_data);
59865+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
59866 } else {
59867 if (min_clusters > (osb->bitmap_cpg - 1)) {
59868 /* The only paths asking for contiguousness
59869@@ -2336,7 +2336,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
59870 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
59871 res.sr_bg_blkno,
59872 res.sr_bit_offset);
59873- atomic_inc(&osb->alloc_stats.bitmap_data);
59874+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
59875 *num_clusters = res.sr_bits;
59876 }
59877 }
59878diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
59879index d4e81e4..ad89f5f 100644
59880--- a/fs/ocfs2/super.c
59881+++ b/fs/ocfs2/super.c
59882@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
59883 "%10s => GlobalAllocs: %d LocalAllocs: %d "
59884 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
59885 "Stats",
59886- atomic_read(&osb->alloc_stats.bitmap_data),
59887- atomic_read(&osb->alloc_stats.local_data),
59888- atomic_read(&osb->alloc_stats.bg_allocs),
59889- atomic_read(&osb->alloc_stats.moves),
59890- atomic_read(&osb->alloc_stats.bg_extends));
59891+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
59892+ atomic_read_unchecked(&osb->alloc_stats.local_data),
59893+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
59894+ atomic_read_unchecked(&osb->alloc_stats.moves),
59895+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
59896
59897 out += snprintf(buf + out, len - out,
59898 "%10s => State: %u Descriptor: %llu Size: %u bits "
59899@@ -2121,11 +2121,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
59900 spin_lock_init(&osb->osb_xattr_lock);
59901 ocfs2_init_steal_slots(osb);
59902
59903- atomic_set(&osb->alloc_stats.moves, 0);
59904- atomic_set(&osb->alloc_stats.local_data, 0);
59905- atomic_set(&osb->alloc_stats.bitmap_data, 0);
59906- atomic_set(&osb->alloc_stats.bg_allocs, 0);
59907- atomic_set(&osb->alloc_stats.bg_extends, 0);
59908+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
59909+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
59910+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
59911+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
59912+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
59913
59914 /* Copy the blockcheck stats from the superblock probe */
59915 osb->osb_ecc_stats = *stats;
59916diff --git a/fs/open.c b/fs/open.c
59917index d420331..2dbb3fd 100644
59918--- a/fs/open.c
59919+++ b/fs/open.c
59920@@ -32,6 +32,8 @@
59921 #include <linux/dnotify.h>
59922 #include <linux/compat.h>
59923
59924+#define CREATE_TRACE_POINTS
59925+#include <trace/events/fs.h>
59926 #include "internal.h"
59927
59928 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
59929@@ -102,6 +104,8 @@ long vfs_truncate(struct path *path, loff_t length)
59930 error = locks_verify_truncate(inode, NULL, length);
59931 if (!error)
59932 error = security_path_truncate(path);
59933+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
59934+ error = -EACCES;
59935 if (!error)
59936 error = do_truncate(path->dentry, length, 0, NULL);
59937
59938@@ -186,6 +190,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
59939 error = locks_verify_truncate(inode, f.file, length);
59940 if (!error)
59941 error = security_path_truncate(&f.file->f_path);
59942+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
59943+ error = -EACCES;
59944 if (!error)
59945 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
59946 sb_end_write(inode->i_sb);
59947@@ -360,6 +366,9 @@ retry:
59948 if (__mnt_is_readonly(path.mnt))
59949 res = -EROFS;
59950
59951+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
59952+ res = -EACCES;
59953+
59954 out_path_release:
59955 path_put(&path);
59956 if (retry_estale(res, lookup_flags)) {
59957@@ -391,6 +400,8 @@ retry:
59958 if (error)
59959 goto dput_and_out;
59960
59961+ gr_log_chdir(path.dentry, path.mnt);
59962+
59963 set_fs_pwd(current->fs, &path);
59964
59965 dput_and_out:
59966@@ -420,6 +431,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
59967 goto out_putf;
59968
59969 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
59970+
59971+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
59972+ error = -EPERM;
59973+
59974+ if (!error)
59975+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
59976+
59977 if (!error)
59978 set_fs_pwd(current->fs, &f.file->f_path);
59979 out_putf:
59980@@ -449,7 +467,13 @@ retry:
59981 if (error)
59982 goto dput_and_out;
59983
59984+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
59985+ goto dput_and_out;
59986+
59987 set_fs_root(current->fs, &path);
59988+
59989+ gr_handle_chroot_chdir(&path);
59990+
59991 error = 0;
59992 dput_and_out:
59993 path_put(&path);
59994@@ -471,6 +495,16 @@ static int chmod_common(struct path *path, umode_t mode)
59995 if (error)
59996 return error;
59997 mutex_lock(&inode->i_mutex);
59998+
59999+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
60000+ error = -EACCES;
60001+ goto out_unlock;
60002+ }
60003+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
60004+ error = -EACCES;
60005+ goto out_unlock;
60006+ }
60007+
60008 error = security_path_chmod(path, mode);
60009 if (error)
60010 goto out_unlock;
60011@@ -530,6 +564,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
60012 uid = make_kuid(current_user_ns(), user);
60013 gid = make_kgid(current_user_ns(), group);
60014
60015+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
60016+ return -EACCES;
60017+
60018 newattrs.ia_valid = ATTR_CTIME;
60019 if (user != (uid_t) -1) {
60020 if (!uid_valid(uid))
60021@@ -974,6 +1011,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
60022 } else {
60023 fsnotify_open(f);
60024 fd_install(fd, f);
60025+ trace_do_sys_open(tmp->name, flags, mode);
60026 }
60027 }
60028 putname(tmp);
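
The fs/open.c hunk adds a tracepoint alongside the grsecurity hooks. Defining CREATE_TRACE_POINTS before including the event header is the standard kernel idiom: exactly one translation unit emits the tracepoint bodies, while every other includer gets only the declarations. In miniature (example_fire is a hypothetical caller):

        /* In exactly one .c file: instantiate the tracepoint definitions. */
        #define CREATE_TRACE_POINTS
        #include <trace/events/fs.h>

        static void example_fire(const char *name, int flags, int mode)
        {
                /* compiles to a patched-out no-op unless the event is enabled */
                trace_do_sys_open(name, flags, mode);
        }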
60029diff --git a/fs/pipe.c b/fs/pipe.c
60030index 0e0752e..7cfdd50 100644
60031--- a/fs/pipe.c
60032+++ b/fs/pipe.c
60033@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
60034
60035 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
60036 {
60037- if (pipe->files)
60038+ if (atomic_read(&pipe->files))
60039 mutex_lock_nested(&pipe->mutex, subclass);
60040 }
60041
60042@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
60043
60044 void pipe_unlock(struct pipe_inode_info *pipe)
60045 {
60046- if (pipe->files)
60047+ if (atomic_read(&pipe->files))
60048 mutex_unlock(&pipe->mutex);
60049 }
60050 EXPORT_SYMBOL(pipe_unlock);
60051@@ -449,9 +449,9 @@ redo:
60052 }
60053 if (bufs) /* More to do? */
60054 continue;
60055- if (!pipe->writers)
60056+ if (!atomic_read(&pipe->writers))
60057 break;
60058- if (!pipe->waiting_writers) {
60059+ if (!atomic_read(&pipe->waiting_writers)) {
60060 /* syscall merging: Usually we must not sleep
60061 * if O_NONBLOCK is set, or if we got some data.
60062 * But if a writer sleeps in kernel space, then
60063@@ -513,7 +513,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
60064 ret = 0;
60065 __pipe_lock(pipe);
60066
60067- if (!pipe->readers) {
60068+ if (!atomic_read(&pipe->readers)) {
60069 send_sig(SIGPIPE, current, 0);
60070 ret = -EPIPE;
60071 goto out;
60072@@ -562,7 +562,7 @@ redo1:
60073 for (;;) {
60074 int bufs;
60075
60076- if (!pipe->readers) {
60077+ if (!atomic_read(&pipe->readers)) {
60078 send_sig(SIGPIPE, current, 0);
60079 if (!ret)
60080 ret = -EPIPE;
60081@@ -653,9 +653,9 @@ redo2:
60082 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
60083 do_wakeup = 0;
60084 }
60085- pipe->waiting_writers++;
60086+ atomic_inc(&pipe->waiting_writers);
60087 pipe_wait(pipe);
60088- pipe->waiting_writers--;
60089+ atomic_dec(&pipe->waiting_writers);
60090 }
60091 out:
60092 __pipe_unlock(pipe);
60093@@ -709,7 +709,7 @@ pipe_poll(struct file *filp, poll_table *wait)
60094 mask = 0;
60095 if (filp->f_mode & FMODE_READ) {
60096 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
60097- if (!pipe->writers && filp->f_version != pipe->w_counter)
60098+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
60099 mask |= POLLHUP;
60100 }
60101
60102@@ -719,7 +719,7 @@ pipe_poll(struct file *filp, poll_table *wait)
60103 * Most Unices do not set POLLERR for FIFOs but on Linux they
60104 * behave exactly like pipes for poll().
60105 */
60106- if (!pipe->readers)
60107+ if (!atomic_read(&pipe->readers))
60108 mask |= POLLERR;
60109 }
60110
60111@@ -731,7 +731,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
60112 int kill = 0;
60113
60114 spin_lock(&inode->i_lock);
60115- if (!--pipe->files) {
60116+ if (atomic_dec_and_test(&pipe->files)) {
60117 inode->i_pipe = NULL;
60118 kill = 1;
60119 }
60120@@ -748,11 +748,11 @@ pipe_release(struct inode *inode, struct file *file)
60121
60122 __pipe_lock(pipe);
60123 if (file->f_mode & FMODE_READ)
60124- pipe->readers--;
60125+ atomic_dec(&pipe->readers);
60126 if (file->f_mode & FMODE_WRITE)
60127- pipe->writers--;
60128+ atomic_dec(&pipe->writers);
60129
60130- if (pipe->readers || pipe->writers) {
60131+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
60132 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
60133 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
60134 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
60135@@ -817,7 +817,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
60136 kfree(pipe);
60137 }
60138
60139-static struct vfsmount *pipe_mnt __read_mostly;
60140+struct vfsmount *pipe_mnt __read_mostly;
60141
60142 /*
60143 * pipefs_dname() is called from d_path().
60144@@ -847,8 +847,9 @@ static struct inode * get_pipe_inode(void)
60145 goto fail_iput;
60146
60147 inode->i_pipe = pipe;
60148- pipe->files = 2;
60149- pipe->readers = pipe->writers = 1;
60150+ atomic_set(&pipe->files, 2);
60151+ atomic_set(&pipe->readers, 1);
60152+ atomic_set(&pipe->writers, 1);
60153 inode->i_fop = &pipefifo_fops;
60154
60155 /*
60156@@ -1027,17 +1028,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
60157 spin_lock(&inode->i_lock);
60158 if (inode->i_pipe) {
60159 pipe = inode->i_pipe;
60160- pipe->files++;
60161+ atomic_inc(&pipe->files);
60162 spin_unlock(&inode->i_lock);
60163 } else {
60164 spin_unlock(&inode->i_lock);
60165 pipe = alloc_pipe_info();
60166 if (!pipe)
60167 return -ENOMEM;
60168- pipe->files = 1;
60169+ atomic_set(&pipe->files, 1);
60170 spin_lock(&inode->i_lock);
60171 if (unlikely(inode->i_pipe)) {
60172- inode->i_pipe->files++;
60173+ atomic_inc(&inode->i_pipe->files);
60174 spin_unlock(&inode->i_lock);
60175 free_pipe_info(pipe);
60176 pipe = inode->i_pipe;
60177@@ -1062,10 +1063,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
60178 * opened, even when there is no process writing the FIFO.
60179 */
60180 pipe->r_counter++;
60181- if (pipe->readers++ == 0)
60182+ if (atomic_inc_return(&pipe->readers) == 1)
60183 wake_up_partner(pipe);
60184
60185- if (!is_pipe && !pipe->writers) {
60186+ if (!is_pipe && !atomic_read(&pipe->writers)) {
60187 if ((filp->f_flags & O_NONBLOCK)) {
60188 /* suppress POLLHUP until we have
60189 * seen a writer */
60190@@ -1084,14 +1085,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
60191 * errno=ENXIO when there is no process reading the FIFO.
60192 */
60193 ret = -ENXIO;
60194- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
60195+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
60196 goto err;
60197
60198 pipe->w_counter++;
60199- if (!pipe->writers++)
60200+ if (atomic_inc_return(&pipe->writers) == 1)
60201 wake_up_partner(pipe);
60202
60203- if (!is_pipe && !pipe->readers) {
60204+ if (!is_pipe && !atomic_read(&pipe->readers)) {
60205 if (wait_for_partner(pipe, &pipe->r_counter))
60206 goto err_wr;
60207 }
60208@@ -1105,11 +1106,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
60209 * the process can at least talk to itself.
60210 */
60211
60212- pipe->readers++;
60213- pipe->writers++;
60214+ atomic_inc(&pipe->readers);
60215+ atomic_inc(&pipe->writers);
60216 pipe->r_counter++;
60217 pipe->w_counter++;
60218- if (pipe->readers == 1 || pipe->writers == 1)
60219+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
60220 wake_up_partner(pipe);
60221 break;
60222
60223@@ -1123,13 +1124,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
60224 return 0;
60225
60226 err_rd:
60227- if (!--pipe->readers)
60228+ if (atomic_dec_and_test(&pipe->readers))
60229 wake_up_interruptible(&pipe->wait);
60230 ret = -ERESTARTSYS;
60231 goto err;
60232
60233 err_wr:
60234- if (!--pipe->writers)
60235+ if (atomic_dec_and_test(&pipe->writers))
60236 wake_up_interruptible(&pipe->wait);
60237 ret = -ERESTARTSYS;
60238 goto err;
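
The fs/pipe.c conversion turns the plain int readers/writers/waiting_writers/files counts into atomic_t, both so concurrent updates cannot be torn and so the PaX reference-counter overflow protection covers them. The kernel idioms used above, atomic_inc_return() == 1 for "first user" and atomic_dec_and_test() for "last user gone", map onto C11 atomics roughly as in this standalone userspace sketch (an approximation, not kernel code):

/* Userspace approximation of the atomic_t counter pattern used above.
 * atomic_inc_return(&c) == 1  -> first user, wake the partner
 * atomic_dec_and_test(&c)     -> last user gone, tear down        */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int readers = 0;

static void open_reader(void)
{
    if (atomic_fetch_add(&readers, 1) + 1 == 1)  /* like atomic_inc_return() == 1 */
        puts("first reader: wake_up_partner()");
}

static void close_reader(void)
{
    if (atomic_fetch_sub(&readers, 1) - 1 == 0)  /* like atomic_dec_and_test() */
        puts("last reader: wake up writers / free pipe");
}

int main(void)
{
    open_reader();
    open_reader();
    close_reader();
    close_reader();
    return 0;
}
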
60239diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
60240index 15af622..0e9f4467 100644
60241--- a/fs/proc/Kconfig
60242+++ b/fs/proc/Kconfig
60243@@ -30,12 +30,12 @@ config PROC_FS
60244
60245 config PROC_KCORE
60246 bool "/proc/kcore support" if !ARM
60247- depends on PROC_FS && MMU
60248+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
60249
60250 config PROC_VMCORE
60251 bool "/proc/vmcore support"
60252- depends on PROC_FS && CRASH_DUMP
60253- default y
60254+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
60255+ default n
60256 help
60257 Exports the dump image of crashed kernel in ELF format.
60258
60259@@ -59,8 +59,8 @@ config PROC_SYSCTL
60260 limited in memory.
60261
60262 config PROC_PAGE_MONITOR
60263- default y
60264- depends on PROC_FS && MMU
60265+ default n
60266+ depends on PROC_FS && MMU && !GRKERNSEC
60267 bool "Enable /proc page monitoring" if EXPERT
60268 help
60269 Various /proc files exist to monitor process memory utilization:
60270diff --git a/fs/proc/array.c b/fs/proc/array.c
60271index cbd0f1b..adec3f0 100644
60272--- a/fs/proc/array.c
60273+++ b/fs/proc/array.c
60274@@ -60,6 +60,7 @@
60275 #include <linux/tty.h>
60276 #include <linux/string.h>
60277 #include <linux/mman.h>
60278+#include <linux/grsecurity.h>
60279 #include <linux/proc_fs.h>
60280 #include <linux/ioport.h>
60281 #include <linux/uaccess.h>
60282@@ -363,6 +364,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
60283 seq_putc(m, '\n');
60284 }
60285
60286+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60287+static inline void task_pax(struct seq_file *m, struct task_struct *p)
60288+{
60289+ if (p->mm)
60290+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
60291+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
60292+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
60293+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
60294+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
60295+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
60296+ else
60297+ seq_printf(m, "PaX:\t-----\n");
60298+}
60299+#endif
60300+
60301 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
60302 struct pid *pid, struct task_struct *task)
60303 {
60304@@ -381,9 +397,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
60305 task_cpus_allowed(m, task);
60306 cpuset_task_status_allowed(m, task);
60307 task_context_switch_counts(m, task);
60308+
60309+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60310+ task_pax(m, task);
60311+#endif
60312+
60313+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
60314+ task_grsec_rbac(m, task);
60315+#endif
60316+
60317 return 0;
60318 }
60319
60320+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60321+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
60322+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
60323+ _mm->pax_flags & MF_PAX_SEGMEXEC))
60324+#endif
60325+
60326 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
60327 struct pid *pid, struct task_struct *task, int whole)
60328 {
60329@@ -405,6 +436,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
60330 char tcomm[sizeof(task->comm)];
60331 unsigned long flags;
60332
60333+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60334+ if (current->exec_id != m->exec_id) {
60335+ gr_log_badprocpid("stat");
60336+ return 0;
60337+ }
60338+#endif
60339+
60340 state = *get_task_state(task);
60341 vsize = eip = esp = 0;
60342 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
60343@@ -476,6 +514,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
60344 gtime = task_gtime(task);
60345 }
60346
60347+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60348+ if (PAX_RAND_FLAGS(mm)) {
60349+ eip = 0;
60350+ esp = 0;
60351+ wchan = 0;
60352+ }
60353+#endif
60354+#ifdef CONFIG_GRKERNSEC_HIDESYM
60355+ wchan = 0;
60356+	eip = 0;
60357+	esp = 0;
60358+#endif
60359+
60360 /* scale priority and nice values from timeslices to -20..20 */
60361 /* to make it look like a "normal" Unix priority/nice value */
60362 priority = task_prio(task);
60363@@ -512,9 +563,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
60364 seq_put_decimal_ull(m, ' ', vsize);
60365 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
60366 seq_put_decimal_ull(m, ' ', rsslim);
60367+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60368+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
60369+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
60370+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
60371+#else
60372 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
60373 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
60374 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
60375+#endif
60376 seq_put_decimal_ull(m, ' ', esp);
60377 seq_put_decimal_ull(m, ' ', eip);
60378 /* The signal information here is obsolete.
60379@@ -536,7 +593,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
60380 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
60381 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
60382
60383- if (mm && permitted) {
60384+ if (mm && permitted
60385+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60386+ && !PAX_RAND_FLAGS(mm)
60387+#endif
60388+ ) {
60389 seq_put_decimal_ull(m, ' ', mm->start_data);
60390 seq_put_decimal_ull(m, ' ', mm->end_data);
60391 seq_put_decimal_ull(m, ' ', mm->start_brk);
60392@@ -574,8 +635,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
60393 struct pid *pid, struct task_struct *task)
60394 {
60395 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
60396- struct mm_struct *mm = get_task_mm(task);
60397+ struct mm_struct *mm;
60398
60399+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60400+ if (current->exec_id != m->exec_id) {
60401+ gr_log_badprocpid("statm");
60402+ return 0;
60403+ }
60404+#endif
60405+ mm = get_task_mm(task);
60406 if (mm) {
60407 size = task_statm(mm, &shared, &text, &data, &resident);
60408 mmput(mm);
60409@@ -598,6 +666,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
60410 return 0;
60411 }
60412
60413+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
60414+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
60415+{
60416+ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
60417+}
60418+#endif
60419+
60420 #ifdef CONFIG_CHECKPOINT_RESTORE
60421 static struct pid *
60422 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
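
With the task_pax() helper above compiled in, every /proc/<pid>/status gains a "PaX:" line (capital letter = flag set, lower case = clear, "-----" for tasks without an mm), and under GRKERNSEC_HIDESYM the eip/esp fields of /proc/<pid>/stat are forced to zero. A small reader for the status line (hypothetical demo; the line only exists on PaX-enabled kernels):

/* Hypothetical demo: print the PaX flags line that task_pax() above
 * emits into /proc/self/status. On a non-PaX kernel no line matches. */
#include <stdio.h>
#include <string.h>

int main(void)
{
    char line[256];
    FILE *f = fopen("/proc/self/status", "r");

    if (!f) { perror("fopen"); return 1; }
    while (fgets(line, sizeof(line), f))
        if (strncmp(line, "PaX:", 4) == 0)
            fputs(line, stdout);   /* e.g. "PaX:\tPeMRs" */
    fclose(f);
    return 0;
}
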
60423diff --git a/fs/proc/base.c b/fs/proc/base.c
60424index 1485e38..8ad4236 100644
60425--- a/fs/proc/base.c
60426+++ b/fs/proc/base.c
60427@@ -113,6 +113,14 @@ struct pid_entry {
60428 union proc_op op;
60429 };
60430
60431+struct getdents_callback {
60432+ struct linux_dirent __user * current_dir;
60433+ struct linux_dirent __user * previous;
60434+ struct file * file;
60435+ int count;
60436+ int error;
60437+};
60438+
60439 #define NOD(NAME, MODE, IOP, FOP, OP) { \
60440 .name = (NAME), \
60441 .len = sizeof(NAME) - 1, \
60442@@ -210,6 +218,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
60443 if (!mm->arg_end)
60444 goto out_mm; /* Shh! No looking before we're done */
60445
60446+ if (gr_acl_handle_procpidmem(task))
60447+ goto out_mm;
60448+
60449 len = mm->arg_end - mm->arg_start;
60450
60451 if (len > PAGE_SIZE)
60452@@ -237,12 +248,28 @@ out:
60453 return res;
60454 }
60455
60456+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60457+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
60458+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
60459+ _mm->pax_flags & MF_PAX_SEGMEXEC))
60460+#endif
60461+
60462 static int proc_pid_auxv(struct task_struct *task, char *buffer)
60463 {
60464 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
60465 int res = PTR_ERR(mm);
60466 if (mm && !IS_ERR(mm)) {
60467 unsigned int nwords = 0;
60468+
60469+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60470+ /* allow if we're currently ptracing this task */
60471+ if (PAX_RAND_FLAGS(mm) &&
60472+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
60473+ mmput(mm);
60474+ return 0;
60475+ }
60476+#endif
60477+
60478 do {
60479 nwords += 2;
60480 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
60481@@ -256,7 +283,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
60482 }
60483
60484
60485-#ifdef CONFIG_KALLSYMS
60486+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
60487 /*
60488 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
60489 * Returns the resolved symbol. If that fails, simply return the address.
60490@@ -295,7 +322,7 @@ static void unlock_trace(struct task_struct *task)
60491 mutex_unlock(&task->signal->cred_guard_mutex);
60492 }
60493
60494-#ifdef CONFIG_STACKTRACE
60495+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
60496
60497 #define MAX_STACK_TRACE_DEPTH 64
60498
60499@@ -518,7 +545,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
60500 return count;
60501 }
60502
60503-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
60504+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
60505 static int proc_pid_syscall(struct task_struct *task, char *buffer)
60506 {
60507 long nr;
60508@@ -547,7 +574,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
60509 /************************************************************************/
60510
60511 /* permission checks */
60512-static int proc_fd_access_allowed(struct inode *inode)
60513+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
60514 {
60515 struct task_struct *task;
60516 int allowed = 0;
60517@@ -557,7 +584,10 @@ static int proc_fd_access_allowed(struct inode *inode)
60518 */
60519 task = get_proc_task(inode);
60520 if (task) {
60521- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
60522+ if (log)
60523+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
60524+ else
60525+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
60526 put_task_struct(task);
60527 }
60528 return allowed;
60529@@ -588,10 +618,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
60530 struct task_struct *task,
60531 int hide_pid_min)
60532 {
60533+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
60534+ return false;
60535+
60536+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60537+ rcu_read_lock();
60538+ {
60539+ const struct cred *tmpcred = current_cred();
60540+ const struct cred *cred = __task_cred(task);
60541+
60542+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
60543+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
60544+ || in_group_p(grsec_proc_gid)
60545+#endif
60546+ ) {
60547+ rcu_read_unlock();
60548+ return true;
60549+ }
60550+ }
60551+ rcu_read_unlock();
60552+
60553+ if (!pid->hide_pid)
60554+ return false;
60555+#endif
60556+
60557 if (pid->hide_pid < hide_pid_min)
60558 return true;
60559 if (in_group_p(pid->pid_gid))
60560 return true;
60561+
60562 return ptrace_may_access(task, PTRACE_MODE_READ);
60563 }
60564
60565@@ -609,7 +664,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
60566 put_task_struct(task);
60567
60568 if (!has_perms) {
60569+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60570+ {
60571+#else
60572 if (pid->hide_pid == 2) {
60573+#endif
60574 /*
60575 * Let's make getdents(), stat(), and open()
60576 * consistent with each other. If a process
60577@@ -707,6 +766,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
60578 if (!task)
60579 return -ESRCH;
60580
60581+ if (gr_acl_handle_procpidmem(task)) {
60582+ put_task_struct(task);
60583+ return -EPERM;
60584+ }
60585+
60586 mm = mm_access(task, mode);
60587 put_task_struct(task);
60588
60589@@ -722,6 +786,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
60590
60591 file->private_data = mm;
60592
60593+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60594+ file->f_version = current->exec_id;
60595+#endif
60596+
60597 return 0;
60598 }
60599
60600@@ -743,6 +811,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
60601 ssize_t copied;
60602 char *page;
60603
60604+#ifdef CONFIG_GRKERNSEC
60605+ if (write)
60606+ return -EPERM;
60607+#endif
60608+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60609+ if (file->f_version != current->exec_id) {
60610+ gr_log_badprocpid("mem");
60611+ return 0;
60612+ }
60613+#endif
60614+
60615 if (!mm)
60616 return 0;
60617
60618@@ -755,7 +834,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
60619 goto free;
60620
60621 while (count > 0) {
60622- int this_len = min_t(int, count, PAGE_SIZE);
60623+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
60624
60625 if (write && copy_from_user(page, buf, this_len)) {
60626 copied = -EFAULT;
60627@@ -847,6 +926,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
60628 if (!mm)
60629 return 0;
60630
60631+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60632+ if (file->f_version != current->exec_id) {
60633+ gr_log_badprocpid("environ");
60634+ return 0;
60635+ }
60636+#endif
60637+
60638 page = (char *)__get_free_page(GFP_TEMPORARY);
60639 if (!page)
60640 return -ENOMEM;
60641@@ -856,7 +942,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
60642 goto free;
60643 while (count > 0) {
60644 size_t this_len, max_len;
60645- int retval;
60646+ ssize_t retval;
60647
60648 if (src >= (mm->env_end - mm->env_start))
60649 break;
60650@@ -1461,7 +1547,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
60651 int error = -EACCES;
60652
60653 /* Are we allowed to snoop on the tasks file descriptors? */
60654- if (!proc_fd_access_allowed(inode))
60655+ if (!proc_fd_access_allowed(inode, 0))
60656 goto out;
60657
60658 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
60659@@ -1505,8 +1591,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
60660 struct path path;
60661
60662 /* Are we allowed to snoop on the tasks file descriptors? */
60663- if (!proc_fd_access_allowed(inode))
60664- goto out;
60665+	/* logging this is needed for RBAC learning on chromium to work
60666+	   properly, but we don't want to flood the logs from 'ps', which
60667+	   does a readlink on /proc/<pid>/fd/2 of every task in the listing,
60668+	   nor do we want 'ps' to learn CAP_SYS_PTRACE, as that isn't
60669+	   necessary for its basic functionality */
60670+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
60671+	if (!proc_fd_access_allowed(inode, 0))
60672+ goto out;
60673+ } else {
60674+	if (!proc_fd_access_allowed(inode, 1))
60675+ goto out;
60676+ }
60677
60678 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
60679 if (error)
60680@@ -1556,7 +1652,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
60681 rcu_read_lock();
60682 cred = __task_cred(task);
60683 inode->i_uid = cred->euid;
60684+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
60685+ inode->i_gid = grsec_proc_gid;
60686+#else
60687 inode->i_gid = cred->egid;
60688+#endif
60689 rcu_read_unlock();
60690 }
60691 security_task_to_inode(task, inode);
60692@@ -1592,10 +1692,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
60693 return -ENOENT;
60694 }
60695 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
60696+#ifdef CONFIG_GRKERNSEC_PROC_USER
60697+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
60698+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60699+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
60700+#endif
60701 task_dumpable(task)) {
60702 cred = __task_cred(task);
60703 stat->uid = cred->euid;
60704+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
60705+ stat->gid = grsec_proc_gid;
60706+#else
60707 stat->gid = cred->egid;
60708+#endif
60709 }
60710 }
60711 rcu_read_unlock();
60712@@ -1633,11 +1742,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
60713
60714 if (task) {
60715 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
60716+#ifdef CONFIG_GRKERNSEC_PROC_USER
60717+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
60718+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60719+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
60720+#endif
60721 task_dumpable(task)) {
60722 rcu_read_lock();
60723 cred = __task_cred(task);
60724 inode->i_uid = cred->euid;
60725+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
60726+ inode->i_gid = grsec_proc_gid;
60727+#else
60728 inode->i_gid = cred->egid;
60729+#endif
60730 rcu_read_unlock();
60731 } else {
60732 inode->i_uid = GLOBAL_ROOT_UID;
60733@@ -2166,6 +2284,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
60734 if (!task)
60735 goto out_no_task;
60736
60737+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
60738+ goto out;
60739+
60740 /*
60741 * Yes, it does not scale. And it should not. Don't add
60742 * new entries into /proc/<tgid>/ without very good reasons.
60743@@ -2196,6 +2317,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
60744 if (!task)
60745 return -ENOENT;
60746
60747+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
60748+ goto out;
60749+
60750 if (!dir_emit_dots(file, ctx))
60751 goto out;
60752
60753@@ -2585,7 +2709,7 @@ static const struct pid_entry tgid_base_stuff[] = {
60754 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
60755 #endif
60756 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
60757-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
60758+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
60759 INF("syscall", S_IRUGO, proc_pid_syscall),
60760 #endif
60761 INF("cmdline", S_IRUGO, proc_pid_cmdline),
60762@@ -2610,10 +2734,10 @@ static const struct pid_entry tgid_base_stuff[] = {
60763 #ifdef CONFIG_SECURITY
60764 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
60765 #endif
60766-#ifdef CONFIG_KALLSYMS
60767+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
60768 INF("wchan", S_IRUGO, proc_pid_wchan),
60769 #endif
60770-#ifdef CONFIG_STACKTRACE
60771+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
60772 ONE("stack", S_IRUGO, proc_pid_stack),
60773 #endif
60774 #ifdef CONFIG_SCHEDSTATS
60775@@ -2647,6 +2771,9 @@ static const struct pid_entry tgid_base_stuff[] = {
60776 #ifdef CONFIG_HARDWALL
60777 INF("hardwall", S_IRUGO, proc_pid_hardwall),
60778 #endif
60779+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
60780+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
60781+#endif
60782 #ifdef CONFIG_USER_NS
60783 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
60784 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
60785@@ -2777,7 +2904,14 @@ static int proc_pid_instantiate(struct inode *dir,
60786 if (!inode)
60787 goto out;
60788
60789+#ifdef CONFIG_GRKERNSEC_PROC_USER
60790+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
60791+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60792+ inode->i_gid = grsec_proc_gid;
60793+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
60794+#else
60795 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
60796+#endif
60797 inode->i_op = &proc_tgid_base_inode_operations;
60798 inode->i_fop = &proc_tgid_base_operations;
60799 inode->i_flags|=S_IMMUTABLE;
60800@@ -2815,7 +2949,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
60801 if (!task)
60802 goto out;
60803
60804+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
60805+ goto out_put_task;
60806+
60807 result = proc_pid_instantiate(dir, dentry, task, NULL);
60808+out_put_task:
60809 put_task_struct(task);
60810 out:
60811 return ERR_PTR(result);
60812@@ -2921,7 +3059,7 @@ static const struct pid_entry tid_base_stuff[] = {
60813 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
60814 #endif
60815 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
60816-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
60817+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
60818 INF("syscall", S_IRUGO, proc_pid_syscall),
60819 #endif
60820 INF("cmdline", S_IRUGO, proc_pid_cmdline),
60821@@ -2948,10 +3086,10 @@ static const struct pid_entry tid_base_stuff[] = {
60822 #ifdef CONFIG_SECURITY
60823 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
60824 #endif
60825-#ifdef CONFIG_KALLSYMS
60826+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
60827 INF("wchan", S_IRUGO, proc_pid_wchan),
60828 #endif
60829-#ifdef CONFIG_STACKTRACE
60830+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
60831 ONE("stack", S_IRUGO, proc_pid_stack),
60832 #endif
60833 #ifdef CONFIG_SCHEDSTATS
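
Several of the fs/proc hunks above stamp m->exec_id (or file->f_version) at open time and compare it against current->exec_id on every read, bailing out via gr_log_badprocpid() on mismatch: a /proc file descriptor opened before execve() cannot be replayed against the post-exec image. A hypothetical self-exec demo of the effect (on a stock kernel the post-exec read returns data; under CONFIG_GRKERNSEC_PROC_MEMMAP it returns 0 bytes):

/* Hypothetical demo: hold an fd to /proc/self/maps across execve().
 * Run with no arguments; it re-executes itself with the fd number as
 * argv[1] and reads from the inherited descriptor after the exec. */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>

int main(int argc, char **argv)
{
    char buf[256];

    if (argc == 2) {                       /* post-exec: fd was inherited */
        int fd = atoi(argv[1]);
        ssize_t n = read(fd, buf, sizeof(buf));
        printf("post-exec read: %zd bytes\n", n);
        return 0;
    }
    int fd = open("/proc/self/maps", O_RDONLY);
    if (fd < 0) { perror("open"); return 1; }
    snprintf(buf, sizeof(buf), "%d", fd);
    execl("/proc/self/exe", argv[0], buf, (char *)NULL);
    perror("execl");
    return 1;
}
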
60834diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
60835index 82676e3..5f8518a 100644
60836--- a/fs/proc/cmdline.c
60837+++ b/fs/proc/cmdline.c
60838@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
60839
60840 static int __init proc_cmdline_init(void)
60841 {
60842+#ifdef CONFIG_GRKERNSEC_PROC_ADD
60843+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
60844+#else
60845 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
60846+#endif
60847 return 0;
60848 }
60849 module_init(proc_cmdline_init);
60850diff --git a/fs/proc/devices.c b/fs/proc/devices.c
60851index b143471..bb105e5 100644
60852--- a/fs/proc/devices.c
60853+++ b/fs/proc/devices.c
60854@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
60855
60856 static int __init proc_devices_init(void)
60857 {
60858+#ifdef CONFIG_GRKERNSEC_PROC_ADD
60859+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
60860+#else
60861 proc_create("devices", 0, NULL, &proc_devinfo_operations);
60862+#endif
60863 return 0;
60864 }
60865 module_init(proc_devices_init);
60866diff --git a/fs/proc/fd.c b/fs/proc/fd.c
60867index 985ea88..d118a0a 100644
60868--- a/fs/proc/fd.c
60869+++ b/fs/proc/fd.c
60870@@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
60871 if (!task)
60872 return -ENOENT;
60873
60874- files = get_files_struct(task);
60875+ if (!gr_acl_handle_procpidmem(task))
60876+ files = get_files_struct(task);
60877 put_task_struct(task);
60878
60879 if (files) {
60880@@ -283,11 +284,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
60881 */
60882 int proc_fd_permission(struct inode *inode, int mask)
60883 {
60884+ struct task_struct *task;
60885 int rv = generic_permission(inode, mask);
60886- if (rv == 0)
60887- return 0;
60888+
60889 if (task_tgid(current) == proc_pid(inode))
60890 rv = 0;
60891+
60892+ task = get_proc_task(inode);
60893+ if (task == NULL)
60894+ return rv;
60895+
60896+ if (gr_acl_handle_procpidmem(task))
60897+ rv = -EACCES;
60898+
60899+ put_task_struct(task);
60900+
60901 return rv;
60902 }
60903
60904diff --git a/fs/proc/inode.c b/fs/proc/inode.c
60905index 8eaa1ba..cc6ff42 100644
60906--- a/fs/proc/inode.c
60907+++ b/fs/proc/inode.c
60908@@ -23,11 +23,17 @@
60909 #include <linux/slab.h>
60910 #include <linux/mount.h>
60911 #include <linux/magic.h>
60912+#include <linux/grsecurity.h>
60913
60914 #include <asm/uaccess.h>
60915
60916 #include "internal.h"
60917
60918+#ifdef CONFIG_PROC_SYSCTL
60919+extern const struct inode_operations proc_sys_inode_operations;
60920+extern const struct inode_operations proc_sys_dir_operations;
60921+#endif
60922+
60923 static void proc_evict_inode(struct inode *inode)
60924 {
60925 struct proc_dir_entry *de;
60926@@ -55,6 +61,13 @@ static void proc_evict_inode(struct inode *inode)
60927 ns = PROC_I(inode)->ns.ns;
60928 if (ns_ops && ns)
60929 ns_ops->put(ns);
60930+
60931+#ifdef CONFIG_PROC_SYSCTL
60932+ if (inode->i_op == &proc_sys_inode_operations ||
60933+ inode->i_op == &proc_sys_dir_operations)
60934+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
60935+#endif
60936+
60937 }
60938
60939 static struct kmem_cache * proc_inode_cachep;
60940@@ -405,7 +418,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
60941 if (de->mode) {
60942 inode->i_mode = de->mode;
60943 inode->i_uid = de->uid;
60944+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
60945+ inode->i_gid = grsec_proc_gid;
60946+#else
60947 inode->i_gid = de->gid;
60948+#endif
60949 }
60950 if (de->size)
60951 inode->i_size = de->size;
60952diff --git a/fs/proc/internal.h b/fs/proc/internal.h
60953index 651d09a..60c73ae 100644
60954--- a/fs/proc/internal.h
60955+++ b/fs/proc/internal.h
60956@@ -155,6 +155,9 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
60957 struct pid *, struct task_struct *);
60958 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
60959 struct pid *, struct task_struct *);
60960+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
60961+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
60962+#endif
60963
60964 /*
60965 * base.c
60966diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
60967index 06ea155..9a798c7 100644
60968--- a/fs/proc/kcore.c
60969+++ b/fs/proc/kcore.c
60970@@ -484,9 +484,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
60971 * the addresses in the elf_phdr on our list.
60972 */
60973 start = kc_offset_to_vaddr(*fpos - elf_buflen);
60974- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
60975+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
60976+ if (tsz > buflen)
60977 tsz = buflen;
60978-
60979+
60980 while (buflen) {
60981 struct kcore_list *m;
60982
60983@@ -515,20 +516,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
60984 kfree(elf_buf);
60985 } else {
60986 if (kern_addr_valid(start)) {
60987- unsigned long n;
60988+ char *elf_buf;
60989+ mm_segment_t oldfs;
60990
60991- n = copy_to_user(buffer, (char *)start, tsz);
60992- /*
60993- * We cannot distinguish between fault on source
60994- * and fault on destination. When this happens
60995- * we clear too and hope it will trigger the
60996- * EFAULT again.
60997- */
60998- if (n) {
60999- if (clear_user(buffer + tsz - n,
61000- n))
61001+ elf_buf = kmalloc(tsz, GFP_KERNEL);
61002+ if (!elf_buf)
61003+ return -ENOMEM;
61004+ oldfs = get_fs();
61005+ set_fs(KERNEL_DS);
61006+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
61007+ set_fs(oldfs);
61008+ if (copy_to_user(buffer, elf_buf, tsz)) {
61009+ kfree(elf_buf);
61010 return -EFAULT;
61011+ }
61012 }
61013+ set_fs(oldfs);
61014+ kfree(elf_buf);
61015 } else {
61016 if (clear_user(buffer, tsz))
61017 return -EFAULT;
61018@@ -548,6 +552,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
61019
61020 static int open_kcore(struct inode *inode, struct file *filp)
61021 {
61022+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
61023+ return -EPERM;
61024+#endif
61025 if (!capable(CAP_SYS_RAWIO))
61026 return -EPERM;
61027 if (kcore_need_update)
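
Two independent hardenings land in fs/proc/kcore.c above: the read path now stages kernel memory through a kmalloc() bounce buffer (filled under KERNEL_DS with __copy_from_user(), then handed out with copy_to_user()) instead of copy_to_user()-ing straight from an arbitrary kernel virtual address, and open_kcore() refuses outright when GRKERNSEC_PROC_ADD or GRKERNSEC_HIDESYM is set. The user-visible half is easy to check (hypothetical probe):

/* Hypothetical probe: on a kernel with GRKERNSEC_PROC_ADD or
 * GRKERNSEC_HIDESYM, open("/proc/kcore") fails with EPERM even for
 * root; otherwise it still requires CAP_SYS_RAWIO. */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/proc/kcore", O_RDONLY);

    if (fd < 0) {
        printf("open /proc/kcore: %s\n", strerror(errno));
        return 1;
    }
    puts("open /proc/kcore: allowed");
    close(fd);
    return 0;
}
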
61028diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
61029index 59d85d6..ac6fc05 100644
61030--- a/fs/proc/meminfo.c
61031+++ b/fs/proc/meminfo.c
61032@@ -153,7 +153,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
61033 vmi.used >> 10,
61034 vmi.largest_chunk >> 10
61035 #ifdef CONFIG_MEMORY_FAILURE
61036- ,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
61037+ ,atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
61038 #endif
61039 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
61040 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
61041diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
61042index ccfd99b..1b7e255 100644
61043--- a/fs/proc/nommu.c
61044+++ b/fs/proc/nommu.c
61045@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
61046 if (len < 1)
61047 len = 1;
61048 seq_printf(m, "%*c", len, ' ');
61049- seq_path(m, &file->f_path, "");
61050+ seq_path(m, &file->f_path, "\n\\");
61051 }
61052
61053 seq_putc(m, '\n');
61054diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
61055index 4677bb7..408e936 100644
61056--- a/fs/proc/proc_net.c
61057+++ b/fs/proc/proc_net.c
61058@@ -23,6 +23,7 @@
61059 #include <linux/nsproxy.h>
61060 #include <net/net_namespace.h>
61061 #include <linux/seq_file.h>
61062+#include <linux/grsecurity.h>
61063
61064 #include "internal.h"
61065
61066@@ -109,6 +110,17 @@ static struct net *get_proc_task_net(struct inode *dir)
61067 struct task_struct *task;
61068 struct nsproxy *ns;
61069 struct net *net = NULL;
61070+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61071+ const struct cred *cred = current_cred();
61072+#endif
61073+
61074+#ifdef CONFIG_GRKERNSEC_PROC_USER
61075+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
61076+ return net;
61077+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61078+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
61079+ return net;
61080+#endif
61081
61082 rcu_read_lock();
61083 task = pid_task(proc_pid(dir), PIDTYPE_PID);
61084diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
61085index 7129046..6914844 100644
61086--- a/fs/proc/proc_sysctl.c
61087+++ b/fs/proc/proc_sysctl.c
61088@@ -11,13 +11,21 @@
61089 #include <linux/namei.h>
61090 #include <linux/mm.h>
61091 #include <linux/module.h>
61092+#include <linux/nsproxy.h>
61093+#ifdef CONFIG_GRKERNSEC
61094+#include <net/net_namespace.h>
61095+#endif
61096 #include "internal.h"
61097
61098+extern int gr_handle_chroot_sysctl(const int op);
61099+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
61100+ const int op);
61101+
61102 static const struct dentry_operations proc_sys_dentry_operations;
61103 static const struct file_operations proc_sys_file_operations;
61104-static const struct inode_operations proc_sys_inode_operations;
61105+const struct inode_operations proc_sys_inode_operations;
61106 static const struct file_operations proc_sys_dir_file_operations;
61107-static const struct inode_operations proc_sys_dir_operations;
61108+const struct inode_operations proc_sys_dir_operations;
61109
61110 void proc_sys_poll_notify(struct ctl_table_poll *poll)
61111 {
61112@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
61113
61114 err = NULL;
61115 d_set_d_op(dentry, &proc_sys_dentry_operations);
61116+
61117+ gr_handle_proc_create(dentry, inode);
61118+
61119 d_add(dentry, inode);
61120
61121 out:
61122@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
61123 struct inode *inode = file_inode(filp);
61124 struct ctl_table_header *head = grab_header(inode);
61125 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
61126+ int op = write ? MAY_WRITE : MAY_READ;
61127 ssize_t error;
61128 size_t res;
61129
61130@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
61131 * and won't be until we finish.
61132 */
61133 error = -EPERM;
61134- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
61135+ if (sysctl_perm(head, table, op))
61136 goto out;
61137
61138 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
61139@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
61140 if (!table->proc_handler)
61141 goto out;
61142
61143+#ifdef CONFIG_GRKERNSEC
61144+ error = -EPERM;
61145+ if (gr_handle_chroot_sysctl(op))
61146+ goto out;
61147+ dget(filp->f_path.dentry);
61148+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
61149+ dput(filp->f_path.dentry);
61150+ goto out;
61151+ }
61152+ dput(filp->f_path.dentry);
61153+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
61154+ goto out;
61155+ if (write) {
61156+ if (current->nsproxy->net_ns != table->extra2) {
61157+ if (!capable(CAP_SYS_ADMIN))
61158+ goto out;
61159+ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
61160+ goto out;
61161+ }
61162+#endif
61163+
61164 /* careful: calling conventions are nasty here */
61165 res = count;
61166 error = table->proc_handler(table, write, buf, &res, ppos);
61167@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
61168 return false;
61169 } else {
61170 d_set_d_op(child, &proc_sys_dentry_operations);
61171+
61172+ gr_handle_proc_create(child, inode);
61173+
61174 d_add(child, inode);
61175 }
61176 } else {
61177@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
61178 if ((*pos)++ < ctx->pos)
61179 return true;
61180
61181+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
61182+ return 0;
61183+
61184 if (unlikely(S_ISLNK(table->mode)))
61185 res = proc_sys_link_fill_cache(file, ctx, head, table);
61186 else
61187@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
61188 if (IS_ERR(head))
61189 return PTR_ERR(head);
61190
61191+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
61192+ return -ENOENT;
61193+
61194 generic_fillattr(inode, stat);
61195 if (table)
61196 stat->mode = (stat->mode & S_IFMT) | table->mode;
61197@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
61198 .llseek = generic_file_llseek,
61199 };
61200
61201-static const struct inode_operations proc_sys_inode_operations = {
61202+const struct inode_operations proc_sys_inode_operations = {
61203 .permission = proc_sys_permission,
61204 .setattr = proc_sys_setattr,
61205 .getattr = proc_sys_getattr,
61206 };
61207
61208-static const struct inode_operations proc_sys_dir_operations = {
61209+const struct inode_operations proc_sys_dir_operations = {
61210 .lookup = proc_sys_lookup,
61211 .permission = proc_sys_permission,
61212 .setattr = proc_sys_setattr,
61213@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
61214 static struct ctl_dir *new_dir(struct ctl_table_set *set,
61215 const char *name, int namelen)
61216 {
61217- struct ctl_table *table;
61218+ ctl_table_no_const *table;
61219 struct ctl_dir *new;
61220 struct ctl_node *node;
61221 char *new_name;
61222@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
61223 return NULL;
61224
61225 node = (struct ctl_node *)(new + 1);
61226- table = (struct ctl_table *)(node + 1);
61227+ table = (ctl_table_no_const *)(node + 1);
61228 new_name = (char *)(table + 2);
61229 memcpy(new_name, name, namelen);
61230 new_name[namelen] = '\0';
61231@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
61232 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
61233 struct ctl_table_root *link_root)
61234 {
61235- struct ctl_table *link_table, *entry, *link;
61236+ ctl_table_no_const *link_table, *link;
61237+ struct ctl_table *entry;
61238 struct ctl_table_header *links;
61239 struct ctl_node *node;
61240 char *link_name;
61241@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
61242 return NULL;
61243
61244 node = (struct ctl_node *)(links + 1);
61245- link_table = (struct ctl_table *)(node + nr_entries);
61246+ link_table = (ctl_table_no_const *)(node + nr_entries);
61247 link_name = (char *)&link_table[nr_entries + 1];
61248
61249 for (link = link_table, entry = table; entry->procname; link++, entry++) {
61250@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
61251 struct ctl_table_header ***subheader, struct ctl_table_set *set,
61252 struct ctl_table *table)
61253 {
61254- struct ctl_table *ctl_table_arg = NULL;
61255- struct ctl_table *entry, *files;
61256+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
61257+ struct ctl_table *entry;
61258 int nr_files = 0;
61259 int nr_dirs = 0;
61260 int err = -ENOMEM;
61261@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
61262 nr_files++;
61263 }
61264
61265- files = table;
61266 /* If there are mixed files and directories we need a new table */
61267 if (nr_dirs && nr_files) {
61268- struct ctl_table *new;
61269+ ctl_table_no_const *new;
61270 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
61271 GFP_KERNEL);
61272 if (!files)
61273@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
61274 /* Register everything except a directory full of subdirectories */
61275 if (nr_files || !nr_dirs) {
61276 struct ctl_table_header *header;
61277- header = __register_sysctl_table(set, path, files);
61278+ header = __register_sysctl_table(set, path, files ? files : table);
61279 if (!header) {
61280 kfree(ctl_table_arg);
61281 goto out;
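
proc_sys_call_handler() above now funnels every sysctl read and write through the chroot check, the per-name gr_handle_sysctl_mod() policy, and the RBAC open check, and writes additionally demand CAP_SYS_ADMIN (or CAP_NET_ADMIN inside the owning network namespace). Reads of world-readable sysctls keep working for an allowed subject, as in this hypothetical sketch:

/* Hypothetical demo: a plain read through /proc/sys still passes the
 * added gr_* checks for an allowed subject; a write from the wrong
 * namespace or without CAP_SYS_ADMIN now fails with EPERM. */
#include <stdio.h>

int main(void)
{
    char buf[128];
    FILE *f = fopen("/proc/sys/kernel/osrelease", "r");

    if (!f) { perror("fopen"); return 1; }
    if (fgets(buf, sizeof(buf), f))
        printf("kernel.osrelease = %s", buf);
    fclose(f);
    return 0;
}
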
61282diff --git a/fs/proc/root.c b/fs/proc/root.c
61283index 87dbcbe..55e1b4d 100644
61284--- a/fs/proc/root.c
61285+++ b/fs/proc/root.c
61286@@ -186,7 +186,15 @@ void __init proc_root_init(void)
61287 #ifdef CONFIG_PROC_DEVICETREE
61288 proc_device_tree_init();
61289 #endif
61290+#ifdef CONFIG_GRKERNSEC_PROC_ADD
61291+#ifdef CONFIG_GRKERNSEC_PROC_USER
61292+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
61293+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61294+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
61295+#endif
61296+#else
61297 proc_mkdir("bus", NULL);
61298+#endif
61299 proc_sys_init();
61300 }
61301
61302diff --git a/fs/proc/self.c b/fs/proc/self.c
61303index 6b6a993..807cccc 100644
61304--- a/fs/proc/self.c
61305+++ b/fs/proc/self.c
61306@@ -39,7 +39,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
61307 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
61308 void *cookie)
61309 {
61310- char *s = nd_get_link(nd);
61311+ const char *s = nd_get_link(nd);
61312 if (!IS_ERR(s))
61313 kfree(s);
61314 }
61315diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
61316index 390bdab..83c1e8a 100644
61317--- a/fs/proc/task_mmu.c
61318+++ b/fs/proc/task_mmu.c
61319@@ -12,12 +12,19 @@
61320 #include <linux/swap.h>
61321 #include <linux/swapops.h>
61322 #include <linux/mmu_notifier.h>
61323+#include <linux/grsecurity.h>
61324
61325 #include <asm/elf.h>
61326 #include <asm/uaccess.h>
61327 #include <asm/tlbflush.h>
61328 #include "internal.h"
61329
61330+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61331+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
61332+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
61333+ _mm->pax_flags & MF_PAX_SEGMEXEC))
61334+#endif
61335+
61336 void task_mem(struct seq_file *m, struct mm_struct *mm)
61337 {
61338 unsigned long data, text, lib, swap;
61339@@ -53,8 +60,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
61340 "VmExe:\t%8lu kB\n"
61341 "VmLib:\t%8lu kB\n"
61342 "VmPTE:\t%8lu kB\n"
61343- "VmSwap:\t%8lu kB\n",
61344- hiwater_vm << (PAGE_SHIFT-10),
61345+ "VmSwap:\t%8lu kB\n"
61346+
61347+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
61348+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
61349+#endif
61350+
61351+ ,hiwater_vm << (PAGE_SHIFT-10),
61352 total_vm << (PAGE_SHIFT-10),
61353 mm->locked_vm << (PAGE_SHIFT-10),
61354 mm->pinned_vm << (PAGE_SHIFT-10),
61355@@ -63,7 +75,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
61356 data << (PAGE_SHIFT-10),
61357 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
61358 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
61359- swap << (PAGE_SHIFT-10));
61360+ swap << (PAGE_SHIFT-10)
61361+
61362+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
61363+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61364+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
61365+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
61366+#else
61367+ , mm->context.user_cs_base
61368+ , mm->context.user_cs_limit
61369+#endif
61370+#endif
61371+
61372+ );
61373 }
61374
61375 unsigned long task_vsize(struct mm_struct *mm)
61376@@ -278,13 +302,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
61377 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
61378 }
61379
61380- /* We don't show the stack guard page in /proc/maps */
61381+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61382+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
61383+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
61384+#else
61385 start = vma->vm_start;
61386- if (stack_guard_page_start(vma, start))
61387- start += PAGE_SIZE;
61388 end = vma->vm_end;
61389- if (stack_guard_page_end(vma, end))
61390- end -= PAGE_SIZE;
61391+#endif
61392
61393 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
61394 start,
61395@@ -293,7 +317,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
61396 flags & VM_WRITE ? 'w' : '-',
61397 flags & VM_EXEC ? 'x' : '-',
61398 flags & VM_MAYSHARE ? 's' : 'p',
61399+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61400+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
61401+#else
61402 pgoff,
61403+#endif
61404 MAJOR(dev), MINOR(dev), ino, &len);
61405
61406 /*
61407@@ -302,7 +330,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
61408 */
61409 if (file) {
61410 pad_len_spaces(m, len);
61411- seq_path(m, &file->f_path, "\n");
61412+ seq_path(m, &file->f_path, "\n\\");
61413 goto done;
61414 }
61415
61416@@ -328,8 +356,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
61417 * Thread stack in /proc/PID/task/TID/maps or
61418 * the main process stack.
61419 */
61420- if (!is_pid || (vma->vm_start <= mm->start_stack &&
61421- vma->vm_end >= mm->start_stack)) {
61422+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
61423+ (vma->vm_start <= mm->start_stack &&
61424+ vma->vm_end >= mm->start_stack)) {
61425 name = "[stack]";
61426 } else {
61427 /* Thread stack in /proc/PID/maps */
61428@@ -353,6 +382,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
61429 struct proc_maps_private *priv = m->private;
61430 struct task_struct *task = priv->task;
61431
61432+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61433+ if (current->exec_id != m->exec_id) {
61434+ gr_log_badprocpid("maps");
61435+ return 0;
61436+ }
61437+#endif
61438+
61439 show_map_vma(m, vma, is_pid);
61440
61441 if (m->count < m->size) /* vma is copied successfully */
61442@@ -590,12 +626,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
61443 .private = &mss,
61444 };
61445
61446+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61447+ if (current->exec_id != m->exec_id) {
61448+ gr_log_badprocpid("smaps");
61449+ return 0;
61450+ }
61451+#endif
61452 memset(&mss, 0, sizeof mss);
61453- mss.vma = vma;
61454- /* mmap_sem is held in m_start */
61455- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
61456- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
61457-
61458+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61459+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
61460+#endif
61461+ mss.vma = vma;
61462+ /* mmap_sem is held in m_start */
61463+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
61464+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
61465+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61466+ }
61467+#endif
61468 show_map_vma(m, vma, is_pid);
61469
61470 seq_printf(m,
61471@@ -613,7 +660,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
61472 "KernelPageSize: %8lu kB\n"
61473 "MMUPageSize: %8lu kB\n"
61474 "Locked: %8lu kB\n",
61475+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61476+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
61477+#else
61478 (vma->vm_end - vma->vm_start) >> 10,
61479+#endif
61480 mss.resident >> 10,
61481 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
61482 mss.shared_clean >> 10,
61483@@ -1390,6 +1441,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
61484 int n;
61485 char buffer[50];
61486
61487+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61488+ if (current->exec_id != m->exec_id) {
61489+ gr_log_badprocpid("numa_maps");
61490+ return 0;
61491+ }
61492+#endif
61493+
61494 if (!mm)
61495 return 0;
61496
61497@@ -1409,11 +1467,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
61498 if (n < 0)
61499 return n;
61500
61501+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61502+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
61503+#else
61504 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
61505+#endif
61506
61507 if (file) {
61508 seq_printf(m, " file=");
61509- seq_path(m, &file->f_path, "\n\t= ");
61510+ seq_path(m, &file->f_path, "\n\t\\= ");
61511 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
61512 seq_printf(m, " heap");
61513 } else {
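
Under CONFIG_GRKERNSEC_PROC_MEMMAP, show_map_vma() above reports zero for the start/end addresses and file offset of any randomized mm other than the reader's own, and the seq_path() escape string now covers '\n' and '\\' so a crafted file name cannot inject fake lines into /proc/<pid>/maps. Reading your own map is unaffected (hypothetical demo):

/* Hypothetical demo: dump the first few lines of /proc/self/maps.
 * A task's own mappings are never sanitized; another user reading
 * them on a PROC_MEMMAP kernel would see zeroed addresses instead. */
#include <stdio.h>

int main(void)
{
    char line[512];
    int n = 0;
    FILE *f = fopen("/proc/self/maps", "r");

    if (!f) { perror("fopen"); return 1; }
    while (n++ < 5 && fgets(line, sizeof(line), f))
        fputs(line, stdout);
    fclose(f);
    return 0;
}
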
61514diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
61515index 56123a6..5a2f6ec 100644
61516--- a/fs/proc/task_nommu.c
61517+++ b/fs/proc/task_nommu.c
61518@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
61519 else
61520 bytes += kobjsize(mm);
61521
61522- if (current->fs && current->fs->users > 1)
61523+ if (current->fs && atomic_read(&current->fs->users) > 1)
61524 sbytes += kobjsize(current->fs);
61525 else
61526 bytes += kobjsize(current->fs);
61527@@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
61528
61529 if (file) {
61530 pad_len_spaces(m, len);
61531- seq_path(m, &file->f_path, "");
61532+ seq_path(m, &file->f_path, "\n\\");
61533 } else if (mm) {
61534 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
61535
61536diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
61537index 9100d69..f1f9fc9 100644
61538--- a/fs/proc/vmcore.c
61539+++ b/fs/proc/vmcore.c
61540@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
61541 nr_bytes = count;
61542
61543 /* If pfn is not ram, return zeros for sparse dump files */
61544- if (pfn_is_ram(pfn) == 0)
61545- memset(buf, 0, nr_bytes);
61546- else {
61547+ if (pfn_is_ram(pfn) == 0) {
61548+ if (userbuf) {
61549+ if (clear_user((char __force_user *)buf, nr_bytes))
61550+ return -EFAULT;
61551+ } else
61552+ memset(buf, 0, nr_bytes);
61553+ } else {
61554 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
61555 offset, userbuf);
61556 if (tmp < 0)
61557@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
61558 if (*fpos < m->offset + m->size) {
61559 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
61560 start = m->paddr + *fpos - m->offset;
61561- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
61562+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
61563 if (tmp < 0)
61564 return tmp;
61565 buflen -= tsz;
61566diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
61567index b00fcc9..e0c6381 100644
61568--- a/fs/qnx6/qnx6.h
61569+++ b/fs/qnx6/qnx6.h
61570@@ -74,7 +74,7 @@ enum {
61571 BYTESEX_BE,
61572 };
61573
61574-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
61575+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
61576 {
61577 if (sbi->s_bytesex == BYTESEX_LE)
61578 return le64_to_cpu((__force __le64)n);
61579@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
61580 return (__force __fs64)cpu_to_be64(n);
61581 }
61582
61583-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
61584+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
61585 {
61586 if (sbi->s_bytesex == BYTESEX_LE)
61587 return le32_to_cpu((__force __le32)n);
61588diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
61589index 16e8abb..2dcf914 100644
61590--- a/fs/quota/netlink.c
61591+++ b/fs/quota/netlink.c
61592@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
61593 void quota_send_warning(struct kqid qid, dev_t dev,
61594 const char warntype)
61595 {
61596- static atomic_t seq;
61597+ static atomic_unchecked_t seq;
61598 struct sk_buff *skb;
61599 void *msg_head;
61600 int ret;
61601@@ -49,7 +49,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
61602 "VFS: Not enough memory to send quota warning.\n");
61603 return;
61604 }
61605- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
61606+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
61607 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
61608 if (!msg_head) {
61609 printk(KERN_ERR
61610diff --git a/fs/read_write.c b/fs/read_write.c
61611index e3cd280..a378473 100644
61612--- a/fs/read_write.c
61613+++ b/fs/read_write.c
61614@@ -438,7 +438,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
61615
61616 old_fs = get_fs();
61617 set_fs(get_ds());
61618- p = (__force const char __user *)buf;
61619+ p = (const char __force_user *)buf;
61620 if (count > MAX_RW_COUNT)
61621 count = MAX_RW_COUNT;
61622 if (file->f_op->write)
61623diff --git a/fs/readdir.c b/fs/readdir.c
61624index 93d71e5..6a14be8 100644
61625--- a/fs/readdir.c
61626+++ b/fs/readdir.c
61627@@ -17,6 +17,7 @@
61628 #include <linux/security.h>
61629 #include <linux/syscalls.h>
61630 #include <linux/unistd.h>
61631+#include <linux/namei.h>
61632
61633 #include <asm/uaccess.h>
61634
61635@@ -69,6 +70,7 @@ struct old_linux_dirent {
61636 struct readdir_callback {
61637 struct dir_context ctx;
61638 struct old_linux_dirent __user * dirent;
61639+ struct file * file;
61640 int result;
61641 };
61642
61643@@ -86,6 +88,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
61644 buf->result = -EOVERFLOW;
61645 return -EOVERFLOW;
61646 }
61647+
61648+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61649+ return 0;
61650+
61651 buf->result++;
61652 dirent = buf->dirent;
61653 if (!access_ok(VERIFY_WRITE, dirent,
61654@@ -117,6 +123,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
61655 if (!f.file)
61656 return -EBADF;
61657
61658+ buf.file = f.file;
61659 error = iterate_dir(f.file, &buf.ctx);
61660 if (buf.result)
61661 error = buf.result;
61662@@ -142,6 +149,7 @@ struct getdents_callback {
61663 struct dir_context ctx;
61664 struct linux_dirent __user * current_dir;
61665 struct linux_dirent __user * previous;
61666+ struct file * file;
61667 int count;
61668 int error;
61669 };
61670@@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
61671 buf->error = -EOVERFLOW;
61672 return -EOVERFLOW;
61673 }
61674+
61675+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61676+ return 0;
61677+
61678 dirent = buf->previous;
61679 if (dirent) {
61680 if (__put_user(offset, &dirent->d_off))
61681@@ -208,6 +220,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
61682 if (!f.file)
61683 return -EBADF;
61684
61685+ buf.file = f.file;
61686 error = iterate_dir(f.file, &buf.ctx);
61687 if (error >= 0)
61688 error = buf.error;
61689@@ -226,6 +239,7 @@ struct getdents_callback64 {
61690 struct dir_context ctx;
61691 struct linux_dirent64 __user * current_dir;
61692 struct linux_dirent64 __user * previous;
61693+ struct file *file;
61694 int count;
61695 int error;
61696 };
61697@@ -241,6 +255,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
61698 buf->error = -EINVAL; /* only used if we fail.. */
61699 if (reclen > buf->count)
61700 return -EINVAL;
61701+
61702+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61703+ return 0;
61704+
61705 dirent = buf->previous;
61706 if (dirent) {
61707 if (__put_user(offset, &dirent->d_off))
61708@@ -288,6 +306,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
61709 if (!f.file)
61710 return -EBADF;
61711
61712+ buf.file = f.file;
61713 error = iterate_dir(f.file, &buf.ctx);
61714 if (error >= 0)
61715 error = buf.error;
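
Each readdir flavor in fs/readdir.c gains a struct file pointer in its callback so gr_acl_handle_filldir() can veto individual entries before they are copied out; a hidden entry simply never lands in the user buffer. The layout being filled is the one a raw getdents64(2) caller sees, as in this standalone sketch mirroring the kernel's struct linux_dirent64:

/* Standalone sketch: list "." with the raw getdents64 syscall. The
 * struct mirrors the kernel's linux_dirent64; entries rejected by
 * gr_acl_handle_filldir() above never appear in this buffer. */
#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

struct linux_dirent64 {
    unsigned long long d_ino;
    long long          d_off;
    unsigned short     d_reclen;
    unsigned char      d_type;
    char               d_name[];
};

int main(void)
{
    char buf[4096];
    long n;
    int fd = open(".", O_RDONLY | O_DIRECTORY);

    if (fd < 0) { perror("open"); return 1; }
    while ((n = syscall(SYS_getdents64, fd, buf, sizeof(buf))) > 0) {
        for (long pos = 0; pos < n; ) {
            struct linux_dirent64 *d = (struct linux_dirent64 *)(buf + pos);
            puts(d->d_name);
            pos += d->d_reclen;
        }
    }
    close(fd);
    return 0;
}
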
61716diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
61717index 2b7882b..1c5ef48 100644
61718--- a/fs/reiserfs/do_balan.c
61719+++ b/fs/reiserfs/do_balan.c
61720@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
61721 return;
61722 }
61723
61724- atomic_inc(&(fs_generation(tb->tb_sb)));
61725+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
61726 do_balance_starts(tb);
61727
61728 /* balance leaf returns 0 except if combining L R and S into
61729diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
61730index a958444..42b2323 100644
61731--- a/fs/reiserfs/procfs.c
61732+++ b/fs/reiserfs/procfs.c
61733@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
61734 "SMALL_TAILS " : "NO_TAILS ",
61735 replay_only(sb) ? "REPLAY_ONLY " : "",
61736 convert_reiserfs(sb) ? "CONV " : "",
61737- atomic_read(&r->s_generation_counter),
61738+ atomic_read_unchecked(&r->s_generation_counter),
61739 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
61740 SF(s_do_balance), SF(s_unneeded_left_neighbor),
61741 SF(s_good_search_by_key_reada), SF(s_bmaps),
61742diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
61743index f8adaee..0eeeeca 100644
61744--- a/fs/reiserfs/reiserfs.h
61745+++ b/fs/reiserfs/reiserfs.h
61746@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
61747 /* Comment? -Hans */
61748 wait_queue_head_t s_wait;
61749 /* To be obsoleted soon by per buffer seals.. -Hans */
61750- atomic_t s_generation_counter; // increased by one every time the
61751+ atomic_unchecked_t s_generation_counter; // increased by one every time the
61752 // tree gets re-balanced
61753 unsigned long s_properties; /* File system properties. Currently holds
61754 on-disk FS format */
61755@@ -1982,7 +1982,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
61756 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
61757
61758 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
61759-#define get_generation(s) atomic_read (&fs_generation(s))
61760+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
61761 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
61762 #define __fs_changed(gen,s) (gen != get_generation (s))
61763 #define fs_changed(gen,s) \
61764diff --git a/fs/select.c b/fs/select.c
61765index dfd5cb1..1754d57 100644
61766--- a/fs/select.c
61767+++ b/fs/select.c
61768@@ -20,6 +20,7 @@
61769 #include <linux/export.h>
61770 #include <linux/slab.h>
61771 #include <linux/poll.h>
61772+#include <linux/security.h>
61773 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
61774 #include <linux/file.h>
61775 #include <linux/fdtable.h>
61776@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
61777 struct poll_list *walk = head;
61778 unsigned long todo = nfds;
61779
61780+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
61781 if (nfds > rlimit(RLIMIT_NOFILE))
61782 return -EINVAL;
61783
61784diff --git a/fs/seq_file.c b/fs/seq_file.c
61785index a290157..ec3211a 100644
61786--- a/fs/seq_file.c
61787+++ b/fs/seq_file.c
61788@@ -10,6 +10,7 @@
61789 #include <linux/seq_file.h>
61790 #include <linux/slab.h>
61791 #include <linux/cred.h>
61792+#include <linux/sched.h>
61793
61794 #include <asm/uaccess.h>
61795 #include <asm/page.h>
61796@@ -60,6 +61,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
61797 #ifdef CONFIG_USER_NS
61798 p->user_ns = file->f_cred->user_ns;
61799 #endif
61800+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61801+ p->exec_id = current->exec_id;
61802+#endif
61803
61804 /*
61805 * Wrappers around seq_open(e.g. swaps_open) need to be
61806@@ -96,7 +100,7 @@ static int traverse(struct seq_file *m, loff_t offset)
61807 return 0;
61808 }
61809 if (!m->buf) {
61810- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
61811+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
61812 if (!m->buf)
61813 return -ENOMEM;
61814 }
61815@@ -136,7 +140,7 @@ static int traverse(struct seq_file *m, loff_t offset)
61816 Eoverflow:
61817 m->op->stop(m, p);
61818 kfree(m->buf);
61819- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
61820+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
61821 return !m->buf ? -ENOMEM : -EAGAIN;
61822 }
61823
61824@@ -152,7 +156,7 @@ Eoverflow:
61825 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
61826 {
61827 struct seq_file *m = file->private_data;
61828- size_t copied = 0;
61829+ ssize_t copied = 0;
61830 loff_t pos;
61831 size_t n;
61832 void *p;
61833@@ -191,7 +195,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
61834
61835 /* grab buffer if we didn't have one */
61836 if (!m->buf) {
61837- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
61838+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
61839 if (!m->buf)
61840 goto Enomem;
61841 }
61842@@ -232,7 +236,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
61843 goto Fill;
61844 m->op->stop(m, p);
61845 kfree(m->buf);
61846- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
61847+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
61848 if (!m->buf)
61849 goto Enomem;
61850 m->count = 0;
61851@@ -583,7 +587,7 @@ static void single_stop(struct seq_file *p, void *v)
61852 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
61853 void *data)
61854 {
61855- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
61856+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
61857 int res = -ENOMEM;
61858
61859 if (op) {
61860diff --git a/fs/splice.c b/fs/splice.c
61861index 3b7ee65..87fc2e4 100644
61862--- a/fs/splice.c
61863+++ b/fs/splice.c
61864@@ -196,7 +196,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
61865 pipe_lock(pipe);
61866
61867 for (;;) {
61868- if (!pipe->readers) {
61869+ if (!atomic_read(&pipe->readers)) {
61870 send_sig(SIGPIPE, current, 0);
61871 if (!ret)
61872 ret = -EPIPE;
61873@@ -219,7 +219,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
61874 page_nr++;
61875 ret += buf->len;
61876
61877- if (pipe->files)
61878+ if (atomic_read(&pipe->files))
61879 do_wakeup = 1;
61880
61881 if (!--spd->nr_pages)
61882@@ -250,9 +250,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
61883 do_wakeup = 0;
61884 }
61885
61886- pipe->waiting_writers++;
61887+ atomic_inc(&pipe->waiting_writers);
61888 pipe_wait(pipe);
61889- pipe->waiting_writers--;
61890+ atomic_dec(&pipe->waiting_writers);
61891 }
61892
61893 pipe_unlock(pipe);
61894@@ -565,7 +565,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
61895 old_fs = get_fs();
61896 set_fs(get_ds());
61897 /* The cast to a user pointer is valid due to the set_fs() */
61898- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
61899+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
61900 set_fs(old_fs);
61901
61902 return res;
61903@@ -580,7 +580,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
61904 old_fs = get_fs();
61905 set_fs(get_ds());
61906 /* The cast to a user pointer is valid due to the set_fs() */
61907- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
61908+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
61909 set_fs(old_fs);
61910
61911 return res;
61912@@ -633,7 +633,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
61913 goto err;
61914
61915 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
61916- vec[i].iov_base = (void __user *) page_address(page);
61917+ vec[i].iov_base = (void __force_user *) page_address(page);
61918 vec[i].iov_len = this_len;
61919 spd.pages[i] = page;
61920 spd.nr_pages++;
61921@@ -829,7 +829,7 @@ int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
61922 ops->release(pipe, buf);
61923 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
61924 pipe->nrbufs--;
61925- if (pipe->files)
61926+ if (atomic_read(&pipe->files))
61927 sd->need_wakeup = true;
61928 }
61929
61930@@ -854,10 +854,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
61931 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
61932 {
61933 while (!pipe->nrbufs) {
61934- if (!pipe->writers)
61935+ if (!atomic_read(&pipe->writers))
61936 return 0;
61937
61938- if (!pipe->waiting_writers && sd->num_spliced)
61939+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
61940 return 0;
61941
61942 if (sd->flags & SPLICE_F_NONBLOCK)
61943@@ -1179,7 +1179,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
61944 * out of the pipe right after the splice_to_pipe(). So set
61945 * PIPE_READERS appropriately.
61946 */
61947- pipe->readers = 1;
61948+ atomic_set(&pipe->readers, 1);
61949
61950 current->splice_pipe = pipe;
61951 }
61952@@ -1475,6 +1475,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
61953
61954 partial[buffers].offset = off;
61955 partial[buffers].len = plen;
61956+ partial[buffers].private = 0;
61957
61958 off = 0;
61959 len -= plen;
61960@@ -1777,9 +1778,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
61961 ret = -ERESTARTSYS;
61962 break;
61963 }
61964- if (!pipe->writers)
61965+ if (!atomic_read(&pipe->writers))
61966 break;
61967- if (!pipe->waiting_writers) {
61968+ if (!atomic_read(&pipe->waiting_writers)) {
61969 if (flags & SPLICE_F_NONBLOCK) {
61970 ret = -EAGAIN;
61971 break;
61972@@ -1811,7 +1812,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
61973 pipe_lock(pipe);
61974
61975 while (pipe->nrbufs >= pipe->buffers) {
61976- if (!pipe->readers) {
61977+ if (!atomic_read(&pipe->readers)) {
61978 send_sig(SIGPIPE, current, 0);
61979 ret = -EPIPE;
61980 break;
61981@@ -1824,9 +1825,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
61982 ret = -ERESTARTSYS;
61983 break;
61984 }
61985- pipe->waiting_writers++;
61986+ atomic_inc(&pipe->waiting_writers);
61987 pipe_wait(pipe);
61988- pipe->waiting_writers--;
61989+ atomic_dec(&pipe->waiting_writers);
61990 }
61991
61992 pipe_unlock(pipe);
61993@@ -1862,14 +1863,14 @@ retry:
61994 pipe_double_lock(ipipe, opipe);
61995
61996 do {
61997- if (!opipe->readers) {
61998+ if (!atomic_read(&opipe->readers)) {
61999 send_sig(SIGPIPE, current, 0);
62000 if (!ret)
62001 ret = -EPIPE;
62002 break;
62003 }
62004
62005- if (!ipipe->nrbufs && !ipipe->writers)
62006+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
62007 break;
62008
62009 /*
62010@@ -1966,7 +1967,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
62011 pipe_double_lock(ipipe, opipe);
62012
62013 do {
62014- if (!opipe->readers) {
62015+ if (!atomic_read(&opipe->readers)) {
62016 send_sig(SIGPIPE, current, 0);
62017 if (!ret)
62018 ret = -EPIPE;
62019@@ -2011,7 +2012,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
62020 * return EAGAIN if we have the potential of some data in the
62021 * future, otherwise just return 0
62022 */
62023- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
62024+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
62025 ret = -EAGAIN;
62026
62027 pipe_unlock(ipipe);
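
All of the fs/splice.c changes above are one mechanical conversion: pipe->readers, pipe->writers, pipe->files, and pipe->waiting_writers become atomic_t elsewhere in this patch, so every read and update here goes through the atomic_* API. A minimal C11 sketch of the same counter pattern (names illustrative):

#include <stdatomic.h>
#include <stdio.h>

/* Counters touched from several contexts become atomics so that
 * increments, decrements, and tests cannot be torn or miscompiled. */
struct pipe_counters {
    atomic_int readers;
    atomic_int waiting_writers;
};

int main(void)
{
    struct pipe_counters p;

    atomic_init(&p.readers, 1);
    atomic_init(&p.waiting_writers, 0);

    atomic_fetch_add(&p.waiting_writers, 1);  /* was: pipe->waiting_writers++ */
    if (atomic_load(&p.readers) == 0)         /* was: if (!pipe->readers)     */
        puts("no readers: raise SIGPIPE");
    atomic_fetch_sub(&p.waiting_writers, 1);  /* was: pipe->waiting_writers-- */

    printf("waiting_writers=%d\n", atomic_load(&p.waiting_writers));
    return 0;
}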
62028diff --git a/fs/stat.c b/fs/stat.c
62029index ae0c3ce..9ee641c 100644
62030--- a/fs/stat.c
62031+++ b/fs/stat.c
62032@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
62033 stat->gid = inode->i_gid;
62034 stat->rdev = inode->i_rdev;
62035 stat->size = i_size_read(inode);
62036- stat->atime = inode->i_atime;
62037- stat->mtime = inode->i_mtime;
62038+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
62039+ stat->atime = inode->i_ctime;
62040+ stat->mtime = inode->i_ctime;
62041+ } else {
62042+ stat->atime = inode->i_atime;
62043+ stat->mtime = inode->i_mtime;
62044+ }
62045 stat->ctime = inode->i_ctime;
62046 stat->blksize = (1 << inode->i_blkbits);
62047 stat->blocks = inode->i_blocks;
62048@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
62049 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
62050 {
62051 struct inode *inode = path->dentry->d_inode;
62052+ int retval;
62053
62054- if (inode->i_op->getattr)
62055- return inode->i_op->getattr(path->mnt, path->dentry, stat);
62056+ if (inode->i_op->getattr) {
62057+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
62058+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
62059+ stat->atime = stat->ctime;
62060+ stat->mtime = stat->ctime;
62061+ }
62062+ return retval;
62063+ }
62064
62065 generic_fillattr(inode, stat);
62066 return 0;
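
The fs/stat.c change is observable from userspace: on a kernel with GRKERNSEC_DEVICE_SIDECHANNEL enabled, a process without CAP_MKNOD that stats a device node gets the create time echoed back in the access and modify fields. A small runnable check (assumes /dev/ptmx exists; on an unpatched kernel the values will usually be 0):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
    struct stat st;

    if (stat("/dev/ptmx", &st) != 0)
        return 1;

    /* With the sidechannel defense active (and no CAP_MKNOD), the
     * kernel substitutes ctime for both fields, so both print 1. */
    printf("atime==ctime: %d\n", st.st_atime == st.st_ctime);
    printf("mtime==ctime: %d\n", st.st_mtime == st.st_ctime);
    return 0;
}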
62067diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
62068index c590cab..6dfd6fc 100644
62069--- a/fs/sysfs/bin.c
62070+++ b/fs/sysfs/bin.c
62071@@ -234,13 +234,13 @@ static int bin_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
62072 return ret;
62073 }
62074
62075-static int bin_access(struct vm_area_struct *vma, unsigned long addr,
62076- void *buf, int len, int write)
62077+static ssize_t bin_access(struct vm_area_struct *vma, unsigned long addr,
62078+ void *buf, size_t len, int write)
62079 {
62080 struct file *file = vma->vm_file;
62081 struct bin_buffer *bb = file->private_data;
62082 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
62083- int ret;
62084+ ssize_t ret;
62085
62086 if (!bb->vm_ops)
62087 return -EINVAL;
62088diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
62089index 4d83ced..049dc45 100644
62090--- a/fs/sysfs/dir.c
62091+++ b/fs/sysfs/dir.c
62092@@ -40,7 +40,7 @@ static DEFINE_IDA(sysfs_ino_ida);
62093 *
62094 * Returns 31 bit hash of ns + name (so it fits in an off_t )
62095 */
62096-static unsigned int sysfs_name_hash(const void *ns, const char *name)
62097+static unsigned int sysfs_name_hash(const void *ns, const unsigned char *name)
62098 {
62099 unsigned long hash = init_name_hash();
62100 unsigned int len = strlen(name);
62101@@ -675,6 +675,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
62102 struct sysfs_dirent *sd;
62103 int rc;
62104
62105+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
62106+ const char *parent_name = parent_sd->s_name;
62107+
62108+ mode = S_IFDIR | S_IRWXU;
62109+
62110+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
62111+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
62112+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
62113+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
62114+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
62115+#endif
62116+
62117 /* allocate */
62118 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
62119 if (!sd)
62120diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
62121index 15ef5eb..e474372 100644
62122--- a/fs/sysfs/file.c
62123+++ b/fs/sysfs/file.c
62124@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
62125
62126 struct sysfs_open_dirent {
62127 atomic_t refcnt;
62128- atomic_t event;
62129+ atomic_unchecked_t event;
62130 wait_queue_head_t poll;
62131 struct list_head buffers; /* goes through sysfs_buffer.list */
62132 };
62133@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry *dentry, struct sysfs_buffer *buffer)
62134 if (!sysfs_get_active(attr_sd))
62135 return -ENODEV;
62136
62137- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
62138+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
62139 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
62140
62141 sysfs_put_active(attr_sd);
62142@@ -284,7 +284,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
62143 return -ENOMEM;
62144
62145 atomic_set(&new_od->refcnt, 0);
62146- atomic_set(&new_od->event, 1);
62147+ atomic_set_unchecked(&new_od->event, 1);
62148 init_waitqueue_head(&new_od->poll);
62149 INIT_LIST_HEAD(&new_od->buffers);
62150 goto retry;
62151@@ -430,7 +430,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
62152
62153 sysfs_put_active(attr_sd);
62154
62155- if (buffer->event != atomic_read(&od->event))
62156+ if (buffer->event != atomic_read_unchecked(&od->event))
62157 goto trigger;
62158
62159 return DEFAULT_POLLMASK;
62160@@ -450,7 +450,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
62161 if (!WARN_ON(sysfs_type(sd) != SYSFS_KOBJ_ATTR)) {
62162 od = sd->s_attr.open;
62163 if (od) {
62164- atomic_inc(&od->event);
62165+ atomic_inc_unchecked(&od->event);
62166 wake_up_interruptible(&od->poll);
62167 }
62168 }
62169diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
62170index 2dd4507..62a215a 100644
62171--- a/fs/sysfs/symlink.c
62172+++ b/fs/sysfs/symlink.c
62173@@ -308,7 +308,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
62174 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd,
62175 void *cookie)
62176 {
62177- char *page = nd_get_link(nd);
62178+ const char *page = nd_get_link(nd);
62179 if (!IS_ERR(page))
62180 free_page((unsigned long)page);
62181 }
62182diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
62183index 69d4889..a810bd4 100644
62184--- a/fs/sysv/sysv.h
62185+++ b/fs/sysv/sysv.h
62186@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
62187 #endif
62188 }
62189
62190-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
62191+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
62192 {
62193 if (sbi->s_bytesex == BYTESEX_PDP)
62194 return PDP_swab((__force __u32)n);
62195diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
62196index e18b988..f1d4ad0f 100644
62197--- a/fs/ubifs/io.c
62198+++ b/fs/ubifs/io.c
62199@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
62200 return err;
62201 }
62202
62203-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
62204+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
62205 {
62206 int err;
62207
62208diff --git a/fs/udf/misc.c b/fs/udf/misc.c
62209index c175b4d..8f36a16 100644
62210--- a/fs/udf/misc.c
62211+++ b/fs/udf/misc.c
62212@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
62213
62214 u8 udf_tag_checksum(const struct tag *t)
62215 {
62216- u8 *data = (u8 *)t;
62217+ const u8 *data = (const u8 *)t;
62218 u8 checksum = 0;
62219 int i;
62220 for (i = 0; i < sizeof(struct tag); ++i)
62221diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
62222index 8d974c4..b82f6ec 100644
62223--- a/fs/ufs/swab.h
62224+++ b/fs/ufs/swab.h
62225@@ -22,7 +22,7 @@ enum {
62226 BYTESEX_BE
62227 };
62228
62229-static inline u64
62230+static inline u64 __intentional_overflow(-1)
62231 fs64_to_cpu(struct super_block *sbp, __fs64 n)
62232 {
62233 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
62234@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
62235 return (__force __fs64)cpu_to_be64(n);
62236 }
62237
62238-static inline u32
62239+static inline u32 __intentional_overflow(-1)
62240 fs32_to_cpu(struct super_block *sbp, __fs32 n)
62241 {
62242 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
62243diff --git a/fs/utimes.c b/fs/utimes.c
62244index f4fb7ec..3fe03c0 100644
62245--- a/fs/utimes.c
62246+++ b/fs/utimes.c
62247@@ -1,6 +1,7 @@
62248 #include <linux/compiler.h>
62249 #include <linux/file.h>
62250 #include <linux/fs.h>
62251+#include <linux/security.h>
62252 #include <linux/linkage.h>
62253 #include <linux/mount.h>
62254 #include <linux/namei.h>
62255@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
62256 goto mnt_drop_write_and_out;
62257 }
62258 }
62259+
62260+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
62261+ error = -EACCES;
62262+ goto mnt_drop_write_and_out;
62263+ }
62264+
62265 mutex_lock(&inode->i_mutex);
62266 error = notify_change(path->dentry, &newattrs);
62267 mutex_unlock(&inode->i_mutex);
62268diff --git a/fs/xattr.c b/fs/xattr.c
62269index 3377dff..f394815 100644
62270--- a/fs/xattr.c
62271+++ b/fs/xattr.c
62272@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
62273 return rc;
62274 }
62275
62276+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
62277+ssize_t
62278+pax_getxattr(struct dentry *dentry, void *value, size_t size)
62279+{
62280+ struct inode *inode = dentry->d_inode;
62281+ ssize_t error;
62282+
62283+ error = inode_permission(inode, MAY_EXEC);
62284+ if (error)
62285+ return error;
62286+
62287+ if (inode->i_op->getxattr)
62288+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
62289+ else
62290+ error = -EOPNOTSUPP;
62291+
62292+ return error;
62293+}
62294+EXPORT_SYMBOL(pax_getxattr);
62295+#endif
62296+
62297 ssize_t
62298 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
62299 {
62300@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
62301 * Extended attribute SET operations
62302 */
62303 static long
62304-setxattr(struct dentry *d, const char __user *name, const void __user *value,
62305+setxattr(struct path *path, const char __user *name, const void __user *value,
62306 size_t size, int flags)
62307 {
62308 int error;
62309@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
62310 posix_acl_fix_xattr_from_user(kvalue, size);
62311 }
62312
62313- error = vfs_setxattr(d, kname, kvalue, size, flags);
62314+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
62315+ error = -EACCES;
62316+ goto out;
62317+ }
62318+
62319+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
62320 out:
62321 if (vvalue)
62322 vfree(vvalue);
62323@@ -377,7 +403,7 @@ retry:
62324 return error;
62325 error = mnt_want_write(path.mnt);
62326 if (!error) {
62327- error = setxattr(path.dentry, name, value, size, flags);
62328+ error = setxattr(&path, name, value, size, flags);
62329 mnt_drop_write(path.mnt);
62330 }
62331 path_put(&path);
62332@@ -401,7 +427,7 @@ retry:
62333 return error;
62334 error = mnt_want_write(path.mnt);
62335 if (!error) {
62336- error = setxattr(path.dentry, name, value, size, flags);
62337+ error = setxattr(&path, name, value, size, flags);
62338 mnt_drop_write(path.mnt);
62339 }
62340 path_put(&path);
62341@@ -416,16 +442,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
62342 const void __user *,value, size_t, size, int, flags)
62343 {
62344 struct fd f = fdget(fd);
62345- struct dentry *dentry;
62346 int error = -EBADF;
62347
62348 if (!f.file)
62349 return error;
62350- dentry = f.file->f_path.dentry;
62351- audit_inode(NULL, dentry, 0);
62352+ audit_inode(NULL, f.file->f_path.dentry, 0);
62353 error = mnt_want_write_file(f.file);
62354 if (!error) {
62355- error = setxattr(dentry, name, value, size, flags);
62356+ error = setxattr(&f.file->f_path, name, value, size, flags);
62357 mnt_drop_write_file(f.file);
62358 }
62359 fdput(f);
62360@@ -626,7 +650,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
62361 * Extended attribute REMOVE operations
62362 */
62363 static long
62364-removexattr(struct dentry *d, const char __user *name)
62365+removexattr(struct path *path, const char __user *name)
62366 {
62367 int error;
62368 char kname[XATTR_NAME_MAX + 1];
62369@@ -637,7 +661,10 @@ removexattr(struct dentry *d, const char __user *name)
62370 if (error < 0)
62371 return error;
62372
62373- return vfs_removexattr(d, kname);
62374+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
62375+ return -EACCES;
62376+
62377+ return vfs_removexattr(path->dentry, kname);
62378 }
62379
62380 SYSCALL_DEFINE2(removexattr, const char __user *, pathname,
62381@@ -652,7 +679,7 @@ retry:
62382 return error;
62383 error = mnt_want_write(path.mnt);
62384 if (!error) {
62385- error = removexattr(path.dentry, name);
62386+ error = removexattr(&path, name);
62387 mnt_drop_write(path.mnt);
62388 }
62389 path_put(&path);
62390@@ -675,7 +702,7 @@ retry:
62391 return error;
62392 error = mnt_want_write(path.mnt);
62393 if (!error) {
62394- error = removexattr(path.dentry, name);
62395+ error = removexattr(&path, name);
62396 mnt_drop_write(path.mnt);
62397 }
62398 path_put(&path);
62399@@ -689,16 +716,16 @@ retry:
62400 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
62401 {
62402 struct fd f = fdget(fd);
62403- struct dentry *dentry;
62404+ struct path *path;
62405 int error = -EBADF;
62406
62407 if (!f.file)
62408 return error;
62409- dentry = f.file->f_path.dentry;
62410- audit_inode(NULL, dentry, 0);
62411+ path = &f.file->f_path;
62412+ audit_inode(NULL, path->dentry, 0);
62413 error = mnt_want_write_file(f.file);
62414 if (!error) {
62415- error = removexattr(dentry, name);
62416+ error = removexattr(path, name);
62417 mnt_drop_write_file(f.file);
62418 }
62419 fdput(f);
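
The setxattr()/removexattr() signature change above (dentry to path) exists so the grsecurity hooks can see the mount an object is reached through, not just the object itself. A toy illustration of why the wider type matters; every type and the policy predicate below are invented for the example:

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-ins for the kernel types; only their shape matters. */
struct dentry { const char *name; };
struct vfsmount { bool deny_xattr_policy; };
struct path { struct dentry *dentry; struct vfsmount *mnt; };

/* A dentry-only hook could not consult p->mnt at all. */
static bool acl_allows_setxattr(const struct path *p)
{
    return !p->mnt->deny_xattr_policy;
}

int main(void)
{
    struct dentry d = { "file" };
    struct vfsmount m = { .deny_xattr_policy = true };
    struct path p = { &d, &m };

    printf("setxattr on %s: %s\n", d.name,
           acl_allows_setxattr(&p) ? "allowed" : "-EACCES");
    return 0;
}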
62420diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
62421index 9fbea87..6b19972 100644
62422--- a/fs/xattr_acl.c
62423+++ b/fs/xattr_acl.c
62424@@ -76,8 +76,8 @@ struct posix_acl *
62425 posix_acl_from_xattr(struct user_namespace *user_ns,
62426 const void *value, size_t size)
62427 {
62428- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
62429- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
62430+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
62431+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
62432 int count;
62433 struct posix_acl *acl;
62434 struct posix_acl_entry *acl_e;
62435diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
62436index f47e65c..e7125d9 100644
62437--- a/fs/xfs/xfs_bmap.c
62438+++ b/fs/xfs/xfs_bmap.c
62439@@ -586,7 +586,7 @@ xfs_bmap_validate_ret(
62440
62441 #else
62442 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
62443-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
62444+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
62445 #endif /* DEBUG */
62446
62447 /*
62448diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
62449index 8f84153..7ce60d0 100644
62450--- a/fs/xfs/xfs_dir2_readdir.c
62451+++ b/fs/xfs/xfs_dir2_readdir.c
62452@@ -160,7 +160,12 @@ xfs_dir2_sf_getdents(
62453 ino = xfs_dir3_sfe_get_ino(mp, sfp, sfep);
62454 filetype = xfs_dir3_sfe_get_ftype(mp, sfp, sfep);
62455 ctx->pos = off & 0x7fffffff;
62456- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
62457+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
62458+ char name[sfep->namelen];
62459+ memcpy(name, sfep->name, sfep->namelen);
62460+ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(mp, filetype)))
62461+ return 0;
62462+ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
62463 xfs_dir3_get_dtype(mp, filetype)))
62464 return 0;
62465 sfep = xfs_dir3_sf_nextentry(mp, sfp, sfep);
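
The xfs_dir2_readdir.c hunk copies a short-form name into a stack buffer before dir_emit() whenever the name points into the inode's inline data area, presumably so the copy-to-user path never reads straight out of that object. A sketch of the same copy-before-emit pattern (the VLA mirrors the hunk; the shared buffer is simulated):

#include <stdio.h>
#include <string.h>

/* Copy a name that lives in shared or reusable storage into a private
 * stack buffer before handing it to a consumer, as the hunk does
 * before dir_emit(). Uses a C99 VLA, like the patch. */
static void emit_name(const unsigned char *name, int namelen)
{
    char copy[namelen];

    memcpy(copy, name, namelen);
    printf("%.*s\n", namelen, copy);
}

int main(void)
{
    const unsigned char inline_data[] = "inline-entry";

    emit_name(inline_data, (int)sizeof(inline_data) - 1);
    return 0;
}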
62466diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
62467index 8c8ef24..689f742 100644
62468--- a/fs/xfs/xfs_ioctl.c
62469+++ b/fs/xfs/xfs_ioctl.c
62470@@ -127,7 +127,7 @@ xfs_find_handle(
62471 }
62472
62473 error = -EFAULT;
62474- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
62475+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
62476 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
62477 goto out_put;
62478
62479diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
62480index 2b8952d..a60c6be 100644
62481--- a/fs/xfs/xfs_iops.c
62482+++ b/fs/xfs/xfs_iops.c
62483@@ -401,7 +401,7 @@ xfs_vn_put_link(
62484 struct nameidata *nd,
62485 void *p)
62486 {
62487- char *s = nd_get_link(nd);
62488+ const char *s = nd_get_link(nd);
62489
62490 if (!IS_ERR(s))
62491 kfree(s);
62492diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
62493new file mode 100644
62494index 0000000..a78d810
62495--- /dev/null
62496+++ b/grsecurity/Kconfig
62497@@ -0,0 +1,1107 @@
62498+#
62499+# grsecurity configuration
62500+#
62501+menu "Memory Protections"
62502+depends on GRKERNSEC
62503+
62504+config GRKERNSEC_KMEM
62505+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
62506+ default y if GRKERNSEC_CONFIG_AUTO
62507+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
62508+ help
62509+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
62510+ be written to or read from to modify or leak the contents of the running
62511+ kernel. /dev/port will also not be allowed to be opened, and support
62512+ for /dev/cpu/*/msr and kexec will be removed. If you have module
62513+ support disabled, enabling this will close up six ways that are
62514+ currently used to insert malicious code into the running kernel.
62515+
62516+ Even with this feature enabled, we still highly recommend that
62517+ you use the RBAC system, as it is still possible for an attacker to
62518+ modify the running kernel through other more obscure methods.
62519+
62520+ Enabling this feature will prevent the "cpupower" and "powertop" tools
62521+ from working.
62522+
62523+ It is highly recommended that you say Y here if you meet all the
62524+ conditions above.
62525+
62526+config GRKERNSEC_VM86
62527+ bool "Restrict VM86 mode"
62528+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
62529+ depends on X86_32
62530+
62531+ help
62532+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
62533+ make use of a special execution mode on 32bit x86 processors called
62534+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
62535+ video cards and will still work with this option enabled. The purpose
62536+ of the option is to prevent exploitation of emulation errors in
62537+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
62538+ Nearly all users should be able to enable this option.
62539+
62540+config GRKERNSEC_IO
62541+ bool "Disable privileged I/O"
62542+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
62543+ depends on X86
62544+ select RTC_CLASS
62545+ select RTC_INTF_DEV
62546+ select RTC_DRV_CMOS
62547+
62548+ help
62549+ If you say Y here, all ioperm and iopl calls will return an error.
62550+ Ioperm and iopl can be used to modify the running kernel.
62551+ Unfortunately, some programs need this access to operate properly,
62552+ the most notable of which are XFree86 and hwclock. hwclock can be
62553+ remedied by having RTC support in the kernel, so real-time
62554+ clock support is enabled if this option is enabled, to ensure
62555+ that hwclock operates correctly.
62556+
62557+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
62558+ you may not be able to boot into a graphical environment with this
62559+ option enabled. In this case, you should use the RBAC system instead.
62560+
62561+config GRKERNSEC_JIT_HARDEN
62562+ bool "Harden BPF JIT against spray attacks"
62563+ default y if GRKERNSEC_CONFIG_AUTO
62564+ depends on BPF_JIT
62565+ help
62566+ If you say Y here, the native code generated by the kernel's Berkeley
62567+ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
62568+ attacks that attempt to fit attacker-beneficial instructions in
62569+ 32bit immediate fields of JIT-generated native instructions. The
62570+ attacker will generally aim to cause an unintended instruction sequence
62571+ of JIT-generated native code to execute by jumping into the middle of
62572+ a generated instruction. This feature effectively randomizes the 32bit
62573+ immediate constants present in the generated code to thwart such attacks.
62574+
62575+ If you're using KERNEXEC, it's recommended that you enable this option
62576+ to supplement the hardening of the kernel.
62577+
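
The help text describes constant blinding: the attacker-chosen 32-bit immediate never appears verbatim in generated code, because a per-emission random key is XORed in at emit time and XORed back out at run time. A runnable arithmetic sketch (not the kernel's actual emitter):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    uint32_t imm = 0x0badc0de;         /* attacker-chosen constant   */
    uint32_t key = (uint32_t)random(); /* fresh key per emission     */
    uint32_t in_code = imm ^ key;      /* the only bytes that land   */
                                       /* in executable memory       */
    uint32_t at_runtime = in_code ^ key; /* second insn restores imm */

    printf("in_code=%08x restored=%08x\n", in_code, at_runtime);
    return at_runtime == imm ? 0 : 1;
}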
62578+config GRKERNSEC_PERF_HARDEN
62579+ bool "Disable unprivileged PERF_EVENTS usage by default"
62580+ default y if GRKERNSEC_CONFIG_AUTO
62581+ depends on PERF_EVENTS
62582+ help
62583+ If you say Y here, the range of acceptable values for the
62584+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
62585+ default to a new value: 3. When the sysctl is set to this value, no
62586+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
62587+
62588+ Though PERF_EVENTS can be used legitimately for performance monitoring
62589+ and low-level application profiling, it is forced on regardless of
62590+ configuration, has been at fault for several vulnerabilities, and
62591+ creates new opportunities for side channels and other information leaks.
62592+
62593+ This feature puts PERF_EVENTS into a secure default state and permits
62594+ the administrator to change out of it temporarily if unprivileged
62595+ application profiling is needed.
62596+
62597+config GRKERNSEC_RAND_THREADSTACK
62598+ bool "Insert random gaps between thread stacks"
62599+ default y if GRKERNSEC_CONFIG_AUTO
62600+ depends on PAX_RANDMMAP && !PPC
62601+ help
62602+ If you say Y here, a random-sized gap will be enforced between allocated
62603+ thread stacks. Glibc's NPTL and other threading libraries that
62604+ pass MAP_STACK to the kernel for thread stack allocation are supported.
62605+ The implementation currently provides 8 bits of entropy for the gap.
62606+
62607+ Many distributions do not compile threaded remote services with the
62608+ -fstack-check argument to GCC, causing the variable-sized stack-based
62609+ allocator, alloca(), to not probe the stack on allocation. This
62610+ permits an unbounded alloca() to skip over any guard page and potentially
62611+ modify another thread's stack reliably. An enforced random gap
62612+ reduces the reliability of such an attack and increases the chance
62613+ that such a read/write to another thread's stack instead lands in
62614+ an unmapped area, causing a crash and triggering grsecurity's
62615+ anti-bruteforcing logic.
62616+
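
With 8 bits of entropy at page granularity, the enforced gap is 0 to 255 pages, so an overflowing alloca() has to guess how far away the neighbouring stack is. A runnable sketch of the gap computation, assuming 4 KiB pages:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12 /* assumed 4 KiB pages */

int main(void)
{
    /* 8 bits of entropy, shifted to page granularity, as the help
     * text describes: a 0..255-page gap before a MAP_STACK area. */
    unsigned long gap = (unsigned long)(random() & 0xff) << PAGE_SHIFT;

    printf("thread-stack gap: %lu pages (%lu KiB)\n",
           gap >> PAGE_SHIFT, gap >> 10);
    return 0;
}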
62617+config GRKERNSEC_PROC_MEMMAP
62618+ bool "Harden ASLR against information leaks and entropy reduction"
62619+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
62620+ depends on PAX_NOEXEC || PAX_ASLR
62621+ help
62622+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
62623+ give no information about the addresses of its mappings if
62624+ PaX features that rely on random addresses are enabled on the task.
62625+ In addition to sanitizing this information and disabling other
62626+ dangerous sources of information, this option causes reads of sensitive
62627+ /proc/<pid> entries to be denied when the file descriptor was opened
62628+ in a different task than the one performing the read. Such attempts are logged.
62629+ This option also limits argv/env strings for suid/sgid binaries
62630+ to 512KB to prevent a complete exhaustion of the stack entropy provided
62631+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
62632+ binaries to prevent alternative mmap layouts from being abused.
62633+
62634+ If you use PaX it is essential that you say Y here as it closes up
62635+ several holes that make full ASLR useless locally.
62636+
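
The seq_file hunk earlier in this patch (p->exec_id = current->exec_id) is one piece of this option: a /proc handle remembers which execution image opened it, and reads from a different image can be refused. A userspace analogue of that generation check (all names invented):

#include <stdbool.h>
#include <stdio.h>

struct handle { unsigned long exec_id; };

static unsigned long current_exec_id; /* bumped on every (simulated) execve */

static bool read_allowed(const struct handle *h)
{
    return h->exec_id == current_exec_id;
}

int main(void)
{
    struct handle h = { .exec_id = current_exec_id };

    printf("before exec: %s\n", read_allowed(&h) ? "allowed" : "denied");
    current_exec_id++; /* the task exec'd, e.g. a setuid binary */
    printf("after exec:  %s\n", read_allowed(&h) ? "allowed" : "denied");
    return 0;
}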
62637+config GRKERNSEC_BRUTE
62638+ bool "Deter exploit bruteforcing"
62639+ default y if GRKERNSEC_CONFIG_AUTO
62640+ help
62641+ If you say Y here, attempts to bruteforce exploits against forking
62642+ daemons such as apache or sshd, as well as against suid/sgid binaries
62643+ will be deterred. When a child of a forking daemon is killed by PaX
62644+ or crashes due to an illegal instruction or other suspicious signal,
62645+ the parent process will be delayed 30 seconds upon every subsequent
62646+ fork until the administrator is able to assess the situation and
62647+ restart the daemon.
62648+ In the suid/sgid case, the attempt is logged, the user has all their
62649+ existing instances of the suid/sgid binary terminated and will
62650+ be unable to execute any suid/sgid binaries for 15 minutes.
62651+
62652+ It is recommended that you also enable signal logging in the auditing
62653+ section so that logs are generated when a process triggers a suspicious
62654+ signal.
62655+ If the sysctl option is enabled, a sysctl option with name
62656+ "deter_bruteforce" is created.
62657+
62658+
62659+config GRKERNSEC_MODHARDEN
62660+ bool "Harden module auto-loading"
62661+ default y if GRKERNSEC_CONFIG_AUTO
62662+ depends on MODULES
62663+ help
62664+ If you say Y here, module auto-loading in response to use of some
62665+ feature implemented by an unloaded module will be restricted to
62666+ root users. Enabling this option helps defend against attacks
62667+ by unprivileged users who abuse the auto-loading behavior to
62668+ cause a vulnerable module to load that is then exploited.
62669+
62670+ If this option prevents a legitimate use of auto-loading for a
62671+ non-root user, the administrator can execute modprobe manually
62672+ with the exact name of the module mentioned in the alert log.
62673+ Alternatively, the administrator can add the module to the list
62674+ of modules loaded at boot by modifying init scripts.
62675+
62676+ Modification of init scripts will most likely be needed on
62677+ Ubuntu servers with encrypted home directory support enabled,
62678+ as the first non-root user logging in will cause the ecb(aes),
62679+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
62680+
62681+config GRKERNSEC_HIDESYM
62682+ bool "Hide kernel symbols"
62683+ default y if GRKERNSEC_CONFIG_AUTO
62684+ select PAX_USERCOPY_SLABS
62685+ help
62686+ If you say Y here, getting information on loaded modules, and
62687+ displaying all kernel symbols through a syscall will be restricted
62688+ to users with CAP_SYS_MODULE. For software compatibility reasons,
62689+ /proc/kallsyms will be restricted to the root user. The RBAC
62690+ system can hide that entry even from root.
62691+
62692+ This option also prevents leaking of kernel addresses through
62693+ several /proc entries.
62694+
62695+ Note that this option is only effective provided the following
62696+ conditions are met:
62697+ 1) The kernel using grsecurity is not precompiled by some distribution
62698+ 2) You have also enabled GRKERNSEC_DMESG
62699+ 3) You are using the RBAC system and hiding other files such as your
62700+ kernel image and System.map. Alternatively, enabling this option
62701+ causes the permissions on /boot, /lib/modules, and the kernel
62702+ source directory to change at compile time to prevent
62703+ reading by non-root users.
62704+ If the above conditions are met, this option will aid in providing a
62705+ useful protection against local kernel exploitation of overflows
62706+ and arbitrary read/write vulnerabilities.
62707+
62708+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
62709+ in addition to this feature.
62710+
62711+config GRKERNSEC_KERN_LOCKOUT
62712+ bool "Active kernel exploit response"
62713+ default y if GRKERNSEC_CONFIG_AUTO
62714+ depends on X86 || ARM || PPC || SPARC
62715+ help
62716+ If you say Y here, when a PaX alert is triggered due to suspicious
62717+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
62718+ or an OOPS occurs due to bad memory accesses, instead of just
62719+ terminating the offending process (and potentially allowing
62720+ a subsequent exploit from the same user), we will take one of two
62721+ actions:
62722+ If the user was root, we will panic the system
62723+ If the user was non-root, we will log the attempt, terminate
62724+ all processes owned by the user, then prevent them from creating
62725+ any new processes until the system is restarted
62726+ This deters repeated kernel exploitation/bruteforcing attempts
62727+ and is useful for later forensics.
62728+
62729+config GRKERNSEC_OLD_ARM_USERLAND
62730+ bool "Old ARM userland compatibility"
62731+ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
62732+ help
62733+ If you say Y here, stubs of executable code to perform such operations
62734+ as "compare-exchange" will be placed at fixed locations in the ARM vector
62735+ table. This is unfortunately needed for old ARM userland meant to run
62736+ across a wide range of processors. Without this option enabled,
62737+ the get_tls and data memory barrier stubs will be emulated by the kernel,
62738+ which is enough for Linaro userlands or other userlands designed for v6
62739+ and newer ARM CPUs. It's recommended that you try without this option enabled
62740+ first, and only enable it if your userland does not boot (it will likely fail
62741+ at init time).
62742+
62743+endmenu
62744+menu "Role Based Access Control Options"
62745+depends on GRKERNSEC
62746+
62747+config GRKERNSEC_RBAC_DEBUG
62748+ bool
62749+
62750+config GRKERNSEC_NO_RBAC
62751+ bool "Disable RBAC system"
62752+ help
62753+ If you say Y here, the /dev/grsec device will be removed from the kernel,
62754+ preventing the RBAC system from being enabled. You should only say Y
62755+ here if you have no intention of using the RBAC system, so as to prevent
62756+ an attacker with root access from misusing the RBAC system to hide files
62757+ and processes when loadable module support and /dev/[k]mem have been
62758+ locked down.
62759+
62760+config GRKERNSEC_ACL_HIDEKERN
62761+ bool "Hide kernel processes"
62762+ help
62763+ If you say Y here, all kernel threads will be hidden to all
62764+ processes but those whose subject has the "view hidden processes"
62765+ flag.
62766+
62767+config GRKERNSEC_ACL_MAXTRIES
62768+ int "Maximum tries before password lockout"
62769+ default 3
62770+ help
62771+ This option enforces the maximum number of times a user can attempt
62772+ to authorize themselves with the grsecurity RBAC system before being
62773+ denied the ability to attempt authorization again for a specified time.
62774+ The lower the number, the harder it will be to brute-force a password.
62775+
62776+config GRKERNSEC_ACL_TIMEOUT
62777+ int "Time to wait after max password tries, in seconds"
62778+ default 30
62779+ help
62780+ This option specifies the time the user must wait after attempting to
62781+ authorize to the RBAC system with the maximum number of invalid
62782+ passwords. The higher the number, the harder it will be to brute-force
62783+ a password.
62784+
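
Together these two options form a counter-plus-cooldown: reaching the maximum number of failures arms a lockout during which every attempt is refused. A runnable sketch of that policy (field names illustrative):

#include <stdio.h>
#include <time.h>

struct lockout {
    int fails, max_tries, timeout_s;
    time_t locked_until;
};

/* Returns 0 on success, 1 on a counted failure, -1 while locked out. */
static int attempt(struct lockout *l, int ok, time_t now)
{
    if (now < l->locked_until)
        return -1;
    if (ok) {
        l->fails = 0;
        return 0;
    }
    if (++l->fails >= l->max_tries) {
        l->fails = 0;
        l->locked_until = now + l->timeout_s;
    }
    return 1;
}

int main(void)
{
    struct lockout l = { .max_tries = 3, .timeout_s = 30 };
    time_t now = time(NULL);

    for (int i = 0; i < 4; i++)
        printf("bad password %d -> %d\n", i + 1, attempt(&l, 0, now));
    return 0;
}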
62785+endmenu
62786+menu "Filesystem Protections"
62787+depends on GRKERNSEC
62788+
62789+config GRKERNSEC_PROC
62790+ bool "Proc restrictions"
62791+ default y if GRKERNSEC_CONFIG_AUTO
62792+ help
62793+ If you say Y here, the permissions of the /proc filesystem
62794+ will be altered to enhance system security and privacy. You MUST
62795+ choose either a user only restriction or a user and group restriction.
62796+ Depending upon the option you choose, you can either restrict users to
62797+ see only the processes they themselves run, or choose a group that can
62798+ view all processes and files normally restricted to root if you choose
62799+ the "restrict to user only" option. NOTE: If you're running identd or
62800+ ntpd as a non-root user, you will have to run it as the group you
62801+ specify here.
62802+
62803+config GRKERNSEC_PROC_USER
62804+ bool "Restrict /proc to user only"
62805+ depends on GRKERNSEC_PROC
62806+ help
62807+ If you say Y here, non-root users will only be able to view their own
62808+ processes, and will be restricted from viewing network-related
62809+ information and kernel symbol and module information.
62810+
62811+config GRKERNSEC_PROC_USERGROUP
62812+ bool "Allow special group"
62813+ default y if GRKERNSEC_CONFIG_AUTO
62814+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
62815+ help
62816+ If you say Y here, you will be able to select a group that will be
62817+ able to view all processes and network-related information. If you've
62818+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
62819+ remain hidden. This option is useful if you want to run identd as
62820+ a non-root user. The group you select may also be chosen at boot time
62821+ via "grsec_proc_gid=" on the kernel commandline.
62822+
62823+config GRKERNSEC_PROC_GID
62824+ int "GID for special group"
62825+ depends on GRKERNSEC_PROC_USERGROUP
62826+ default 1001
62827+
62828+config GRKERNSEC_PROC_ADD
62829+ bool "Additional restrictions"
62830+ default y if GRKERNSEC_CONFIG_AUTO
62831+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
62832+ help
62833+ If you say Y here, additional restrictions will be placed on
62834+ /proc that keep normal users from viewing device information and
62835+ slabinfo information that could be useful for exploits.
62836+
62837+config GRKERNSEC_LINK
62838+ bool "Linking restrictions"
62839+ default y if GRKERNSEC_CONFIG_AUTO
62840+ help
62841+ If you say Y here, /tmp race exploits will be prevented, since users
62842+ will no longer be able to follow symlinks owned by other users in
62843+ world-writable +t directories (e.g. /tmp), unless the owner of the
62844+ symlink is the owner of the directory. Users will also not be
62845+ able to hardlink to files they do not own. If the sysctl option is
62846+ enabled, a sysctl option with name "linking_restrictions" is created.
62847+
62848+config GRKERNSEC_SYMLINKOWN
62849+ bool "Kernel-enforced SymlinksIfOwnerMatch"
62850+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
62851+ help
62852+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
62853+ that prevents it from being used as a security feature. As Apache
62854+ verifies the symlink by performing a stat() against the target of
62855+ the symlink before it is followed, an attacker can setup a symlink
62856+ to point to a same-owned file, then replace the symlink with one
62857+ that targets another user's file just after Apache "validates" the
62858+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
62859+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
62860+ will be in place for the group you specify. If the sysctl option
62861+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
62862+ created.
62863+
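
The in-kernel check closes the TOCTOU window by evaluating link and target ownership inside the path walk itself. The sketch below shows just the ownership predicate; note that the lstat()+stat() pair used here from userspace still leaves exactly the race the kernel feature eliminates:

#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>

/* The ownership predicate: follow only if link owner == target owner. */
static bool follow_allowed(const struct stat *lnk, const struct stat *tgt)
{
    return lnk->st_uid == tgt->st_uid;
}

int main(int argc, char **argv)
{
    struct stat lnk, tgt;

    if (argc < 2 || lstat(argv[1], &lnk) != 0 || stat(argv[1], &tgt) != 0)
        return 1;
    printf("%s: %s\n", argv[1],
           follow_allowed(&lnk, &tgt) ? "would follow" : "would deny");
    return 0;
}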
62864+config GRKERNSEC_SYMLINKOWN_GID
62865+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
62866+ depends on GRKERNSEC_SYMLINKOWN
62867+ default 1006
62868+ help
62869+ Setting this GID determines what group kernel-enforced
62870+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
62871+ is enabled, a sysctl option with name "symlinkown_gid" is created.
62872+
62873+config GRKERNSEC_FIFO
62874+ bool "FIFO restrictions"
62875+ default y if GRKERNSEC_CONFIG_AUTO
62876+ help
62877+ If you say Y here, users will not be able to write to FIFOs they don't
62878+ own in world-writable +t directories (e.g. /tmp), unless the owner of
62879+ the FIFO is the same owner of the directory it's held in. If the sysctl
62880+ option is enabled, a sysctl option with name "fifo_restrictions" is
62881+ created.
62882+
62883+config GRKERNSEC_SYSFS_RESTRICT
62884+ bool "Sysfs/debugfs restriction"
62885+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
62886+ depends on SYSFS
62887+ help
62888+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
62889+ any filesystem normally mounted under it (e.g. debugfs) will be
62890+ mostly accessible only by root. These filesystems generally provide access
62891+ to hardware and debug information that isn't appropriate for unprivileged
62892+ users of the system. Sysfs and debugfs have also become a large source
62893+ of new vulnerabilities, ranging from infoleaks to local compromise.
62894+ There has been very little oversight with an eye toward security involved
62895+ in adding new exporters of information to these filesystems, so their
62896+ use is discouraged.
62897+ For reasons of compatibility, a few directories have been whitelisted
62898+ for access by non-root users:
62899+ /sys/fs/selinux
62900+ /sys/fs/fuse
62901+ /sys/devices/system/cpu
62902+
62903+config GRKERNSEC_ROFS
62904+ bool "Runtime read-only mount protection"
62905+ depends on SYSCTL
62906+ help
62907+ If you say Y here, a sysctl option with name "romount_protect" will
62908+ be created. By setting this option to 1 at runtime, filesystems
62909+ will be protected in the following ways:
62910+ * No new writable mounts will be allowed
62911+ * Existing read-only mounts won't be able to be remounted read/write
62912+ * Write operations will be denied on all block devices
62913+ This option acts independently of grsec_lock: once it is set to 1,
62914+ it cannot be turned off. Therefore, please be mindful of the resulting
62915+ behavior if this option is enabled in an init script on a read-only
62916+ filesystem. This feature is mainly intended for secure embedded systems.
62917+
62918+config GRKERNSEC_DEVICE_SIDECHANNEL
62919+ bool "Eliminate stat/notify-based device sidechannels"
62920+ default y if GRKERNSEC_CONFIG_AUTO
62921+ help
62922+ If you say Y here, timing analyses on block or character
62923+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
62924+ will be thwarted for unprivileged users. If a process without
62925+ CAP_MKNOD stats such a device, the last access and last modify times
62926+ will match the device's create time. No access or modify events
62927+ will be triggered through inotify/dnotify/fanotify for such devices.
62928+ This feature will prevent attacks that may at a minimum
62929+ allow an attacker to determine the administrator's password length.
62930+
62931+config GRKERNSEC_CHROOT
62932+ bool "Chroot jail restrictions"
62933+ default y if GRKERNSEC_CONFIG_AUTO
62934+ help
62935+ If you say Y here, you will be able to choose several options that will
62936+ make breaking out of a chrooted jail much more difficult. If you
62937+ encounter no software incompatibilities with the following options, it
62938+ is recommended that you enable each one.
62939+
62940+config GRKERNSEC_CHROOT_MOUNT
62941+ bool "Deny mounts"
62942+ default y if GRKERNSEC_CONFIG_AUTO
62943+ depends on GRKERNSEC_CHROOT
62944+ help
62945+ If you say Y here, processes inside a chroot will not be able to
62946+ mount or remount filesystems. If the sysctl option is enabled, a
62947+ sysctl option with name "chroot_deny_mount" is created.
62948+
62949+config GRKERNSEC_CHROOT_DOUBLE
62950+ bool "Deny double-chroots"
62951+ default y if GRKERNSEC_CONFIG_AUTO
62952+ depends on GRKERNSEC_CHROOT
62953+ help
62954+ If you say Y here, processes inside a chroot will not be able to chroot
62955+ again outside the chroot. This is a widely used method of breaking
62956+ out of a chroot jail and should not be allowed. If the sysctl
62957+ option is enabled, a sysctl option with name
62958+ "chroot_deny_chroot" is created.
62959+
62960+config GRKERNSEC_CHROOT_PIVOT
62961+ bool "Deny pivot_root in chroot"
62962+ default y if GRKERNSEC_CONFIG_AUTO
62963+ depends on GRKERNSEC_CHROOT
62964+ help
62965+ If you say Y here, processes inside a chroot will not be able to use
62966+ a function called pivot_root() that was introduced in Linux 2.3.41. It
62967+ works similarly to chroot in that it changes the root filesystem. This
62968+ function could be misused in a chrooted process to attempt to break out
62969+ of the chroot, and therefore should not be allowed. If the sysctl
62970+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
62971+ created.
62972+
62973+config GRKERNSEC_CHROOT_CHDIR
62974+ bool "Enforce chdir(\"/\") on all chroots"
62975+ default y if GRKERNSEC_CONFIG_AUTO
62976+ depends on GRKERNSEC_CHROOT
62977+ help
62978+ If you say Y here, the current working directory of all newly-chrooted
62979+ applications will be set to the root directory of the chroot.
62980+ The man page on chroot(2) states:
62981+ Note that this call does not change the current working
62982+ directory, so that `.' can be outside the tree rooted at
62983+ `/'. In particular, the super-user can escape from a
62984+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
62985+
62986+ It is recommended that you say Y here, since it's not known to break
62987+ any software. If the sysctl option is enabled, a sysctl option with
62988+ name "chroot_enforce_chdir" is created.
62989+
62990+config GRKERNSEC_CHROOT_CHMOD
62991+ bool "Deny (f)chmod +s"
62992+ default y if GRKERNSEC_CONFIG_AUTO
62993+ depends on GRKERNSEC_CHROOT
62994+ help
62995+ If you say Y here, processes inside a chroot will not be able to chmod
62996+ or fchmod files to make them have suid or sgid bits. This protects
62997+ against another published method of breaking a chroot. If the sysctl
62998+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
62999+ created.
63000+
63001+config GRKERNSEC_CHROOT_FCHDIR
63002+ bool "Deny fchdir out of chroot"
63003+ default y if GRKERNSEC_CONFIG_AUTO
63004+ depends on GRKERNSEC_CHROOT
63005+ help
63006+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
63007+ to a file descriptor of the chrooting process that points to a directory
63008+ outside the filesystem will be stopped. If the sysctl option
63009+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
63010+
63011+config GRKERNSEC_CHROOT_MKNOD
63012+ bool "Deny mknod"
63013+ default y if GRKERNSEC_CONFIG_AUTO
63014+ depends on GRKERNSEC_CHROOT
63015+ help
63016+ If you say Y here, processes inside a chroot will not be allowed to
63017+ mknod. The problem with using mknod inside a chroot is that it
63018+ would allow an attacker to create a device entry that is the same
63019+ as one on the physical root of your system, which could be anything
63020+ from the console device to a device for your hard drive (which
63021+ they could then use to wipe the drive or steal data). It is recommended
63022+ that you say Y here, unless you run into software incompatibilities.
63023+ If the sysctl option is enabled, a sysctl option with name
63024+ "chroot_deny_mknod" is created.
63025+
63026+config GRKERNSEC_CHROOT_SHMAT
63027+ bool "Deny shmat() out of chroot"
63028+ default y if GRKERNSEC_CONFIG_AUTO
63029+ depends on GRKERNSEC_CHROOT
63030+ help
63031+ If you say Y here, processes inside a chroot will not be able to attach
63032+ to shared memory segments that were created outside of the chroot jail.
63033+ It is recommended that you say Y here. If the sysctl option is enabled,
63034+ a sysctl option with name "chroot_deny_shmat" is created.
63035+
63036+config GRKERNSEC_CHROOT_UNIX
63037+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
63038+ default y if GRKERNSEC_CONFIG_AUTO
63039+ depends on GRKERNSEC_CHROOT
63040+ help
63041+ If you say Y here, processes inside a chroot will not be able to
63042+ connect to abstract (meaning not belonging to a filesystem) Unix
63043+ domain sockets that were bound outside of a chroot. It is recommended
63044+ that you say Y here. If the sysctl option is enabled, a sysctl option
63045+ with name "chroot_deny_unix" is created.
63046+
63047+config GRKERNSEC_CHROOT_FINDTASK
63048+ bool "Protect outside processes"
63049+ default y if GRKERNSEC_CONFIG_AUTO
63050+ depends on GRKERNSEC_CHROOT
63051+ help
63052+ If you say Y here, processes inside a chroot will not be able to
63053+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
63054+ getsid, or view any process outside of the chroot. If the sysctl
63055+ option is enabled, a sysctl option with name "chroot_findtask" is
63056+ created.
63057+
63058+config GRKERNSEC_CHROOT_NICE
63059+ bool "Restrict priority changes"
63060+ default y if GRKERNSEC_CONFIG_AUTO
63061+ depends on GRKERNSEC_CHROOT
63062+ help
63063+ If you say Y here, processes inside a chroot will not be able to raise
63064+ the priority of processes in the chroot, or alter the priority of
63065+ processes outside the chroot. This provides more security than simply
63066+ removing CAP_SYS_NICE from the process' capability set. If the
63067+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
63068+ is created.
63069+
63070+config GRKERNSEC_CHROOT_SYSCTL
63071+ bool "Deny sysctl writes"
63072+ default y if GRKERNSEC_CONFIG_AUTO
63073+ depends on GRKERNSEC_CHROOT
63074+ help
63075+ If you say Y here, an attacker in a chroot will not be able to
63076+ write to sysctl entries, either by sysctl(2) or through a /proc
63077+ interface. It is strongly recommended that you say Y here. If the
63078+ sysctl option is enabled, a sysctl option with name
63079+ "chroot_deny_sysctl" is created.
63080+
63081+config GRKERNSEC_CHROOT_CAPS
63082+ bool "Capability restrictions"
63083+ default y if GRKERNSEC_CONFIG_AUTO
63084+ depends on GRKERNSEC_CHROOT
63085+ help
63086+ If you say Y here, the capabilities on all processes within a
63087+ chroot jail will be lowered to stop module insertion, raw i/o,
63088+ system and net admin tasks, rebooting the system, modifying immutable
63089+ files, modifying IPC owned by another, and changing the system time.
63090+ This is left an option because it can break some apps. Disable this
63091+ if your chrooted apps are having problems performing those kinds of
63092+ tasks. If the sysctl option is enabled, a sysctl option with
63093+ name "chroot_caps" is created.
63094+
63095+config GRKERNSEC_CHROOT_INITRD
63096+ bool "Exempt initrd tasks from restrictions"
63097+ default y if GRKERNSEC_CONFIG_AUTO
63098+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
63099+ help
63100+ If you say Y here, tasks started prior to init will be exempted from
63101+ grsecurity's chroot restrictions. This option is mainly meant to
63102+ address Plymouth unnecessarily performing privileged operations
63103+ in a chroot.
63104+
63105+endmenu
63106+menu "Kernel Auditing"
63107+depends on GRKERNSEC
63108+
63109+config GRKERNSEC_AUDIT_GROUP
63110+ bool "Single group for auditing"
63111+ help
63112+ If you say Y here, the exec and chdir logging features will only operate
63113+ on a group you specify. This option is recommended if you only want to
63114+ watch certain users instead of having a large amount of logs from the
63115+ entire system. If the sysctl option is enabled, a sysctl option with
63116+ name "audit_group" is created.
63117+
63118+config GRKERNSEC_AUDIT_GID
63119+ int "GID for auditing"
63120+ depends on GRKERNSEC_AUDIT_GROUP
63121+ default 1007
63122+
63123+config GRKERNSEC_EXECLOG
63124+ bool "Exec logging"
63125+ help
63126+ If you say Y here, all execve() calls will be logged (since the
63127+ other exec*() calls are frontends to execve(), all execution
63128+ will be logged). Useful for shell-servers that like to keep track
63129+ of their users. If the sysctl option is enabled, a sysctl option with
63130+ name "exec_logging" is created.
63131+ WARNING: When enabled, this option will produce a LOT of logs, especially
63132+ on an active system.
63133+
63134+config GRKERNSEC_RESLOG
63135+ bool "Resource logging"
63136+ default y if GRKERNSEC_CONFIG_AUTO
63137+ help
63138+ If you say Y here, all attempts to overstep resource limits will
63139+ be logged with the resource name, the requested size, and the current
63140+ limit. It is highly recommended that you say Y here. If the sysctl
63141+ option is enabled, a sysctl option with name "resource_logging" is
63142+ created. If the RBAC system is enabled, the sysctl value is ignored.
63143+
63144+config GRKERNSEC_CHROOT_EXECLOG
63145+ bool "Log execs within chroot"
63146+ help
63147+ If you say Y here, all executions inside a chroot jail will be logged
63148+ to syslog. This can cause a large amount of logs if certain
63149+ applications (e.g. djb's daemontools) are installed on the system, and
63150+ is therefore left as an option. If the sysctl option is enabled, a
63151+ sysctl option with name "chroot_execlog" is created.
63152+
63153+config GRKERNSEC_AUDIT_PTRACE
63154+ bool "Ptrace logging"
63155+ help
63156+ If you say Y here, all attempts to attach to a process via ptrace
63157+ will be logged. If the sysctl option is enabled, a sysctl option
63158+ with name "audit_ptrace" is created.
63159+
63160+config GRKERNSEC_AUDIT_CHDIR
63161+ bool "Chdir logging"
63162+ help
63163+ If you say Y here, all chdir() calls will be logged. If the sysctl
63164+ option is enabled, a sysctl option with name "audit_chdir" is created.
63165+
63166+config GRKERNSEC_AUDIT_MOUNT
63167+ bool "(Un)Mount logging"
63168+ help
63169+ If you say Y here, all mounts and unmounts will be logged. If the
63170+ sysctl option is enabled, a sysctl option with name "audit_mount" is
63171+ created.
63172+
63173+config GRKERNSEC_SIGNAL
63174+ bool "Signal logging"
63175+ default y if GRKERNSEC_CONFIG_AUTO
63176+ help
63177+ If you say Y here, certain important signals will be logged, such as
63178+ SIGSEGV, which will, as a result, inform you when an error in a program
63179+ occurred; in some cases this could indicate a possible exploit attempt.
63180+ If the sysctl option is enabled, a sysctl option with name
63181+ "signal_logging" is created.
63182+
63183+config GRKERNSEC_FORKFAIL
63184+ bool "Fork failure logging"
63185+ help
63186+ If you say Y here, all failed fork() attempts will be logged.
63187+ This could suggest a fork bomb, or someone attempting to overstep
63188+ their process limit. If the sysctl option is enabled, a sysctl option
63189+ with name "forkfail_logging" is created.
63190+
63191+config GRKERNSEC_TIME
63192+ bool "Time change logging"
63193+ default y if GRKERNSEC_CONFIG_AUTO
63194+ help
63195+ If you say Y here, any changes of the system clock will be logged.
63196+ If the sysctl option is enabled, a sysctl option with name
63197+ "timechange_logging" is created.
63198+
63199+config GRKERNSEC_PROC_IPADDR
63200+ bool "/proc/<pid>/ipaddr support"
63201+ default y if GRKERNSEC_CONFIG_AUTO
63202+ help
63203+ If you say Y here, a new entry will be added to each /proc/<pid>
63204+ directory that contains the IP address of the person using the task.
63205+ The IP is carried across local TCP and AF_UNIX stream sockets.
63206+ This information can be useful for IDS/IPSes to perform remote response
63207+ to a local attack. The entry is readable by only the owner of the
63208+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
63209+ the RBAC system), and thus does not create privacy concerns.
63210+
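  As a usage illustration (a hypothetical consumer, not part of the patch),
  an IDS helper could read the entry directly. A minimal sketch in C, using
  the /proc/<pid>/ipaddr path described above:

	/* print the IP recorded for a given pid; requires ownership of
	 * the target process (or CAP_DAC_OVERRIDE) */
	#include <stdio.h>

	int main(int argc, char **argv)
	{
		char path[64], ip[64];
		FILE *f;

		if (argc < 2)
			return 1;
		snprintf(path, sizeof(path), "/proc/%s/ipaddr", argv[1]);
		f = fopen(path, "r");
		if (f == NULL)
			return 1;
		if (fgets(ip, sizeof(ip), f) != NULL)
			printf("pid %s ipaddr: %s", argv[1], ip);
		fclose(f);
		return 0;
	}
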
63211+config GRKERNSEC_RWXMAP_LOG
63212+ bool 'Denied RWX mmap/mprotect logging'
63213+ default y if GRKERNSEC_CONFIG_AUTO
63214+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
63215+ help
63216+ If you say Y here, calls to mmap() and mprotect() with explicit
63217+ usage of PROT_WRITE and PROT_EXEC together will be logged when
63218+ denied by the PAX_MPROTECT feature. This feature will also
63219+ log other problematic scenarios that can occur when PAX_MPROTECT
63220+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
63221+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
63222+ is created.
63223+
63224+endmenu
63225+
63226+menu "Executable Protections"
63227+depends on GRKERNSEC
63228+
63229+config GRKERNSEC_DMESG
63230+ bool "Dmesg(8) restriction"
63231+ default y if GRKERNSEC_CONFIG_AUTO
63232+ help
63233+ If you say Y here, non-root users will not be able to use dmesg(8)
63234+ to view the contents of the kernel's circular log buffer.
63235+ The kernel's log buffer often contains kernel addresses and other
63236+ identifying information useful to an attacker in fingerprinting a
63237+ system for a targeted exploit.
63238+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
63239+ created.
63240+
63241+config GRKERNSEC_HARDEN_PTRACE
63242+ bool "Deter ptrace-based process snooping"
63243+ default y if GRKERNSEC_CONFIG_AUTO
63244+ help
63245+ If you say Y here, TTY sniffers and other malicious monitoring
63246+ programs implemented through ptrace will be defeated. If you
63247+ have been using the RBAC system, this option has already been
63248+ enabled for several years for all users, with the ability to make
63249+ fine-grained exceptions.
63250+
63251+ This option only affects the ability of non-root users to ptrace
63252+ processes that are not a descendant of the ptracing process.
63253+ This means that strace ./binary and gdb ./binary will still work,
63254+ but attaching to arbitrary processes will not. If the sysctl
63255+ option is enabled, a sysctl option with name "harden_ptrace" is
63256+ created.
63257+
63258+config GRKERNSEC_PTRACE_READEXEC
63259+ bool "Require read access to ptrace sensitive binaries"
63260+ default y if GRKERNSEC_CONFIG_AUTO
63261+ help
63262+ If you say Y here, unprivileged users will not be able to ptrace unreadable
63263+ binaries. This option is useful in environments that
63264+ remove the read bits (e.g. file mode 4711) from suid binaries to
63265+ prevent infoleaking of their contents. This option adds
63266+ consistency to the use of that file mode, as the binary's contents
63267+ could otherwise be read out by ptracing it while it runs unprivileged.
63268+
63269+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
63270+ is created.
63271+
63272+config GRKERNSEC_SETXID
63273+ bool "Enforce consistent multithreaded privileges"
63274+ default y if GRKERNSEC_CONFIG_AUTO
63275+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
63276+ help
63277+ If you say Y here, a change from a root uid to a non-root uid
63278+ in a multithreaded application will cause the resulting uids,
63279+ gids, supplementary groups, and capabilities in that thread
63280+ to be propagated to the other threads of the process. In most
63281+ cases this is unnecessary, as glibc will emulate this behavior
63282+ on behalf of the application. Other libcs do not act in the
63283+ same way, allowing the other threads of the process to continue
63284+ running with root privileges. If the sysctl option is enabled,
63285+ a sysctl option with name "consistent_setxid" is created.
63286+
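  To see why kernel-side enforcement matters, consider the hazard the help
  text describes. The sketch below (an illustration, not part of the patch)
  drops root via the raw setuid syscall, which on Linux changes the
  credentials of only the calling thread; glibc papers over this with a
  signal-based broadcast, but a libc that does not leaves the second thread
  running as root:

	#include <pthread.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	static void *worker(void *arg)
	{
		sleep(2);
		/* without glibc's emulation or GRKERNSEC_SETXID, this
		   thread may still report uid 0 */
		printf("worker uid: %d\n", (int)syscall(SYS_getuid));
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, worker, NULL);
		sleep(1);
		syscall(SYS_setuid, 65534);	/* raw syscall: this thread only */
		printf("main uid:   %d\n", (int)syscall(SYS_getuid));
		pthread_join(t, NULL);
		return 0;
	}
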
63287+config GRKERNSEC_HARDEN_IPC
63288+ bool "Disallow access to world-accessible IPC objects"
63289+ default y if GRKERNSEC_CONFIG_AUTO
63290+ depends on SYSVIPC
63291+ help
63292+ If you say Y here, access to overly-permissive IPC (shared memory,
63293+ message queues, and semaphores) will be denied for processes whose
63294+ effective user or group would not grant them permission. It's a
63295+ common error to grant too much permission to these objects, with
63296+ impact ranging from denial of service and information leaking to
63297+ privilege escalation. This feature was developed in response to
63298+ research by Tim Brown:
63299+ http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
63300+ who found hundreds of such insecure usages. Processes with
63301+ CAP_IPC_OWNER are still permitted to access these IPC objects.
63302+ If the sysctl option is enabled, a sysctl option with name
63303+ "harden_ipc" is created.
63304+
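  The class of mistake targeted here is easy to reproduce. The sketch below
  (illustrative only) creates exactly the kind of world-accessible SysV
  segment that this option denies to unrelated uids/gids:

	#include <stdio.h>
	#include <sys/ipc.h>
	#include <sys/shm.h>

	int main(void)
	{
		/* 0666: any local user may attach unless harden_ipc steps in */
		int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0666);

		if (id < 0) {
			perror("shmget");
			return 1;
		}
		printf("world-accessible shm segment: id %d, mode 0666\n", id);
		shmctl(id, IPC_RMID, NULL);	/* clean up */
		return 0;
	}
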
63305+config GRKERNSEC_TPE
63306+ bool "Trusted Path Execution (TPE)"
63307+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
63308+ help
63309+ If you say Y here, you will be able to choose a gid to add to the
63310+ supplementary groups of users you want to mark as "untrusted."
63311+ These users will not be able to execute any files that are not in
63312+ root-owned directories writable only by root. If the sysctl option
63313+ is enabled, a sysctl option with name "tpe" is created.
63314+
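  The trust rule above reduces to a small predicate on the containing
  directory. A minimal userspace sketch (the helper name is illustrative;
  the in-kernel check also consults the TPE GID and sysctl settings):

	#include <stdbool.h>
	#include <sys/stat.h>

	static bool tpe_dir_is_trusted(const char *dir)
	{
		struct stat st;

		if (stat(dir, &st) != 0 || !S_ISDIR(st.st_mode))
			return false;
		return st.st_uid == 0 &&			/* owned by root */
		       !(st.st_mode & (S_IWGRP | S_IWOTH));	/* writable only by root */
	}
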
63315+config GRKERNSEC_TPE_ALL
63316+ bool "Partially restrict all non-root users"
63317+ depends on GRKERNSEC_TPE
63318+ help
63319+ If you say Y here, all non-root users will be covered under
63320+ a weaker TPE restriction. This is separate from, and in addition to,
63321+ the main TPE options that you have selected elsewhere. Thus, if a
63322+ "trusted" GID is chosen, this restriction applies to even that GID.
63323+ Under this restriction, all non-root users will only be allowed to
63324+ execute files in directories they own that are not group or
63325+ world-writable, or in directories owned by root and writable only by
63326+ root. If the sysctl option is enabled, a sysctl option with name
63327+ "tpe_restrict_all" is created.
63328+
63329+config GRKERNSEC_TPE_INVERT
63330+ bool "Invert GID option"
63331+ depends on GRKERNSEC_TPE
63332+ help
63333+ If you say Y here, the group you specify in the TPE configuration will
63334+ decide what group TPE restrictions will be *disabled* for. This
63335+ option is useful if you want TPE restrictions to be applied to most
63336+ users on the system. If the sysctl option is enabled, a sysctl option
63337+ with name "tpe_invert" is created. Unlike other sysctl options, this
63338+ entry will default to on for backward compatibility.
63339+
63340+config GRKERNSEC_TPE_GID
63341+ int
63342+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
63343+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
63344+
63345+config GRKERNSEC_TPE_UNTRUSTED_GID
63346+ int "GID for TPE-untrusted users"
63347+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
63348+ default 1005
63349+ help
63350+ Setting this GID determines what group TPE restrictions will be
63351+ *enabled* for. If the sysctl option is enabled, a sysctl option
63352+ with name "tpe_gid" is created.
63353+
63354+config GRKERNSEC_TPE_TRUSTED_GID
63355+ int "GID for TPE-trusted users"
63356+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
63357+ default 1005
63358+ help
63359+ Setting this GID determines what group TPE restrictions will be
63360+ *disabled* for. If the sysctl option is enabled, a sysctl option
63361+ with name "tpe_gid" is created.
63362+
63363+endmenu
63364+menu "Network Protections"
63365+depends on GRKERNSEC
63366+
63367+config GRKERNSEC_RANDNET
63368+ bool "Larger entropy pools"
63369+ default y if GRKERNSEC_CONFIG_AUTO
63370+ help
63371+ If you say Y here, the entropy pools used for many features of Linux
63372+ and grsecurity will be doubled in size. Since several grsecurity
63373+ features use additional randomness, it is recommended that you say Y
63374+ here. Saying Y here has a similar effect as modifying
63375+ /proc/sys/kernel/random/poolsize.
63376+
63377+config GRKERNSEC_BLACKHOLE
63378+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
63379+ default y if GRKERNSEC_CONFIG_AUTO
63380+ depends on NET
63381+ help
63382+ If you say Y here, neither TCP resets nor ICMP
63383+ destination-unreachable packets will be sent in response to packets
63384+ sent to ports for which no associated listening process exists.
63385+ This feature supports both IPv4 and IPv6 and exempts the
63386+ loopback interface from blackholing. Enabling this feature
63387+ makes a host more resilient to DoS attacks and reduces network
63388+ visibility against scanners.
63389+
63390+ The blackhole feature as-implemented is equivalent to the FreeBSD
63391+ blackhole feature, as it prevents RST responses to all packets, not
63392+ just SYNs. Under most application behavior this causes no
63393+ problems, but applications (like haproxy) may not close certain
63394+ connections in a way that cleanly terminates them on the remote
63395+ end, leaving the remote host in LAST_ACK state. Because of this
63396+ side-effect and to prevent intentional LAST_ACK DoSes, this
63397+ feature also adds automatic mitigation against such attacks.
63398+ The mitigation drastically reduces the amount of time a socket
63399+ can spend in LAST_ACK state. If you're using haproxy and not
63400+ all servers it connects to have this option enabled, consider
63401+ disabling this feature on the haproxy host.
63402+
63403+ If the sysctl option is enabled, two sysctl options with names
63404+ "ip_blackhole" and "lastack_retries" will be created.
63405+ While "ip_blackhole" takes the standard zero/non-zero on/off
63406+ toggle, "lastack_retries" uses the same kinds of values as
63407+ "tcp_retries1" and "tcp_retries2". The default value of 4
63408+ prevents a socket from lasting more than 45 seconds in LAST_ACK
63409+ state.
63410+
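  The 45-second figure follows from TCP's exponential retransmission
  backoff: assuming the classic 3-second initial retransmission timeout,
  four retries wait 3 + 6 + 12 + 24 = 45 seconds in total before the socket
  is torn down (the exact time varies with the measured RTO).
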
63411+config GRKERNSEC_NO_SIMULT_CONNECT
63412+ bool "Disable TCP Simultaneous Connect"
63413+ default y if GRKERNSEC_CONFIG_AUTO
63414+ depends on NET
63415+ help
63416+ If you say Y here, a feature by Willy Tarreau will be enabled that
63417+ removes a weakness in Linux's strict implementation of TCP that
63418+ allows two clients to connect to each other without either entering
63419+ a listening state. The weakness allows an attacker to easily prevent
63420+ a client from connecting to a known server provided the source port
63421+ for the connection is guessed correctly.
63422+
63423+ As the weakness could be used to prevent an antivirus or IPS from
63424+ fetching updates, or prevent an SSL gateway from fetching a CRL,
63425+ it should be eliminated by enabling this option. Though Linux is
63426+ one of few operating systems supporting simultaneous connect, it
63427+ has no legitimate use in practice and is rarely supported by firewalls.
63428+
63429+config GRKERNSEC_SOCKET
63430+ bool "Socket restrictions"
63431+ depends on NET
63432+ help
63433+ If you say Y here, you will be able to choose from several options.
63434+ If you assign a GID on your system and add it to the supplementary
63435+ groups of users you want to restrict socket access to, this patch
63436+ will enforce up to three kinds of restrictions, based on the option(s)
63437+ you choose.
63437+
63438+config GRKERNSEC_SOCKET_ALL
63439+ bool "Deny any sockets to group"
63440+ depends on GRKERNSEC_SOCKET
63441+ help
63442+ If you say Y here, you will be able to choose a GID whose users will
63443+ be unable to connect to other hosts from your machine or run server
63444+ applications from your machine. If the sysctl option is enabled, a
63445+ sysctl option with name "socket_all" is created.
63446+
63447+config GRKERNSEC_SOCKET_ALL_GID
63448+ int "GID to deny all sockets for"
63449+ depends on GRKERNSEC_SOCKET_ALL
63450+ default 1004
63451+ help
63452+ Here you can choose the GID to disable socket access for. Remember to
63453+ add the users you want socket access disabled for to the GID
63454+ specified here. If the sysctl option is enabled, a sysctl option
63455+ with name "socket_all_gid" is created.
63456+
63457+config GRKERNSEC_SOCKET_CLIENT
63458+ bool "Deny client sockets to group"
63459+ depends on GRKERNSEC_SOCKET
63460+ help
63461+ If you say Y here, you will be able to choose a GID whose users will
63462+ be unable to connect to other hosts from your machine, but will be
63463+ able to run servers. If this option is enabled, all users in the group
63464+ you specify will have to use passive mode when initiating ftp transfers
63465+ from the shell on your machine. If the sysctl option is enabled, a
63466+ sysctl option with name "socket_client" is created.
63467+
63468+config GRKERNSEC_SOCKET_CLIENT_GID
63469+ int "GID to deny client sockets for"
63470+ depends on GRKERNSEC_SOCKET_CLIENT
63471+ default 1003
63472+ help
63473+ Here you can choose the GID to disable client socket access for.
63474+ Remember to add the users you want client socket access disabled for to
63475+ the GID specified here. If the sysctl option is enabled, a sysctl
63476+ option with name "socket_client_gid" is created.
63477+
63478+config GRKERNSEC_SOCKET_SERVER
63479+ bool "Deny server sockets to group"
63480+ depends on GRKERNSEC_SOCKET
63481+ help
63482+ If you say Y here, you will be able to choose a GID whose users will
63483+ be unable to run server applications from your machine. If the sysctl
63484+ option is enabled, a sysctl option with name "socket_server" is created.
63485+
63486+config GRKERNSEC_SOCKET_SERVER_GID
63487+ int "GID to deny server sockets for"
63488+ depends on GRKERNSEC_SOCKET_SERVER
63489+ default 1002
63490+ help
63491+ Here you can choose the GID to disable server socket access for.
63492+ Remember to add the users you want server socket access disabled for to
63493+ the GID specified here. If the sysctl option is enabled, a sysctl
63494+ option with name "socket_server_gid" is created.
63495+
63496+endmenu
63497+
63498+menu "Physical Protections"
63499+depends on GRKERNSEC
63500+
63501+config GRKERNSEC_DENYUSB
63502+ bool "Deny new USB connections after toggle"
63503+ default y if GRKERNSEC_CONFIG_AUTO
63504+ depends on SYSCTL && USB_SUPPORT
63505+ help
63506+ If you say Y here, a new sysctl option with name "deny_new_usb"
63507+ will be created. Setting its value to 1 will prevent any new
63508+ USB devices from being recognized by the OS. Any attempted USB
63509+ device insertion will be logged. This option is intended to be
63510+ used against custom USB devices designed to exploit vulnerabilities
63511+ in various USB device drivers.
63512+
63513+ For greatest effectiveness, this sysctl should be set after any
63514+ relevant init scripts have run. This option is safe to enable in distros
63515+ as each user can choose whether or not to toggle the sysctl.
63516+
63517+config GRKERNSEC_DENYUSB_FORCE
63518+ bool "Reject all USB devices not connected at boot"
63519+ select USB
63520+ depends on GRKERNSEC_DENYUSB
63521+ help
63522+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
63523+ that doesn't involve a sysctl entry. This option should only be
63524+ enabled if you're sure you want to deny all new USB connections
63525+ at runtime and don't want to modify init scripts. This should not
63526+ be enabled by distros. It forces the core USB code to be built
63527+ into the kernel image so that all devices connected at boot time
63528+ can be recognized and new USB device connections can be prevented
63529+ prior to init running.
63530+
63531+endmenu
63532+
63533+menu "Sysctl Support"
63534+depends on GRKERNSEC && SYSCTL
63535+
63536+config GRKERNSEC_SYSCTL
63537+ bool "Sysctl support"
63538+ default y if GRKERNSEC_CONFIG_AUTO
63539+ help
63540+ If you say Y here, you will be able to change the options that
63541+ grsecurity runs with at bootup, without having to recompile your
63542+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
63543+ to enable (1) or disable (0) various features. All the sysctl entries
63544+ are mutable until the "grsec_lock" entry is set to a non-zero value.
63545+ All features enabled in the kernel configuration are disabled at boot
63546+ if you do not say Y to the "Turn on features by default" option.
63547+ All options should be set at startup, and the grsec_lock entry should
63548+ be set to a non-zero value after all the options are set.
63549+ *THIS IS EXTREMELY IMPORTANT*
63550+
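  A minimal sketch of the recommended boot-time sequence (paths as
  documented above; run after all grsecurity sysctls have been set):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/kernel/grsecurity/grsec_lock", "w");

		if (f == NULL) {
			perror("grsec_lock");
			return 1;
		}
		/* any non-zero value makes all grsecurity sysctls immutable */
		fputs("1\n", f);
		return fclose(f) ? 1 : 0;
	}

  The same effect is usually achieved with a plain
  "echo 1 > /proc/sys/kernel/grsecurity/grsec_lock" in a late init script.
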
63551+config GRKERNSEC_SYSCTL_DISTRO
63552+ bool "Extra sysctl support for distro makers (READ HELP)"
63553+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
63554+ help
63555+ If you say Y here, additional sysctl options will be created
63556+ for features that affect processes running as root. Therefore,
63557+ it is critical when using this option that the grsec_lock entry be
63558+ enabled after boot. Only distros that ship prebuilt kernel packages
63559+ with this option enabled and that can ensure grsec_lock is set
63560+ after boot should use this option.
63561+ *Failure to set grsec_lock after boot makes all grsec features
63562+ this option covers useless*
63563+
63564+ Currently this option creates the following sysctl entries:
63565+ "Disable Privileged I/O": "disable_priv_io"
63566+
63567+config GRKERNSEC_SYSCTL_ON
63568+ bool "Turn on features by default"
63569+ default y if GRKERNSEC_CONFIG_AUTO
63570+ depends on GRKERNSEC_SYSCTL
63571+ help
63572+ If you say Y here, the features enabled in your kernel configuration
63573+ will be turned on at boot time, rather than remaining disabled until
63574+ toggled on via sysctl. It is recommended you say Y here unless
63575+ there is some reason you would want all sysctl-tunable features to
63576+ be disabled by default. As mentioned elsewhere, it is important
63577+ to enable the grsec_lock entry once you have finished modifying
63578+ the sysctl entries.
63579+
63580+endmenu
63581+menu "Logging Options"
63582+depends on GRKERNSEC
63583+
63584+config GRKERNSEC_FLOODTIME
63585+ int "Seconds in between log messages (minimum)"
63586+ default 10
63587+ help
63588+ This option allows you to enforce the number of seconds between
63589+ grsecurity log messages. The default should be suitable for most
63590+ people; however, if you choose to change it, pick a value small enough
63591+ to allow informative logs to be produced, but large enough to
63592+ prevent flooding.
63593+
63594+config GRKERNSEC_FLOODBURST
63595+ int "Number of messages in a burst (maximum)"
63596+ default 6
63597+ help
63598+ This option allows you to choose the maximum number of messages allowed
63599+ within the flood time interval you chose in a separate option. The
63600+ default should be suitable for most people; however, if you find that
63601+ many of your logs are being interpreted as flooding, you may want to
63602+ raise this value.
63603+
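  Taken together with the previous option, the defaults allow roughly 6
  grsecurity log messages per 10-second window; further messages within the
  window are suppressed until it elapses, in the spirit of the kernel's
  printk ratelimiting.
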
63604+endmenu
63605diff --git a/grsecurity/Makefile b/grsecurity/Makefile
63606new file mode 100644
63607index 0000000..85beb79
63608--- /dev/null
63609+++ b/grsecurity/Makefile
63610@@ -0,0 +1,43 @@
63611+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
63612+# during 2001-2009 it was completely redesigned by Brad Spengler
63613+# into an RBAC system
63614+#
63615+# All code in this directory and various hooks inserted throughout the kernel
63616+# are copyright Brad Spengler - Open Source Security, Inc., and released
63617+# under the GPL v2 or higher
63618+
63619+KBUILD_CFLAGS += -Werror
63620+
63621+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
63622+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
63623+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
63624+ grsec_usb.o grsec_ipc.o
63625+
63626+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
63627+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
63628+ gracl_learn.o grsec_log.o gracl_policy.o
63629+ifdef CONFIG_COMPAT
63630+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
63631+endif
63632+
63633+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
63634+
63635+ifdef CONFIG_NET
63636+obj-y += grsec_sock.o
63637+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
63638+endif
63639+
63640+ifndef CONFIG_GRKERNSEC
63641+obj-y += grsec_disabled.o
63642+endif
63643+
63644+ifdef CONFIG_GRKERNSEC_HIDESYM
63645+extra-y := grsec_hidesym.o
63646+$(obj)/grsec_hidesym.o:
63647+ @-chmod -f 500 /boot
63648+ @-chmod -f 500 /lib/modules
63649+ @-chmod -f 500 /lib64/modules
63650+ @-chmod -f 500 /lib32/modules
63651+ @-chmod -f 700 .
63652+ @echo ' grsec: protected kernel image paths'
63653+endif
63654diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
63655new file mode 100644
63656index 0000000..6affeea
63657--- /dev/null
63658+++ b/grsecurity/gracl.c
63659@@ -0,0 +1,2679 @@
63660+#include <linux/kernel.h>
63661+#include <linux/module.h>
63662+#include <linux/sched.h>
63663+#include <linux/mm.h>
63664+#include <linux/file.h>
63665+#include <linux/fs.h>
63666+#include <linux/namei.h>
63667+#include <linux/mount.h>
63668+#include <linux/tty.h>
63669+#include <linux/proc_fs.h>
63670+#include <linux/lglock.h>
63671+#include <linux/slab.h>
63672+#include <linux/vmalloc.h>
63673+#include <linux/types.h>
63674+#include <linux/sysctl.h>
63675+#include <linux/netdevice.h>
63676+#include <linux/ptrace.h>
63677+#include <linux/gracl.h>
63678+#include <linux/gralloc.h>
63679+#include <linux/security.h>
63680+#include <linux/grinternal.h>
63681+#include <linux/pid_namespace.h>
63682+#include <linux/stop_machine.h>
63683+#include <linux/fdtable.h>
63684+#include <linux/percpu.h>
63685+#include <linux/lglock.h>
63686+#include <linux/hugetlb.h>
63687+#include <linux/posix-timers.h>
63688+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
63689+#include <linux/magic.h>
63690+#include <linux/pagemap.h>
63691+#include "../fs/btrfs/async-thread.h"
63692+#include "../fs/btrfs/ctree.h"
63693+#include "../fs/btrfs/btrfs_inode.h"
63694+#endif
63695+#include "../fs/mount.h"
63696+
63697+#include <asm/uaccess.h>
63698+#include <asm/errno.h>
63699+#include <asm/mman.h>
63700+
63701+#define FOR_EACH_ROLE_START(role) \
63702+ role = running_polstate.role_list; \
63703+ while (role) {
63704+
63705+#define FOR_EACH_ROLE_END(role) \
63706+ role = role->prev; \
63707+ }
63708+
63709+extern struct lglock vfsmount_lock;
63710+
63711+extern struct path gr_real_root;
63712+
63713+static struct gr_policy_state running_polstate;
63714+struct gr_policy_state *polstate = &running_polstate;
63715+extern struct gr_alloc_state *current_alloc_state;
63716+
63717+extern char *gr_shared_page[4];
63718+DEFINE_RWLOCK(gr_inode_lock);
63719+
63720+static unsigned int gr_status __read_only = GR_STATUS_INIT;
63721+
63722+#ifdef CONFIG_NET
63723+extern struct vfsmount *sock_mnt;
63724+#endif
63725+
63726+extern struct vfsmount *pipe_mnt;
63727+extern struct vfsmount *shm_mnt;
63728+
63729+#ifdef CONFIG_HUGETLBFS
63730+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
63731+#endif
63732+
63733+extern u16 acl_sp_role_value;
63734+extern struct acl_object_label *fakefs_obj_rw;
63735+extern struct acl_object_label *fakefs_obj_rwx;
63736+
63737+int gr_acl_is_enabled(void)
63738+{
63739+ return (gr_status & GR_READY);
63740+}
63741+
63742+void gr_enable_rbac_system(void)
63743+{
63744+ pax_open_kernel();
63745+ gr_status |= GR_READY;
63746+ pax_close_kernel();
63747+}
63748+
63749+int gr_rbac_disable(void *unused)
63750+{
63751+ pax_open_kernel();
63752+ gr_status &= ~GR_READY;
63753+ pax_close_kernel();
63754+
63755+ return 0;
63756+}
63757+
63758+static inline dev_t __get_dev(const struct dentry *dentry)
63759+{
63760+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
63761+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
63762+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
63763+ else
63764+#endif
63765+ return dentry->d_sb->s_dev;
63766+}
63767+
63768+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
63769+{
63770+ return __get_dev(dentry);
63771+}
63772+
63773+static char gr_task_roletype_to_char(struct task_struct *task)
63774+{
63775+ switch (task->role->roletype &
63776+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
63777+ GR_ROLE_SPECIAL)) {
63778+ case GR_ROLE_DEFAULT:
63779+ return 'D';
63780+ case GR_ROLE_USER:
63781+ return 'U';
63782+ case GR_ROLE_GROUP:
63783+ return 'G';
63784+ case GR_ROLE_SPECIAL:
63785+ return 'S';
63786+ }
63787+
63788+ return 'X';
63789+}
63790+
63791+char gr_roletype_to_char(void)
63792+{
63793+ return gr_task_roletype_to_char(current);
63794+}
63795+
63796+__inline__ int
63797+gr_acl_tpe_check(void)
63798+{
63799+ if (unlikely(!(gr_status & GR_READY)))
63800+ return 0;
63801+ if (current->role->roletype & GR_ROLE_TPE)
63802+ return 1;
63803+ else
63804+ return 0;
63805+}
63806+
63807+int
63808+gr_handle_rawio(const struct inode *inode)
63809+{
63810+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63811+ if (inode && S_ISBLK(inode->i_mode) &&
63812+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
63813+ !capable(CAP_SYS_RAWIO))
63814+ return 1;
63815+#endif
63816+ return 0;
63817+}
63818+
63819+int
63820+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
63821+{
63822+ if (likely(lena != lenb))
63823+ return 0;
63824+
63825+ return !memcmp(a, b, lena);
63826+}
63827+
63828+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
63829+{
63830+ *buflen -= namelen;
63831+ if (*buflen < 0)
63832+ return -ENAMETOOLONG;
63833+ *buffer -= namelen;
63834+ memcpy(*buffer, str, namelen);
63835+ return 0;
63836+}
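
/*
 * Note: prepend() fills the buffer from the end toward the front; callers
 * start with *buffer == buf + buflen and each call moves the write cursor
 * left, so the path is assembled right-to-left (see __our_d_path() below).
 */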
63837+
63838+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
63839+{
63840+ return prepend(buffer, buflen, name->name, name->len);
63841+}
63842+
63843+static int prepend_path(const struct path *path, struct path *root,
63844+ char **buffer, int *buflen)
63845+{
63846+ struct dentry *dentry = path->dentry;
63847+ struct vfsmount *vfsmnt = path->mnt;
63848+ struct mount *mnt = real_mount(vfsmnt);
63849+ bool slash = false;
63850+ int error = 0;
63851+
63852+ while (dentry != root->dentry || vfsmnt != root->mnt) {
63853+ struct dentry * parent;
63854+
63855+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
63856+ /* Global root? */
63857+ if (!mnt_has_parent(mnt)) {
63858+ goto out;
63859+ }
63860+ dentry = mnt->mnt_mountpoint;
63861+ mnt = mnt->mnt_parent;
63862+ vfsmnt = &mnt->mnt;
63863+ continue;
63864+ }
63865+ parent = dentry->d_parent;
63866+ prefetch(parent);
63867+ spin_lock(&dentry->d_lock);
63868+ error = prepend_name(buffer, buflen, &dentry->d_name);
63869+ spin_unlock(&dentry->d_lock);
63870+ if (!error)
63871+ error = prepend(buffer, buflen, "/", 1);
63872+ if (error)
63873+ break;
63874+
63875+ slash = true;
63876+ dentry = parent;
63877+ }
63878+
63879+out:
63880+ if (!error && !slash)
63881+ error = prepend(buffer, buflen, "/", 1);
63882+
63883+ return error;
63884+}
63885+
63886+/* this must be called with vfsmount_lock and rename_lock held */
63887+
63888+static char *__our_d_path(const struct path *path, struct path *root,
63889+ char *buf, int buflen)
63890+{
63891+ char *res = buf + buflen;
63892+ int error;
63893+
63894+ prepend(&res, &buflen, "\0", 1);
63895+ error = prepend_path(path, root, &res, &buflen);
63896+ if (error)
63897+ return ERR_PTR(error);
63898+
63899+ return res;
63900+}
63901+
63902+static char *
63903+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
63904+{
63905+ char *retval;
63906+
63907+ retval = __our_d_path(path, root, buf, buflen);
63908+ if (unlikely(IS_ERR(retval)))
63909+ retval = strcpy(buf, "<path too long>");
63910+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
63911+ retval[1] = '\0';
63912+
63913+ return retval;
63914+}
63915+
63916+static char *
63917+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
63918+ char *buf, int buflen)
63919+{
63920+ struct path path;
63921+ char *res;
63922+
63923+ path.dentry = (struct dentry *)dentry;
63924+ path.mnt = (struct vfsmount *)vfsmnt;
63925+
63926+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
63927+ by the RBAC system */
63928+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
63929+
63930+ return res;
63931+}
63932+
63933+static char *
63934+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
63935+ char *buf, int buflen)
63936+{
63937+ char *res;
63938+ struct path path;
63939+ struct path root;
63940+ struct task_struct *reaper = init_pid_ns.child_reaper;
63941+
63942+ path.dentry = (struct dentry *)dentry;
63943+ path.mnt = (struct vfsmount *)vfsmnt;
63944+
63945+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
63946+ get_fs_root(reaper->fs, &root);
63947+
63948+ br_read_lock(&vfsmount_lock);
63949+ write_seqlock(&rename_lock);
63950+ res = gen_full_path(&path, &root, buf, buflen);
63951+ write_sequnlock(&rename_lock);
63952+ br_read_unlock(&vfsmount_lock);
63953+
63954+ path_put(&root);
63955+ return res;
63956+}
63957+
63958+char *
63959+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
63960+{
63961+ char *ret;
63962+ br_read_lock(&vfsmount_lock);
63963+ write_seqlock(&rename_lock);
63964+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
63965+ PAGE_SIZE);
63966+ write_sequnlock(&rename_lock);
63967+ br_read_unlock(&vfsmount_lock);
63968+ return ret;
63969+}
63970+
63971+static char *
63972+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
63973+{
63974+ char *ret;
63975+ char *buf;
63976+ int buflen;
63977+
63978+ br_read_lock(&vfsmount_lock);
63979+ write_seqlock(&rename_lock);
63980+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
63981+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
63982+ buflen = (int)(ret - buf);
63983+ if (buflen >= 5)
63984+ prepend(&ret, &buflen, "/proc", 5);
63985+ else
63986+ ret = strcpy(buf, "<path too long>");
63987+ write_sequnlock(&rename_lock);
63988+ br_read_unlock(&vfsmount_lock);
63989+ return ret;
63990+}
63991+
63992+char *
63993+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
63994+{
63995+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
63996+ PAGE_SIZE);
63997+}
63998+
63999+char *
64000+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
64001+{
64002+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
64003+ PAGE_SIZE);
64004+}
64005+
64006+char *
64007+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
64008+{
64009+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
64010+ PAGE_SIZE);
64011+}
64012+
64013+char *
64014+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
64015+{
64016+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
64017+ PAGE_SIZE);
64018+}
64019+
64020+char *
64021+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
64022+{
64023+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
64024+ PAGE_SIZE);
64025+}
64026+
64027+__inline__ __u32
64028+to_gr_audit(const __u32 reqmode)
64029+{
64030+ /* masks off auditable permission flags, then shifts them to create
64031+ auditing flags, and adds the special case of append auditing if
64032+ we're requesting write */
64033+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
64034+}
64035+
64036+struct acl_role_label *
64037+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
64038+ const gid_t gid)
64039+{
64040+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
64041+ struct acl_role_label *match;
64042+ struct role_allowed_ip *ipp;
64043+ unsigned int x;
64044+ u32 curr_ip = task->signal->saved_ip;
64045+
64046+ match = state->acl_role_set.r_hash[index];
64047+
64048+ while (match) {
64049+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
64050+ for (x = 0; x < match->domain_child_num; x++) {
64051+ if (match->domain_children[x] == uid)
64052+ goto found;
64053+ }
64054+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
64055+ break;
64056+ match = match->next;
64057+ }
64058+found:
64059+ if (match == NULL) {
64060+ try_group:
64061+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
64062+ match = state->acl_role_set.r_hash[index];
64063+
64064+ while (match) {
64065+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
64066+ for (x = 0; x < match->domain_child_num; x++) {
64067+ if (match->domain_children[x] == gid)
64068+ goto found2;
64069+ }
64070+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
64071+ break;
64072+ match = match->next;
64073+ }
64074+found2:
64075+ if (match == NULL)
64076+ match = state->default_role;
64077+ if (match->allowed_ips == NULL)
64078+ return match;
64079+ else {
64080+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
64081+ if (likely
64082+ ((ntohl(curr_ip) & ipp->netmask) ==
64083+ (ntohl(ipp->addr) & ipp->netmask)))
64084+ return match;
64085+ }
64086+ match = state->default_role;
64087+ }
64088+ } else if (match->allowed_ips == NULL) {
64089+ return match;
64090+ } else {
64091+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
64092+ if (likely
64093+ ((ntohl(curr_ip) & ipp->netmask) ==
64094+ (ntohl(ipp->addr) & ipp->netmask)))
64095+ return match;
64096+ }
64097+ goto try_group;
64098+ }
64099+
64100+ return match;
64101+}
64102+
64103+static struct acl_role_label *
64104+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
64105+ const gid_t gid)
64106+{
64107+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
64108+}
64109+
64110+struct acl_subject_label *
64111+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
64112+ const struct acl_role_label *role)
64113+{
64114+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
64115+ struct acl_subject_label *match;
64116+
64117+ match = role->subj_hash[index];
64118+
64119+ while (match && (match->inode != ino || match->device != dev ||
64120+ (match->mode & GR_DELETED))) {
64121+ match = match->next;
64122+ }
64123+
64124+ if (match && !(match->mode & GR_DELETED))
64125+ return match;
64126+ else
64127+ return NULL;
64128+}
64129+
64130+struct acl_subject_label *
64131+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
64132+ const struct acl_role_label *role)
64133+{
64134+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
64135+ struct acl_subject_label *match;
64136+
64137+ match = role->subj_hash[index];
64138+
64139+ while (match && (match->inode != ino || match->device != dev ||
64140+ !(match->mode & GR_DELETED))) {
64141+ match = match->next;
64142+ }
64143+
64144+ if (match && (match->mode & GR_DELETED))
64145+ return match;
64146+ else
64147+ return NULL;
64148+}
64149+
64150+static struct acl_object_label *
64151+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
64152+ const struct acl_subject_label *subj)
64153+{
64154+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
64155+ struct acl_object_label *match;
64156+
64157+ match = subj->obj_hash[index];
64158+
64159+ while (match && (match->inode != ino || match->device != dev ||
64160+ (match->mode & GR_DELETED))) {
64161+ match = match->next;
64162+ }
64163+
64164+ if (match && !(match->mode & GR_DELETED))
64165+ return match;
64166+ else
64167+ return NULL;
64168+}
64169+
64170+static struct acl_object_label *
64171+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
64172+ const struct acl_subject_label *subj)
64173+{
64174+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
64175+ struct acl_object_label *match;
64176+
64177+ match = subj->obj_hash[index];
64178+
64179+ while (match && (match->inode != ino || match->device != dev ||
64180+ !(match->mode & GR_DELETED))) {
64181+ match = match->next;
64182+ }
64183+
64184+ if (match && (match->mode & GR_DELETED))
64185+ return match;
64186+
64187+ match = subj->obj_hash[index];
64188+
64189+ while (match && (match->inode != ino || match->device != dev ||
64190+ (match->mode & GR_DELETED))) {
64191+ match = match->next;
64192+ }
64193+
64194+ if (match && !(match->mode & GR_DELETED))
64195+ return match;
64196+ else
64197+ return NULL;
64198+}
64199+
64200+struct name_entry *
64201+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
64202+{
64203+ unsigned int len = strlen(name);
64204+ unsigned int key = full_name_hash(name, len);
64205+ unsigned int index = key % state->name_set.n_size;
64206+ struct name_entry *match;
64207+
64208+ match = state->name_set.n_hash[index];
64209+
64210+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
64211+ match = match->next;
64212+
64213+ return match;
64214+}
64215+
64216+static struct name_entry *
64217+lookup_name_entry(const char *name)
64218+{
64219+ return __lookup_name_entry(&running_polstate, name);
64220+}
64221+
64222+static struct name_entry *
64223+lookup_name_entry_create(const char *name)
64224+{
64225+ unsigned int len = strlen(name);
64226+ unsigned int key = full_name_hash(name, len);
64227+ unsigned int index = key % running_polstate.name_set.n_size;
64228+ struct name_entry *match;
64229+
64230+ match = running_polstate.name_set.n_hash[index];
64231+
64232+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
64233+ !match->deleted))
64234+ match = match->next;
64235+
64236+ if (match && match->deleted)
64237+ return match;
64238+
64239+ match = running_polstate.name_set.n_hash[index];
64240+
64241+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
64242+ match->deleted))
64243+ match = match->next;
64244+
64245+ if (match && !match->deleted)
64246+ return match;
64247+ else
64248+ return NULL;
64249+}
64250+
64251+static struct inodev_entry *
64252+lookup_inodev_entry(const ino_t ino, const dev_t dev)
64253+{
64254+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
64255+ struct inodev_entry *match;
64256+
64257+ match = running_polstate.inodev_set.i_hash[index];
64258+
64259+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
64260+ match = match->next;
64261+
64262+ return match;
64263+}
64264+
64265+void
64266+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
64267+{
64268+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
64269+ state->inodev_set.i_size);
64270+ struct inodev_entry **curr;
64271+
64272+ entry->prev = NULL;
64273+
64274+ curr = &state->inodev_set.i_hash[index];
64275+ if (*curr != NULL)
64276+ (*curr)->prev = entry;
64277+
64278+ entry->next = *curr;
64279+ *curr = entry;
64280+
64281+ return;
64282+}
64283+
64284+static void
64285+insert_inodev_entry(struct inodev_entry *entry)
64286+{
64287+ __insert_inodev_entry(&running_polstate, entry);
64288+}
64289+
64290+void
64291+insert_acl_obj_label(struct acl_object_label *obj,
64292+ struct acl_subject_label *subj)
64293+{
64294+ unsigned int index =
64295+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
64296+ struct acl_object_label **curr;
64297+
64298+ obj->prev = NULL;
64299+
64300+ curr = &subj->obj_hash[index];
64301+ if (*curr != NULL)
64302+ (*curr)->prev = obj;
64303+
64304+ obj->next = *curr;
64305+ *curr = obj;
64306+
64307+ return;
64308+}
64309+
64310+void
64311+insert_acl_subj_label(struct acl_subject_label *obj,
64312+ struct acl_role_label *role)
64313+{
64314+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
64315+ struct acl_subject_label **curr;
64316+
64317+ obj->prev = NULL;
64318+
64319+ curr = &role->subj_hash[index];
64320+ if (*curr != NULL)
64321+ (*curr)->prev = obj;
64322+
64323+ obj->next = *curr;
64324+ *curr = obj;
64325+
64326+ return;
64327+}
64328+
64329+/* derived from glibc fnmatch() 0: match, 1: no match*/
64330+
64331+static int
64332+glob_match(const char *p, const char *n)
64333+{
64334+ char c;
64335+
64336+ while ((c = *p++) != '\0') {
64337+ switch (c) {
64338+ case '?':
64339+ if (*n == '\0')
64340+ return 1;
64341+ else if (*n == '/')
64342+ return 1;
64343+ break;
64344+ case '\\':
64345+ if (*n != c)
64346+ return 1;
64347+ break;
64348+ case '*':
64349+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
64350+ if (*n == '/')
64351+ return 1;
64352+ else if (c == '?') {
64353+ if (*n == '\0')
64354+ return 1;
64355+ else
64356+ ++n;
64357+ }
64358+ }
64359+ if (c == '\0') {
64360+ return 0;
64361+ } else {
64362+ const char *endp;
64363+
64364+ if ((endp = strchr(n, '/')) == NULL)
64365+ endp = n + strlen(n);
64366+
64367+ if (c == '[') {
64368+ for (--p; n < endp; ++n)
64369+ if (!glob_match(p, n))
64370+ return 0;
64371+ } else if (c == '/') {
64372+ while (*n != '\0' && *n != '/')
64373+ ++n;
64374+ if (*n == '/' && !glob_match(p, n + 1))
64375+ return 0;
64376+ } else {
64377+ for (--p; n < endp; ++n)
64378+ if (*n == c && !glob_match(p, n))
64379+ return 0;
64380+ }
64381+
64382+ return 1;
64383+ }
64384+ case '[':
64385+ {
64386+ int not;
64387+ char cold;
64388+
64389+ if (*n == '\0' || *n == '/')
64390+ return 1;
64391+
64392+ not = (*p == '!' || *p == '^');
64393+ if (not)
64394+ ++p;
64395+
64396+ c = *p++;
64397+ for (;;) {
64398+ unsigned char fn = (unsigned char)*n;
64399+
64400+ if (c == '\0')
64401+ return 1;
64402+ else {
64403+ if (c == fn)
64404+ goto matched;
64405+ cold = c;
64406+ c = *p++;
64407+
64408+ if (c == '-' && *p != ']') {
64409+ unsigned char cend = *p++;
64410+
64411+ if (cend == '\0')
64412+ return 1;
64413+
64414+ if (cold <= fn && fn <= cend)
64415+ goto matched;
64416+
64417+ c = *p++;
64418+ }
64419+ }
64420+
64421+ if (c == ']')
64422+ break;
64423+ }
64424+ if (!not)
64425+ return 1;
64426+ break;
64427+ matched:
64428+ while (c != ']') {
64429+ if (c == '\0')
64430+ return 1;
64431+
64432+ c = *p++;
64433+ }
64434+ if (not)
64435+ return 1;
64436+ }
64437+ break;
64438+ default:
64439+ if (c != *n)
64440+ return 1;
64441+ }
64442+
64443+ ++n;
64444+ }
64445+
64446+ if (*n == '\0')
64447+ return 0;
64448+
64449+ if (*n == '/')
64450+ return 0;
64451+
64452+ return 1;
64453+}
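
/*
 * Usage note: a return of 0 means the pattern matched, e.g.
 * glob_match("/home/*", "/home/alice") == 0, while per the case '?'
 * handling above a '?' never matches a '/'.
 */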
64454+
64455+static struct acl_object_label *
64456+chk_glob_label(struct acl_object_label *globbed,
64457+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
64458+{
64459+ struct acl_object_label *tmp;
64460+
64461+ if (*path == NULL)
64462+ *path = gr_to_filename_nolock(dentry, mnt);
64463+
64464+ tmp = globbed;
64465+
64466+ while (tmp) {
64467+ if (!glob_match(tmp->filename, *path))
64468+ return tmp;
64469+ tmp = tmp->next;
64470+ }
64471+
64472+ return NULL;
64473+}
64474+
64475+static struct acl_object_label *
64476+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
64477+ const ino_t curr_ino, const dev_t curr_dev,
64478+ const struct acl_subject_label *subj, char **path, const int checkglob)
64479+{
64480+ struct acl_subject_label *tmpsubj;
64481+ struct acl_object_label *retval;
64482+ struct acl_object_label *retval2;
64483+
64484+ tmpsubj = (struct acl_subject_label *) subj;
64485+ read_lock(&gr_inode_lock);
64486+ do {
64487+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
64488+ if (retval) {
64489+ if (checkglob && retval->globbed) {
64490+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
64491+ if (retval2)
64492+ retval = retval2;
64493+ }
64494+ break;
64495+ }
64496+ } while ((tmpsubj = tmpsubj->parent_subject));
64497+ read_unlock(&gr_inode_lock);
64498+
64499+ return retval;
64500+}
64501+
64502+static __inline__ struct acl_object_label *
64503+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
64504+ struct dentry *curr_dentry,
64505+ const struct acl_subject_label *subj, char **path, const int checkglob)
64506+{
64507+ int newglob = checkglob;
64508+ ino_t inode;
64509+ dev_t device;
64510+
64511+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
64512+ as we don't want a / * rule to match instead of the / object
64513+ don't do this for create lookups that call this function though, since they're looking up
64514+ on the parent and thus need globbing checks on all paths
64515+ */
64516+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
64517+ newglob = GR_NO_GLOB;
64518+
64519+ spin_lock(&curr_dentry->d_lock);
64520+ inode = curr_dentry->d_inode->i_ino;
64521+ device = __get_dev(curr_dentry);
64522+ spin_unlock(&curr_dentry->d_lock);
64523+
64524+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
64525+}
64526+
64527+#ifdef CONFIG_HUGETLBFS
64528+static inline bool
64529+is_hugetlbfs_mnt(const struct vfsmount *mnt)
64530+{
64531+ int i;
64532+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
64533+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
64534+ return true;
64535+ }
64536+
64537+ return false;
64538+}
64539+#endif
64540+
64541+static struct acl_object_label *
64542+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
64543+ const struct acl_subject_label *subj, char *path, const int checkglob)
64544+{
64545+ struct dentry *dentry = (struct dentry *) l_dentry;
64546+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
64547+ struct mount *real_mnt = real_mount(mnt);
64548+ struct acl_object_label *retval;
64549+ struct dentry *parent;
64550+
64551+ br_read_lock(&vfsmount_lock);
64552+ write_seqlock(&rename_lock);
64553+
64554+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
64555+#ifdef CONFIG_NET
64556+ mnt == sock_mnt ||
64557+#endif
64558+#ifdef CONFIG_HUGETLBFS
64559+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
64560+#endif
64561+ /* ignore Eric Biederman */
64562+ IS_PRIVATE(l_dentry->d_inode))) {
64563+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
64564+ goto out;
64565+ }
64566+
64567+ for (;;) {
64568+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
64569+ break;
64570+
64571+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
64572+ if (!mnt_has_parent(real_mnt))
64573+ break;
64574+
64575+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
64576+ if (retval != NULL)
64577+ goto out;
64578+
64579+ dentry = real_mnt->mnt_mountpoint;
64580+ real_mnt = real_mnt->mnt_parent;
64581+ mnt = &real_mnt->mnt;
64582+ continue;
64583+ }
64584+
64585+ parent = dentry->d_parent;
64586+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
64587+ if (retval != NULL)
64588+ goto out;
64589+
64590+ dentry = parent;
64591+ }
64592+
64593+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
64594+
64595+ /* gr_real_root is pinned so we don't have to hold a reference */
64596+ if (retval == NULL)
64597+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
64598+out:
64599+ write_sequnlock(&rename_lock);
64600+ br_read_unlock(&vfsmount_lock);
64601+
64602+ BUG_ON(retval == NULL);
64603+
64604+ return retval;
64605+}
64606+
64607+static __inline__ struct acl_object_label *
64608+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
64609+ const struct acl_subject_label *subj)
64610+{
64611+ char *path = NULL;
64612+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
64613+}
64614+
64615+static __inline__ struct acl_object_label *
64616+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
64617+ const struct acl_subject_label *subj)
64618+{
64619+ char *path = NULL;
64620+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
64621+}
64622+
64623+static __inline__ struct acl_object_label *
64624+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
64625+ const struct acl_subject_label *subj, char *path)
64626+{
64627+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
64628+}
64629+
64630+struct acl_subject_label *
64631+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
64632+ const struct acl_role_label *role)
64633+{
64634+ struct dentry *dentry = (struct dentry *) l_dentry;
64635+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
64636+ struct mount *real_mnt = real_mount(mnt);
64637+ struct acl_subject_label *retval;
64638+ struct dentry *parent;
64639+
64640+ br_read_lock(&vfsmount_lock);
64641+ write_seqlock(&rename_lock);
64642+
64643+ for (;;) {
64644+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
64645+ break;
64646+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
64647+ if (!mnt_has_parent(real_mnt))
64648+ break;
64649+
64650+ spin_lock(&dentry->d_lock);
64651+ read_lock(&gr_inode_lock);
64652+ retval =
64653+ lookup_acl_subj_label(dentry->d_inode->i_ino,
64654+ __get_dev(dentry), role);
64655+ read_unlock(&gr_inode_lock);
64656+ spin_unlock(&dentry->d_lock);
64657+ if (retval != NULL)
64658+ goto out;
64659+
64660+ dentry = real_mnt->mnt_mountpoint;
64661+ real_mnt = real_mnt->mnt_parent;
64662+ mnt = &real_mnt->mnt;
64663+ continue;
64664+ }
64665+
64666+ spin_lock(&dentry->d_lock);
64667+ read_lock(&gr_inode_lock);
64668+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
64669+ __get_dev(dentry), role);
64670+ read_unlock(&gr_inode_lock);
64671+ parent = dentry->d_parent;
64672+ spin_unlock(&dentry->d_lock);
64673+
64674+ if (retval != NULL)
64675+ goto out;
64676+
64677+ dentry = parent;
64678+ }
64679+
64680+ spin_lock(&dentry->d_lock);
64681+ read_lock(&gr_inode_lock);
64682+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
64683+ __get_dev(dentry), role);
64684+ read_unlock(&gr_inode_lock);
64685+ spin_unlock(&dentry->d_lock);
64686+
64687+ if (unlikely(retval == NULL)) {
64688+ /* gr_real_root is pinned, we don't need to hold a reference */
64689+ read_lock(&gr_inode_lock);
64690+ retval = lookup_acl_subj_label(gr_real_root.dentry->d_inode->i_ino,
64691+ __get_dev(gr_real_root.dentry), role);
64692+ read_unlock(&gr_inode_lock);
64693+ }
64694+out:
64695+ write_sequnlock(&rename_lock);
64696+ br_read_unlock(&vfsmount_lock);
64697+
64698+ BUG_ON(retval == NULL);
64699+
64700+ return retval;
64701+}
64702+
64703+void
64704+assign_special_role(const char *rolename)
64705+{
64706+ struct acl_object_label *obj;
64707+ struct acl_role_label *r;
64708+ struct acl_role_label *assigned = NULL;
64709+ struct task_struct *tsk;
64710+ struct file *filp;
64711+
64712+ FOR_EACH_ROLE_START(r)
64713+ if (!strcmp(rolename, r->rolename) &&
64714+ (r->roletype & GR_ROLE_SPECIAL)) {
64715+ assigned = r;
64716+ break;
64717+ }
64718+ FOR_EACH_ROLE_END(r)
64719+
64720+ if (!assigned)
64721+ return;
64722+
64723+ read_lock(&tasklist_lock);
64724+ read_lock(&grsec_exec_file_lock);
64725+
64726+ tsk = current->real_parent;
64727+ if (tsk == NULL)
64728+ goto out_unlock;
64729+
64730+ filp = tsk->exec_file;
64731+ if (filp == NULL)
64732+ goto out_unlock;
64733+
64734+ tsk->is_writable = 0;
64735+ tsk->inherited = 0;
64736+
64737+ tsk->acl_sp_role = 1;
64738+ tsk->acl_role_id = ++acl_sp_role_value;
64739+ tsk->role = assigned;
64740+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
64741+
64742+ /* ignore additional mmap checks for processes that are writable
64743+ by the default ACL */
64744+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
64745+ if (unlikely(obj->mode & GR_WRITE))
64746+ tsk->is_writable = 1;
64747+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
64748+ if (unlikely(obj->mode & GR_WRITE))
64749+ tsk->is_writable = 1;
64750+
64751+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
64752+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
64753+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
64754+#endif
64755+
64756+out_unlock:
64757+ read_unlock(&grsec_exec_file_lock);
64758+ read_unlock(&tasklist_lock);
64759+ return;
64760+}
64761+
64762+
64763+static void
64764+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
64765+{
64766+ struct task_struct *task = current;
64767+ const struct cred *cred = current_cred();
64768+
64769+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
64770+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
64771+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
64772+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
64773+
64774+ return;
64775+}
64776+
64777+static void
64778+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
64779+{
64780+ struct task_struct *task = current;
64781+ const struct cred *cred = current_cred();
64782+
64783+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
64784+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
64785+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
64786+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
64787+
64788+ return;
64789+}
64790+
64791+static void
64792+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
64793+{
64794+ struct task_struct *task = current;
64795+ const struct cred *cred = current_cred();
64796+
64797+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
64798+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
64799+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
64800+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
64801+
64802+ return;
64803+}
64804+
64805+static void
64806+gr_set_proc_res(struct task_struct *task)
64807+{
64808+ struct acl_subject_label *proc;
64809+ unsigned short i;
64810+
64811+ proc = task->acl;
64812+
64813+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
64814+ return;
64815+
64816+ for (i = 0; i < RLIM_NLIMITS; i++) {
64817+ if (!(proc->resmask & (1U << i)))
64818+ continue;
64819+
64820+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
64821+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
64822+
64823+ if (i == RLIMIT_CPU)
64824+ update_rlimit_cpu(task, proc->res[i].rlim_cur);
64825+ }
64826+
64827+ return;
64828+}
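/*
 * Minimal model of the resmask gating used by gr_set_proc_res() above:
 * a subject carries a bitmap of which rlimit slots it overrides, and
 * only those slots are copied onto the task. Names and sizes here are
 * invented for the sketch; this is plain userspace C, not the kernel
 * rlimit interfaces.
 */
#include <stdio.h>

#define NLIMITS 16

struct limits {
	unsigned long cur[NLIMITS];
	unsigned long max[NLIMITS];
};

static void apply_limits(struct limits *task, const struct limits *subj,
			 unsigned int resmask)
{
	unsigned int i;

	for (i = 0; i < NLIMITS; i++) {
		if (!(resmask & (1U << i)))
			continue;	/* subject doesn't override this slot */
		task->cur[i] = subj->cur[i];
		task->max[i] = subj->max[i];
	}
}

int main(void)
{
	struct limits task = { { 0 }, { 0 } };
	struct limits subj = { { 0 }, { 0 } };

	subj.cur[3] = 1024;
	subj.max[3] = 4096;
	apply_limits(&task, &subj, 1U << 3);	/* only slot 3 is copied */
	printf("%lu/%lu\n", task.cur[3], task.max[3]);	/* 1024/4096 */
	return 0;
}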
64829+
64830+/* both of the below must be called with
64831+ rcu_read_lock();
64832+ read_lock(&tasklist_lock);
64833+ read_lock(&grsec_exec_file_lock);
64834+*/
64835+
64836+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename)
64837+{
64838+ char *tmpname;
64839+ struct acl_subject_label *tmpsubj;
64840+ struct file *filp;
64841+ struct name_entry *nmatch;
64842+
64843+ filp = task->exec_file;
64844+ if (filp == NULL)
64845+ return NULL;
64846+
64847+ /* the following is to apply the correct subject
64848+ on binaries running when the RBAC system
64849+ is enabled, when the binaries have been
64850+ replaced or deleted since their execution
64851+ -----
64852+ when the RBAC system starts, the inode/dev
64853+	   from exec_file will be one that the RBAC system
64854+ is unaware of. It only knows the inode/dev
64855+ of the present file on disk, or the absence
64856+ of it.
64857+ */
64858+
64859+ if (filename)
64860+ nmatch = __lookup_name_entry(state, filename);
64861+ else {
64862+ preempt_disable();
64863+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
64864+
64865+ nmatch = __lookup_name_entry(state, tmpname);
64866+ preempt_enable();
64867+ }
64868+ tmpsubj = NULL;
64869+ if (nmatch) {
64870+ if (nmatch->deleted)
64871+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
64872+ else
64873+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
64874+ }
64875+ /* this also works for the reload case -- if we don't match a potentially inherited subject
64876+ then we fall back to a normal lookup based on the binary's ino/dev
64877+ */
64878+ if (tmpsubj == NULL)
64879+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
64880+
64881+ return tmpsubj;
64882+}
64883+
64884+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename)
64885+{
64886+ return __gr_get_subject_for_task(&running_polstate, task, filename);
64887+}
64888+
64889+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
64890+{
64891+ struct acl_object_label *obj;
64892+ struct file *filp;
64893+
64894+ filp = task->exec_file;
64895+
64896+ task->acl = subj;
64897+ task->is_writable = 0;
64898+ /* ignore additional mmap checks for processes that are writable
64899+ by the default ACL */
64900+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
64901+ if (unlikely(obj->mode & GR_WRITE))
64902+ task->is_writable = 1;
64903+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
64904+ if (unlikely(obj->mode & GR_WRITE))
64905+ task->is_writable = 1;
64906+
64907+ gr_set_proc_res(task);
64908+
64909+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
64910+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
64911+#endif
64912+}
64913+
64914+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
64915+{
64916+ __gr_apply_subject_to_task(&running_polstate, task, subj);
64917+}
64918+
64919+__u32
64920+gr_search_file(const struct dentry * dentry, const __u32 mode,
64921+ const struct vfsmount * mnt)
64922+{
64923+ __u32 retval = mode;
64924+ struct acl_subject_label *curracl;
64925+ struct acl_object_label *currobj;
64926+
64927+ if (unlikely(!(gr_status & GR_READY)))
64928+ return (mode & ~GR_AUDITS);
64929+
64930+ curracl = current->acl;
64931+
64932+ currobj = chk_obj_label(dentry, mnt, curracl);
64933+ retval = currobj->mode & mode;
64934+
64935+ /* if we're opening a specified transfer file for writing
64936+ (e.g. /dev/initctl), then transfer our role to init
64937+ */
64938+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
64939+ current->role->roletype & GR_ROLE_PERSIST)) {
64940+ struct task_struct *task = init_pid_ns.child_reaper;
64941+
64942+ if (task->role != current->role) {
64943+ struct acl_subject_label *subj;
64944+
64945+ task->acl_sp_role = 0;
64946+ task->acl_role_id = current->acl_role_id;
64947+ task->role = current->role;
64948+ rcu_read_lock();
64949+ read_lock(&grsec_exec_file_lock);
64950+ subj = gr_get_subject_for_task(task, NULL);
64951+ gr_apply_subject_to_task(task, subj);
64952+ read_unlock(&grsec_exec_file_lock);
64953+ rcu_read_unlock();
64954+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
64955+ }
64956+ }
64957+
64958+ if (unlikely
64959+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
64960+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
64961+ __u32 new_mode = mode;
64962+
64963+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
64964+
64965+ retval = new_mode;
64966+
64967+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
64968+ new_mode |= GR_INHERIT;
64969+
64970+ if (!(mode & GR_NOLEARN))
64971+ gr_log_learn(dentry, mnt, new_mode);
64972+ }
64973+
64974+ return retval;
64975+}
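/*
 * Sketch of the access-mask arithmetic in gr_search_file() above: the
 * requested mode is ANDed with the object's mode, and the request only
 * counts as fully granted once audit/suppress bits are masked off.
 * The flag values below are invented stand-ins for the GR_* bits.
 */
#include <stdio.h>

#define M_READ     0x01u
#define M_WRITE    0x02u
#define M_AUDIT    0x40u	/* stand-in for GR_AUDITS */
#define M_SUPPRESS 0x80u	/* stand-in for GR_SUPPRESS */

int main(void)
{
	unsigned int objmode = M_READ | M_AUDIT;	/* policy grants read */
	unsigned int want    = M_READ | M_WRITE;	/* caller asks for rw */
	unsigned int got     = objmode & want;

	/* fully granted only if every non-audit requested bit survived */
	unsigned int need = want & ~(M_AUDIT | M_SUPPRESS);

	printf("granted=%#x fully=%d\n", got, got == need);	/* 0x1, 0 */
	return 0;
}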
64976+
64977+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
64978+ const struct dentry *parent,
64979+ const struct vfsmount *mnt)
64980+{
64981+ struct name_entry *match;
64982+ struct acl_object_label *matchpo;
64983+ struct acl_subject_label *curracl;
64984+ char *path;
64985+
64986+ if (unlikely(!(gr_status & GR_READY)))
64987+ return NULL;
64988+
64989+ preempt_disable();
64990+ path = gr_to_filename_rbac(new_dentry, mnt);
64991+ match = lookup_name_entry_create(path);
64992+
64993+ curracl = current->acl;
64994+
64995+ if (match) {
64996+ read_lock(&gr_inode_lock);
64997+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
64998+ read_unlock(&gr_inode_lock);
64999+
65000+ if (matchpo) {
65001+ preempt_enable();
65002+ return matchpo;
65003+ }
65004+ }
65005+
65006+ // lookup parent
65007+
65008+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
65009+
65010+ preempt_enable();
65011+ return matchpo;
65012+}
65013+
65014+__u32
65015+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
65016+ const struct vfsmount * mnt, const __u32 mode)
65017+{
65018+ struct acl_object_label *matchpo;
65019+ __u32 retval;
65020+
65021+ if (unlikely(!(gr_status & GR_READY)))
65022+ return (mode & ~GR_AUDITS);
65023+
65024+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
65025+
65026+ retval = matchpo->mode & mode;
65027+
65028+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
65029+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
65030+ __u32 new_mode = mode;
65031+
65032+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
65033+
65034+ gr_log_learn(new_dentry, mnt, new_mode);
65035+ return new_mode;
65036+ }
65037+
65038+ return retval;
65039+}
65040+
65041+__u32
65042+gr_check_link(const struct dentry * new_dentry,
65043+ const struct dentry * parent_dentry,
65044+ const struct vfsmount * parent_mnt,
65045+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
65046+{
65047+ struct acl_object_label *obj;
65048+ __u32 oldmode, newmode;
65049+ __u32 needmode;
65050+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
65051+ GR_DELETE | GR_INHERIT;
65052+
65053+ if (unlikely(!(gr_status & GR_READY)))
65054+ return (GR_CREATE | GR_LINK);
65055+
65056+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
65057+ oldmode = obj->mode;
65058+
65059+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
65060+ newmode = obj->mode;
65061+
65062+ needmode = newmode & checkmodes;
65063+
65064+ // old name for hardlink must have at least the permissions of the new name
65065+ if ((oldmode & needmode) != needmode)
65066+ goto bad;
65067+
65068+ // if old name had restrictions/auditing, make sure the new name does as well
65069+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
65070+
65071+ // don't allow hardlinking of suid/sgid/fcapped files without permission
65072+ if (is_privileged_binary(old_dentry))
65073+ needmode |= GR_SETID;
65074+
65075+ if ((newmode & needmode) != needmode)
65076+ goto bad;
65077+
65078+ // enforce minimum permissions
65079+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
65080+ return newmode;
65081+bad:
65082+ needmode = oldmode;
65083+ if (is_privileged_binary(old_dentry))
65084+ needmode |= GR_SETID;
65085+
65086+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
65087+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
65088+ return (GR_CREATE | GR_LINK);
65089+ } else if (newmode & GR_SUPPRESS)
65090+ return GR_SUPPRESS;
65091+ else
65092+ return 0;
65093+}
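/*
 * The hardlink policy above reduces to subset tests on permission
 * bitmasks: the old name must already carry every checked permission
 * the new name would grant. A worked sketch with invented bit values:
 */
#include <stdio.h>

static int is_subset(unsigned int super, unsigned int sub)
{
	return (super & sub) == sub;	/* every bit of sub present in super */
}

int main(void)
{
	unsigned int checkmodes = 0x3fu;	/* bits the policy cares about */
	unsigned int oldmode = 0x0bu;		/* perms on the existing name */
	unsigned int newmode = 0x0fu;		/* perms the link would gain */
	unsigned int needmode = newmode & checkmodes;

	/* fails: the new name grants bit 0x04, which the old name lacks */
	printf("link ok: %d\n", is_subset(oldmode, needmode));
	return 0;
}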
65094+
65095+int
65096+gr_check_hidden_task(const struct task_struct *task)
65097+{
65098+ if (unlikely(!(gr_status & GR_READY)))
65099+ return 0;
65100+
65101+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
65102+ return 1;
65103+
65104+ return 0;
65105+}
65106+
65107+int
65108+gr_check_protected_task(const struct task_struct *task)
65109+{
65110+ if (unlikely(!(gr_status & GR_READY) || !task))
65111+ return 0;
65112+
65113+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
65114+ task->acl != current->acl)
65115+ return 1;
65116+
65117+ return 0;
65118+}
65119+
65120+int
65121+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
65122+{
65123+ struct task_struct *p;
65124+ int ret = 0;
65125+
65126+ if (unlikely(!(gr_status & GR_READY) || !pid))
65127+ return ret;
65128+
65129+ read_lock(&tasklist_lock);
65130+ do_each_pid_task(pid, type, p) {
65131+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
65132+ p->acl != current->acl) {
65133+ ret = 1;
65134+ goto out;
65135+ }
65136+ } while_each_pid_task(pid, type, p);
65137+out:
65138+ read_unlock(&tasklist_lock);
65139+
65140+ return ret;
65141+}
65142+
65143+void
65144+gr_copy_label(struct task_struct *tsk)
65145+{
65146+ struct task_struct *p = current;
65147+
65148+ tsk->inherited = p->inherited;
65149+ tsk->acl_sp_role = 0;
65150+ tsk->acl_role_id = p->acl_role_id;
65151+ tsk->acl = p->acl;
65152+ tsk->role = p->role;
65153+ tsk->signal->used_accept = 0;
65154+ tsk->signal->curr_ip = p->signal->curr_ip;
65155+ tsk->signal->saved_ip = p->signal->saved_ip;
65156+ if (p->exec_file)
65157+ get_file(p->exec_file);
65158+ tsk->exec_file = p->exec_file;
65159+ tsk->is_writable = p->is_writable;
65160+ if (unlikely(p->signal->used_accept)) {
65161+ p->signal->curr_ip = 0;
65162+ p->signal->saved_ip = 0;
65163+ }
65164+
65165+ return;
65166+}
65167+
65168+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
65169+
65170+int
65171+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
65172+{
65173+ unsigned int i;
65174+ __u16 num;
65175+ uid_t *uidlist;
65176+ uid_t curuid;
65177+ int realok = 0;
65178+ int effectiveok = 0;
65179+ int fsok = 0;
65180+ uid_t globalreal, globaleffective, globalfs;
65181+
65182+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
65183+ struct user_struct *user;
65184+
65185+ if (!uid_valid(real))
65186+ goto skipit;
65187+
65188+ /* find user based on global namespace */
65189+
65190+ globalreal = GR_GLOBAL_UID(real);
65191+
65192+ user = find_user(make_kuid(&init_user_ns, globalreal));
65193+ if (user == NULL)
65194+ goto skipit;
65195+
65196+ if (gr_process_kernel_setuid_ban(user)) {
65197+ /* for find_user */
65198+ free_uid(user);
65199+ return 1;
65200+ }
65201+
65202+ /* for find_user */
65203+ free_uid(user);
65204+
65205+skipit:
65206+#endif
65207+
65208+ if (unlikely(!(gr_status & GR_READY)))
65209+ return 0;
65210+
65211+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
65212+ gr_log_learn_uid_change(real, effective, fs);
65213+
65214+ num = current->acl->user_trans_num;
65215+ uidlist = current->acl->user_transitions;
65216+
65217+ if (uidlist == NULL)
65218+ return 0;
65219+
65220+ if (!uid_valid(real)) {
65221+ realok = 1;
65222+ globalreal = (uid_t)-1;
65223+ } else {
65224+ globalreal = GR_GLOBAL_UID(real);
65225+ }
65226+ if (!uid_valid(effective)) {
65227+ effectiveok = 1;
65228+ globaleffective = (uid_t)-1;
65229+ } else {
65230+ globaleffective = GR_GLOBAL_UID(effective);
65231+ }
65232+ if (!uid_valid(fs)) {
65233+ fsok = 1;
65234+ globalfs = (uid_t)-1;
65235+ } else {
65236+ globalfs = GR_GLOBAL_UID(fs);
65237+ }
65238+
65239+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
65240+ for (i = 0; i < num; i++) {
65241+ curuid = uidlist[i];
65242+ if (globalreal == curuid)
65243+ realok = 1;
65244+ if (globaleffective == curuid)
65245+ effectiveok = 1;
65246+ if (globalfs == curuid)
65247+ fsok = 1;
65248+ }
65249+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
65250+ for (i = 0; i < num; i++) {
65251+ curuid = uidlist[i];
65252+ if (globalreal == curuid)
65253+ break;
65254+ if (globaleffective == curuid)
65255+ break;
65256+ if (globalfs == curuid)
65257+ break;
65258+ }
65259+ /* not in deny list */
65260+ if (i == num) {
65261+ realok = 1;
65262+ effectiveok = 1;
65263+ fsok = 1;
65264+ }
65265+ }
65266+
65267+ if (realok && effectiveok && fsok)
65268+ return 0;
65269+ else {
65270+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
65271+ return 1;
65272+ }
65273+}
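/*
 * Model of the id-transition policy in gr_check_user_change() above,
 * under assumed semantics: with an allow list, every requested id must
 * appear in the list; with a deny list, the transition fails if any
 * requested id appears. Userspace sketch, not the kernel data layout.
 */
#include <stdio.h>

enum trans_type { ID_ALLOW, ID_DENY };

static int transition_ok(enum trans_type type, const unsigned int *list,
			 int n, const unsigned int *want, int m)
{
	int i, j;

	for (j = 0; j < m; j++) {
		int hit = 0;

		for (i = 0; i < n; i++)
			if (list[i] == want[j])
				hit = 1;
		if (type == ID_ALLOW && !hit)
			return 0;	/* id missing from the allow list */
		if (type == ID_DENY && hit)
			return 0;	/* id present in the deny list */
	}
	return 1;
}

int main(void)
{
	unsigned int allow[] = { 0, 1000 };
	unsigned int want[] = { 1000, 1000, 1000 };	/* real/effective/fs */

	printf("%d\n", transition_ok(ID_ALLOW, allow, 2, want, 3));	/* 1 */
	printf("%d\n", transition_ok(ID_DENY, allow, 2, want, 3));	/* 0 */
	return 0;
}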
65274+
65275+int
65276+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
65277+{
65278+ unsigned int i;
65279+ __u16 num;
65280+ gid_t *gidlist;
65281+ gid_t curgid;
65282+ int realok = 0;
65283+ int effectiveok = 0;
65284+ int fsok = 0;
65285+ gid_t globalreal, globaleffective, globalfs;
65286+
65287+ if (unlikely(!(gr_status & GR_READY)))
65288+ return 0;
65289+
65290+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
65291+ gr_log_learn_gid_change(real, effective, fs);
65292+
65293+ num = current->acl->group_trans_num;
65294+ gidlist = current->acl->group_transitions;
65295+
65296+ if (gidlist == NULL)
65297+ return 0;
65298+
65299+ if (!gid_valid(real)) {
65300+ realok = 1;
65301+ globalreal = (gid_t)-1;
65302+ } else {
65303+ globalreal = GR_GLOBAL_GID(real);
65304+ }
65305+ if (!gid_valid(effective)) {
65306+ effectiveok = 1;
65307+ globaleffective = (gid_t)-1;
65308+ } else {
65309+ globaleffective = GR_GLOBAL_GID(effective);
65310+ }
65311+ if (!gid_valid(fs)) {
65312+ fsok = 1;
65313+ globalfs = (gid_t)-1;
65314+ } else {
65315+ globalfs = GR_GLOBAL_GID(fs);
65316+ }
65317+
65318+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
65319+ for (i = 0; i < num; i++) {
65320+ curgid = gidlist[i];
65321+ if (globalreal == curgid)
65322+ realok = 1;
65323+ if (globaleffective == curgid)
65324+ effectiveok = 1;
65325+ if (globalfs == curgid)
65326+ fsok = 1;
65327+ }
65328+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
65329+ for (i = 0; i < num; i++) {
65330+ curgid = gidlist[i];
65331+ if (globalreal == curgid)
65332+ break;
65333+ if (globaleffective == curgid)
65334+ break;
65335+ if (globalfs == curgid)
65336+ break;
65337+ }
65338+ /* not in deny list */
65339+ if (i == num) {
65340+ realok = 1;
65341+ effectiveok = 1;
65342+ fsok = 1;
65343+ }
65344+ }
65345+
65346+ if (realok && effectiveok && fsok)
65347+ return 0;
65348+ else {
65349+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
65350+ return 1;
65351+ }
65352+}
65353+
65354+extern int gr_acl_is_capable(const int cap);
65355+
65356+void
65357+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
65358+{
65359+ struct acl_role_label *role = task->role;
65360+ struct acl_subject_label *subj = NULL;
65361+ struct acl_object_label *obj;
65362+ struct file *filp;
65363+ uid_t uid;
65364+ gid_t gid;
65365+
65366+ if (unlikely(!(gr_status & GR_READY)))
65367+ return;
65368+
65369+ uid = GR_GLOBAL_UID(kuid);
65370+ gid = GR_GLOBAL_GID(kgid);
65371+
65372+ filp = task->exec_file;
65373+
65374+ /* kernel process, we'll give them the kernel role */
65375+ if (unlikely(!filp)) {
65376+ task->role = running_polstate.kernel_role;
65377+ task->acl = running_polstate.kernel_role->root_label;
65378+ return;
65379+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
65380+ /* save the current ip at time of role lookup so that the proper
65381+ IP will be learned for role_allowed_ip */
65382+ task->signal->saved_ip = task->signal->curr_ip;
65383+ role = lookup_acl_role_label(task, uid, gid);
65384+ }
65385+
65386+ /* don't change the role if we're not a privileged process */
65387+ if (role && task->role != role &&
65388+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
65389+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
65390+ return;
65391+
65392+	/* perform subject lookup in the possibly new role;
65393+ we can use this result below in the case where role == task->role
65394+ */
65395+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
65396+
65397+	/* if we changed uid/gid but ended up with the same role
65398+	   and are using inheritance, don't lose the inherited subject:
65399+	   if the current subject differs from what a normal lookup
65400+	   would produce, we arrived at it via inheritance, so
65401+	   keep that subject
65402+ */
65403+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
65404+ (subj == task->acl)))
65405+ task->acl = subj;
65406+
65407+ /* leave task->inherited unaffected */
65408+
65409+ task->role = role;
65410+
65411+ task->is_writable = 0;
65412+
65413+ /* ignore additional mmap checks for processes that are writable
65414+ by the default ACL */
65415+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
65416+ if (unlikely(obj->mode & GR_WRITE))
65417+ task->is_writable = 1;
65418+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
65419+ if (unlikely(obj->mode & GR_WRITE))
65420+ task->is_writable = 1;
65421+
65422+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
65423+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
65424+#endif
65425+
65426+ gr_set_proc_res(task);
65427+
65428+ return;
65429+}
65430+
65431+int
65432+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
65433+ const int unsafe_flags)
65434+{
65435+ struct task_struct *task = current;
65436+ struct acl_subject_label *newacl;
65437+ struct acl_object_label *obj;
65438+ __u32 retmode;
65439+
65440+ if (unlikely(!(gr_status & GR_READY)))
65441+ return 0;
65442+
65443+ newacl = chk_subj_label(dentry, mnt, task->role);
65444+
65445+	/* special handling for the case where an admin role ran strace -f -p <pid>
65446+	   and the traced pid then did an exec
65447+ */
65448+ rcu_read_lock();
65449+ read_lock(&tasklist_lock);
65450+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
65451+ (task->parent->acl->mode & GR_POVERRIDE))) {
65452+ read_unlock(&tasklist_lock);
65453+ rcu_read_unlock();
65454+ goto skip_check;
65455+ }
65456+ read_unlock(&tasklist_lock);
65457+ rcu_read_unlock();
65458+
65459+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
65460+ !(task->role->roletype & GR_ROLE_GOD) &&
65461+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
65462+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
65463+ if (unsafe_flags & LSM_UNSAFE_SHARE)
65464+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
65465+ else
65466+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
65467+ return -EACCES;
65468+ }
65469+
65470+skip_check:
65471+
65472+ obj = chk_obj_label(dentry, mnt, task->acl);
65473+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
65474+
65475+ if (!(task->acl->mode & GR_INHERITLEARN) &&
65476+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
65477+ if (obj->nested)
65478+ task->acl = obj->nested;
65479+ else
65480+ task->acl = newacl;
65481+ task->inherited = 0;
65482+ } else {
65483+ task->inherited = 1;
65484+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
65485+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
65486+ }
65487+
65488+ task->is_writable = 0;
65489+
65490+ /* ignore additional mmap checks for processes that are writable
65491+ by the default ACL */
65492+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
65493+ if (unlikely(obj->mode & GR_WRITE))
65494+ task->is_writable = 1;
65495+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
65496+ if (unlikely(obj->mode & GR_WRITE))
65497+ task->is_writable = 1;
65498+
65499+ gr_set_proc_res(task);
65500+
65501+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
65502+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
65503+#endif
65504+ return 0;
65505+}
65506+
65507+/* always called with valid inodev ptr */
65508+static void
65509+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
65510+{
65511+ struct acl_object_label *matchpo;
65512+ struct acl_subject_label *matchps;
65513+ struct acl_subject_label *subj;
65514+ struct acl_role_label *role;
65515+ unsigned int x;
65516+
65517+ FOR_EACH_ROLE_START(role)
65518+ FOR_EACH_SUBJECT_START(role, subj, x)
65519+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
65520+ matchpo->mode |= GR_DELETED;
65521+ FOR_EACH_SUBJECT_END(subj,x)
65522+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
65523+ /* nested subjects aren't in the role's subj_hash table */
65524+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
65525+ matchpo->mode |= GR_DELETED;
65526+ FOR_EACH_NESTED_SUBJECT_END(subj)
65527+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
65528+ matchps->mode |= GR_DELETED;
65529+ FOR_EACH_ROLE_END(role)
65530+
65531+ inodev->nentry->deleted = 1;
65532+
65533+ return;
65534+}
65535+
65536+void
65537+gr_handle_delete(const ino_t ino, const dev_t dev)
65538+{
65539+ struct inodev_entry *inodev;
65540+
65541+ if (unlikely(!(gr_status & GR_READY)))
65542+ return;
65543+
65544+ write_lock(&gr_inode_lock);
65545+ inodev = lookup_inodev_entry(ino, dev);
65546+ if (inodev != NULL)
65547+ do_handle_delete(inodev, ino, dev);
65548+ write_unlock(&gr_inode_lock);
65549+
65550+ return;
65551+}
65552+
65553+static void
65554+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
65555+ const ino_t newinode, const dev_t newdevice,
65556+ struct acl_subject_label *subj)
65557+{
65558+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
65559+ struct acl_object_label *match;
65560+
65561+ match = subj->obj_hash[index];
65562+
65563+ while (match && (match->inode != oldinode ||
65564+ match->device != olddevice ||
65565+ !(match->mode & GR_DELETED)))
65566+ match = match->next;
65567+
65568+ if (match && (match->inode == oldinode)
65569+ && (match->device == olddevice)
65570+ && (match->mode & GR_DELETED)) {
65571+ if (match->prev == NULL) {
65572+ subj->obj_hash[index] = match->next;
65573+ if (match->next != NULL)
65574+ match->next->prev = NULL;
65575+ } else {
65576+ match->prev->next = match->next;
65577+ if (match->next != NULL)
65578+ match->next->prev = match->prev;
65579+ }
65580+ match->prev = NULL;
65581+ match->next = NULL;
65582+ match->inode = newinode;
65583+ match->device = newdevice;
65584+ match->mode &= ~GR_DELETED;
65585+
65586+ insert_acl_obj_label(match, subj);
65587+ }
65588+
65589+ return;
65590+}
65591+
65592+static void
65593+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
65594+ const ino_t newinode, const dev_t newdevice,
65595+ struct acl_role_label *role)
65596+{
65597+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
65598+ struct acl_subject_label *match;
65599+
65600+ match = role->subj_hash[index];
65601+
65602+ while (match && (match->inode != oldinode ||
65603+ match->device != olddevice ||
65604+ !(match->mode & GR_DELETED)))
65605+ match = match->next;
65606+
65607+ if (match && (match->inode == oldinode)
65608+ && (match->device == olddevice)
65609+ && (match->mode & GR_DELETED)) {
65610+ if (match->prev == NULL) {
65611+ role->subj_hash[index] = match->next;
65612+ if (match->next != NULL)
65613+ match->next->prev = NULL;
65614+ } else {
65615+ match->prev->next = match->next;
65616+ if (match->next != NULL)
65617+ match->next->prev = match->prev;
65618+ }
65619+ match->prev = NULL;
65620+ match->next = NULL;
65621+ match->inode = newinode;
65622+ match->device = newdevice;
65623+ match->mode &= ~GR_DELETED;
65624+
65625+ insert_acl_subj_label(match, role);
65626+ }
65627+
65628+ return;
65629+}
65630+
65631+static void
65632+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
65633+ const ino_t newinode, const dev_t newdevice)
65634+{
65635+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
65636+ struct inodev_entry *match;
65637+
65638+ match = running_polstate.inodev_set.i_hash[index];
65639+
65640+ while (match && (match->nentry->inode != oldinode ||
65641+ match->nentry->device != olddevice || !match->nentry->deleted))
65642+ match = match->next;
65643+
65644+ if (match && (match->nentry->inode == oldinode)
65645+ && (match->nentry->device == olddevice) &&
65646+ match->nentry->deleted) {
65647+ if (match->prev == NULL) {
65648+ running_polstate.inodev_set.i_hash[index] = match->next;
65649+ if (match->next != NULL)
65650+ match->next->prev = NULL;
65651+ } else {
65652+ match->prev->next = match->next;
65653+ if (match->next != NULL)
65654+ match->next->prev = match->prev;
65655+ }
65656+ match->prev = NULL;
65657+ match->next = NULL;
65658+ match->nentry->inode = newinode;
65659+ match->nentry->device = newdevice;
65660+ match->nentry->deleted = 0;
65661+
65662+ insert_inodev_entry(match);
65663+ }
65664+
65665+ return;
65666+}
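/*
 * All three update_* helpers above share one pattern: find a deleted
 * entry in a chained hash bucket, unlink it from its doubly linked
 * chain, rewrite its key, and re-insert it. A generic userspace sketch
 * of the unlink step (the re-insert is ordinary list insertion):
 */
#include <stddef.h>
#include <stdio.h>

struct entry {
	int key;
	struct entry *prev;
	struct entry *next;
};

static void unlink_entry(struct entry **bucket, struct entry *e)
{
	if (e->prev == NULL) {		/* e is the head of the chain */
		*bucket = e->next;
		if (e->next)
			e->next->prev = NULL;
	} else {
		e->prev->next = e->next;
		if (e->next)
			e->next->prev = e->prev;
	}
	e->prev = NULL;
	e->next = NULL;
}

int main(void)
{
	struct entry a = { 1, NULL, NULL };
	struct entry b = { 2, NULL, NULL };
	struct entry *bucket = &a;

	a.next = &b;
	b.prev = &a;
	unlink_entry(&bucket, &b);	/* drop the tail entry */
	printf("head=%d next=%p\n", bucket->key, (void *)bucket->next);
	return 0;
}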
65667+
65668+static void
65669+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
65670+{
65671+ struct acl_subject_label *subj;
65672+ struct acl_role_label *role;
65673+ unsigned int x;
65674+
65675+ FOR_EACH_ROLE_START(role)
65676+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
65677+
65678+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
65679+ if ((subj->inode == ino) && (subj->device == dev)) {
65680+ subj->inode = ino;
65681+ subj->device = dev;
65682+ }
65683+ /* nested subjects aren't in the role's subj_hash table */
65684+ update_acl_obj_label(matchn->inode, matchn->device,
65685+ ino, dev, subj);
65686+ FOR_EACH_NESTED_SUBJECT_END(subj)
65687+ FOR_EACH_SUBJECT_START(role, subj, x)
65688+ update_acl_obj_label(matchn->inode, matchn->device,
65689+ ino, dev, subj);
65690+ FOR_EACH_SUBJECT_END(subj,x)
65691+ FOR_EACH_ROLE_END(role)
65692+
65693+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
65694+
65695+ return;
65696+}
65697+
65698+static void
65699+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
65700+ const struct vfsmount *mnt)
65701+{
65702+ ino_t ino = dentry->d_inode->i_ino;
65703+ dev_t dev = __get_dev(dentry);
65704+
65705+ __do_handle_create(matchn, ino, dev);
65706+
65707+ return;
65708+}
65709+
65710+void
65711+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
65712+{
65713+ struct name_entry *matchn;
65714+
65715+ if (unlikely(!(gr_status & GR_READY)))
65716+ return;
65717+
65718+ preempt_disable();
65719+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
65720+
65721+ if (unlikely((unsigned long)matchn)) {
65722+ write_lock(&gr_inode_lock);
65723+ do_handle_create(matchn, dentry, mnt);
65724+ write_unlock(&gr_inode_lock);
65725+ }
65726+ preempt_enable();
65727+
65728+ return;
65729+}
65730+
65731+void
65732+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
65733+{
65734+ struct name_entry *matchn;
65735+
65736+ if (unlikely(!(gr_status & GR_READY)))
65737+ return;
65738+
65739+ preempt_disable();
65740+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
65741+
65742+ if (unlikely((unsigned long)matchn)) {
65743+ write_lock(&gr_inode_lock);
65744+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
65745+ write_unlock(&gr_inode_lock);
65746+ }
65747+ preempt_enable();
65748+
65749+ return;
65750+}
65751+
65752+void
65753+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
65754+ struct dentry *old_dentry,
65755+ struct dentry *new_dentry,
65756+ struct vfsmount *mnt, const __u8 replace)
65757+{
65758+ struct name_entry *matchn;
65759+ struct inodev_entry *inodev;
65760+ struct inode *inode = new_dentry->d_inode;
65761+ ino_t old_ino = old_dentry->d_inode->i_ino;
65762+ dev_t old_dev = __get_dev(old_dentry);
65763+
65764+ /* vfs_rename swaps the name and parent link for old_dentry and
65765+	   new_dentry.
65766+	   At this point, old_dentry has the new name, parent link, and inode
65767+	   for the renamed file.
65768+	   If a file is being replaced by a rename, new_dentry has the inode
65769+	   and name for the replaced file.
65770+ */
65771+
65772+ if (unlikely(!(gr_status & GR_READY)))
65773+ return;
65774+
65775+ preempt_disable();
65776+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
65777+
65778+ /* we wouldn't have to check d_inode if it weren't for
65779+ NFS silly-renaming
65780+ */
65781+
65782+ write_lock(&gr_inode_lock);
65783+ if (unlikely(replace && inode)) {
65784+ ino_t new_ino = inode->i_ino;
65785+ dev_t new_dev = __get_dev(new_dentry);
65786+
65787+ inodev = lookup_inodev_entry(new_ino, new_dev);
65788+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
65789+ do_handle_delete(inodev, new_ino, new_dev);
65790+ }
65791+
65792+ inodev = lookup_inodev_entry(old_ino, old_dev);
65793+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
65794+ do_handle_delete(inodev, old_ino, old_dev);
65795+
65796+ if (unlikely((unsigned long)matchn))
65797+ do_handle_create(matchn, old_dentry, mnt);
65798+
65799+ write_unlock(&gr_inode_lock);
65800+ preempt_enable();
65801+
65802+ return;
65803+}
65804+
65805+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
65806+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
65807+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
65808+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
65809+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
65810+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
65811+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
65812+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
65813+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
65814+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
65815+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
65816+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
65817+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
65818+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
65819+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
65820+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
65821+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
65822+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
65823+};
65824+
65825+void
65826+gr_learn_resource(const struct task_struct *task,
65827+ const int res, const unsigned long wanted, const int gt)
65828+{
65829+ struct acl_subject_label *acl;
65830+ const struct cred *cred;
65831+
65832+ if (unlikely((gr_status & GR_READY) &&
65833+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
65834+ goto skip_reslog;
65835+
65836+ gr_log_resource(task, res, wanted, gt);
65837+skip_reslog:
65838+
65839+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
65840+ return;
65841+
65842+ acl = task->acl;
65843+
65844+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
65845+ !(acl->resmask & (1U << (unsigned short) res))))
65846+ return;
65847+
65848+ if (wanted >= acl->res[res].rlim_cur) {
65849+ unsigned long res_add;
65850+
65851+ res_add = wanted + res_learn_bumps[res];
65852+
65853+ acl->res[res].rlim_cur = res_add;
65854+
65855+ if (wanted > acl->res[res].rlim_max)
65856+ acl->res[res].rlim_max = res_add;
65857+
65858+ /* only log the subject filename, since resource logging is supported for
65859+ single-subject learning only */
65860+ rcu_read_lock();
65861+ cred = __task_cred(task);
65862+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
65863+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
65864+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
65865+ "", (unsigned long) res, &task->signal->saved_ip);
65866+ rcu_read_unlock();
65867+ }
65868+
65869+ return;
65870+}
65871+EXPORT_SYMBOL(gr_learn_resource);
65872+#endif
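/*
 * Sketch of the resource-learning bump in gr_learn_resource() above:
 * when a request reaches the learned soft limit, the limit is raised
 * to the request plus a per-resource headroom, so repeated overruns
 * converge in few steps. The bump value below is invented.
 */
#include <stdio.h>

struct rlim {
	unsigned long cur;
	unsigned long max;
};

static void learn(struct rlim *r, unsigned long wanted, unsigned long bump)
{
	if (wanted < r->cur)
		return;			/* within the learned limit */
	r->cur = wanted + bump;
	if (wanted > r->max)
		r->max = wanted + bump;
}

int main(void)
{
	struct rlim fsize = { 4096, 4096 };

	learn(&fsize, 10000, 1024);	/* overrun: both limits become 11024 */
	printf("%lu/%lu\n", fsize.cur, fsize.max);
	return 0;
}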
65873+
65874+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
65875+void
65876+pax_set_initial_flags(struct linux_binprm *bprm)
65877+{
65878+ struct task_struct *task = current;
65879+ struct acl_subject_label *proc;
65880+ unsigned long flags;
65881+
65882+ if (unlikely(!(gr_status & GR_READY)))
65883+ return;
65884+
65885+ flags = pax_get_flags(task);
65886+
65887+ proc = task->acl;
65888+
65889+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
65890+ flags &= ~MF_PAX_PAGEEXEC;
65891+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
65892+ flags &= ~MF_PAX_SEGMEXEC;
65893+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
65894+ flags &= ~MF_PAX_RANDMMAP;
65895+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
65896+ flags &= ~MF_PAX_EMUTRAMP;
65897+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
65898+ flags &= ~MF_PAX_MPROTECT;
65899+
65900+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
65901+ flags |= MF_PAX_PAGEEXEC;
65902+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
65903+ flags |= MF_PAX_SEGMEXEC;
65904+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
65905+ flags |= MF_PAX_RANDMMAP;
65906+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
65907+ flags |= MF_PAX_EMUTRAMP;
65908+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
65909+ flags |= MF_PAX_MPROTECT;
65910+
65911+ pax_set_flags(task, flags);
65912+
65913+ return;
65914+}
65915+#endif
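/*
 * The PaX flag override above is a clear-then-set mask update: the
 * subject's "disable" bits clear flags first, then its "enable" bits
 * set them. Generic sketch with invented flag values:
 */
#include <stdio.h>

#define F_A 0x1ul
#define F_B 0x2ul

static unsigned long override_flags(unsigned long flags,
				    unsigned long disable,
				    unsigned long enable)
{
	flags &= ~disable;	/* policy-disabled bits go first */
	flags |= enable;	/* policy-enabled bits win last */
	return flags;
}

int main(void)
{
	/* start with A set; the policy disables A and enables B */
	printf("%#lx\n", override_flags(F_A, F_A, F_B));	/* 0x2 */
	return 0;
}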
65916+
65917+int
65918+gr_handle_proc_ptrace(struct task_struct *task)
65919+{
65920+ struct file *filp;
65921+ struct task_struct *tmp = task;
65922+ struct task_struct *curtemp = current;
65923+ __u32 retmode;
65924+
65925+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
65926+ if (unlikely(!(gr_status & GR_READY)))
65927+ return 0;
65928+#endif
65929+
65930+ read_lock(&tasklist_lock);
65931+ read_lock(&grsec_exec_file_lock);
65932+ filp = task->exec_file;
65933+
65934+ while (task_pid_nr(tmp) > 0) {
65935+ if (tmp == curtemp)
65936+ break;
65937+ tmp = tmp->real_parent;
65938+ }
65939+
65940+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
65941+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
65942+ read_unlock(&grsec_exec_file_lock);
65943+ read_unlock(&tasklist_lock);
65944+ return 1;
65945+ }
65946+
65947+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
65948+ if (!(gr_status & GR_READY)) {
65949+ read_unlock(&grsec_exec_file_lock);
65950+ read_unlock(&tasklist_lock);
65951+ return 0;
65952+ }
65953+#endif
65954+
65955+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
65956+ read_unlock(&grsec_exec_file_lock);
65957+ read_unlock(&tasklist_lock);
65958+
65959+ if (retmode & GR_NOPTRACE)
65960+ return 1;
65961+
65962+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
65963+ && (current->acl != task->acl || (current->acl != current->role->root_label
65964+ && task_pid_nr(current) != task_pid_nr(task))))
65965+ return 1;
65966+
65967+ return 0;
65968+}
65969+
65970+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
65971+{
65972+ if (unlikely(!(gr_status & GR_READY)))
65973+ return;
65974+
65975+ if (!(current->role->roletype & GR_ROLE_GOD))
65976+ return;
65977+
65978+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
65979+ p->role->rolename, gr_task_roletype_to_char(p),
65980+ p->acl->filename);
65981+}
65982+
65983+int
65984+gr_handle_ptrace(struct task_struct *task, const long request)
65985+{
65986+ struct task_struct *tmp = task;
65987+ struct task_struct *curtemp = current;
65988+ __u32 retmode;
65989+
65990+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
65991+ if (unlikely(!(gr_status & GR_READY)))
65992+ return 0;
65993+#endif
65994+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65995+ read_lock(&tasklist_lock);
65996+ while (task_pid_nr(tmp) > 0) {
65997+ if (tmp == curtemp)
65998+ break;
65999+ tmp = tmp->real_parent;
66000+ }
66001+
66002+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
66003+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
66004+ read_unlock(&tasklist_lock);
66005+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
66006+ return 1;
66007+ }
66008+ read_unlock(&tasklist_lock);
66009+ }
66010+
66011+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
66012+ if (!(gr_status & GR_READY))
66013+ return 0;
66014+#endif
66015+
66016+ read_lock(&grsec_exec_file_lock);
66017+ if (unlikely(!task->exec_file)) {
66018+ read_unlock(&grsec_exec_file_lock);
66019+ return 0;
66020+ }
66021+
66022+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
66023+ read_unlock(&grsec_exec_file_lock);
66024+
66025+ if (retmode & GR_NOPTRACE) {
66026+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
66027+ return 1;
66028+ }
66029+
66030+ if (retmode & GR_PTRACERD) {
66031+ switch (request) {
66032+ case PTRACE_SEIZE:
66033+ case PTRACE_POKETEXT:
66034+ case PTRACE_POKEDATA:
66035+ case PTRACE_POKEUSR:
66036+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
66037+ case PTRACE_SETREGS:
66038+ case PTRACE_SETFPREGS:
66039+#endif
66040+#ifdef CONFIG_X86
66041+ case PTRACE_SETFPXREGS:
66042+#endif
66043+#ifdef CONFIG_ALTIVEC
66044+ case PTRACE_SETVRREGS:
66045+#endif
66046+ return 1;
66047+ default:
66048+ return 0;
66049+ }
66050+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
66051+ !(current->role->roletype & GR_ROLE_GOD) &&
66052+ (current->acl != task->acl)) {
66053+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
66054+ return 1;
66055+ }
66056+
66057+ return 0;
66058+}
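/*
 * Both ptrace checks above decide whether current is an ancestor of
 * the target by chasing real_parent until pid 0 is reached. Userspace
 * model with a plain parent pointer (assumed semantics, simplified):
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct proc {
	int pid;
	struct proc *parent;
};

static bool is_ancestor(const struct proc *anc, const struct proc *p)
{
	for (; p && p->pid > 0; p = p->parent)
		if (p == anc)
			return true;	/* found anc on the parent chain */
	return false;
}

int main(void)
{
	struct proc init = { 1, NULL };
	struct proc shell = { 100, &init };
	struct proc child = { 200, &shell };

	printf("%d %d\n", is_ancestor(&shell, &child),	/* 1 */
	       is_ancestor(&child, &shell));		/* 0 */
	return 0;
}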
66059+
66060+static int is_writable_mmap(const struct file *filp)
66061+{
66062+ struct task_struct *task = current;
66063+ struct acl_object_label *obj, *obj2;
66064+
66065+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
66066+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
66067+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
66068+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
66069+ task->role->root_label);
66070+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
66071+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
66072+ return 1;
66073+ }
66074+ }
66075+ return 0;
66076+}
66077+
66078+int
66079+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
66080+{
66081+ __u32 mode;
66082+
66083+ if (unlikely(!file || !(prot & PROT_EXEC)))
66084+ return 1;
66085+
66086+ if (is_writable_mmap(file))
66087+ return 0;
66088+
66089+ mode =
66090+ gr_search_file(file->f_path.dentry,
66091+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
66092+ file->f_path.mnt);
66093+
66094+ if (!gr_tpe_allow(file))
66095+ return 0;
66096+
66097+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
66098+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
66099+ return 0;
66100+ } else if (unlikely(!(mode & GR_EXEC))) {
66101+ return 0;
66102+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
66103+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
66104+ return 1;
66105+ }
66106+
66107+ return 1;
66108+}
66109+
66110+int
66111+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
66112+{
66113+ __u32 mode;
66114+
66115+ if (unlikely(!file || !(prot & PROT_EXEC)))
66116+ return 1;
66117+
66118+ if (is_writable_mmap(file))
66119+ return 0;
66120+
66121+ mode =
66122+ gr_search_file(file->f_path.dentry,
66123+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
66124+ file->f_path.mnt);
66125+
66126+ if (!gr_tpe_allow(file))
66127+ return 0;
66128+
66129+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
66130+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
66131+ return 0;
66132+ } else if (unlikely(!(mode & GR_EXEC))) {
66133+ return 0;
66134+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
66135+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
66136+ return 1;
66137+ }
66138+
66139+ return 1;
66140+}
66141+
66142+void
66143+gr_acl_handle_psacct(struct task_struct *task, const long code)
66144+{
66145+ unsigned long runtime;
66146+ unsigned long cputime;
66147+ unsigned int wday, cday;
66148+ __u8 whr, chr;
66149+ __u8 wmin, cmin;
66150+ __u8 wsec, csec;
66151+ struct timespec timeval;
66152+
66153+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
66154+ !(task->acl->mode & GR_PROCACCT)))
66155+ return;
66156+
66157+ do_posix_clock_monotonic_gettime(&timeval);
66158+ runtime = timeval.tv_sec - task->start_time.tv_sec;
66159+ wday = runtime / (3600 * 24);
66160+ runtime -= wday * (3600 * 24);
66161+ whr = runtime / 3600;
66162+ runtime -= whr * 3600;
66163+ wmin = runtime / 60;
66164+ runtime -= wmin * 60;
66165+ wsec = runtime;
66166+
66167+ cputime = (task->utime + task->stime) / HZ;
66168+ cday = cputime / (3600 * 24);
66169+ cputime -= cday * (3600 * 24);
66170+ chr = cputime / 3600;
66171+ cputime -= chr * 3600;
66172+ cmin = cputime / 60;
66173+ cputime -= cmin * 60;
66174+ csec = cputime;
66175+
66176+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
66177+
66178+ return;
66179+}
66180+
66181+#ifdef CONFIG_TASKSTATS
66182+int gr_is_taskstats_denied(int pid)
66183+{
66184+ struct task_struct *task;
66185+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66186+ const struct cred *cred;
66187+#endif
66188+ int ret = 0;
66189+
66190+ /* restrict taskstats viewing to un-chrooted root users
66191+ who have the 'view' subject flag if the RBAC system is enabled
66192+ */
66193+
66194+ rcu_read_lock();
66195+ read_lock(&tasklist_lock);
66196+ task = find_task_by_vpid(pid);
66197+ if (task) {
66198+#ifdef CONFIG_GRKERNSEC_CHROOT
66199+ if (proc_is_chrooted(task))
66200+ ret = -EACCES;
66201+#endif
66202+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66203+ cred = __task_cred(task);
66204+#ifdef CONFIG_GRKERNSEC_PROC_USER
66205+ if (gr_is_global_nonroot(cred->uid))
66206+ ret = -EACCES;
66207+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66208+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
66209+ ret = -EACCES;
66210+#endif
66211+#endif
66212+ if (gr_status & GR_READY) {
66213+ if (!(task->acl->mode & GR_VIEW))
66214+ ret = -EACCES;
66215+ }
66216+ } else
66217+ ret = -ENOENT;
66218+
66219+ read_unlock(&tasklist_lock);
66220+ rcu_read_unlock();
66221+
66222+ return ret;
66223+}
66224+#endif
66225+
66226+/* AUXV entries are filled via a descendant of search_binary_handler
66227+ after we've already applied the subject for the target
66228+*/
66229+int gr_acl_enable_at_secure(void)
66230+{
66231+ if (unlikely(!(gr_status & GR_READY)))
66232+ return 0;
66233+
66234+ if (current->acl->mode & GR_ATSECURE)
66235+ return 1;
66236+
66237+ return 0;
66238+}
66239+
66240+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
66241+{
66242+ struct task_struct *task = current;
66243+ struct dentry *dentry = file->f_path.dentry;
66244+ struct vfsmount *mnt = file->f_path.mnt;
66245+ struct acl_object_label *obj, *tmp;
66246+ struct acl_subject_label *subj;
66247+ unsigned int bufsize;
66248+ int is_not_root;
66249+ char *path;
66250+ dev_t dev = __get_dev(dentry);
66251+
66252+ if (unlikely(!(gr_status & GR_READY)))
66253+ return 1;
66254+
66255+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
66256+ return 1;
66257+
66258+ /* ignore Eric Biederman */
66259+ if (IS_PRIVATE(dentry->d_inode))
66260+ return 1;
66261+
66262+ subj = task->acl;
66263+ read_lock(&gr_inode_lock);
66264+ do {
66265+ obj = lookup_acl_obj_label(ino, dev, subj);
66266+ if (obj != NULL) {
66267+ read_unlock(&gr_inode_lock);
66268+ return (obj->mode & GR_FIND) ? 1 : 0;
66269+ }
66270+ } while ((subj = subj->parent_subject));
66271+ read_unlock(&gr_inode_lock);
66272+
66273+ /* this is purely an optimization since we're looking for an object
66274+	   for the directory we're doing a readdir on.
66275+	   If it's possible for any globbed object to match the entry we're
66276+ filling into the directory, then the object we find here will be
66277+ an anchor point with attached globbed objects
66278+ */
66279+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
66280+ if (obj->globbed == NULL)
66281+ return (obj->mode & GR_FIND) ? 1 : 0;
66282+
66283+ is_not_root = ((obj->filename[0] == '/') &&
66284+ (obj->filename[1] == '\0')) ? 0 : 1;
66285+ bufsize = PAGE_SIZE - namelen - is_not_root;
66286+
66287+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
66288+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
66289+ return 1;
66290+
66291+ preempt_disable();
66292+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
66293+ bufsize);
66294+
66295+ bufsize = strlen(path);
66296+
66297+ /* if base is "/", don't append an additional slash */
66298+ if (is_not_root)
66299+ *(path + bufsize) = '/';
66300+ memcpy(path + bufsize + is_not_root, name, namelen);
66301+ *(path + bufsize + namelen + is_not_root) = '\0';
66302+
66303+ tmp = obj->globbed;
66304+ while (tmp) {
66305+ if (!glob_match(tmp->filename, path)) {
66306+ preempt_enable();
66307+ return (tmp->mode & GR_FIND) ? 1 : 0;
66308+ }
66309+ tmp = tmp->next;
66310+ }
66311+ preempt_enable();
66312+ return (obj->mode & GR_FIND) ? 1 : 0;
66313+}
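/*
 * Sketch of the path assembly used for globbed-object matching above:
 * the entry name is appended to the directory's path, with a slash
 * added only when the base is not "/" itself. fnmatch() stands in for
 * the kernel's glob_match(); the paths are made up for the example.
 */
#include <fnmatch.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	char path[256] = "/home/user";	/* a base of "/" would get no slash */
	const char *name = "secret.txt";
	int is_not_root = strcmp(path, "/") != 0;
	size_t len = strlen(path);

	if (is_not_root)
		path[len] = '/';
	memcpy(path + len + is_not_root, name, strlen(name) + 1);

	printf("%s -> %d\n", path,
	       fnmatch("/home/*/secret.*", path, 0) == 0);	/* match: 1 */
	return 0;
}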
66314+
66315+void gr_put_exec_file(struct task_struct *task)
66316+{
66317+ struct file *filp;
66318+
66319+ write_lock(&grsec_exec_file_lock);
66320+ filp = task->exec_file;
66321+ task->exec_file = NULL;
66322+ write_unlock(&grsec_exec_file_lock);
66323+
66324+ if (filp)
66325+ fput(filp);
66326+
66327+ return;
66328+}
66329+
66330+
66331+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
66332+EXPORT_SYMBOL(gr_acl_is_enabled);
66333+#endif
66334+#ifdef CONFIG_SECURITY
66335+EXPORT_SYMBOL(gr_check_user_change);
66336+EXPORT_SYMBOL(gr_check_group_change);
66337+#endif
66338+
66339diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
66340new file mode 100644
66341index 0000000..18ffbbd
66342--- /dev/null
66343+++ b/grsecurity/gracl_alloc.c
66344@@ -0,0 +1,105 @@
66345+#include <linux/kernel.h>
66346+#include <linux/mm.h>
66347+#include <linux/slab.h>
66348+#include <linux/vmalloc.h>
66349+#include <linux/gracl.h>
66350+#include <linux/grsecurity.h>
66351+
66352+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
66353+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
66354+
66355+static __inline__ int
66356+alloc_pop(void)
66357+{
66358+ if (current_alloc_state->alloc_stack_next == 1)
66359+ return 0;
66360+
66361+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
66362+
66363+ current_alloc_state->alloc_stack_next--;
66364+
66365+ return 1;
66366+}
66367+
66368+static __inline__ int
66369+alloc_push(void *buf)
66370+{
66371+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
66372+ return 1;
66373+
66374+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
66375+
66376+ current_alloc_state->alloc_stack_next++;
66377+
66378+ return 0;
66379+}
66380+
66381+void *
66382+acl_alloc(unsigned long len)
66383+{
66384+ void *ret = NULL;
66385+
66386+ if (!len || len > PAGE_SIZE)
66387+ goto out;
66388+
66389+ ret = kmalloc(len, GFP_KERNEL);
66390+
66391+ if (ret) {
66392+ if (alloc_push(ret)) {
66393+ kfree(ret);
66394+ ret = NULL;
66395+ }
66396+ }
66397+
66398+out:
66399+ return ret;
66400+}
66401+
66402+void *
66403+acl_alloc_num(unsigned long num, unsigned long len)
66404+{
66405+ if (!len || (num > (PAGE_SIZE / len)))
66406+ return NULL;
66407+
66408+ return acl_alloc(num * len);
66409+}
66410+
66411+void
66412+acl_free_all(void)
66413+{
66414+ if (!current_alloc_state->alloc_stack)
66415+ return;
66416+
66417+ while (alloc_pop()) ;
66418+
66419+ if (current_alloc_state->alloc_stack) {
66420+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
66421+ kfree(current_alloc_state->alloc_stack);
66422+ else
66423+ vfree(current_alloc_state->alloc_stack);
66424+ }
66425+
66426+ current_alloc_state->alloc_stack = NULL;
66427+ current_alloc_state->alloc_stack_size = 1;
66428+ current_alloc_state->alloc_stack_next = 1;
66429+
66430+ return;
66431+}
66432+
66433+int
66434+acl_alloc_stack_init(unsigned long size)
66435+{
66436+ if ((size * sizeof (void *)) <= PAGE_SIZE)
66437+ current_alloc_state->alloc_stack =
66438+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
66439+ else
66440+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
66441+
66442+ current_alloc_state->alloc_stack_size = size;
66443+ current_alloc_state->alloc_stack_next = 1;
66444+
66445+ if (!current_alloc_state->alloc_stack)
66446+ return 0;
66447+ else
66448+ return 1;
66449+}
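/*
 * Lifecycle of the allocator defined above, as a usage sketch (error
 * handling elided): it is a grow-only stack of kmalloc'd buffers that
 * are all released at once when the policy is torn down.
 *
 *	if (!acl_alloc_stack_init(nr_objects))
 *		return -ENOMEM;
 *	obj = acl_alloc(sizeof(*obj));		// pushed onto the stack
 *	tbl = acl_alloc_num(nents, entsize);	// overflow-checked variant
 *	...
 *	acl_free_all();				// frees every allocation
 */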
66450diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
66451new file mode 100644
66452index 0000000..bdd51ea
66453--- /dev/null
66454+++ b/grsecurity/gracl_cap.c
66455@@ -0,0 +1,110 @@
66456+#include <linux/kernel.h>
66457+#include <linux/module.h>
66458+#include <linux/sched.h>
66459+#include <linux/gracl.h>
66460+#include <linux/grsecurity.h>
66461+#include <linux/grinternal.h>
66462+
66463+extern const char *captab_log[];
66464+extern int captab_log_entries;
66465+
66466+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
66467+{
66468+ struct acl_subject_label *curracl;
66469+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
66470+ kernel_cap_t cap_audit = __cap_empty_set;
66471+
66472+ if (!gr_acl_is_enabled())
66473+ return 1;
66474+
66475+ curracl = task->acl;
66476+
66477+ cap_drop = curracl->cap_lower;
66478+ cap_mask = curracl->cap_mask;
66479+ cap_audit = curracl->cap_invert_audit;
66480+
66481+ while ((curracl = curracl->parent_subject)) {
66482+ /* if the cap isn't specified in the current computed mask but is specified in the
66483+ current level subject, and is lowered in the current level subject, then add
66484+	   it to the set of dropped capabilities;
66485+ otherwise, add the current level subject's mask to the current computed mask
66486+ */
66487+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
66488+ cap_raise(cap_mask, cap);
66489+ if (cap_raised(curracl->cap_lower, cap))
66490+ cap_raise(cap_drop, cap);
66491+ if (cap_raised(curracl->cap_invert_audit, cap))
66492+ cap_raise(cap_audit, cap);
66493+ }
66494+ }
66495+
66496+ if (!cap_raised(cap_drop, cap)) {
66497+ if (cap_raised(cap_audit, cap))
66498+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
66499+ return 1;
66500+ }
66501+
66502+ curracl = task->acl;
66503+
66504+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
66505+ && cap_raised(cred->cap_effective, cap)) {
66506+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
66507+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
66508+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
66509+ gr_to_filename(task->exec_file->f_path.dentry,
66510+ task->exec_file->f_path.mnt) : curracl->filename,
66511+ curracl->filename, 0UL,
66512+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
66513+ return 1;
66514+ }
66515+
66516+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
66517+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
66518+
66519+ return 0;
66520+}
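/*
 * Model of the parent-subject capability walk above, under assumed
 * semantics: scanning from the task's subject toward the root, the
 * nearest subject whose mask covers a capability decides it, and a
 * lowered bit there drops the capability. Userspace sketch:
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct subj {
	unsigned int mask;	/* caps this subject specifies */
	unsigned int lower;	/* caps this subject drops */
	struct subj *parent;
};

static bool cap_allowed(const struct subj *s, unsigned int cap_bit)
{
	unsigned int mask = s->mask;
	unsigned int drop = s->lower;

	for (s = s->parent; s; s = s->parent) {
		/* only an ancestor not yet shadowed by the mask counts */
		if (!(mask & cap_bit) && (s->mask & cap_bit)) {
			mask |= cap_bit;
			if (s->lower & cap_bit)
				drop |= cap_bit;
		}
	}
	return !(drop & cap_bit);
}

int main(void)
{
	struct subj root = { 0x1, 0x1, NULL };	/* specifies and drops bit 0 */
	struct subj leaf = { 0x0, 0x0, &root };	/* silent on bit 0 */

	printf("%d\n", cap_allowed(&leaf, 0x1));	/* 0: inherited drop */
	return 0;
}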
66521+
66522+int
66523+gr_acl_is_capable(const int cap)
66524+{
66525+ return gr_task_acl_is_capable(current, current_cred(), cap);
66526+}
66527+
66528+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
66529+{
66530+ struct acl_subject_label *curracl;
66531+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
66532+
66533+ if (!gr_acl_is_enabled())
66534+ return 1;
66535+
66536+ curracl = task->acl;
66537+
66538+ cap_drop = curracl->cap_lower;
66539+ cap_mask = curracl->cap_mask;
66540+
66541+ while ((curracl = curracl->parent_subject)) {
66542+ /* if the cap isn't specified in the current computed mask but is specified in the
66543+ current level subject, and is lowered in the current level subject, then add
66544+	   it to the set of dropped capabilities;
66545+ otherwise, add the current level subject's mask to the current computed mask
66546+ */
66547+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
66548+ cap_raise(cap_mask, cap);
66549+ if (cap_raised(curracl->cap_lower, cap))
66550+ cap_raise(cap_drop, cap);
66551+ }
66552+ }
66553+
66554+ if (!cap_raised(cap_drop, cap))
66555+ return 1;
66556+
66557+ return 0;
66558+}
66559+
66560+int
66561+gr_acl_is_capable_nolog(const int cap)
66562+{
66563+ return gr_task_acl_is_capable_nolog(current, cap);
66564+}
66565+
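The two walks above share one composition rule: the subject nearest the task that expresses an opinion on a capability wins, and ancestors only get to decide capabilities that no nearer subject has decided (the audit-inversion set in the first walk inherits the same way). A minimal user-space sketch of that rule, using a plain unsigned bitmask in place of kernel_cap_t and a hypothetical struct subject in place of acl_subject_label:

#include <stdio.h>

/* hypothetical stand-in for acl_subject_label */
struct subject {
	unsigned int cap_mask;   /* caps this subject expresses an opinion on */
	unsigned int cap_lower;  /* caps this subject lowers */
	struct subject *parent;
};

/* 1 if cap survives the composed policy, 0 if it is dropped */
static int cap_permitted(const struct subject *s, unsigned int cap)
{
	unsigned int bit = 1U << cap;
	unsigned int mask = s->cap_mask;
	unsigned int drop = s->cap_lower;

	for (s = s->parent; s != NULL; s = s->parent) {
		/* ancestors only decide caps no nearer subject decided */
		if (!(mask & bit) && (s->cap_mask & bit)) {
			mask |= bit;
			if (s->cap_lower & bit)
				drop |= bit;
		}
	}
	return !(drop & bit);
}

int main(void)
{
	struct subject root  = { 1U << 3, 1U << 3, NULL };  /* lowers cap 3 */
	struct subject child = { 0, 0, &root };             /* no opinion   */

	printf("%d %d\n", cap_permitted(&child, 3), cap_permitted(&child, 5));
	/* prints "0 1": cap 3 inherited as dropped, cap 5 untouched */
	return 0;
}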
66566diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
66567new file mode 100644
66568index 0000000..ca25605
66569--- /dev/null
66570+++ b/grsecurity/gracl_compat.c
66571@@ -0,0 +1,270 @@
66572+#include <linux/kernel.h>
66573+#include <linux/gracl.h>
66574+#include <linux/compat.h>
66575+#include <linux/gracl_compat.h>
66576+
66577+#include <asm/uaccess.h>
66578+
66579+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
66580+{
66581+ struct gr_arg_wrapper_compat uwrapcompat;
66582+
66583+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
66584+ return -EFAULT;
66585+
66586+ if (((uwrapcompat.version != GRSECURITY_VERSION) &&
66587+ (uwrapcompat.version != 0x2901)) ||
66588+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
66589+ return -EINVAL;
66590+
66591+ uwrap->arg = compat_ptr(uwrapcompat.arg);
66592+ uwrap->version = uwrapcompat.version;
66593+ uwrap->size = sizeof(struct gr_arg);
66594+
66595+ return 0;
66596+}
66597+
66598+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
66599+{
66600+ struct gr_arg_compat argcompat;
66601+
66602+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
66603+ return -EFAULT;
66604+
66605+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
66606+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
66607+ arg->role_db.num_roles = argcompat.role_db.num_roles;
66608+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
66609+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
66610+ arg->role_db.num_objects = argcompat.role_db.num_objects;
66611+
66612+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
66613+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
66614+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
66615+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
66616+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
66617+ arg->segv_device = argcompat.segv_device;
66618+ arg->segv_inode = argcompat.segv_inode;
66619+ arg->segv_uid = argcompat.segv_uid;
66620+ arg->num_sprole_pws = argcompat.num_sprole_pws;
66621+ arg->mode = argcompat.mode;
66622+
66623+ return 0;
66624+}
66625+
66626+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
66627+{
66628+ struct acl_object_label_compat objcompat;
66629+
66630+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
66631+ return -EFAULT;
66632+
66633+ obj->filename = compat_ptr(objcompat.filename);
66634+ obj->inode = objcompat.inode;
66635+ obj->device = objcompat.device;
66636+ obj->mode = objcompat.mode;
66637+
66638+ obj->nested = compat_ptr(objcompat.nested);
66639+ obj->globbed = compat_ptr(objcompat.globbed);
66640+
66641+ obj->prev = compat_ptr(objcompat.prev);
66642+ obj->next = compat_ptr(objcompat.next);
66643+
66644+ return 0;
66645+}
66646+
66647+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
66648+{
66649+ unsigned int i;
66650+ struct acl_subject_label_compat subjcompat;
66651+
66652+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
66653+ return -EFAULT;
66654+
66655+ subj->filename = compat_ptr(subjcompat.filename);
66656+ subj->inode = subjcompat.inode;
66657+ subj->device = subjcompat.device;
66658+ subj->mode = subjcompat.mode;
66659+ subj->cap_mask = subjcompat.cap_mask;
66660+ subj->cap_lower = subjcompat.cap_lower;
66661+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
66662+
66663+ for (i = 0; i < GR_NLIMITS; i++) {
66664+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
66665+ subj->res[i].rlim_cur = RLIM_INFINITY;
66666+ else
66667+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
66668+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
66669+ subj->res[i].rlim_max = RLIM_INFINITY;
66670+ else
66671+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
66672+ }
66673+ subj->resmask = subjcompat.resmask;
66674+
66675+ subj->user_trans_type = subjcompat.user_trans_type;
66676+ subj->group_trans_type = subjcompat.group_trans_type;
66677+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
66678+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
66679+ subj->user_trans_num = subjcompat.user_trans_num;
66680+ subj->group_trans_num = subjcompat.group_trans_num;
66681+
66682+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
66683+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
66684+ subj->ip_type = subjcompat.ip_type;
66685+ subj->ips = compat_ptr(subjcompat.ips);
66686+ subj->ip_num = subjcompat.ip_num;
66687+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
66688+
66689+ subj->crashes = subjcompat.crashes;
66690+ subj->expires = subjcompat.expires;
66691+
66692+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
66693+ subj->hash = compat_ptr(subjcompat.hash);
66694+ subj->prev = compat_ptr(subjcompat.prev);
66695+ subj->next = compat_ptr(subjcompat.next);
66696+
66697+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
66698+ subj->obj_hash_size = subjcompat.obj_hash_size;
66699+ subj->pax_flags = subjcompat.pax_flags;
66700+
66701+ return 0;
66702+}
66703+
66704+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
66705+{
66706+ struct acl_role_label_compat rolecompat;
66707+
66708+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
66709+ return -EFAULT;
66710+
66711+ role->rolename = compat_ptr(rolecompat.rolename);
66712+ role->uidgid = rolecompat.uidgid;
66713+ role->roletype = rolecompat.roletype;
66714+
66715+ role->auth_attempts = rolecompat.auth_attempts;
66716+ role->expires = rolecompat.expires;
66717+
66718+ role->root_label = compat_ptr(rolecompat.root_label);
66719+ role->hash = compat_ptr(rolecompat.hash);
66720+
66721+ role->prev = compat_ptr(rolecompat.prev);
66722+ role->next = compat_ptr(rolecompat.next);
66723+
66724+ role->transitions = compat_ptr(rolecompat.transitions);
66725+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
66726+ role->domain_children = compat_ptr(rolecompat.domain_children);
66727+ role->domain_child_num = rolecompat.domain_child_num;
66728+
66729+ role->umask = rolecompat.umask;
66730+
66731+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
66732+ role->subj_hash_size = rolecompat.subj_hash_size;
66733+
66734+ return 0;
66735+}
66736+
66737+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
66738+{
66739+ struct role_allowed_ip_compat roleip_compat;
66740+
66741+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
66742+ return -EFAULT;
66743+
66744+ roleip->addr = roleip_compat.addr;
66745+ roleip->netmask = roleip_compat.netmask;
66746+
66747+ roleip->prev = compat_ptr(roleip_compat.prev);
66748+ roleip->next = compat_ptr(roleip_compat.next);
66749+
66750+ return 0;
66751+}
66752+
66753+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
66754+{
66755+ struct role_transition_compat trans_compat;
66756+
66757+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
66758+ return -EFAULT;
66759+
66760+ trans->rolename = compat_ptr(trans_compat.rolename);
66761+
66762+ trans->prev = compat_ptr(trans_compat.prev);
66763+ trans->next = compat_ptr(trans_compat.next);
66764+
66765+ return 0;
66767+}
66768+
66769+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
66770+{
66771+ struct gr_hash_struct_compat hash_compat;
66772+
66773+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
66774+ return -EFAULT;
66775+
66776+ hash->table = compat_ptr(hash_compat.table);
66777+ hash->nametable = compat_ptr(hash_compat.nametable);
66778+ hash->first = compat_ptr(hash_compat.first);
66779+
66780+ hash->table_size = hash_compat.table_size;
66781+ hash->used_size = hash_compat.used_size;
66782+
66783+ hash->type = hash_compat.type;
66784+
66785+ return 0;
66786+}
66787+
66788+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
66789+{
66790+ compat_uptr_t ptrcompat;
66791+
66792+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
66793+ return -EFAULT;
66794+
66795+ *(void **)ptr = compat_ptr(ptrcompat);
66796+
66797+ return 0;
66798+}
66799+
66800+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
66801+{
66802+ struct acl_ip_label_compat ip_compat;
66803+
66804+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
66805+ return -EFAULT;
66806+
66807+ ip->iface = compat_ptr(ip_compat.iface);
66808+ ip->addr = ip_compat.addr;
66809+ ip->netmask = ip_compat.netmask;
66810+ ip->low = ip_compat.low;
66811+ ip->high = ip_compat.high;
66812+ ip->mode = ip_compat.mode;
66813+ ip->type = ip_compat.type;
66814+
66815+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
66816+
66817+ ip->prev = compat_ptr(ip_compat.prev);
66818+ ip->next = compat_ptr(ip_compat.next);
66819+
66820+ return 0;
66821+}
66822+
66823+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
66824+{
66825+ struct sprole_pw_compat pw_compat;
66826+
66827+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
66828+ return -EFAULT;
66829+
66830+ pw->rolename = compat_ptr(pw_compat.rolename);
66831+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
66832+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
66833+
66834+ return 0;
66835+}
66836+
66837+size_t get_gr_arg_wrapper_size_compat(void)
66838+{
66839+ return sizeof(struct gr_arg_wrapper_compat);
66840+}
66841+
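Every helper in this file follows the same shape: copy the packed 32-bit layout from userspace in one copy_from_user(), then widen it field by field, turning compat_uptr_t handles into native pointers via compat_ptr() and remapping sentinels such as COMPAT_RLIM_INFINITY rather than zero-extending them. A stand-alone sketch of that widening step, with illustrative types standing in for the kernel's:

#include <stdint.h>

typedef uint32_t uptr32;          /* stand-in for compat_uptr_t */
#define INF32  0xffffffffu        /* stand-in for COMPAT_RLIM_INFINITY */
#define INF_N  (~0ul)             /* stand-in for native RLIM_INFINITY */

struct limit32 { uint32_t cur, max; };       /* 32-bit userland layout */
struct limit   { unsigned long cur, max; };  /* native layout */

static void *ptr_from32(uptr32 p)            /* roughly what compat_ptr() does */
{
	return (void *)(uintptr_t)p;
}

static void widen_limit(struct limit *dst, const struct limit32 *src)
{
	/* the 32-bit "infinity" must map to the native sentinel; a plain
	   zero-extension would turn it into a finite ~4GB limit */
	dst->cur = (src->cur == INF32) ? INF_N : (unsigned long)src->cur;
	dst->max = (src->max == INF32) ? INF_N : (unsigned long)src->max;
}

int main(void)
{
	struct limit32 in = { 100, INF32 };
	struct limit out;

	widen_limit(&out, &in);
	return (ptr_from32(0) == 0 && out.max == INF_N) ? 0 : 1;
}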
66842diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
66843new file mode 100644
66844index 0000000..deb6f3b
66845--- /dev/null
66846+++ b/grsecurity/gracl_fs.c
66847@@ -0,0 +1,437 @@
66848+#include <linux/kernel.h>
66849+#include <linux/sched.h>
66850+#include <linux/types.h>
66851+#include <linux/fs.h>
66852+#include <linux/file.h>
66853+#include <linux/stat.h>
66854+#include <linux/grsecurity.h>
66855+#include <linux/grinternal.h>
66856+#include <linux/gracl.h>
66857+
66858+umode_t
66859+gr_acl_umask(void)
66860+{
66861+ if (unlikely(!gr_acl_is_enabled()))
66862+ return 0;
66863+
66864+ return current->role->umask;
66865+}
66866+
66867+__u32
66868+gr_acl_handle_hidden_file(const struct dentry * dentry,
66869+ const struct vfsmount * mnt)
66870+{
66871+ __u32 mode;
66872+
66873+ if (unlikely(!dentry->d_inode))
66874+ return GR_FIND;
66875+
66876+ mode =
66877+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
66878+
66879+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
66880+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
66881+ return mode;
66882+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
66883+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
66884+ return 0;
66885+ } else if (unlikely(!(mode & GR_FIND)))
66886+ return 0;
66887+
66888+ return GR_FIND;
66889+}
66890+
66891+__u32
66892+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
66893+ int acc_mode)
66894+{
66895+ __u32 reqmode = GR_FIND;
66896+ __u32 mode;
66897+
66898+ if (unlikely(!dentry->d_inode))
66899+ return reqmode;
66900+
66901+ if (acc_mode & MAY_APPEND)
66902+ reqmode |= GR_APPEND;
66903+ else if (acc_mode & MAY_WRITE)
66904+ reqmode |= GR_WRITE;
66905+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
66906+ reqmode |= GR_READ;
66907+
66908+ mode =
66909+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
66910+ mnt);
66911+
66912+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
66913+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
66914+ reqmode & GR_READ ? " reading" : "",
66915+ reqmode & GR_WRITE ? " writing" : reqmode &
66916+ GR_APPEND ? " appending" : "");
66917+ return reqmode;
66918+ } else
66919+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
66920+ {
66921+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
66922+ reqmode & GR_READ ? " reading" : "",
66923+ reqmode & GR_WRITE ? " writing" : reqmode &
66924+ GR_APPEND ? " appending" : "");
66925+ return 0;
66926+ } else if (unlikely((mode & reqmode) != reqmode))
66927+ return 0;
66928+
66929+ return reqmode;
66930+}
66931+
66932+__u32
66933+gr_acl_handle_creat(const struct dentry * dentry,
66934+ const struct dentry * p_dentry,
66935+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
66936+ const int imode)
66937+{
66938+ __u32 reqmode = GR_WRITE | GR_CREATE;
66939+ __u32 mode;
66940+
66941+ if (acc_mode & MAY_APPEND)
66942+ reqmode |= GR_APPEND;
66943+ // if a directory was required or the directory already exists, then
66944+ // don't count this open as a read
66945+ if ((acc_mode & MAY_READ) &&
66946+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
66947+ reqmode |= GR_READ;
66948+ if ((open_flags & O_CREAT) &&
66949+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
66950+ reqmode |= GR_SETID;
66951+
66952+ mode =
66953+ gr_check_create(dentry, p_dentry, p_mnt,
66954+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
66955+
66956+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
66957+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
66958+ reqmode & GR_READ ? " reading" : "",
66959+ reqmode & GR_WRITE ? " writing" : reqmode &
66960+ GR_APPEND ? " appending" : "");
66961+ return reqmode;
66962+ } else
66963+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
66964+ {
66965+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
66966+ reqmode & GR_READ ? " reading" : "",
66967+ reqmode & GR_WRITE ? " writing" : reqmode &
66968+ GR_APPEND ? " appending" : "");
66969+ return 0;
66970+ } else if (unlikely((mode & reqmode) != reqmode))
66971+ return 0;
66972+
66973+ return reqmode;
66974+}
66975+
66976+__u32
66977+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
66978+ const int fmode)
66979+{
66980+ __u32 mode, reqmode = GR_FIND;
66981+
66982+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
66983+ reqmode |= GR_EXEC;
66984+ if (fmode & S_IWOTH)
66985+ reqmode |= GR_WRITE;
66986+ if (fmode & S_IROTH)
66987+ reqmode |= GR_READ;
66988+
66989+ mode =
66990+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
66991+ mnt);
66992+
66993+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
66994+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
66995+ reqmode & GR_READ ? " reading" : "",
66996+ reqmode & GR_WRITE ? " writing" : "",
66997+ reqmode & GR_EXEC ? " executing" : "");
66998+ return reqmode;
66999+ } else
67000+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
67001+ {
67002+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
67003+ reqmode & GR_READ ? " reading" : "",
67004+ reqmode & GR_WRITE ? " writing" : "",
67005+ reqmode & GR_EXEC ? " executing" : "");
67006+ return 0;
67007+ } else if (unlikely((mode & reqmode) != reqmode))
67008+ return 0;
67009+
67010+ return reqmode;
67011+}
67012+
67013+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
67014+{
67015+ __u32 mode;
67016+
67017+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
67018+
67019+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
67020+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
67021+ return mode;
67022+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
67023+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
67024+ return 0;
67025+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
67026+ return 0;
67027+
67028+ return (reqmode);
67029+}
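generic_fs_handler() — and the open/creat/access handlers above it, which inline the same pattern — funnels every check into a three-way outcome: grant (with an audit line when the matched object requests auditing), deny with a log line, or deny silently when the object carries GR_SUPPRESS. Reduced to hypothetical flags, the decision is:

enum outcome { GRANT_AUDIT, GRANT, DENY_LOG, DENY_SILENT };

static enum outcome decide(unsigned int mode, unsigned int reqmode,
			   unsigned int audit_bits, unsigned int suppress)
{
	if ((mode & reqmode) == reqmode)
		return (mode & audit_bits) ? GRANT_AUDIT : GRANT;
	return (mode & suppress) ? DENY_SILENT : DENY_LOG;
}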
67030+
67031+__u32
67032+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
67033+{
67034+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
67035+}
67036+
67037+__u32
67038+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
67039+{
67040+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
67041+}
67042+
67043+__u32
67044+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
67045+{
67046+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
67047+}
67048+
67049+__u32
67050+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
67051+{
67052+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
67053+}
67054+
67055+__u32
67056+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
67057+ umode_t *modeptr)
67058+{
67059+ umode_t mode;
67060+
67061+ *modeptr &= ~gr_acl_umask();
67062+ mode = *modeptr;
67063+
67064+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
67065+ return 1;
67066+
67067+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
67068+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
67069+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
67070+ GR_CHMOD_ACL_MSG);
67071+ } else {
67072+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
67073+ }
67074+}
67075+
67076+__u32
67077+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
67078+{
67079+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
67080+}
67081+
67082+__u32
67083+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
67084+{
67085+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
67086+}
67087+
67088+__u32
67089+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
67090+{
67091+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
67092+}
67093+
67094+__u32
67095+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
67096+{
67097+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
67098+}
67099+
67100+__u32
67101+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
67102+{
67103+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
67104+ GR_UNIXCONNECT_ACL_MSG);
67105+}
67106+
67107+/* hardlinks require at minimum create and link permission;
67108+ any additional privilege required depends on the
67109+ privilege of the file being linked to
67110+*/
67111+__u32
67112+gr_acl_handle_link(const struct dentry * new_dentry,
67113+ const struct dentry * parent_dentry,
67114+ const struct vfsmount * parent_mnt,
67115+ const struct dentry * old_dentry,
67116+ const struct vfsmount * old_mnt, const struct filename *to)
67117+{
67118+ __u32 mode;
67119+ __u32 needmode = GR_CREATE | GR_LINK;
67120+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
67121+
67122+ mode =
67123+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
67124+ old_mnt);
67125+
67126+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
67127+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
67128+ return mode;
67129+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
67130+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
67131+ return 0;
67132+ } else if (unlikely((mode & needmode) != needmode))
67133+ return 0;
67134+
67135+ return 1;
67136+}
67137+
67138+__u32
67139+gr_acl_handle_symlink(const struct dentry * new_dentry,
67140+ const struct dentry * parent_dentry,
67141+ const struct vfsmount * parent_mnt, const struct filename *from)
67142+{
67143+ __u32 needmode = GR_WRITE | GR_CREATE;
67144+ __u32 mode;
67145+
67146+ mode =
67147+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
67148+ GR_CREATE | GR_AUDIT_CREATE |
67149+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
67150+
67151+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
67152+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
67153+ return mode;
67154+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
67155+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
67156+ return 0;
67157+ } else if (unlikely((mode & needmode) != needmode))
67158+ return 0;
67159+
67160+ return (GR_WRITE | GR_CREATE);
67161+}
67162+
67163+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
67164+{
67165+ __u32 mode;
67166+
67167+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
67168+
67169+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
67170+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
67171+ return mode;
67172+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
67173+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
67174+ return 0;
67175+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
67176+ return 0;
67177+
67178+ return (reqmode);
67179+}
67180+
67181+__u32
67182+gr_acl_handle_mknod(const struct dentry * new_dentry,
67183+ const struct dentry * parent_dentry,
67184+ const struct vfsmount * parent_mnt,
67185+ const int mode)
67186+{
67187+ __u32 reqmode = GR_WRITE | GR_CREATE;
67188+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
67189+ reqmode |= GR_SETID;
67190+
67191+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
67192+ reqmode, GR_MKNOD_ACL_MSG);
67193+}
67194+
67195+__u32
67196+gr_acl_handle_mkdir(const struct dentry *new_dentry,
67197+ const struct dentry *parent_dentry,
67198+ const struct vfsmount *parent_mnt)
67199+{
67200+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
67201+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
67202+}
67203+
67204+#define RENAME_CHECK_SUCCESS(old, new) \
67205+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
67206+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
67207+
67208+int
67209+gr_acl_handle_rename(struct dentry *new_dentry,
67210+ struct dentry *parent_dentry,
67211+ const struct vfsmount *parent_mnt,
67212+ struct dentry *old_dentry,
67213+ struct inode *old_parent_inode,
67214+ struct vfsmount *old_mnt, const struct filename *newname)
67215+{
67216+ __u32 comp1, comp2;
67217+ int error = 0;
67218+
67219+ if (unlikely(!gr_acl_is_enabled()))
67220+ return 0;
67221+
67222+ if (!new_dentry->d_inode) {
67223+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
67224+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
67225+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
67226+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
67227+ GR_DELETE | GR_AUDIT_DELETE |
67228+ GR_AUDIT_READ | GR_AUDIT_WRITE |
67229+ GR_SUPPRESS, old_mnt);
67230+ } else {
67231+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
67232+ GR_CREATE | GR_DELETE |
67233+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
67234+ GR_AUDIT_READ | GR_AUDIT_WRITE |
67235+ GR_SUPPRESS, parent_mnt);
67236+ comp2 =
67237+ gr_search_file(old_dentry,
67238+ GR_READ | GR_WRITE | GR_AUDIT_READ |
67239+ GR_DELETE | GR_AUDIT_DELETE |
67240+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
67241+ }
67242+
67243+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
67244+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
67245+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
67246+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
67247+ && !(comp2 & GR_SUPPRESS)) {
67248+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
67249+ error = -EACCES;
67250+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
67251+ error = -EACCES;
67252+
67253+ return error;
67254+}
67255+
67256+void
67257+gr_acl_handle_exit(void)
67258+{
67259+ u16 id;
67260+ char *rolename;
67261+
67262+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
67263+ !(current->role->roletype & GR_ROLE_PERSIST))) {
67264+ id = current->acl_role_id;
67265+ rolename = current->role->rolename;
67266+ gr_set_acls(1);
67267+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
67268+ }
67269+
67270+ gr_put_exec_file(current);
67271+ return;
67272+}
67273+
67274+int
67275+gr_acl_handle_procpidmem(const struct task_struct *task)
67276+{
67277+ if (unlikely(!gr_acl_is_enabled()))
67278+ return 0;
67279+
67280+ if (task != current && task->acl->mode & GR_PROTPROCFD)
67281+ return -EACCES;
67282+
67283+ return 0;
67284+}
67285diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
67286new file mode 100644
67287index 0000000..f056b81
67288--- /dev/null
67289+++ b/grsecurity/gracl_ip.c
67290@@ -0,0 +1,386 @@
67291+#include <linux/kernel.h>
67292+#include <asm/uaccess.h>
67293+#include <asm/errno.h>
67294+#include <net/sock.h>
67295+#include <linux/file.h>
67296+#include <linux/fs.h>
67297+#include <linux/net.h>
67298+#include <linux/in.h>
67299+#include <linux/skbuff.h>
67300+#include <linux/ip.h>
67301+#include <linux/udp.h>
67302+#include <linux/types.h>
67303+#include <linux/sched.h>
67304+#include <linux/netdevice.h>
67305+#include <linux/inetdevice.h>
67306+#include <linux/gracl.h>
67307+#include <linux/grsecurity.h>
67308+#include <linux/grinternal.h>
67309+
67310+#define GR_BIND 0x01
67311+#define GR_CONNECT 0x02
67312+#define GR_INVERT 0x04
67313+#define GR_BINDOVERRIDE 0x08
67314+#define GR_CONNECTOVERRIDE 0x10
67315+#define GR_SOCK_FAMILY 0x20
67316+
67317+static const char * gr_protocols[IPPROTO_MAX] = {
67318+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
67319+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
67320+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
67321+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
67322+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
67323+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
67324+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
67325+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
67326+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
67327+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
67328+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
67329+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
67330+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
67331+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
67332+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
67333+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
67334+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
67335+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
67336+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
67337+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
67338+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
67339+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
67340+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
67341+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
67342+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
67343+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
67344+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
67345+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
67346+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
67347+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
67348+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
67349+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
67350+ };
67351+
67352+static const char * gr_socktypes[SOCK_MAX] = {
67353+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
67354+ "unknown:7", "unknown:8", "unknown:9", "packet"
67355+ };
67356+
67357+static const char * gr_sockfamilies[AF_MAX+1] = {
67358+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
67359+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
67360+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
67361+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
67362+ };
67363+
67364+const char *
67365+gr_proto_to_name(unsigned char proto)
67366+{
67367+ return gr_protocols[proto];
67368+}
67369+
67370+const char *
67371+gr_socktype_to_name(unsigned char type)
67372+{
67373+ return gr_socktypes[type];
67374+}
67375+
67376+const char *
67377+gr_sockfamily_to_name(unsigned char family)
67378+{
67379+ return gr_sockfamilies[family];
67380+}
67381+
67382+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
67383+
67384+int
67385+gr_search_socket(const int domain, const int type, const int protocol)
67386+{
67387+ struct acl_subject_label *curr;
67388+ const struct cred *cred = current_cred();
67389+
67390+ if (unlikely(!gr_acl_is_enabled()))
67391+ goto exit;
67392+
67393+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
67394+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
67395+ goto exit; // let the kernel handle it
67396+
67397+ curr = current->acl;
67398+
67399+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
67400+ /* the family is allowed, if this is PF_INET allow it only if
67401+ the extra sock type/protocol checks pass */
67402+ if (domain == PF_INET)
67403+ goto inet_check;
67404+ goto exit;
67405+ } else {
67406+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
67407+ __u32 fakeip = 0;
67408+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
67409+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
67410+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
67411+ gr_to_filename(current->exec_file->f_path.dentry,
67412+ current->exec_file->f_path.mnt) :
67413+ curr->filename, curr->filename,
67414+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
67415+ &current->signal->saved_ip);
67416+ goto exit;
67417+ }
67418+ goto exit_fail;
67419+ }
67420+
67421+inet_check:
67422+ /* the rest of this checking is for IPv4 only */
67423+ if (!curr->ips)
67424+ goto exit;
67425+
67426+ if ((curr->ip_type & (1U << type)) &&
67427+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
67428+ goto exit;
67429+
67430+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
67431+ /* we don't place ACLs on raw sockets, and sometimes
67432+ dgram/ip sockets are opened for ioctl only and not
67433+ bind/connect, so we fake a bind learn log */
67434+ if (type == SOCK_RAW || type == SOCK_PACKET) {
67435+ __u32 fakeip = 0;
67436+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
67437+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
67438+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
67439+ gr_to_filename(current->exec_file->f_path.dentry,
67440+ current->exec_file->f_path.mnt) :
67441+ curr->filename, curr->filename,
67442+ &fakeip, 0, type,
67443+ protocol, GR_CONNECT, &current->signal->saved_ip);
67444+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
67445+ __u32 fakeip = 0;
67446+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
67447+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
67448+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
67449+ gr_to_filename(current->exec_file->f_path.dentry,
67450+ current->exec_file->f_path.mnt) :
67451+ curr->filename, curr->filename,
67452+ &fakeip, 0, type,
67453+ protocol, GR_BIND, &current->signal->saved_ip);
67454+ }
67455+ /* we'll log when they use connect or bind */
67456+ goto exit;
67457+ }
67458+
67459+exit_fail:
67460+ if (domain == PF_INET)
67461+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
67462+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
67463+ else if (rcu_access_pointer(net_families[domain]) != NULL)
67464+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
67465+ gr_socktype_to_name(type), protocol);
67466+
67467+ return 0;
67468+exit:
67469+ return 1;
67470+}
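Both the family check and the per-protocol check above index a packed bitmap with the same idiom: word n / 32, bit n % 32. A tiny self-contained illustration:

#include <stdio.h>

static int bit_set(const unsigned int *bitmap, unsigned int n)
{
	return !!(bitmap[n / 32] & (1U << (n % 32)));
}

int main(void)
{
	unsigned int families[8] = { 0 };

	families[2 / 32] |= 1U << (2 % 32);   /* allow family 2 (PF_INET) */
	printf("%d %d\n", bit_set(families, 2), bit_set(families, 10)); /* 1 0 */
	return 0;
}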
67471+
67472+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
67473+{
67474+ if ((ip->mode & mode) &&
67475+ (ip_port >= ip->low) &&
67476+ (ip_port <= ip->high) &&
67477+ ((ntohl(ip_addr) & our_netmask) ==
67478+ (ntohl(our_addr) & our_netmask))
67479+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
67480+ && (ip->type & (1U << type))) {
67481+ if (ip->mode & GR_INVERT)
67482+ return 2; // specifically denied
67483+ else
67484+ return 1; // allowed
67485+ }
67486+
67487+ return 0; // not specifically allowed, may continue parsing
67488+}
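check_ip_policy() is deliberately tri-state: 1 means this rule explicitly allows the endpoint, 2 means a GR_INVERT rule explicitly denies it, and 0 means the rule simply doesn't apply, so the caller keeps walking the subject's rule list. A minimal sketch of how a caller such as gr_search_connectbind() below consumes that contract (the check callback and rule indexing are placeholders):

/* returns 0 on allow, -1 on deny; exhausting the rules also denies,
   mirroring the fall-through to the "denied" label below */
static int walk_rules(int (*check)(int rule_idx), int nrules)
{
	int i, ret;

	for (i = 0; i < nrules; i++) {
		ret = check(i);
		if (ret == 1)
			return 0;	/* rule explicitly allows */
		if (ret == 2)
			return -1;	/* GR_INVERT rule explicitly denies */
		/* ret == 0: rule doesn't apply, keep scanning */
	}
	return -1;
}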
67489+
67490+static int
67491+gr_search_connectbind(const int full_mode, struct sock *sk,
67492+ struct sockaddr_in *addr, const int type)
67493+{
67494+ char iface[IFNAMSIZ] = {0};
67495+ struct acl_subject_label *curr;
67496+ struct acl_ip_label *ip;
67497+ struct inet_sock *isk;
67498+ struct net_device *dev;
67499+ struct in_device *idev;
67500+ unsigned long i;
67501+ int ret;
67502+ int mode = full_mode & (GR_BIND | GR_CONNECT);
67503+ __u32 ip_addr = 0;
67504+ __u32 our_addr;
67505+ __u32 our_netmask;
67506+ char *p;
67507+ __u16 ip_port = 0;
67508+ const struct cred *cred = current_cred();
67509+
67510+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
67511+ return 0;
67512+
67513+ curr = current->acl;
67514+ isk = inet_sk(sk);
67515+
67516+ /* INADDR_ANY overriding for binds; inaddr_any_override is already in network order */
67517+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
67518+ addr->sin_addr.s_addr = curr->inaddr_any_override;
67519+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
67520+ struct sockaddr_in saddr;
67521+ int err;
67522+
67523+ saddr.sin_family = AF_INET;
67524+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
67525+ saddr.sin_port = isk->inet_sport;
67526+
67527+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
67528+ if (err)
67529+ return err;
67530+
67531+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
67532+ if (err)
67533+ return err;
67534+ }
67535+
67536+ if (!curr->ips)
67537+ return 0;
67538+
67539+ ip_addr = addr->sin_addr.s_addr;
67540+ ip_port = ntohs(addr->sin_port);
67541+
67542+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
67543+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
67544+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
67545+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
67546+ gr_to_filename(current->exec_file->f_path.dentry,
67547+ current->exec_file->f_path.mnt) :
67548+ curr->filename, curr->filename,
67549+ &ip_addr, ip_port, type,
67550+ sk->sk_protocol, mode, &current->signal->saved_ip);
67551+ return 0;
67552+ }
67553+
67554+ for (i = 0; i < curr->ip_num; i++) {
67555+ ip = *(curr->ips + i);
67556+ if (ip->iface != NULL) {
67557+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
67558+ p = strchr(iface, ':');
67559+ if (p != NULL)
67560+ *p = '\0';
67561+ dev = dev_get_by_name(sock_net(sk), iface);
67562+ if (dev == NULL)
67563+ continue;
67564+ idev = in_dev_get(dev);
67565+ if (idev == NULL) {
67566+ dev_put(dev);
67567+ continue;
67568+ }
67569+ rcu_read_lock();
67570+ for_ifa(idev) {
67571+ if (!strcmp(ip->iface, ifa->ifa_label)) {
67572+ our_addr = ifa->ifa_address;
67573+ our_netmask = 0xffffffff;
67574+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
67575+ if (ret == 1) {
67576+ rcu_read_unlock();
67577+ in_dev_put(idev);
67578+ dev_put(dev);
67579+ return 0;
67580+ } else if (ret == 2) {
67581+ rcu_read_unlock();
67582+ in_dev_put(idev);
67583+ dev_put(dev);
67584+ goto denied;
67585+ }
67586+ }
67587+ } endfor_ifa(idev);
67588+ rcu_read_unlock();
67589+ in_dev_put(idev);
67590+ dev_put(dev);
67591+ } else {
67592+ our_addr = ip->addr;
67593+ our_netmask = ip->netmask;
67594+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
67595+ if (ret == 1)
67596+ return 0;
67597+ else if (ret == 2)
67598+ goto denied;
67599+ }
67600+ }
67601+
67602+denied:
67603+ if (mode == GR_BIND)
67604+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
67605+ else if (mode == GR_CONNECT)
67606+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
67607+
67608+ return -EACCES;
67609+}
67610+
67611+int
67612+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
67613+{
67614+ /* always allow disconnection of dgram sockets with connect */
67615+ if (addr->sin_family == AF_UNSPEC)
67616+ return 0;
67617+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
67618+}
67619+
67620+int
67621+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
67622+{
67623+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
67624+}
67625+
67626+int gr_search_listen(struct socket *sock)
67627+{
67628+ struct sock *sk = sock->sk;
67629+ struct sockaddr_in addr;
67630+
67631+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
67632+ addr.sin_port = inet_sk(sk)->inet_sport;
67633+
67634+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
67635+}
67636+
67637+int gr_search_accept(struct socket *sock)
67638+{
67639+ struct sock *sk = sock->sk;
67640+ struct sockaddr_in addr;
67641+
67642+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
67643+ addr.sin_port = inet_sk(sk)->inet_sport;
67644+
67645+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
67646+}
67647+
67648+int
67649+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
67650+{
67651+ if (addr)
67652+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
67653+ else {
67654+ struct sockaddr_in sin;
67655+ const struct inet_sock *inet = inet_sk(sk);
67656+
67657+ sin.sin_addr.s_addr = inet->inet_daddr;
67658+ sin.sin_port = inet->inet_dport;
67659+
67660+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
67661+ }
67662+}
67663+
67664+int
67665+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
67666+{
67667+ struct sockaddr_in sin;
67668+
67669+ if (unlikely(skb->len < sizeof (struct udphdr)))
67670+ return 0; // skip this packet
67671+
67672+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
67673+ sin.sin_port = udp_hdr(skb)->source;
67674+
67675+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
67676+}
67677diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
67678new file mode 100644
67679index 0000000..25f54ef
67680--- /dev/null
67681+++ b/grsecurity/gracl_learn.c
67682@@ -0,0 +1,207 @@
67683+#include <linux/kernel.h>
67684+#include <linux/mm.h>
67685+#include <linux/sched.h>
67686+#include <linux/poll.h>
67687+#include <linux/string.h>
67688+#include <linux/file.h>
67689+#include <linux/types.h>
67690+#include <linux/vmalloc.h>
67691+#include <linux/grinternal.h>
67692+
67693+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
67694+ size_t count, loff_t *ppos);
67695+extern int gr_acl_is_enabled(void);
67696+
67697+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
67698+static int gr_learn_attached;
67699+
67700+/* use a 512k buffer */
67701+#define LEARN_BUFFER_SIZE (512 * 1024)
67702+
67703+static DEFINE_SPINLOCK(gr_learn_lock);
67704+static DEFINE_MUTEX(gr_learn_user_mutex);
67705+
67706+/* we need to maintain two buffers: the kernel context of grlearn (the reader)
67707+ holds a mutex around the sleeping userspace copy, while the other kernel
67708+ contexts take a spinlock when copying into the buffer, since they cannot
67709+ sleep */
67710+static char *learn_buffer;
67711+static char *learn_buffer_user;
67712+static int learn_buffer_len;
67713+static int learn_buffer_user_len;
67714+
67715+static ssize_t
67716+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
67717+{
67718+ DECLARE_WAITQUEUE(wait, current);
67719+ ssize_t retval = 0;
67720+
67721+ add_wait_queue(&learn_wait, &wait);
67722+ set_current_state(TASK_INTERRUPTIBLE);
67723+ do {
67724+ mutex_lock(&gr_learn_user_mutex);
67725+ spin_lock(&gr_learn_lock);
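+ /* when data is available we leave the loop still holding both
+    gr_learn_user_mutex and gr_learn_lock; they are released after
+    the buffer contents have been moved to learn_buffer_user below */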
67726+ if (learn_buffer_len)
67727+ break;
67728+ spin_unlock(&gr_learn_lock);
67729+ mutex_unlock(&gr_learn_user_mutex);
67730+ if (file->f_flags & O_NONBLOCK) {
67731+ retval = -EAGAIN;
67732+ goto out;
67733+ }
67734+ if (signal_pending(current)) {
67735+ retval = -ERESTARTSYS;
67736+ goto out;
67737+ }
67738+
67739+ schedule();
67740+ } while (1);
67741+
67742+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
67743+ learn_buffer_user_len = learn_buffer_len;
67744+ retval = learn_buffer_len;
67745+ learn_buffer_len = 0;
67746+
67747+ spin_unlock(&gr_learn_lock);
67748+
67749+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
67750+ retval = -EFAULT;
67751+
67752+ mutex_unlock(&gr_learn_user_mutex);
67753+out:
67754+ set_current_state(TASK_RUNNING);
67755+ remove_wait_queue(&learn_wait, &wait);
67756+ return retval;
67757+}
67758+
67759+static unsigned int
67760+poll_learn(struct file * file, poll_table * wait)
67761+{
67762+ poll_wait(file, &learn_wait, wait);
67763+
67764+ if (learn_buffer_len)
67765+ return (POLLIN | POLLRDNORM);
67766+
67767+ return 0;
67768+}
67769+
67770+void
67771+gr_clear_learn_entries(void)
67772+{
67773+ char *tmp;
67774+
67775+ mutex_lock(&gr_learn_user_mutex);
67776+ spin_lock(&gr_learn_lock);
67777+ tmp = learn_buffer;
67778+ learn_buffer = NULL;
67779+ spin_unlock(&gr_learn_lock);
67780+ if (tmp)
67781+ vfree(tmp);
67782+ if (learn_buffer_user != NULL) {
67783+ vfree(learn_buffer_user);
67784+ learn_buffer_user = NULL;
67785+ }
67786+ learn_buffer_len = 0;
67787+ mutex_unlock(&gr_learn_user_mutex);
67788+
67789+ return;
67790+}
67791+
67792+void
67793+gr_add_learn_entry(const char *fmt, ...)
67794+{
67795+ va_list args;
67796+ unsigned int len;
67797+
67798+ if (!gr_learn_attached)
67799+ return;
67800+
67801+ spin_lock(&gr_learn_lock);
67802+
67803+ /* leave a gap at the end so we know when it's "full" but don't have to
67804+ compute the exact length of the string we're trying to append
67805+ */
67806+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
67807+ spin_unlock(&gr_learn_lock);
67808+ wake_up_interruptible(&learn_wait);
67809+ return;
67810+ }
67811+ if (learn_buffer == NULL) {
67812+ spin_unlock(&gr_learn_lock);
67813+ return;
67814+ }
67815+
67816+ va_start(args, fmt);
67817+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
67818+ va_end(args);
67819+
67820+ learn_buffer_len += len + 1;
67821+
67822+ spin_unlock(&gr_learn_lock);
67823+ wake_up_interruptible(&learn_wait);
67824+
67825+ return;
67826+}
67827+
67828+static int
67829+open_learn(struct inode *inode, struct file *file)
67830+{
67831+ if (file->f_mode & FMODE_READ && gr_learn_attached)
67832+ return -EBUSY;
67833+ if (file->f_mode & FMODE_READ) {
67834+ int retval = 0;
67835+ mutex_lock(&gr_learn_user_mutex);
67836+ if (learn_buffer == NULL)
67837+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
67838+ if (learn_buffer_user == NULL)
67839+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
67840+ if (learn_buffer == NULL) {
67841+ retval = -ENOMEM;
67842+ goto out_error;
67843+ }
67844+ if (learn_buffer_user == NULL) {
67845+ retval = -ENOMEM;
67846+ goto out_error;
67847+ }
67848+ learn_buffer_len = 0;
67849+ learn_buffer_user_len = 0;
67850+ gr_learn_attached = 1;
67851+out_error:
67852+ mutex_unlock(&gr_learn_user_mutex);
67853+ return retval;
67854+ }
67855+ return 0;
67856+}
67857+
67858+static int
67859+close_learn(struct inode *inode, struct file *file)
67860+{
67861+ if (file->f_mode & FMODE_READ) {
67862+ char *tmp = NULL;
67863+ mutex_lock(&gr_learn_user_mutex);
67864+ spin_lock(&gr_learn_lock);
67865+ tmp = learn_buffer;
67866+ learn_buffer = NULL;
67867+ spin_unlock(&gr_learn_lock);
67868+ if (tmp)
67869+ vfree(tmp);
67870+ if (learn_buffer_user != NULL) {
67871+ vfree(learn_buffer_user);
67872+ learn_buffer_user = NULL;
67873+ }
67874+ learn_buffer_len = 0;
67875+ learn_buffer_user_len = 0;
67876+ gr_learn_attached = 0;
67877+ mutex_unlock(&gr_learn_user_mutex);
67878+ }
67879+
67880+ return 0;
67881+}
67882+
67883+const struct file_operations grsec_fops = {
67884+ .read = read_learn,
67885+ .write = write_grsec_handler,
67886+ .open = open_learn,
67887+ .release = close_learn,
67888+ .poll = poll_learn,
67889+};
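Taken together, the grsec learn channel behaves as a single-reader log pipe: writers (gr_add_learn_entry(), which may run in atomic context) append under the spinlock, and the one attached reader swaps the whole buffer out before performing the sleeping copy to userspace. A condensed user-space model of that hand-off, with locking elided and plain memcpy standing in for copy_to_user():

#include <string.h>
#include <stdio.h>

#define BUFSZ 4096
static char buf[BUFSZ];          /* models learn_buffer */
static char user_copy[BUFSZ];    /* models learn_buffer_user */
static int  buf_len;

/* writer side: append one record if it fits (gr_add_learn_entry) */
static void append(const char *msg)
{
	int n = (int)strlen(msg) + 1;

	if (buf_len + n <= BUFSZ) {
		memcpy(buf + buf_len, msg, n);
		buf_len += n;        /* record plus its NUL terminator */
	}
}

/* reader side: swap contents out, then "copy to userspace" (read_learn) */
static int drain(void)
{
	int n = buf_len;

	memcpy(user_copy, buf, n);   /* done under the spinlock in-kernel */
	buf_len = 0;                 /* writers may append again at once */
	/* the sleeping copy_to_user() then works on user_copy only */
	return n;
}

int main(void)
{
	append("role admin ...");
	printf("drained %d bytes\n", drain());
	return 0;
}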
67890diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
67891new file mode 100644
67892index 0000000..361a099
67893--- /dev/null
67894+++ b/grsecurity/gracl_policy.c
67895@@ -0,0 +1,1782 @@
67896+#include <linux/kernel.h>
67897+#include <linux/module.h>
67898+#include <linux/sched.h>
67899+#include <linux/mm.h>
67900+#include <linux/file.h>
67901+#include <linux/fs.h>
67902+#include <linux/namei.h>
67903+#include <linux/mount.h>
67904+#include <linux/tty.h>
67905+#include <linux/proc_fs.h>
67906+#include <linux/lglock.h>
67907+#include <linux/slab.h>
67908+#include <linux/vmalloc.h>
67909+#include <linux/types.h>
67910+#include <linux/sysctl.h>
67911+#include <linux/netdevice.h>
67912+#include <linux/ptrace.h>
67913+#include <linux/gracl.h>
67914+#include <linux/gralloc.h>
67915+#include <linux/security.h>
67916+#include <linux/grinternal.h>
67917+#include <linux/pid_namespace.h>
67918+#include <linux/stop_machine.h>
67919+#include <linux/fdtable.h>
67920+#include <linux/percpu.h>
67922+#include <linux/hugetlb.h>
67923+#include <linux/posix-timers.h>
67924+#include "../fs/mount.h"
67925+
67926+#include <asm/uaccess.h>
67927+#include <asm/errno.h>
67928+#include <asm/mman.h>
67929+
67930+extern struct gr_policy_state *polstate;
67931+
67932+#define FOR_EACH_ROLE_START(role) \
67933+ role = polstate->role_list; \
67934+ while (role) {
67935+
67936+#define FOR_EACH_ROLE_END(role) \
67937+ role = role->prev; \
67938+ }
67939+
67940+struct path gr_real_root;
67941+
67942+extern struct gr_alloc_state *current_alloc_state;
67943+
67944+u16 acl_sp_role_value;
67945+
67946+static DEFINE_MUTEX(gr_dev_mutex);
67947+
67948+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
67949+extern void gr_clear_learn_entries(void);
67950+
67951+static struct gr_arg gr_usermode;
67952+static unsigned char gr_system_salt[GR_SALT_LEN];
67953+static unsigned char gr_system_sum[GR_SHA_LEN];
67954+
67955+static unsigned int gr_auth_attempts = 0;
67956+static unsigned long gr_auth_expires = 0UL;
67957+
67958+struct acl_object_label *fakefs_obj_rw;
67959+struct acl_object_label *fakefs_obj_rwx;
67960+
67961+extern int gr_init_uidset(void);
67962+extern void gr_free_uidset(void);
67963+extern void gr_remove_uid(uid_t uid);
67964+extern int gr_find_uid(uid_t uid);
67965+
67966+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename);
67967+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
67968+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
67969+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
67970+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
67971+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
67972+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
67973+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
67974+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
67975+extern struct acl_subject_label *lookup_acl_subj_label(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
67976+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
67977+extern void assign_special_role(const char *rolename);
67978+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
67979+extern int gr_rbac_disable(void *unused);
67980+extern void gr_enable_rbac_system(void);
67981+
67982+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
67983+{
67984+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
67985+ return -EFAULT;
67986+
67987+ return 0;
67988+}
67989+
67990+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
67991+{
67992+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
67993+ return -EFAULT;
67994+
67995+ return 0;
67996+}
67997+
67998+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
67999+{
68000+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
68001+ return -EFAULT;
68002+
68003+ return 0;
68004+}
68005+
68006+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
68007+{
68008+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
68009+ return -EFAULT;
68010+
68011+ return 0;
68012+}
68013+
68014+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
68015+{
68016+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
68017+ return -EFAULT;
68018+
68019+ return 0;
68020+}
68021+
68022+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
68023+{
68024+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
68025+ return -EFAULT;
68026+
68027+ return 0;
68028+}
68029+
68030+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
68031+{
68032+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
68033+ return -EFAULT;
68034+
68035+ return 0;
68036+}
68037+
68038+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
68039+{
68040+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
68041+ return -EFAULT;
68042+
68043+ return 0;
68044+}
68045+
68046+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
68047+{
68048+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
68049+ return -EFAULT;
68050+
68051+ return 0;
68052+}
68053+
68054+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
68055+{
68056+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
68057+ return -EFAULT;
68058+
68059+ if (((uwrap->version != GRSECURITY_VERSION) &&
68060+ (uwrap->version != 0x2901)) ||
68061+ (uwrap->size != sizeof(struct gr_arg)))
68062+ return -EINVAL;
68063+
68064+ return 0;
68065+}
68066+
68067+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
68068+{
68069+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
68070+ return -EFAULT;
68071+
68072+ return 0;
68073+}
68074+
68075+static size_t get_gr_arg_wrapper_size_normal(void)
68076+{
68077+ return sizeof(struct gr_arg_wrapper);
68078+}
68079+
68080+#ifdef CONFIG_COMPAT
68081+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
68082+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
68083+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
68084+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
68085+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
68086+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
68087+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
68088+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
68089+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
68090+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
68091+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
68092+extern size_t get_gr_arg_wrapper_size_compat(void);
68093+
68094+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
68095+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
68096+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
68097+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
68098+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
68099+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
68100+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
68101+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
68102+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
68103+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
68104+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
68105+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
68106+
68107+#else
68108+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
68109+#define copy_gr_arg copy_gr_arg_normal
68110+#define copy_gr_hash_struct copy_gr_hash_struct_normal
68111+#define copy_acl_object_label copy_acl_object_label_normal
68112+#define copy_acl_subject_label copy_acl_subject_label_normal
68113+#define copy_acl_role_label copy_acl_role_label_normal
68114+#define copy_acl_ip_label copy_acl_ip_label_normal
68115+#define copy_pointer_from_array copy_pointer_from_array_normal
68116+#define copy_sprole_pw copy_sprole_pw_normal
68117+#define copy_role_transition copy_role_transition_normal
68118+#define copy_role_allowed_ip copy_role_allowed_ip_normal
68119+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
68120+#endif
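Without CONFIG_COMPAT the copy_* names above collapse into compile-time #defines for the *_normal routines; with it they become __read_only function pointers that write_grsec_handler later points at either the native or the compat implementations, depending on is_compat_task(). A minimal userspace sketch of that dispatch pattern follows; the struct layout, the is_compat flag, and the field unpacking are illustrative stand-ins, not the kernel's:

#include <stdio.h>
#include <string.h>

struct gr_arg_wrapper { unsigned int version; unsigned int size; };

/* native layout copy */
static int copy_wrapper_normal(const char *buf, struct gr_arg_wrapper *uwrap)
{
	memcpy(uwrap, buf, sizeof(*uwrap));
	return 0;
}

/* hypothetical 32-bit layout copy for a 64-bit host */
static int copy_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
{
	unsigned int fields[2];

	memcpy(fields, buf, sizeof(fields));
	uwrap->version = fields[0];
	uwrap->size = fields[1];
	return 0;
}

/* set once per request, mirroring the pax_open_kernel() section below */
static int (*copy_wrapper)(const char *, struct gr_arg_wrapper *);

int main(void)
{
	int is_compat = 0;	/* stand-in for is_compat_task() */
	unsigned int raw[2] = { 0x2901, sizeof(struct gr_arg_wrapper) };
	struct gr_arg_wrapper w;

	copy_wrapper = is_compat ? copy_wrapper_compat : copy_wrapper_normal;
	copy_wrapper((const char *)raw, &w);
	printf("version=0x%x size=%u\n", w.version, w.size);
	return 0;
}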
68121+
68122+static struct acl_subject_label *
68123+lookup_subject_map(const struct acl_subject_label *userp)
68124+{
68125+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
68126+ struct subject_map *match;
68127+
68128+ match = polstate->subj_map_set.s_hash[index];
68129+
68130+ while (match && match->user != userp)
68131+ match = match->next;
68132+
68133+ if (match != NULL)
68134+ return match->kernel;
68135+ else
68136+ return NULL;
68137+}
68138+
68139+static void
68140+insert_subj_map_entry(struct subject_map *subjmap)
68141+{
68142+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
68143+ struct subject_map **curr;
68144+
68145+ subjmap->prev = NULL;
68146+
68147+ curr = &polstate->subj_map_set.s_hash[index];
68148+ if (*curr != NULL)
68149+ (*curr)->prev = subjmap;
68150+
68151+ subjmap->next = *curr;
68152+ *curr = subjmap;
68153+
68154+ return;
68155+}
68156+
68157+static void
68158+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
68159+{
68160+ unsigned int index =
68161+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
68162+ struct acl_role_label **curr;
68163+ struct acl_role_label *tmp, *tmp2;
68164+
68165+ curr = &polstate->acl_role_set.r_hash[index];
68166+
68167+ /* simple case, slot is empty, just set it to our role */
68168+ if (*curr == NULL) {
68169+ *curr = role;
68170+ } else {
68171+ /* example:
68172+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
68173+ 2 -> 3
68174+ */
68175+ /* first check to see if we can already be reached via this slot */
68176+ tmp = *curr;
68177+ while (tmp && tmp != role)
68178+ tmp = tmp->next;
68179+ if (tmp == role) {
68180+ /* we don't need to add ourselves to this slot's chain */
68181+ return;
68182+ }
68183+ /* we need to add ourselves to this chain, two cases */
68184+ if (role->next == NULL) {
68185+ /* simple case, append the current chain to our role */
68186+ role->next = *curr;
68187+ *curr = role;
68188+ } else {
68189+ /* 1 -> 2 -> 3 -> 4
68190+ 2 -> 3 -> 4
68191+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
68192+ */
68193+ /* trickier case: walk our role's chain until we find
68194+ the role for the start of the current slot's chain */
68195+ tmp = role;
68196+ tmp2 = *curr;
68197+ while (tmp->next && tmp->next != tmp2)
68198+ tmp = tmp->next;
68199+ if (tmp->next == tmp2) {
68200+ /* from example above, we found 3, so just
68201+ replace this slot's chain with ours */
68202+ *curr = role;
68203+ } else {
68204+ /* we didn't find a subset of our role's chain
68205+ in the current slot's chain, so append their
68206+ chain to ours, and set us as the first role in
68207+ the slot's chain
68208+
68209+ we could fold this case into the case above,
68210+ but we keep it explicit for clarity
68211+ */
68212+ tmp->next = tmp2;
68213+ *curr = role;
68214+ }
68215+ }
68216+ }
68217+
68218+ return;
68219+}
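The chain merging above lets several hash slots share one role list, so a domain role reachable under many uids/gids is stored once. A compilable userspace rendering of the same algorithm on toy role structs; it condenses the final two branches (which the kernel keeps separate for clarity) into one, and exercises all three cases:

#include <stdio.h>

struct role { int id; struct role *next; };

/* same chain-merging logic as __insert_acl_role_label, on plain ints */
static void insert_role(struct role **slot, struct role *role)
{
	struct role *tmp, *tmp2;

	if (*slot == NULL) {
		*slot = role;
		return;
	}
	/* already reachable from this slot? then nothing to do */
	for (tmp = *slot; tmp; tmp = tmp->next)
		if (tmp == role)
			return;
	if (role->next == NULL) {
		role->next = *slot;
		*slot = role;
		return;
	}
	/* walk our chain looking for the head of the slot's chain */
	tmp = role;
	tmp2 = *slot;
	while (tmp->next && tmp->next != tmp2)
		tmp = tmp->next;
	if (tmp->next != tmp2)
		tmp->next = tmp2;	/* no shared tail: append their chain */
	*slot = role;			/* either way we become the head */
}

int main(void)
{
	struct role r3 = { 3, NULL }, r2 = { 2, NULL }, r1 = { 1, NULL };
	struct role *slot_a = NULL, *slot_b = NULL, *p;

	insert_role(&slot_a, &r3);
	insert_role(&slot_a, &r2);	/* slot_a: 2 -> 3 */
	insert_role(&slot_a, &r1);	/* slot_a: 1 -> 2 -> 3 */
	insert_role(&slot_a, &r2);	/* no-op: 2 is already reachable */

	insert_role(&slot_b, &r3);	/* slot_b: 3 */
	insert_role(&slot_b, &r1);	/* shared tail: slot_b becomes 1 -> 2 -> 3 */

	for (p = slot_b; p; p = p->next)
		printf("%d ", p->id);	/* prints: 1 2 3 */
	printf("\n");
	return 0;
}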
68220+
68221+static void
68222+insert_acl_role_label(struct acl_role_label *role)
68223+{
68224+ int i;
68225+
68226+ if (polstate->role_list == NULL) {
68227+ polstate->role_list = role;
68228+ role->prev = NULL;
68229+ } else {
68230+ role->prev = polstate->role_list;
68231+ polstate->role_list = role;
68232+ }
68233+
68234+ /* used for hash chains */
68235+ role->next = NULL;
68236+
68237+ if (role->roletype & GR_ROLE_DOMAIN) {
68238+ for (i = 0; i < role->domain_child_num; i++)
68239+ __insert_acl_role_label(role, role->domain_children[i]);
68240+ } else
68241+ __insert_acl_role_label(role, role->uidgid);
68242+}
68243+
68244+static int
68245+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
68246+{
68247+ struct name_entry **curr, *nentry;
68248+ struct inodev_entry *ientry;
68249+ unsigned int len = strlen(name);
68250+ unsigned int key = full_name_hash(name, len);
68251+ unsigned int index = key % polstate->name_set.n_size;
68252+
68253+ curr = &polstate->name_set.n_hash[index];
68254+
68255+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
68256+ curr = &((*curr)->next);
68257+
68258+ if (*curr != NULL)
68259+ return 1;
68260+
68261+ nentry = acl_alloc(sizeof (struct name_entry));
68262+ if (nentry == NULL)
68263+ return 0;
68264+ ientry = acl_alloc(sizeof (struct inodev_entry));
68265+ if (ientry == NULL)
68266+ return 0;
68267+ ientry->nentry = nentry;
68268+
68269+ nentry->key = key;
68270+ nentry->name = name;
68271+ nentry->inode = inode;
68272+ nentry->device = device;
68273+ nentry->len = len;
68274+ nentry->deleted = deleted;
68275+
68276+ nentry->prev = NULL;
68277+ curr = &polstate->name_set.n_hash[index];
68278+ if (*curr != NULL)
68279+ (*curr)->prev = nentry;
68280+ nentry->next = *curr;
68281+ *curr = nentry;
68282+
68283+ /* insert us into the table searchable by inode/dev */
68284+ __insert_inodev_entry(polstate, ientry);
68285+
68286+ return 1;
68287+}
68288+
68289+/* we allocate chained hash tables, so the optimal size is where the load factor (lambda) is ~1 */
68290+
68291+static void *
68292+create_table(__u32 * len, int elementsize)
68293+{
68294+ unsigned int table_sizes[] = {
68295+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
68296+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
68297+ 4194301, 8388593, 16777213, 33554393, 67108859
68298+ };
68299+ void *newtable = NULL;
68300+ unsigned int pwr = 0;
68301+
68302+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
68303+ table_sizes[pwr] <= *len)
68304+ pwr++;
68305+
68306+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
68307+ return newtable;
68308+
68309+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
68310+ newtable =
68311+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
68312+ else
68313+ newtable = vmalloc(table_sizes[pwr] * elementsize);
68314+
68315+ *len = table_sizes[pwr];
68316+
68317+ return newtable;
68318+}
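create_table rounds the caller's requested element count up to the next prime from a fixed ladder, since taking the hash modulo a prime spreads chains evenly (the lambda ~ 1 comment above), and picks kmalloc or vmalloc depending on whether the table fits in a page. A userspace sketch under the assumption that plain calloc stands in for both allocators and the ULONG_MAX overflow guard is elided:

#include <stdio.h>
#include <stdlib.h>

/* same prime ladder as create_table; primes keep index = key % size well spread */
static const unsigned int table_sizes[] = {
	7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
	32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
	4194301, 8388593, 16777213, 33554393, 67108859
};

static void *create_table(unsigned int *len, size_t elementsize)
{
	size_t n = sizeof(table_sizes) / sizeof(table_sizes[0]);
	unsigned int pwr = 0;

	while (pwr < n - 1 && table_sizes[pwr] <= *len)
		pwr++;
	if (table_sizes[pwr] <= *len)	/* too large even for the biggest prime */
		return NULL;
	*len = table_sizes[pwr];	/* report the bucket count back */
	return calloc(*len, elementsize);
}

int main(void)
{
	unsigned int want = 1000;	/* e.g. 1000 subjects */
	void *tbl = create_table(&want, sizeof(void *));

	printf("allocated %u buckets (%s)\n", want, tbl ? "ok" : "failed");
	free(tbl);
	return 0;
}

With a request of 1000 the ladder stops at 1021, giving roughly one entry per bucket.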
68319+
68320+static int
68321+init_variables(const struct gr_arg *arg, bool reload)
68322+{
68323+ struct task_struct *reaper = init_pid_ns.child_reaper;
68324+ unsigned int stacksize;
68325+
68326+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
68327+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
68328+ polstate->name_set.n_size = arg->role_db.num_objects;
68329+ polstate->inodev_set.i_size = arg->role_db.num_objects;
68330+
68331+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
68332+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
68333+ return 1;
68334+
68335+ if (!reload) {
68336+ if (!gr_init_uidset())
68337+ return 1;
68338+ }
68339+
68340+ /* set up the stack that holds allocation info */
68341+
68342+ stacksize = arg->role_db.num_pointers + 5;
68343+
68344+ if (!acl_alloc_stack_init(stacksize))
68345+ return 1;
68346+
68347+ if (!reload) {
68348+ /* grab reference for the real root dentry and vfsmount */
68349+ get_fs_root(reaper->fs, &gr_real_root);
68350+
68351+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
68352+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
68353+#endif
68354+
68355+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
68356+ if (fakefs_obj_rw == NULL)
68357+ return 1;
68358+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
68359+
68360+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
68361+ if (fakefs_obj_rwx == NULL)
68362+ return 1;
68363+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
68364+ }
68365+
68366+ polstate->subj_map_set.s_hash =
68367+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
68368+ polstate->acl_role_set.r_hash =
68369+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
68370+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
68371+ polstate->inodev_set.i_hash =
68372+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
68373+
68374+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
68375+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
68376+ return 1;
68377+
68378+ memset(polstate->subj_map_set.s_hash, 0,
68379+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
68380+ memset(polstate->acl_role_set.r_hash, 0,
68381+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
68382+ memset(polstate->name_set.n_hash, 0,
68383+ sizeof (struct name_entry *) * polstate->name_set.n_size);
68384+ memset(polstate->inodev_set.i_hash, 0,
68385+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
68386+
68387+ return 0;
68388+}
68389+
68390+/* free information that is not needed after startup;
68391+ currently this is the user->kernel pointer mappings for subjects
68392+*/
68393+
68394+static void
68395+free_init_variables(void)
68396+{
68397+ __u32 i;
68398+
68399+ if (polstate->subj_map_set.s_hash) {
68400+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
68401+ if (polstate->subj_map_set.s_hash[i]) {
68402+ kfree(polstate->subj_map_set.s_hash[i]);
68403+ polstate->subj_map_set.s_hash[i] = NULL;
68404+ }
68405+ }
68406+
68407+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
68408+ PAGE_SIZE)
68409+ kfree(polstate->subj_map_set.s_hash);
68410+ else
68411+ vfree(polstate->subj_map_set.s_hash);
68412+ }
68413+
68414+ return;
68415+}
68416+
68417+static void
68418+free_variables(bool reload)
68419+{
68420+ struct acl_subject_label *s;
68421+ struct acl_role_label *r;
68422+ struct task_struct *task, *task2;
68423+ unsigned int x;
68424+
68425+ if (!reload) {
68426+ gr_clear_learn_entries();
68427+
68428+ read_lock(&tasklist_lock);
68429+ do_each_thread(task2, task) {
68430+ task->acl_sp_role = 0;
68431+ task->acl_role_id = 0;
68432+ task->inherited = 0;
68433+ task->acl = NULL;
68434+ task->role = NULL;
68435+ } while_each_thread(task2, task);
68436+ read_unlock(&tasklist_lock);
68437+
68438+ kfree(fakefs_obj_rw);
68439+ fakefs_obj_rw = NULL;
68440+ kfree(fakefs_obj_rwx);
68441+ fakefs_obj_rwx = NULL;
68442+
68443+ /* release the reference to the real root dentry and vfsmount */
68444+ path_put(&gr_real_root);
68445+ memset(&gr_real_root, 0, sizeof(gr_real_root));
68446+ }
68447+
68448+ /* free all object hash tables */
68449+
68450+ FOR_EACH_ROLE_START(r)
68451+ if (r->subj_hash == NULL)
68452+ goto next_role;
68453+ FOR_EACH_SUBJECT_START(r, s, x)
68454+ if (s->obj_hash == NULL)
68455+ break;
68456+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
68457+ kfree(s->obj_hash);
68458+ else
68459+ vfree(s->obj_hash);
68460+ FOR_EACH_SUBJECT_END(s, x)
68461+ FOR_EACH_NESTED_SUBJECT_START(r, s)
68462+ if (s->obj_hash == NULL)
68463+ break;
68464+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
68465+ kfree(s->obj_hash);
68466+ else
68467+ vfree(s->obj_hash);
68468+ FOR_EACH_NESTED_SUBJECT_END(s)
68469+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
68470+ kfree(r->subj_hash);
68471+ else
68472+ vfree(r->subj_hash);
68473+ r->subj_hash = NULL;
68474+next_role:
68475+ FOR_EACH_ROLE_END(r)
68476+
68477+ acl_free_all();
68478+
68479+ if (polstate->acl_role_set.r_hash) {
68480+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
68481+ PAGE_SIZE)
68482+ kfree(polstate->acl_role_set.r_hash);
68483+ else
68484+ vfree(polstate->acl_role_set.r_hash);
68485+ }
68486+ if (polstate->name_set.n_hash) {
68487+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
68488+ PAGE_SIZE)
68489+ kfree(polstate->name_set.n_hash);
68490+ else
68491+ vfree(polstate->name_set.n_hash);
68492+ }
68493+
68494+ if (polstate->inodev_set.i_hash) {
68495+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
68496+ PAGE_SIZE)
68497+ kfree(polstate->inodev_set.i_hash);
68498+ else
68499+ vfree(polstate->inodev_set.i_hash);
68500+ }
68501+
68502+ if (!reload)
68503+ gr_free_uidset();
68504+
68505+ memset(&polstate->name_set, 0, sizeof (struct name_db));
68506+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
68507+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
68508+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
68509+
68510+ polstate->default_role = NULL;
68511+ polstate->kernel_role = NULL;
68512+ polstate->role_list = NULL;
68513+
68514+ return;
68515+}
68516+
68517+static struct acl_subject_label *
68518+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
68519+
68520+static int alloc_and_copy_string(char **name, unsigned int maxlen)
68521+{
68522+ unsigned int len = strnlen_user(*name, maxlen);
68523+ char *tmp;
68524+
68525+ if (!len || len >= maxlen)
68526+ return -EINVAL;
68527+
68528+ if ((tmp = (char *) acl_alloc(len)) == NULL)
68529+ return -ENOMEM;
68530+
68531+ if (copy_from_user(tmp, *name, len))
68532+ return -EFAULT;
68533+
68534+ tmp[len-1] = '\0';
68535+ *name = tmp;
68536+
68537+ return 0;
68538+}
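alloc_and_copy_string bounds the copy with strnlen_user, whose count includes the trailing NUL and which returns 0 on a faulting pointer, so the "!len || len >= maxlen" test rejects faults and oversized or unterminated strings before anything is allocated. A compilable userspace analog with plain strnlen/malloc (there is no fault case to emulate here):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

/* userspace analog of alloc_and_copy_string */
static int alloc_and_copy_string(char **name, unsigned int maxlen)
{
	size_t len = strnlen(*name, maxlen) + 1;	/* + 1 mimics strnlen_user counting the NUL */
	char *tmp;

	if (len >= maxlen)	/* oversized or unterminated input */
		return -EINVAL;
	tmp = malloc(len);
	if (tmp == NULL)
		return -ENOMEM;
	memcpy(tmp, *name, len);
	tmp[len - 1] = '\0';	/* force termination, as the original does */
	*name = tmp;
	return 0;
}

int main(void)
{
	char *s = "/bin/sh";

	if (alloc_and_copy_string(&s, 4096) == 0)
		printf("copied: %s\n", s);
	return 0;
}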
68539+
68540+static int
68541+copy_user_glob(struct acl_object_label *obj)
68542+{
68543+ struct acl_object_label *g_tmp, **guser;
68544+ int error;
68545+
68546+ if (obj->globbed == NULL)
68547+ return 0;
68548+
68549+ guser = &obj->globbed;
68550+ while (*guser) {
68551+ g_tmp = (struct acl_object_label *)
68552+ acl_alloc(sizeof (struct acl_object_label));
68553+ if (g_tmp == NULL)
68554+ return -ENOMEM;
68555+
68556+ if (copy_acl_object_label(g_tmp, *guser))
68557+ return -EFAULT;
68558+
68559+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
68560+ if (error)
68561+ return error;
68562+
68563+ *guser = g_tmp;
68564+ guser = &(g_tmp->next);
68565+ }
68566+
68567+ return 0;
68568+}
68569+
68570+static int
68571+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
68572+ struct acl_role_label *role)
68573+{
68574+ struct acl_object_label *o_tmp;
68575+ int ret;
68576+
68577+ while (userp) {
68578+ if ((o_tmp = (struct acl_object_label *)
68579+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
68580+ return -ENOMEM;
68581+
68582+ if (copy_acl_object_label(o_tmp, userp))
68583+ return -EFAULT;
68584+
68585+ userp = o_tmp->prev;
68586+
68587+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
68588+ if (ret)
68589+ return ret;
68590+
68591+ insert_acl_obj_label(o_tmp, subj);
68592+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
68593+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
68594+ return -ENOMEM;
68595+
68596+ ret = copy_user_glob(o_tmp);
68597+ if (ret)
68598+ return ret;
68599+
68600+ if (o_tmp->nested) {
68601+ int already_copied;
68602+
68603+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
68604+ if (IS_ERR(o_tmp->nested))
68605+ return PTR_ERR(o_tmp->nested);
68606+
68607+ /* insert into nested subject list if we haven't copied this one yet
68608+ to prevent duplicate entries */
68609+ if (!already_copied) {
68610+ o_tmp->nested->next = role->hash->first;
68611+ role->hash->first = o_tmp->nested;
68612+ }
68613+ }
68614+ }
68615+
68616+ return 0;
68617+}
68618+
68619+static __u32
68620+count_user_subjs(struct acl_subject_label *userp)
68621+{
68622+ struct acl_subject_label s_tmp;
68623+ __u32 num = 0;
68624+
68625+ while (userp) {
68626+ if (copy_acl_subject_label(&s_tmp, userp))
68627+ break;
68628+ num++; /* count this subject */
68629+ userp = s_tmp.prev;
68630+ }
68631+
68632+ return num;
68633+}
68634+
68635+static int
68636+copy_user_allowedips(struct acl_role_label *rolep)
68637+{
68638+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
68639+
68640+ ruserip = rolep->allowed_ips;
68641+
68642+ while (ruserip) {
68643+ rlast = rtmp;
68644+
68645+ if ((rtmp = (struct role_allowed_ip *)
68646+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
68647+ return -ENOMEM;
68648+
68649+ if (copy_role_allowed_ip(rtmp, ruserip))
68650+ return -EFAULT;
68651+
68652+ ruserip = rtmp->prev;
68653+
68654+ if (!rlast) {
68655+ rtmp->prev = NULL;
68656+ rolep->allowed_ips = rtmp;
68657+ } else {
68658+ rlast->next = rtmp;
68659+ rtmp->prev = rlast;
68660+ }
68661+
68662+ if (!ruserip)
68663+ rtmp->next = NULL;
68664+ }
68665+
68666+ return 0;
68667+}
68668+
68669+static int
68670+copy_user_transitions(struct acl_role_label *rolep)
68671+{
68672+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
68673+ int error;
68674+
68675+ rusertp = rolep->transitions;
68676+
68677+ while (rusertp) {
68678+ rlast = rtmp;
68679+
68680+ if ((rtmp = (struct role_transition *)
68681+ acl_alloc(sizeof (struct role_transition))) == NULL)
68682+ return -ENOMEM;
68683+
68684+ if (copy_role_transition(rtmp, rusertp))
68685+ return -EFAULT;
68686+
68687+ rusertp = rtmp->prev;
68688+
68689+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
68690+ if (error)
68691+ return error;
68692+
68693+ if (!rlast) {
68694+ rtmp->prev = NULL;
68695+ rolep->transitions = rtmp;
68696+ } else {
68697+ rlast->next = rtmp;
68698+ rtmp->prev = rlast;
68699+ }
68700+
68701+ if (!rusertp)
68702+ rtmp->next = NULL;
68703+ }
68704+
68705+ return 0;
68706+}
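copy_user_allowedips and copy_user_transitions share one pattern: the userland list is threaded backwards through ->prev, and the kernel copy is rebuilt front-to-back with prev/next fixed up as each node is copied in. A self-contained sketch of the same rebuild, with memcpy standing in for the copy_role_* helpers and malloc for acl_alloc:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct node { int val; struct node *prev, *next; };

/* rebuild a forward-linked copy of a list threaded via ->prev,
 * mirroring copy_user_transitions */
static struct node *copy_user_list(const struct node *userp)
{
	struct node *head = NULL, *last = NULL, *tmp;

	while (userp) {
		tmp = malloc(sizeof(*tmp));
		if (!tmp)
			return NULL;	/* the kernel code returns -ENOMEM here */
		memcpy(tmp, userp, sizeof(*tmp));	/* stand-in for the copy_from_user step */
		userp = tmp->prev;	/* advance via the copied-in prev pointer */

		if (!last) {
			tmp->prev = NULL;
			head = tmp;
		} else {
			last->next = tmp;
			tmp->prev = last;
		}
		if (!userp)
			tmp->next = NULL;	/* terminate on the final node */
		last = tmp;
	}
	return head;
}

int main(void)
{
	struct node c = { 3, NULL, NULL }, b = { 2, &c, NULL }, a = { 1, &b, NULL };
	struct node *n = copy_user_list(&a);

	for (; n; n = n->next)
		printf("%d ", n->val);	/* prints: 1 2 3 */
	printf("\n");
	return 0;
}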
68707+
68708+static __u32 count_user_objs(const struct acl_object_label __user *userp)
68709+{
68710+ struct acl_object_label o_tmp;
68711+ __u32 num = 0;
68712+
68713+ while (userp) {
68714+ if (copy_acl_object_label(&o_tmp, userp))
68715+ break;
68716+
68717+ userp = o_tmp.prev;
68718+ num++;
68719+ }
68720+
68721+ return num;
68722+}
68723+
68724+static struct acl_subject_label *
68725+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
68726+{
68727+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
68728+ __u32 num_objs;
68729+ struct acl_ip_label **i_tmp, *i_utmp2;
68730+ struct gr_hash_struct ghash;
68731+ struct subject_map *subjmap;
68732+ unsigned int i_num;
68733+ int err;
68734+
68735+ if (already_copied != NULL)
68736+ *already_copied = 0;
68737+
68738+ s_tmp = lookup_subject_map(userp);
68739+
68740+ /* we've already copied this subject into the kernel; just return
68741+ the reference to it and don't copy it over again
68742+ */
68743+ if (s_tmp) {
68744+ if (already_copied != NULL)
68745+ *already_copied = 1;
68746+ return(s_tmp);
68747+ }
68748+
68749+ if ((s_tmp = (struct acl_subject_label *)
68750+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
68751+ return ERR_PTR(-ENOMEM);
68752+
68753+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
68754+ if (subjmap == NULL)
68755+ return ERR_PTR(-ENOMEM);
68756+
68757+ subjmap->user = userp;
68758+ subjmap->kernel = s_tmp;
68759+ insert_subj_map_entry(subjmap);
68760+
68761+ if (copy_acl_subject_label(s_tmp, userp))
68762+ return ERR_PTR(-EFAULT);
68763+
68764+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
68765+ if (err)
68766+ return ERR_PTR(err);
68767+
68768+ if (!strcmp(s_tmp->filename, "/"))
68769+ role->root_label = s_tmp;
68770+
68771+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
68772+ return ERR_PTR(-EFAULT);
68773+
68774+ /* copy user and group transition tables */
68775+
68776+ if (s_tmp->user_trans_num) {
68777+ uid_t *uidlist;
68778+
68779+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
68780+ if (uidlist == NULL)
68781+ return ERR_PTR(-ENOMEM);
68782+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
68783+ return ERR_PTR(-EFAULT);
68784+
68785+ s_tmp->user_transitions = uidlist;
68786+ }
68787+
68788+ if (s_tmp->group_trans_num) {
68789+ gid_t *gidlist;
68790+
68791+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
68792+ if (gidlist == NULL)
68793+ return ERR_PTR(-ENOMEM);
68794+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
68795+ return ERR_PTR(-EFAULT);
68796+
68797+ s_tmp->group_transitions = gidlist;
68798+ }
68799+
68800+ /* set up object hash table */
68801+ num_objs = count_user_objs(ghash.first);
68802+
68803+ s_tmp->obj_hash_size = num_objs;
68804+ s_tmp->obj_hash =
68805+ (struct acl_object_label **)
68806+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
68807+
68808+ if (!s_tmp->obj_hash)
68809+ return ERR_PTR(-ENOMEM);
68810+
68811+ memset(s_tmp->obj_hash, 0,
68812+ s_tmp->obj_hash_size *
68813+ sizeof (struct acl_object_label *));
68814+
68815+ /* add in objects */
68816+ err = copy_user_objs(ghash.first, s_tmp, role);
68817+
68818+ if (err)
68819+ return ERR_PTR(err);
68820+
68821+ /* set pointer for parent subject */
68822+ if (s_tmp->parent_subject) {
68823+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
68824+
68825+ if (IS_ERR(s_tmp2))
68826+ return s_tmp2;
68827+
68828+ s_tmp->parent_subject = s_tmp2;
68829+ }
68830+
68831+ /* add in ip acls */
68832+
68833+ if (!s_tmp->ip_num) {
68834+ s_tmp->ips = NULL;
68835+ goto insert;
68836+ }
68837+
68838+ i_tmp =
68839+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
68840+ sizeof (struct acl_ip_label *));
68841+
68842+ if (!i_tmp)
68843+ return ERR_PTR(-ENOMEM);
68844+
68845+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
68846+ *(i_tmp + i_num) =
68847+ (struct acl_ip_label *)
68848+ acl_alloc(sizeof (struct acl_ip_label));
68849+ if (!*(i_tmp + i_num))
68850+ return ERR_PTR(-ENOMEM);
68851+
68852+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
68853+ return ERR_PTR(-EFAULT);
68854+
68855+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
68856+ return ERR_PTR(-EFAULT);
68857+
68858+ if ((*(i_tmp + i_num))->iface == NULL)
68859+ continue;
68860+
68861+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
68862+ if (err)
68863+ return ERR_PTR(err);
68864+ }
68865+
68866+ s_tmp->ips = i_tmp;
68867+
68868+insert:
68869+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
68870+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
68871+ return ERR_PTR(-ENOMEM);
68872+
68873+ return s_tmp;
68874+}
68875+
68876+static int
68877+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
68878+{
68879+ struct acl_subject_label s_pre;
68880+ struct acl_subject_label * ret;
68881+ int err;
68882+
68883+ while (userp) {
68884+ if (copy_acl_subject_label(&s_pre, userp))
68885+ return -EFAULT;
68886+
68887+ ret = do_copy_user_subj(userp, role, NULL);
68888+
68889+ err = PTR_ERR(ret);
68890+ if (IS_ERR(ret))
68891+ return err;
68892+
68893+ insert_acl_subj_label(ret, role);
68894+
68895+ userp = s_pre.prev;
68896+ }
68897+
68898+ return 0;
68899+}
68900+
68901+static int
68902+copy_user_acl(struct gr_arg *arg)
68903+{
68904+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
68905+ struct acl_subject_label *subj_list;
68906+ struct sprole_pw *sptmp;
68907+ struct gr_hash_struct *ghash;
68908+ uid_t *domainlist;
68909+ unsigned int r_num;
68910+ int err = 0;
68911+ __u16 i;
68912+ __u32 num_subjs;
68913+
68914+ /* we need a default and kernel role */
68915+ if (arg->role_db.num_roles < 2)
68916+ return -EINVAL;
68917+
68918+ /* copy special role authentication info from userspace */
68919+
68920+ polstate->num_sprole_pws = arg->num_sprole_pws;
68921+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
68922+
68923+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
68924+ return -ENOMEM;
68925+
68926+ for (i = 0; i < polstate->num_sprole_pws; i++) {
68927+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
68928+ if (!sptmp)
68929+ return -ENOMEM;
68930+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
68931+ return -EFAULT;
68932+
68933+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
68934+ if (err)
68935+ return err;
68936+
68937+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
68938+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
68939+#endif
68940+
68941+ polstate->acl_special_roles[i] = sptmp;
68942+ }
68943+
68944+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
68945+
68946+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
68947+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
68948+
68949+ if (!r_tmp)
68950+ return -ENOMEM;
68951+
68952+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
68953+ return -EFAULT;
68954+
68955+ if (copy_acl_role_label(r_tmp, r_utmp2))
68956+ return -EFAULT;
68957+
68958+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
68959+ if (err)
68960+ return err;
68961+
68962+ if (!strcmp(r_tmp->rolename, "default")
68963+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
68964+ polstate->default_role = r_tmp;
68965+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
68966+ polstate->kernel_role = r_tmp;
68967+ }
68968+
68969+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
68970+ return -ENOMEM;
68971+
68972+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
68973+ return -EFAULT;
68974+
68975+ r_tmp->hash = ghash;
68976+
68977+ num_subjs = count_user_subjs(r_tmp->hash->first);
68978+
68979+ r_tmp->subj_hash_size = num_subjs;
68980+ r_tmp->subj_hash =
68981+ (struct acl_subject_label **)
68982+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
68983+
68984+ if (!r_tmp->subj_hash)
68985+ return -ENOMEM;
68986+
68987+ err = copy_user_allowedips(r_tmp);
68988+ if (err)
68989+ return err;
68990+
68991+ /* copy domain info */
68992+ if (r_tmp->domain_children != NULL) {
68993+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
68994+ if (domainlist == NULL)
68995+ return -ENOMEM;
68996+
68997+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
68998+ return -EFAULT;
68999+
69000+ r_tmp->domain_children = domainlist;
69001+ }
69002+
69003+ err = copy_user_transitions(r_tmp);
69004+ if (err)
69005+ return err;
69006+
69007+ memset(r_tmp->subj_hash, 0,
69008+ r_tmp->subj_hash_size *
69009+ sizeof (struct acl_subject_label *));
69010+
69011+ /* acquire the list of subjects, then NULL out
69012+ the list prior to parsing the subjects for this role,
69013+ as during this parsing the list is replaced with a list
69014+ of *nested* subjects for the role
69015+ */
69016+ subj_list = r_tmp->hash->first;
69017+
69018+ /* set nested subject list to null */
69019+ r_tmp->hash->first = NULL;
69020+
69021+ err = copy_user_subjs(subj_list, r_tmp);
69022+
69023+ if (err)
69024+ return err;
69025+
69026+ insert_acl_role_label(r_tmp);
69027+ }
69028+
69029+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
69030+ return -EINVAL;
69031+
69032+ return err;
69033+}
69034+
69035+static int gracl_reload_apply_policies(void *reload)
69036+{
69037+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
69038+ struct task_struct *task, *task2;
69039+ struct acl_role_label *role, *rtmp;
69040+ struct acl_subject_label *subj;
69041+ const struct cred *cred;
69042+ int role_applied;
69043+ int ret = 0;
69044+
69045+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
69046+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
69047+
69048+ /* first make sure we'll be able to apply the new policy cleanly */
69049+ do_each_thread(task2, task) {
69050+ if (task->exec_file == NULL)
69051+ continue;
69052+ role_applied = 0;
69053+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
69054+ /* preserve special roles */
69055+ FOR_EACH_ROLE_START(role)
69056+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
69057+ rtmp = task->role;
69058+ task->role = role;
69059+ role_applied = 1;
69060+ break;
69061+ }
69062+ FOR_EACH_ROLE_END(role)
69063+ }
69064+ if (!role_applied) {
69065+ cred = __task_cred(task);
69066+ rtmp = task->role;
69067+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
69068+ }
69069+ /* this handles non-nested inherited subjects; nested subjects will still
69070+ be dropped for now */
69071+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
69072+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL);
69073+ /* change the role back so that we've made no modifications to the policy */
69074+ task->role = rtmp;
69075+
69076+ if (subj == NULL || task->tmpacl == NULL) {
69077+ ret = -EINVAL;
69078+ goto out;
69079+ }
69080+ } while_each_thread(task2, task);
69081+
69082+ /* now actually apply the policy */
69083+
69084+ do_each_thread(task2, task) {
69085+ if (task->exec_file) {
69086+ role_applied = 0;
69087+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
69088+ /* preserve special roles */
69089+ FOR_EACH_ROLE_START(role)
69090+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
69091+ task->role = role;
69092+ role_applied = 1;
69093+ break;
69094+ }
69095+ FOR_EACH_ROLE_END(role)
69096+ }
69097+ if (!role_applied) {
69098+ cred = __task_cred(task);
69099+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
69100+ }
69101+ /* this handles non-nested inherited subjects; nested subjects will still
69102+ be dropped for now */
69103+ if (!reload_state->oldmode && task->inherited)
69104+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
69105+ else {
69106+ /* looked up and tagged to the task previously */
69107+ subj = task->tmpacl;
69108+ }
69109+ /* subj will be non-null */
69110+ __gr_apply_subject_to_task(polstate, task, subj);
69111+ if (reload_state->oldmode) {
69112+ task->acl_role_id = 0;
69113+ task->acl_sp_role = 0;
69114+ task->inherited = 0;
69115+ }
69116+ } else {
69117+ // it's a kernel process
69118+ task->role = polstate->kernel_role;
69119+ task->acl = polstate->kernel_role->root_label;
69120+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
69121+ task->acl->mode &= ~GR_PROCFIND;
69122+#endif
69123+ }
69124+ } while_each_thread(task2, task);
69125+
69126+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
69127+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
69128+
69129+out:
69130+
69131+ return ret;
69132+}
69133+
69134+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
69135+{
69136+ struct gr_reload_state new_reload_state = { };
69137+ int err;
69138+
69139+ new_reload_state.oldpolicy_ptr = polstate;
69140+ new_reload_state.oldalloc_ptr = current_alloc_state;
69141+ new_reload_state.oldmode = oldmode;
69142+
69143+ current_alloc_state = &new_reload_state.newalloc;
69144+ polstate = &new_reload_state.newpolicy;
69145+
69146+ /* everything relevant is now saved off; copy in the new policy */
69147+ if (init_variables(args, true)) {
69148+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
69149+ err = -ENOMEM;
69150+ goto error;
69151+ }
69152+
69153+ err = copy_user_acl(args);
69154+ free_init_variables();
69155+ if (err)
69156+ goto error;
69157+ /* the new policy is copied in, with the old policy available via saved_state
69158+ first go through applying roles, making sure to preserve special roles
69159+ then apply new subjects, making sure to preserve inherited and nested subjects,
69160+ though currently only inherited subjects will be preserved
69161+ */
69162+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
69163+ if (err)
69164+ goto error;
69165+
69166+ /* we've now applied the new policy, so restore the old policy state to free it */
69167+ polstate = &new_reload_state.oldpolicy;
69168+ current_alloc_state = &new_reload_state.oldalloc;
69169+ free_variables(true);
69170+
69171+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
69172+ to running_polstate/current_alloc_state inside stop_machine
69173+ */
69174+ err = 0;
69175+ goto out;
69176+error:
69177+ /* on error of loading the new policy, we'll just keep the previous
69178+ policy set around
69179+ */
69180+ free_variables(true);
69181+
69182+ /* doesn't affect runtime, but maintains consistent state */
69183+out:
69184+ polstate = new_reload_state.oldpolicy_ptr;
69185+ current_alloc_state = new_reload_state.oldalloc_ptr;
69186+
69187+ return err;
69188+}
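The overall shape of gracl_reload: the new policy is built off to the side while the old one keeps running, the live pointers move only at a quiescent point (stop_machine in the kernel), and whichever state lost is then freed. A much-reduced userspace sketch of that swap discipline, with a trivial validation step standing in for copy_user_acl and a single pointer store standing in for the stop_machine moment:

#include <stdio.h>
#include <stdlib.h>

struct policy { int nroles; };

static struct policy *polstate;		/* the live policy pointer */

static int build_policy(struct policy *p, int nroles)
{
	if (nroles < 2)
		return -1;	/* a default and a kernel role are required */
	p->nroles = nroles;
	return 0;
}

static int reload(int nroles)
{
	struct policy *newpol = calloc(1, sizeof(*newpol));
	struct policy *oldpol = polstate;

	if (!newpol)
		return -1;
	if (build_policy(newpol, nroles)) {
		free(newpol);	/* error path: keep the previous policy running */
		return -1;
	}
	polstate = newpol;	/* the swap: atomic to observers */
	free(oldpol);		/* only now is the old state safe to release */
	return 0;
}

int main(void)
{
	polstate = calloc(1, sizeof(*polstate));
	build_policy(polstate, 3);

	printf("reload(1) -> %d, roles = %d\n", reload(1), polstate->nroles);
	printf("reload(5) -> %d, roles = %d\n", reload(5), polstate->nroles);
	free(polstate);
	return 0;
}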
69189+
69190+static int
69191+gracl_init(struct gr_arg *args)
69192+{
69193+ int error = 0;
69194+
69195+ memcpy(&gr_system_salt, args->salt, sizeof(gr_system_salt));
69196+ memcpy(&gr_system_sum, args->sum, sizeof(gr_system_sum));
69197+
69198+ if (init_variables(args, false)) {
69199+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
69200+ error = -ENOMEM;
69201+ goto out;
69202+ }
69203+
69204+ error = copy_user_acl(args);
69205+ free_init_variables();
69206+ if (error)
69207+ goto out;
69208+
69209+ error = gr_set_acls(0);
69210+ if (error)
69211+ goto out;
69212+
69213+ gr_enable_rbac_system();
69214+
69215+ return 0;
69216+
69217+out:
69218+ free_variables(false);
69219+ return error;
69220+}
69221+
69222+static int
69223+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
69224+ unsigned char **sum)
69225+{
69226+ struct acl_role_label *r;
69227+ struct role_allowed_ip *ipp;
69228+ struct role_transition *trans;
69229+ unsigned int i;
69230+ int found = 0;
69231+ u32 curr_ip = current->signal->curr_ip;
69232+
69233+ current->signal->saved_ip = curr_ip;
69234+
69235+ /* check transition table */
69236+
69237+ for (trans = current->role->transitions; trans; trans = trans->next) {
69238+ if (!strcmp(rolename, trans->rolename)) {
69239+ found = 1;
69240+ break;
69241+ }
69242+ }
69243+
69244+ if (!found)
69245+ return 0;
69246+
69247+ /* handle special roles that do not require authentication
69248+ and check ip */
69249+
69250+ FOR_EACH_ROLE_START(r)
69251+ if (!strcmp(rolename, r->rolename) &&
69252+ (r->roletype & GR_ROLE_SPECIAL)) {
69253+ found = 0;
69254+ if (r->allowed_ips != NULL) {
69255+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
69256+ if ((ntohl(curr_ip) & ipp->netmask) ==
69257+ (ntohl(ipp->addr) & ipp->netmask))
69258+ found = 1;
69259+ }
69260+ } else
69261+ found = 2;
69262+ if (!found)
69263+ return 0;
69264+
69265+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
69266+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
69267+ *salt = NULL;
69268+ *sum = NULL;
69269+ return 1;
69270+ }
69271+ }
69272+ FOR_EACH_ROLE_END(r)
69273+
69274+ for (i = 0; i < polstate->num_sprole_pws; i++) {
69275+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
69276+ *salt = polstate->acl_special_roles[i]->salt;
69277+ *sum = polstate->acl_special_roles[i]->sum;
69278+ return 1;
69279+ }
69280+ }
69281+
69282+ return 0;
69283+}
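The allowed_ips walk above admits a client when its address matches the role's address under the role's netmask, with both addresses converted to host order first. A small sketch of exactly that comparison; the /24 mask value is an illustrative host-order constant:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* same test as the allowed_ips loop: compare client and role addresses
 * in host order, under the role's netmask */
static int ip_allowed(uint32_t curr_ip, uint32_t addr, uint32_t netmask)
{
	return (ntohl(curr_ip) & netmask) == (ntohl(addr) & netmask);
}

int main(void)
{
	uint32_t client, net;

	inet_pton(AF_INET, "192.168.1.42", &client);
	inet_pton(AF_INET, "192.168.1.0", &net);

	printf("in /24: %d\n", ip_allowed(client, net, 0xffffff00));	/* 1 */
	printf("in /32: %d\n", ip_allowed(client, net, 0xffffffff));	/* 0 */
	return 0;
}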
69284+
69285+int gr_check_secure_terminal(struct task_struct *task)
69286+{
69287+ struct task_struct *p, *p2, *p3;
69288+ struct files_struct *files;
69289+ struct fdtable *fdt;
69290+ struct file *our_file = NULL, *file;
69291+ int i;
69292+
69293+ if (task->signal->tty == NULL)
69294+ return 1;
69295+
69296+ files = get_files_struct(task);
69297+ if (files != NULL) {
69298+ rcu_read_lock();
69299+ fdt = files_fdtable(files);
69300+ for (i=0; i < fdt->max_fds; i++) {
69301+ file = fcheck_files(files, i);
69302+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
69303+ get_file(file);
69304+ our_file = file;
69305+ }
69306+ }
69307+ rcu_read_unlock();
69308+ put_files_struct(files);
69309+ }
69310+
69311+ if (our_file == NULL)
69312+ return 1;
69313+
69314+ read_lock(&tasklist_lock);
69315+ do_each_thread(p2, p) {
69316+ files = get_files_struct(p);
69317+ if (files == NULL ||
69318+ (p->signal && p->signal->tty == task->signal->tty)) {
69319+ if (files != NULL)
69320+ put_files_struct(files);
69321+ continue;
69322+ }
69323+ rcu_read_lock();
69324+ fdt = files_fdtable(files);
69325+ for (i=0; i < fdt->max_fds; i++) {
69326+ file = fcheck_files(files, i);
69327+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
69328+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
69329+ p3 = task;
69330+ while (task_pid_nr(p3) > 0) {
69331+ if (p3 == p)
69332+ break;
69333+ p3 = p3->real_parent;
69334+ }
69335+ if (p3 == p)
69336+ break;
69337+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
69338+ gr_handle_alertkill(p);
69339+ rcu_read_unlock();
69340+ put_files_struct(files);
69341+ read_unlock(&tasklist_lock);
69342+ fput(our_file);
69343+ return 0;
69344+ }
69345+ }
69346+ rcu_read_unlock();
69347+ put_files_struct(files);
69348+ } while_each_thread(p2, p);
69349+ read_unlock(&tasklist_lock);
69350+
69351+ fput(our_file);
69352+ return 1;
69353+}
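The p3 loop above walks real_parent pointers to decide whether the process holding an fd on the tty is an ancestor of the task being checked; ancestors (e.g. the shell that spawned us) may legitimately hold it, so only non-ancestors trigger the ttysniff alert. The walk isolated into a sketch on a toy process tree:

#include <stdio.h>

struct proc { int pid; struct proc *real_parent; };

/* the p3 walk from gr_check_secure_terminal: is "p" an ancestor of "task"? */
static int is_ancestor(struct proc *task, struct proc *p)
{
	struct proc *p3 = task;

	while (p3->pid > 0) {	/* stop at the pid-0 boundary, as the kernel does */
		if (p3 == p)
			return 1;
		p3 = p3->real_parent;
	}
	return p3 == p;
}

int main(void)
{
	struct proc idle = { 0, NULL };
	struct proc init = { 1, &idle };
	struct proc shell = { 100, &init };
	struct proc child = { 200, &shell };

	printf("%d\n", is_ancestor(&child, &shell));	/* 1: shell may hold the tty */
	printf("%d\n", is_ancestor(&shell, &child));	/* 0: a sibling/child may not */
	return 0;
}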
69354+
69355+ssize_t
69356+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
69357+{
69358+ struct gr_arg_wrapper uwrap;
69359+ unsigned char *sprole_salt = NULL;
69360+ unsigned char *sprole_sum = NULL;
69361+ int error = 0;
69362+ int error2 = 0;
69363+ size_t req_count = 0;
69364+ unsigned char oldmode = 0;
69365+
69366+ mutex_lock(&gr_dev_mutex);
69367+
69368+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
69369+ error = -EPERM;
69370+ goto out;
69371+ }
69372+
69373+#ifdef CONFIG_COMPAT
69374+ pax_open_kernel();
69375+ if (is_compat_task()) {
69376+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
69377+ copy_gr_arg = &copy_gr_arg_compat;
69378+ copy_acl_object_label = &copy_acl_object_label_compat;
69379+ copy_acl_subject_label = &copy_acl_subject_label_compat;
69380+ copy_acl_role_label = &copy_acl_role_label_compat;
69381+ copy_acl_ip_label = &copy_acl_ip_label_compat;
69382+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
69383+ copy_role_transition = &copy_role_transition_compat;
69384+ copy_sprole_pw = &copy_sprole_pw_compat;
69385+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
69386+ copy_pointer_from_array = &copy_pointer_from_array_compat;
69387+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
69388+ } else {
69389+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
69390+ copy_gr_arg = &copy_gr_arg_normal;
69391+ copy_acl_object_label = &copy_acl_object_label_normal;
69392+ copy_acl_subject_label = &copy_acl_subject_label_normal;
69393+ copy_acl_role_label = &copy_acl_role_label_normal;
69394+ copy_acl_ip_label = &copy_acl_ip_label_normal;
69395+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
69396+ copy_role_transition = &copy_role_transition_normal;
69397+ copy_sprole_pw = &copy_sprole_pw_normal;
69398+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
69399+ copy_pointer_from_array = &copy_pointer_from_array_normal;
69400+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
69401+ }
69402+ pax_close_kernel();
69403+#endif
69404+
69405+ req_count = get_gr_arg_wrapper_size();
69406+
69407+ if (count != req_count) {
69408+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
69409+ error = -EINVAL;
69410+ goto out;
69411+ }
69412+
69414+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
69415+ gr_auth_expires = 0;
69416+ gr_auth_attempts = 0;
69417+ }
69418+
69419+ error = copy_gr_arg_wrapper(buf, &uwrap);
69420+ if (error)
69421+ goto out;
69422+
69423+ error = copy_gr_arg(uwrap.arg, &gr_usermode);
69424+ if (error)
69425+ goto out;
69426+
69427+ if (gr_usermode.mode != GR_SPROLE && gr_usermode.mode != GR_SPROLEPAM &&
69428+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
69429+ time_after(gr_auth_expires, get_seconds())) {
69430+ error = -EBUSY;
69431+ goto out;
69432+ }
69433+
69434+ /* if a non-root user is trying to do anything other than use a
69435+ special role, do not attempt authentication and do not count the
69436+ attempt towards authentication lockout
69437+ */
69438+
69439+ if (gr_usermode.mode != GR_SPROLE && gr_usermode.mode != GR_STATUS &&
69440+ gr_usermode.mode != GR_UNSPROLE && gr_usermode.mode != GR_SPROLEPAM &&
69441+ gr_is_global_nonroot(current_uid())) {
69442+ error = -EPERM;
69443+ goto out;
69444+ }
69445+
69446+ /* ensure pw and special role name are null terminated */
69447+
69448+ gr_usermode.pw[GR_PW_LEN - 1] = '\0';
69449+ gr_usermode.sp_role[GR_SPROLE_LEN - 1] = '\0';
69450+
69451+ /* Okay.
69452+ * We now have enough of the argument structure (we have yet
69453+ * to copy_from_user the tables themselves). Copy the tables
69454+ * only if we need them, i.e. for loading operations. */
69455+
69456+ switch (gr_usermode.mode) {
69457+ case GR_STATUS:
69458+ if (gr_acl_is_enabled()) {
69459+ error = 1;
69460+ if (!gr_check_secure_terminal(current))
69461+ error = 3;
69462+ } else
69463+ error = 2;
69464+ goto out;
69465+ case GR_SHUTDOWN:
69466+ if (gr_acl_is_enabled() && !(chkpw(&gr_usermode, (unsigned char *)&gr_system_salt, (unsigned char *)&gr_system_sum))) {
69467+ stop_machine(gr_rbac_disable, NULL, NULL);
69468+ free_variables(false);
69469+ memset(&gr_usermode, 0, sizeof(gr_usermode));
69470+ memset(&gr_system_salt, 0, sizeof(gr_system_salt));
69471+ memset(&gr_system_sum, 0, sizeof(gr_system_sum));
69472+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
69473+ } else if (gr_acl_is_enabled()) {
69474+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
69475+ error = -EPERM;
69476+ } else {
69477+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
69478+ error = -EAGAIN;
69479+ }
69480+ break;
69481+ case GR_ENABLE:
69482+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(&gr_usermode)))
69483+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
69484+ else {
69485+ if (gr_acl_is_enabled())
69486+ error = -EAGAIN;
69487+ else
69488+ error = error2;
69489+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
69490+ }
69491+ break;
69492+ case GR_OLDRELOAD:
69493+ oldmode = 1; /* fall through to GR_RELOAD */
69494+ case GR_RELOAD:
69495+ if (!gr_acl_is_enabled()) {
69496+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
69497+ error = -EAGAIN;
69498+ } else if (!(chkpw(&gr_usermode, (unsigned char *)&gr_system_salt, (unsigned char *)&gr_system_sum))) {
69499+ error2 = gracl_reload(&gr_usermode, oldmode);
69500+ if (!error2)
69501+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
69502+ else {
69503+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
69504+ error = error2;
69505+ }
69506+ } else {
69507+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
69508+ error = -EPERM;
69509+ }
69510+ break;
69511+ case GR_SEGVMOD:
69512+ if (unlikely(!gr_acl_is_enabled())) {
69513+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
69514+ error = -EAGAIN;
69515+ break;
69516+ }
69517+
69518+ if (!(chkpw(&gr_usermode, (unsigned char *)&gr_system_salt, (unsigned char *)&gr_system_sum))) {
69519+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
69520+ if (gr_usermode.segv_device && gr_usermode.segv_inode) {
69521+ struct acl_subject_label *segvacl;
69522+ segvacl =
69523+ lookup_acl_subj_label(gr_usermode.segv_inode,
69524+ gr_usermode.segv_device,
69525+ current->role);
69526+ if (segvacl) {
69527+ segvacl->crashes = 0;
69528+ segvacl->expires = 0;
69529+ }
69530+ } else if (gr_find_uid(gr_usermode.segv_uid) >= 0) {
69531+ gr_remove_uid(gr_usermode.segv_uid);
69532+ }
69533+ } else {
69534+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
69535+ error = -EPERM;
69536+ }
69537+ break;
69538+ case GR_SPROLE:
69539+ case GR_SPROLEPAM:
69540+ if (unlikely(!gr_acl_is_enabled())) {
69541+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
69542+ error = -EAGAIN;
69543+ break;
69544+ }
69545+
69546+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
69547+ current->role->expires = 0;
69548+ current->role->auth_attempts = 0;
69549+ }
69550+
69551+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
69552+ time_after(current->role->expires, get_seconds())) {
69553+ error = -EBUSY;
69554+ goto out;
69555+ }
69556+
69557+ if (lookup_special_role_auth
69558+ (gr_usermode.mode, gr_usermode.sp_role, &sprole_salt, &sprole_sum)
69559+ && ((!sprole_salt && !sprole_sum)
69560+ || !(chkpw(&gr_usermode, sprole_salt, sprole_sum)))) {
69561+ char *p = "";
69562+ assign_special_role(gr_usermode.sp_role);
69563+ read_lock(&tasklist_lock);
69564+ if (current->real_parent)
69565+ p = current->real_parent->role->rolename;
69566+ read_unlock(&tasklist_lock);
69567+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
69568+ p, acl_sp_role_value);
69569+ } else {
69570+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode.sp_role);
69571+ error = -EPERM;
69572+ if(!(current->role->auth_attempts++))
69573+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
69574+
69575+ goto out;
69576+ }
69577+ break;
69578+ case GR_UNSPROLE:
69579+ if (unlikely(!gr_acl_is_enabled())) {
69580+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
69581+ error = -EAGAIN;
69582+ break;
69583+ }
69584+
69585+ if (current->role->roletype & GR_ROLE_SPECIAL) {
69586+ char *p = "";
69587+ int i = 0;
69588+
69589+ read_lock(&tasklist_lock);
69590+ if (current->real_parent) {
69591+ p = current->real_parent->role->rolename;
69592+ i = current->real_parent->acl_role_id;
69593+ }
69594+ read_unlock(&tasklist_lock);
69595+
69596+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
69597+ gr_set_acls(1);
69598+ } else {
69599+ error = -EPERM;
69600+ goto out;
69601+ }
69602+ break;
69603+ default:
69604+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode.mode);
69605+ error = -EINVAL;
69606+ break;
69607+ }
69608+
69609+ if (error != -EPERM)
69610+ goto out;
69611+
69612+ if(!(gr_auth_attempts++))
69613+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
69614+
69615+ out:
69616+ mutex_unlock(&gr_dev_mutex);
69617+
69618+ if (!error)
69619+ error = req_count;
69620+
69621+ return error;
69622+}
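write_grsec_handler rate-limits authentication: only failures increment gr_auth_attempts, the first failure arms gr_auth_expires, and once CONFIG_GRKERNSEC_ACL_MAXTRIES is reached every request gets -EBUSY until the window lapses, which resets both counters. A compilable sketch of that lockout state machine; MAXTRIES and TIMEOUT are stand-ins for the config values:

#include <stdio.h>
#include <time.h>
#include <errno.h>

#define MAXTRIES 3	/* stand-in for CONFIG_GRKERNSEC_ACL_MAXTRIES */
#define TIMEOUT  30	/* stand-in for CONFIG_GRKERNSEC_ACL_TIMEOUT, in seconds */

static unsigned long auth_expires;
static unsigned int auth_attempts;

static int try_auth(int password_ok)
{
	unsigned long now = (unsigned long)time(NULL);

	/* the window lapsed: forget previous failures */
	if (auth_expires && now >= auth_expires) {
		auth_expires = 0;
		auth_attempts = 0;
	}
	/* locked out while the window is still open */
	if (auth_attempts >= MAXTRIES && auth_expires > now)
		return -EBUSY;
	if (password_ok)
		return 0;
	/* only the first failure arms the timeout, as in write_grsec_handler */
	if (!auth_attempts++)
		auth_expires = now + TIMEOUT;
	return -EPERM;
}

int main(void)
{
	int i;

	for (i = 0; i < 5; i++)
		printf("attempt %d -> %d\n", i, try_auth(0));	/* -EPERM x3, then -EBUSY */
	return 0;
}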
69623+
69624+int
69625+gr_set_acls(const int type)
69626+{
69627+ struct task_struct *task, *task2;
69628+ struct acl_role_label *role = current->role;
69629+ struct acl_subject_label *subj;
69630+ __u16 acl_role_id = current->acl_role_id;
69631+ const struct cred *cred;
69632+ int ret;
69633+
69634+ rcu_read_lock();
69635+ read_lock(&tasklist_lock);
69636+ read_lock(&grsec_exec_file_lock);
69637+ do_each_thread(task2, task) {
69638+ /* check to see if we're called from the exit handler,
69639+ if so, only replace ACLs that have inherited the admin
69640+ ACL */
69641+
69642+ if (type && (task->role != role ||
69643+ task->acl_role_id != acl_role_id))
69644+ continue;
69645+
69646+ task->acl_role_id = 0;
69647+ task->acl_sp_role = 0;
69648+ task->inherited = 0;
69649+
69650+ if (task->exec_file) {
69651+ cred = __task_cred(task);
69652+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
69653+ subj = __gr_get_subject_for_task(polstate, task, NULL);
69654+ if (subj == NULL) {
69655+ ret = -EINVAL;
69656+ read_unlock(&grsec_exec_file_lock);
69657+ read_unlock(&tasklist_lock);
69658+ rcu_read_unlock();
69659+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
69660+ return ret;
69661+ }
69662+ __gr_apply_subject_to_task(polstate, task, subj);
69663+ } else {
69664+ // it's a kernel process
69665+ task->role = polstate->kernel_role;
69666+ task->acl = polstate->kernel_role->root_label;
69667+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
69668+ task->acl->mode &= ~GR_PROCFIND;
69669+#endif
69670+ }
69671+ } while_each_thread(task2, task);
69672+ read_unlock(&grsec_exec_file_lock);
69673+ read_unlock(&tasklist_lock);
69674+ rcu_read_unlock();
69675+
69676+ return 0;
69677+}
69678diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
69679new file mode 100644
69680index 0000000..39645c9
69681--- /dev/null
69682+++ b/grsecurity/gracl_res.c
69683@@ -0,0 +1,68 @@
69684+#include <linux/kernel.h>
69685+#include <linux/sched.h>
69686+#include <linux/gracl.h>
69687+#include <linux/grinternal.h>
69688+
69689+static const char *restab_log[] = {
69690+ [RLIMIT_CPU] = "RLIMIT_CPU",
69691+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
69692+ [RLIMIT_DATA] = "RLIMIT_DATA",
69693+ [RLIMIT_STACK] = "RLIMIT_STACK",
69694+ [RLIMIT_CORE] = "RLIMIT_CORE",
69695+ [RLIMIT_RSS] = "RLIMIT_RSS",
69696+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
69697+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
69698+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
69699+ [RLIMIT_AS] = "RLIMIT_AS",
69700+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
69701+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
69702+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
69703+ [RLIMIT_NICE] = "RLIMIT_NICE",
69704+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
69705+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
69706+ [GR_CRASH_RES] = "RLIMIT_CRASH"
69707+};
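restab_log uses C designated array initializers, so each entry is placed at its resource number and any resource without a name stays NULL, which is what the "not yet supported resource" check below relies on. A userspace sketch of the same table idiom; the bound NRES is an illustrative constant, and GR_CRASH_RES (which sits past the standard rlimits in the kernel table) is omitted:

#include <stdio.h>
#include <sys/resource.h>

#define NRES 16	/* covers RLIMIT_CPU .. RLIMIT_RTTIME on Linux */

/* sparse name table indexed by resource number; gaps stay NULL */
static const char *restab_log[NRES] = {
	[RLIMIT_CPU]    = "RLIMIT_CPU",
	[RLIMIT_NOFILE] = "RLIMIT_NOFILE",
	[RLIMIT_AS]     = "RLIMIT_AS",
};

int main(void)
{
	int res;

	for (res = 0; res < NRES; res++)
		if (restab_log[res])	/* skip unsupported resources */
			printf("%d -> %s\n", res, restab_log[res]);
	return 0;
}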
69708+
69709+void
69710+gr_log_resource(const struct task_struct *task,
69711+ const int res, const unsigned long wanted, const int gt)
69712+{
69713+ const struct cred *cred;
69714+ unsigned long rlim;
69715+
69716+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
69717+ return;
69718+
69719+ // not yet supported resource
69720+ if (unlikely(!restab_log[res]))
69721+ return;
69722+
69723+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
69724+ rlim = task_rlimit_max(task, res);
69725+ else
69726+ rlim = task_rlimit(task, res);
69727+
69728+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
69729+ return;
69730+
69731+ rcu_read_lock();
69732+ cred = __task_cred(task);
69733+
69734+ if (res == RLIMIT_NPROC &&
69735+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
69736+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
69737+ goto out_rcu_unlock;
69738+ else if (res == RLIMIT_MEMLOCK &&
69739+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
69740+ goto out_rcu_unlock;
69741+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
69742+ goto out_rcu_unlock;
69743+ rcu_read_unlock();
69744+
69745+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
69746+
69747+ return;
69748+out_rcu_unlock:
69749+ rcu_read_unlock();
69750+ return;
69751+}
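The gt flag above selects the comparison: when gt is set, a request equal to the limit is still within bounds (wanted <= rlim); otherwise it must stay strictly below (wanted < rlim), and RLIM_INFINITY silences logging altogether. The silence condition extracted into a compilable sketch:

#include <stdio.h>

#define RLIM_INFINITY (~0UL)

/* the early-return condition from gr_log_resource: returns 1 when the
 * request is within the limit and nothing should be logged */
static int within_limit(unsigned long rlim, unsigned long wanted, int gt)
{
	return rlim == RLIM_INFINITY ||
	       (gt && wanted <= rlim) ||
	       (!gt && wanted < rlim);
}

int main(void)
{
	unsigned long rlim = 1024;

	printf("%d\n", within_limit(rlim, 1024, 1));	/* 1: equal is fine when gt */
	printf("%d\n", within_limit(rlim, 1024, 0));	/* 0: equal already violates */
	printf("%d\n", within_limit(RLIM_INFINITY, 1UL << 40, 0));	/* 1 */
	return 0;
}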
69752diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
69753new file mode 100644
69754index 0000000..2040e61
69755--- /dev/null
69756+++ b/grsecurity/gracl_segv.c
69757@@ -0,0 +1,313 @@
69758+#include <linux/kernel.h>
69759+#include <linux/mm.h>
69760+#include <asm/uaccess.h>
69761+#include <asm/errno.h>
69762+#include <asm/mman.h>
69763+#include <net/sock.h>
69764+#include <linux/file.h>
69765+#include <linux/fs.h>
69766+#include <linux/net.h>
69767+#include <linux/in.h>
69768+#include <linux/slab.h>
69769+#include <linux/types.h>
69770+#include <linux/sched.h>
69771+#include <linux/timer.h>
69772+#include <linux/gracl.h>
69773+#include <linux/grsecurity.h>
69774+#include <linux/grinternal.h>
69775+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69776+#include <linux/magic.h>
69777+#include <linux/pagemap.h>
69778+#include "../fs/btrfs/async-thread.h"
69779+#include "../fs/btrfs/ctree.h"
69780+#include "../fs/btrfs/btrfs_inode.h"
69781+#endif
69782+
69783+static struct crash_uid *uid_set;
69784+static unsigned short uid_used;
69785+static DEFINE_SPINLOCK(gr_uid_lock);
69786+extern rwlock_t gr_inode_lock;
69787+extern struct acl_subject_label *
69788+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
69789+ struct acl_role_label *role);
69790+
69791+static inline dev_t __get_dev(const struct dentry *dentry)
69792+{
69793+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69794+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
69795+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
69796+ else
69797+#endif
69798+ return dentry->d_sb->s_dev;
69799+}
69800+
69801+int
69802+gr_init_uidset(void)
69803+{
69804+ uid_set =
69805+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
69806+ uid_used = 0;
69807+
69808+ return uid_set ? 1 : 0;
69809+}
69810+
69811+void
69812+gr_free_uidset(void)
69813+{
69814+ if (uid_set) {
69815+ struct crash_uid *tmpset;
69816+ spin_lock(&gr_uid_lock);
69817+ tmpset = uid_set;
69818+ uid_set = NULL;
69819+ uid_used = 0;
69820+ spin_unlock(&gr_uid_lock);
69821+ if (tmpset)
69822+ kfree(tmpset);
69823+ }
69824+
69825+ return;
69826+}
69827+
69828+int
69829+gr_find_uid(const uid_t uid)
69830+{
69831+ struct crash_uid *tmp = uid_set;
69832+ uid_t buid;
69833+ int low = 0, high = uid_used - 1, mid;
69834+
69835+ while (high >= low) {
69836+ mid = (low + high) >> 1;
69837+ buid = tmp[mid].uid;
69838+ if (buid == uid)
69839+ return mid;
69840+ if (buid > uid)
69841+ high = mid - 1;
69842+ if (buid < uid)
69843+ low = mid + 1;
69844+ }
69845+
69846+ return -1;
69847+}
69848+
69849+static __inline__ void
69850+gr_insertsort(void)
69851+{
69852+ unsigned short i, j;
69853+ struct crash_uid index;
69854+
69855+ for (i = 1; i < uid_used; i++) {
69856+ index = uid_set[i];
69857+ j = i;
69858+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
69859+ uid_set[j] = uid_set[j - 1];
69860+ j--;
69861+ }
69862+ uid_set[j] = index;
69863+ }
69864+
69865+ return;
69866+}
69867+
69868+static __inline__ void
69869+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
69870+{
69871+ int loc;
69872+ uid_t uid = GR_GLOBAL_UID(kuid);
69873+
69874+ if (uid_used == GR_UIDTABLE_MAX)
69875+ return;
69876+
69877+ loc = gr_find_uid(uid);
69878+
69879+ if (loc >= 0) {
69880+ uid_set[loc].expires = expires;
69881+ return;
69882+ }
69883+
69884+ uid_set[uid_used].uid = uid;
69885+ uid_set[uid_used].expires = expires;
69886+ uid_used++;
69887+
69888+ gr_insertsort();
69889+
69890+ return;
69891+}
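gr_find_uid can binary-search uid_set only because gr_insert_uid appends and then restores order; since the array is sorted before each append, a single trailing insertion pass is equivalent to the full gr_insertsort loop. A compilable sketch of the pair on a fixed-size table:

#include <stdio.h>

#define MAX_UIDS 64

struct crash_uid { unsigned int uid; unsigned long expires; };

static struct crash_uid uid_set[MAX_UIDS];
static unsigned short uid_used;

/* binary search over the sorted array, as in gr_find_uid */
static int find_uid(unsigned int uid)
{
	int low = 0, high = uid_used - 1, mid;

	while (high >= low) {
		mid = (low + high) >> 1;
		if (uid_set[mid].uid == uid)
			return mid;
		if (uid_set[mid].uid > uid)
			high = mid - 1;
		else
			low = mid + 1;
	}
	return -1;
}

/* append, then bubble the new entry into place, as in gr_insert_uid */
static void insert_uid(unsigned int uid, unsigned long expires)
{
	int loc = find_uid(uid);
	unsigned short j;
	struct crash_uid tmp;

	if (loc >= 0) {		/* already present: just refresh the expiry */
		uid_set[loc].expires = expires;
		return;
	}
	if (uid_used == MAX_UIDS)
		return;
	uid_set[uid_used].uid = uid;
	uid_set[uid_used].expires = expires;
	uid_used++;
	for (j = uid_used - 1; j > 0 && uid_set[j - 1].uid > uid_set[j].uid; j--) {
		tmp = uid_set[j];
		uid_set[j] = uid_set[j - 1];
		uid_set[j - 1] = tmp;
	}
}

int main(void)
{
	insert_uid(1000, 60);
	insert_uid(33, 60);
	insert_uid(500, 60);
	printf("index of uid 500: %d\n", find_uid(500));	/* 1: array is 33, 500, 1000 */
	return 0;
}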
69892+
69893+void
69894+gr_remove_uid(const unsigned short loc)
69895+{
69896+ unsigned short i;
69897+
69898+ for (i = loc + 1; i < uid_used; i++)
69899+ uid_set[i - 1] = uid_set[i];
69900+
69901+ uid_used--;
69902+
69903+ return;
69904+}
69905+
69906+int
69907+gr_check_crash_uid(const kuid_t kuid)
69908+{
69909+ int loc;
69910+ int ret = 0;
69911+ uid_t uid;
69912+
69913+ if (unlikely(!gr_acl_is_enabled()))
69914+ return 0;
69915+
69916+ uid = GR_GLOBAL_UID(kuid);
69917+
69918+ spin_lock(&gr_uid_lock);
69919+ loc = gr_find_uid(uid);
69920+
69921+ if (loc < 0)
69922+ goto out_unlock;
69923+
69924+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
69925+ gr_remove_uid(loc);
69926+ else
69927+ ret = 1;
69928+
69929+out_unlock:
69930+ spin_unlock(&gr_uid_lock);
69931+ return ret;
69932+}
69933+
69934+static __inline__ int
69935+proc_is_setxid(const struct cred *cred)
69936+{
69937+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
69938+ !uid_eq(cred->uid, cred->fsuid))
69939+ return 1;
69940+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
69941+ !gid_eq(cred->gid, cred->fsgid))
69942+ return 1;
69943+
69944+ return 0;
69945+}
69946+
69947+extern int gr_fake_force_sig(int sig, struct task_struct *t);
69948+
69949+void
69950+gr_handle_crash(struct task_struct *task, const int sig)
69951+{
69952+ struct acl_subject_label *curr;
69953+ struct task_struct *tsk, *tsk2;
69954+ const struct cred *cred;
69955+ const struct cred *cred2;
69956+
69957+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
69958+ return;
69959+
69960+ if (unlikely(!gr_acl_is_enabled()))
69961+ return;
69962+
69963+ curr = task->acl;
69964+
69965+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
69966+ return;
69967+
69968+ if (time_before_eq(curr->expires, get_seconds())) {
69969+ curr->expires = 0;
69970+ curr->crashes = 0;
69971+ }
69972+
69973+ curr->crashes++;
69974+
69975+ if (!curr->expires)
69976+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
69977+
69978+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
69979+ time_after(curr->expires, get_seconds())) {
69980+ rcu_read_lock();
69981+ cred = __task_cred(task);
69982+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
69983+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
69984+ spin_lock(&gr_uid_lock);
69985+ gr_insert_uid(cred->uid, curr->expires);
69986+ spin_unlock(&gr_uid_lock);
69987+ curr->expires = 0;
69988+ curr->crashes = 0;
69989+ read_lock(&tasklist_lock);
69990+ do_each_thread(tsk2, tsk) {
69991+ cred2 = __task_cred(tsk);
69992+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
69993+ gr_fake_force_sig(SIGKILL, tsk);
69994+ } while_each_thread(tsk2, tsk);
69995+ read_unlock(&tasklist_lock);
69996+ } else {
69997+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
69998+ read_lock(&tasklist_lock);
69999+ read_lock(&grsec_exec_file_lock);
70000+ do_each_thread(tsk2, tsk) {
70001+ if (likely(tsk != task)) {
70002+ // if this thread has the same subject as the one that triggered
70003+ // RES_CRASH and it's the same binary, kill it
70004+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
70005+ gr_fake_force_sig(SIGKILL, tsk);
70006+ }
70007+ } while_each_thread(tsk2, tsk);
70008+ read_unlock(&grsec_exec_file_lock);
70009+ read_unlock(&tasklist_lock);
70010+ }
70011+ rcu_read_unlock();
70012+ }
70013+
70014+ return;
70015+}
70016+
70017+int
70018+gr_check_crash_exec(const struct file *filp)
70019+{
70020+ struct acl_subject_label *curr;
70021+
70022+ if (unlikely(!gr_acl_is_enabled()))
70023+ return 0;
70024+
70025+ read_lock(&gr_inode_lock);
70026+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
70027+ __get_dev(filp->f_path.dentry),
70028+ current->role);
70029+ read_unlock(&gr_inode_lock);
70030+
70031+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
70032+ (!curr->crashes && !curr->expires))
70033+ return 0;
70034+
70035+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
70036+ time_after(curr->expires, get_seconds()))
70037+ return 1;
70038+ else if (time_before_eq(curr->expires, get_seconds())) {
70039+ curr->crashes = 0;
70040+ curr->expires = 0;
70041+ }
70042+
70043+ return 0;
70044+}
70045+
70046+void
70047+gr_handle_alertkill(struct task_struct *task)
70048+{
70049+ struct acl_subject_label *curracl;
70050+ __u32 curr_ip;
70051+ struct task_struct *p, *p2;
70052+
70053+ if (unlikely(!gr_acl_is_enabled()))
70054+ return;
70055+
70056+ curracl = task->acl;
70057+ curr_ip = task->signal->curr_ip;
70058+
70059+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
70060+ read_lock(&tasklist_lock);
70061+ do_each_thread(p2, p) {
70062+ if (p->signal->curr_ip == curr_ip)
70063+ gr_fake_force_sig(SIGKILL, p);
70064+ } while_each_thread(p2, p);
70065+ read_unlock(&tasklist_lock);
70066+ } else if (curracl->mode & GR_KILLPROC)
70067+ gr_fake_force_sig(SIGKILL, task);
70068+
70069+ return;
70070+}
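
The segv-response code above keeps banned UIDs in a fixed-size array that is held sorted (gr_insertsort) so that gr_find_uid, defined earlier in the same file, can bisect it; entries carry an absolute expiry in seconds and are dropped once the ban window passes. The standalone sketch below models that insert/refresh/remove cycle in plain C. All names in it (uid_entry, table_insert, table_remove, TABLE_MAX) are illustrative stand-ins, not symbols from the patch, and its refresh lookup is linear where the patch bisects.

    #include <string.h>

    #define TABLE_MAX 128                    /* plays the role of GR_UIDTABLE_MAX */

    struct uid_entry {                       /* one slot of the ban table */
        unsigned int uid;
        unsigned long expires;               /* absolute time in seconds */
    };

    static struct uid_entry table[TABLE_MAX];
    static int used;

    /* insert a uid or refresh its expiry, keeping the array sorted */
    static void table_insert(unsigned int uid, unsigned long expires)
    {
        int i;

        for (i = 0; i < used; i++) {
            if (table[i].uid == uid) {       /* already banned: extend */
                table[i].expires = expires;
                return;
            }
        }
        if (used == TABLE_MAX)               /* table full: drop, as the patch does */
            return;

        /* shift-insert from the tail; same net effect as gr_insertsort() */
        for (i = used; i > 0 && table[i - 1].uid > uid; i--)
            table[i] = table[i - 1];
        table[i].uid = uid;
        table[i].expires = expires;
        used++;
    }

    /* drop slot loc by sliding the tail down; mirrors gr_remove_uid() */
    static void table_remove(int loc)
    {
        memmove(&table[loc], &table[loc + 1],
                (used - loc - 1) * sizeof(table[0]));
        used--;
    }

gr_check_crash_uid then reduces to: look the uid up; if found and its expiry has already passed, remove the entry and report no ban, otherwise report the uid as still locked out.
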
70071diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
70072new file mode 100644
70073index 0000000..98011b0
70074--- /dev/null
70075+++ b/grsecurity/gracl_shm.c
70076@@ -0,0 +1,40 @@
70077+#include <linux/kernel.h>
70078+#include <linux/mm.h>
70079+#include <linux/sched.h>
70080+#include <linux/file.h>
70081+#include <linux/ipc.h>
70082+#include <linux/gracl.h>
70083+#include <linux/grsecurity.h>
70084+#include <linux/grinternal.h>
70085+
70086+int
70087+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
70088+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
70089+{
70090+ struct task_struct *task;
70091+
70092+ if (!gr_acl_is_enabled())
70093+ return 1;
70094+
70095+ rcu_read_lock();
70096+ read_lock(&tasklist_lock);
70097+
70098+ task = find_task_by_vpid(shm_cprid);
70099+
70100+ if (unlikely(!task))
70101+ task = find_task_by_vpid(shm_lapid);
70102+
70103+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
70104+ (task_pid_nr(task) == shm_lapid)) &&
70105+ (task->acl->mode & GR_PROTSHM) &&
70106+ (task->acl != current->acl))) {
70107+ read_unlock(&tasklist_lock);
70108+ rcu_read_unlock();
70109+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
70110+ return 0;
70111+ }
70112+ read_unlock(&tasklist_lock);
70113+ rcu_read_unlock();
70114+
70115+ return 1;
70116+}
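
gr_handle_shmat ties a segment to its creator by comparing the candidate task's start time against the segment's creation time with time_before_eq, one of the kernel's wrap-safe counter comparisons. The sketch below gives their simplified shape (the real macros in include/linux/jiffies.h also typecheck their arguments as unsigned long):

    #define time_after(a, b)      ((long)((b) - (a)) < 0)
    #define time_before(a, b)     time_after(b, a)
    #define time_after_eq(a, b)   ((long)((a) - (b)) >= 0)
    #define time_before_eq(a, b)  time_after_eq(b, a)

    /* the signed difference preserves ordering even across an unsigned
     * wrap: with 32-bit longs, a = 5 and b = 0xfffffffb ("-5"), b - a is
     * -10 as a long, so time_after(a, b) is true -- a really is later on
     * a counter that has wrapped past zero. */

So time_before_eq(task->start_time.tv_sec, shm_createtime) asks whether the task already existed when the segment was created; if it did not, the PID has been recycled and the task is not treated as the creator.
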
70117diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
70118new file mode 100644
70119index 0000000..bc0be01
70120--- /dev/null
70121+++ b/grsecurity/grsec_chdir.c
70122@@ -0,0 +1,19 @@
70123+#include <linux/kernel.h>
70124+#include <linux/sched.h>
70125+#include <linux/fs.h>
70126+#include <linux/file.h>
70127+#include <linux/grsecurity.h>
70128+#include <linux/grinternal.h>
70129+
70130+void
70131+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
70132+{
70133+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
70134+ if ((grsec_enable_chdir && grsec_enable_group &&
70135+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
70136+ !grsec_enable_group)) {
70137+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
70138+ }
70139+#endif
70140+ return;
70141+}
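
The condition in gr_log_chdir is one instance of a gating pattern these audit hooks repeat: (enable && group && in_group_p(gid)) || (enable && !group). Factoring out enable gives the equivalent, shorter predicate sketched below (should_audit is an illustrative name, not a patch symbol):

    /* equivalent to the two-clause form used in the hook: audit when the
     * knob is on, and either auditing is not limited to a group or the
     * caller belongs to the audit group */
    static int should_audit(int enable, int group_limited, int in_audit_group)
    {
        return enable && (!group_limited || in_audit_group);
    }
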
70142diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
70143new file mode 100644
70144index 0000000..e10b319
70145--- /dev/null
70146+++ b/grsecurity/grsec_chroot.c
70147@@ -0,0 +1,370 @@
70148+#include <linux/kernel.h>
70149+#include <linux/module.h>
70150+#include <linux/sched.h>
70151+#include <linux/file.h>
70152+#include <linux/fs.h>
70153+#include <linux/mount.h>
70154+#include <linux/types.h>
70155+#include "../fs/mount.h"
70156+#include <linux/grsecurity.h>
70157+#include <linux/grinternal.h>
70158+
70159+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
70160+int gr_init_ran;
70161+#endif
70162+
70163+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
70164+{
70165+#ifdef CONFIG_GRKERNSEC
70166+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
70167+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
70168+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
70169+ && gr_init_ran
70170+#endif
70171+ )
70172+ task->gr_is_chrooted = 1;
70173+ else {
70174+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
70175+ if (task_pid_nr(task) == 1 && !gr_init_ran)
70176+ gr_init_ran = 1;
70177+#endif
70178+ task->gr_is_chrooted = 0;
70179+ }
70180+
70181+ task->gr_chroot_dentry = path->dentry;
70182+#endif
70183+ return;
70184+}
70185+
70186+void gr_clear_chroot_entries(struct task_struct *task)
70187+{
70188+#ifdef CONFIG_GRKERNSEC
70189+ task->gr_is_chrooted = 0;
70190+ task->gr_chroot_dentry = NULL;
70191+#endif
70192+ return;
70193+}
70194+
70195+int
70196+gr_handle_chroot_unix(const pid_t pid)
70197+{
70198+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
70199+ struct task_struct *p;
70200+
70201+ if (unlikely(!grsec_enable_chroot_unix))
70202+ return 1;
70203+
70204+ if (likely(!proc_is_chrooted(current)))
70205+ return 1;
70206+
70207+ rcu_read_lock();
70208+ read_lock(&tasklist_lock);
70209+ p = find_task_by_vpid_unrestricted(pid);
70210+ if (unlikely(p && !have_same_root(current, p))) {
70211+ read_unlock(&tasklist_lock);
70212+ rcu_read_unlock();
70213+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
70214+ return 0;
70215+ }
70216+ read_unlock(&tasklist_lock);
70217+ rcu_read_unlock();
70218+#endif
70219+ return 1;
70220+}
70221+
70222+int
70223+gr_handle_chroot_nice(void)
70224+{
70225+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
70226+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
70227+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
70228+ return -EPERM;
70229+ }
70230+#endif
70231+ return 0;
70232+}
70233+
70234+int
70235+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
70236+{
70237+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
70238+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
70239+ && proc_is_chrooted(current)) {
70240+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
70241+ return -EACCES;
70242+ }
70243+#endif
70244+ return 0;
70245+}
70246+
70247+int
70248+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
70249+{
70250+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
70251+ struct task_struct *p;
70252+ int ret = 0;
70253+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
70254+ return ret;
70255+
70256+ read_lock(&tasklist_lock);
70257+ do_each_pid_task(pid, type, p) {
70258+ if (!have_same_root(current, p)) {
70259+ ret = 1;
70260+ goto out;
70261+ }
70262+ } while_each_pid_task(pid, type, p);
70263+out:
70264+ read_unlock(&tasklist_lock);
70265+ return ret;
70266+#endif
70267+ return 0;
70268+}
70269+
70270+int
70271+gr_pid_is_chrooted(struct task_struct *p)
70272+{
70273+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
70274+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
70275+ return 0;
70276+
70277+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
70278+ !have_same_root(current, p)) {
70279+ return 1;
70280+ }
70281+#endif
70282+ return 0;
70283+}
70284+
70285+EXPORT_SYMBOL(gr_pid_is_chrooted);
70286+
70287+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
70288+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
70289+{
70290+ struct path path, currentroot;
70291+ int ret = 0;
70292+
70293+ path.dentry = (struct dentry *)u_dentry;
70294+ path.mnt = (struct vfsmount *)u_mnt;
70295+ get_fs_root(current->fs, &currentroot);
70296+ if (path_is_under(&path, &currentroot))
70297+ ret = 1;
70298+ path_put(&currentroot);
70299+
70300+ return ret;
70301+}
70302+#endif
70303+
70304+int
70305+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
70306+{
70307+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
70308+ if (!grsec_enable_chroot_fchdir)
70309+ return 1;
70310+
70311+ if (!proc_is_chrooted(current))
70312+ return 1;
70313+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
70314+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
70315+ return 0;
70316+ }
70317+#endif
70318+ return 1;
70319+}
70320+
70321+int
70322+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
70323+ const time_t shm_createtime)
70324+{
70325+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
70326+ struct task_struct *p;
70327+ time_t starttime;
70328+
70329+ if (unlikely(!grsec_enable_chroot_shmat))
70330+ return 1;
70331+
70332+ if (likely(!proc_is_chrooted(current)))
70333+ return 1;
70334+
70335+ rcu_read_lock();
70336+ read_lock(&tasklist_lock);
70337+
70338+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
70339+ starttime = p->start_time.tv_sec;
70340+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
70341+ if (have_same_root(current, p)) {
70342+ goto allow;
70343+ } else {
70344+ read_unlock(&tasklist_lock);
70345+ rcu_read_unlock();
70346+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
70347+ return 0;
70348+ }
70349+ }
70350+ /* creator exited, pid reuse, fall through to next check */
70351+ }
70352+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
70353+ if (unlikely(!have_same_root(current, p))) {
70354+ read_unlock(&tasklist_lock);
70355+ rcu_read_unlock();
70356+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
70357+ return 0;
70358+ }
70359+ }
70360+
70361+allow:
70362+ read_unlock(&tasklist_lock);
70363+ rcu_read_unlock();
70364+#endif
70365+ return 1;
70366+}
70367+
70368+void
70369+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
70370+{
70371+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
70372+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
70373+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
70374+#endif
70375+ return;
70376+}
70377+
70378+int
70379+gr_handle_chroot_mknod(const struct dentry *dentry,
70380+ const struct vfsmount *mnt, const int mode)
70381+{
70382+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
70383+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
70384+ proc_is_chrooted(current)) {
70385+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
70386+ return -EPERM;
70387+ }
70388+#endif
70389+ return 0;
70390+}
70391+
70392+int
70393+gr_handle_chroot_mount(const struct dentry *dentry,
70394+ const struct vfsmount *mnt, const char *dev_name)
70395+{
70396+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
70397+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
70398+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
70399+ return -EPERM;
70400+ }
70401+#endif
70402+ return 0;
70403+}
70404+
70405+int
70406+gr_handle_chroot_pivot(void)
70407+{
70408+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
70409+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
70410+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
70411+ return -EPERM;
70412+ }
70413+#endif
70414+ return 0;
70415+}
70416+
70417+int
70418+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
70419+{
70420+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
70421+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
70422+ !gr_is_outside_chroot(dentry, mnt)) {
70423+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
70424+ return -EPERM;
70425+ }
70426+#endif
70427+ return 0;
70428+}
70429+
70430+extern const char *captab_log[];
70431+extern int captab_log_entries;
70432+
70433+int
70434+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
70435+{
70436+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
70437+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
70438+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
70439+ if (cap_raised(chroot_caps, cap)) {
70440+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
70441+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
70442+ }
70443+ return 0;
70444+ }
70445+ }
70446+#endif
70447+ return 1;
70448+}
70449+
70450+int
70451+gr_chroot_is_capable(const int cap)
70452+{
70453+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
70454+ return gr_task_chroot_is_capable(current, current_cred(), cap);
70455+#endif
70456+ return 1;
70457+}
70458+
70459+int
70460+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
70461+{
70462+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
70463+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
70464+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
70465+ if (cap_raised(chroot_caps, cap)) {
70466+ return 0;
70467+ }
70468+ }
70469+#endif
70470+ return 1;
70471+}
70472+
70473+int
70474+gr_chroot_is_capable_nolog(const int cap)
70475+{
70476+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
70477+ return gr_task_chroot_is_capable_nolog(current, cap);
70478+#endif
70479+ return 1;
70480+}
70481+
70482+int
70483+gr_handle_chroot_sysctl(const int op)
70484+{
70485+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
70486+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
70487+ proc_is_chrooted(current))
70488+ return -EACCES;
70489+#endif
70490+ return 0;
70491+}
70492+
70493+void
70494+gr_handle_chroot_chdir(const struct path *path)
70495+{
70496+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
70497+ if (grsec_enable_chroot_chdir)
70498+ set_fs_pwd(current->fs, path);
70499+#endif
70500+ return;
70501+}
70502+
70503+int
70504+gr_handle_chroot_chmod(const struct dentry *dentry,
70505+ const struct vfsmount *mnt, const int mode)
70506+{
70507+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
70508+ /* allow chmod +s on directories, but not files */
70509+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
70510+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
70511+ proc_is_chrooted(current)) {
70512+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
70513+ return -EPERM;
70514+ }
70515+#endif
70516+ return 0;
70517+}
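
gr_task_chroot_is_capable answers by set membership: GR_CHROOT_CAPS is a kernel_cap_t bitmask of capabilities withdrawn from chrooted tasks, and cap_raised tests one bit of it. A minimal model follows, with a deny mask holding a few representative entries (the actual GR_CHROOT_CAPS set is larger; the capability numbers are from linux/capability.h):

    #include <stdint.h>

    #define CAP_SYS_MODULE 16
    #define CAP_SYS_RAWIO  17
    #define CAP_SYS_ADMIN  21

    #define CAP_BIT(cap)  (UINT64_C(1) << (cap))

    /* stand-in for GR_CHROOT_CAPS; illustrative subset only */
    static const uint64_t chroot_deny_mask =
        CAP_BIT(CAP_SYS_MODULE) | CAP_BIT(CAP_SYS_RAWIO) |
        CAP_BIT(CAP_SYS_ADMIN);

    /* model of the cap_raised() test: nonzero means the capability is in
     * the deny set, so a chrooted task loses it */
    static int denied_in_chroot(int cap)
    {
        return (chroot_deny_mask & CAP_BIT(cap)) != 0;
    }
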
70518diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
70519new file mode 100644
70520index 0000000..52b3e30
70521--- /dev/null
70522+++ b/grsecurity/grsec_disabled.c
70523@@ -0,0 +1,433 @@
70524+#include <linux/kernel.h>
70525+#include <linux/module.h>
70526+#include <linux/sched.h>
70527+#include <linux/file.h>
70528+#include <linux/fs.h>
70529+#include <linux/kdev_t.h>
70530+#include <linux/net.h>
70531+#include <linux/in.h>
70532+#include <linux/ip.h>
70533+#include <linux/skbuff.h>
70534+#include <linux/sysctl.h>
70535+
70536+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
70537+void
70538+pax_set_initial_flags(struct linux_binprm *bprm)
70539+{
70540+ return;
70541+}
70542+#endif
70543+
70544+#ifdef CONFIG_SYSCTL
70545+__u32
70546+gr_handle_sysctl(const struct ctl_table * table, const int op)
70547+{
70548+ return 0;
70549+}
70550+#endif
70551+
70552+#ifdef CONFIG_TASKSTATS
70553+int gr_is_taskstats_denied(int pid)
70554+{
70555+ return 0;
70556+}
70557+#endif
70558+
70559+int
70560+gr_acl_is_enabled(void)
70561+{
70562+ return 0;
70563+}
70564+
70565+void
70566+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
70567+{
70568+ return;
70569+}
70570+
70571+int
70572+gr_handle_rawio(const struct inode *inode)
70573+{
70574+ return 0;
70575+}
70576+
70577+void
70578+gr_acl_handle_psacct(struct task_struct *task, const long code)
70579+{
70580+ return;
70581+}
70582+
70583+int
70584+gr_handle_ptrace(struct task_struct *task, const long request)
70585+{
70586+ return 0;
70587+}
70588+
70589+int
70590+gr_handle_proc_ptrace(struct task_struct *task)
70591+{
70592+ return 0;
70593+}
70594+
70595+int
70596+gr_set_acls(const int type)
70597+{
70598+ return 0;
70599+}
70600+
70601+int
70602+gr_check_hidden_task(const struct task_struct *tsk)
70603+{
70604+ return 0;
70605+}
70606+
70607+int
70608+gr_check_protected_task(const struct task_struct *task)
70609+{
70610+ return 0;
70611+}
70612+
70613+int
70614+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
70615+{
70616+ return 0;
70617+}
70618+
70619+void
70620+gr_copy_label(struct task_struct *tsk)
70621+{
70622+ return;
70623+}
70624+
70625+void
70626+gr_set_pax_flags(struct task_struct *task)
70627+{
70628+ return;
70629+}
70630+
70631+int
70632+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
70633+ const int unsafe_share)
70634+{
70635+ return 0;
70636+}
70637+
70638+void
70639+gr_handle_delete(const ino_t ino, const dev_t dev)
70640+{
70641+ return;
70642+}
70643+
70644+void
70645+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
70646+{
70647+ return;
70648+}
70649+
70650+void
70651+gr_handle_crash(struct task_struct *task, const int sig)
70652+{
70653+ return;
70654+}
70655+
70656+int
70657+gr_check_crash_exec(const struct file *filp)
70658+{
70659+ return 0;
70660+}
70661+
70662+int
70663+gr_check_crash_uid(const kuid_t uid)
70664+{
70665+ return 0;
70666+}
70667+
70668+void
70669+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
70670+ struct dentry *old_dentry,
70671+ struct dentry *new_dentry,
70672+ struct vfsmount *mnt, const __u8 replace)
70673+{
70674+ return;
70675+}
70676+
70677+int
70678+gr_search_socket(const int family, const int type, const int protocol)
70679+{
70680+ return 1;
70681+}
70682+
70683+int
70684+gr_search_connectbind(const int mode, const struct socket *sock,
70685+ const struct sockaddr_in *addr)
70686+{
70687+ return 0;
70688+}
70689+
70690+void
70691+gr_handle_alertkill(struct task_struct *task)
70692+{
70693+ return;
70694+}
70695+
70696+__u32
70697+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
70698+{
70699+ return 1;
70700+}
70701+
70702+__u32
70703+gr_acl_handle_hidden_file(const struct dentry * dentry,
70704+ const struct vfsmount * mnt)
70705+{
70706+ return 1;
70707+}
70708+
70709+__u32
70710+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
70711+ int acc_mode)
70712+{
70713+ return 1;
70714+}
70715+
70716+__u32
70717+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
70718+{
70719+ return 1;
70720+}
70721+
70722+__u32
70723+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
70724+{
70725+ return 1;
70726+}
70727+
70728+int
70729+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
70730+ unsigned int *vm_flags)
70731+{
70732+ return 1;
70733+}
70734+
70735+__u32
70736+gr_acl_handle_truncate(const struct dentry * dentry,
70737+ const struct vfsmount * mnt)
70738+{
70739+ return 1;
70740+}
70741+
70742+__u32
70743+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
70744+{
70745+ return 1;
70746+}
70747+
70748+__u32
70749+gr_acl_handle_access(const struct dentry * dentry,
70750+ const struct vfsmount * mnt, const int fmode)
70751+{
70752+ return 1;
70753+}
70754+
70755+__u32
70756+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
70757+ umode_t *mode)
70758+{
70759+ return 1;
70760+}
70761+
70762+__u32
70763+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
70764+{
70765+ return 1;
70766+}
70767+
70768+__u32
70769+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
70770+{
70771+ return 1;
70772+}
70773+
70774+__u32
70775+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
70776+{
70777+ return 1;
70778+}
70779+
70780+void
70781+grsecurity_init(void)
70782+{
70783+ return;
70784+}
70785+
70786+umode_t gr_acl_umask(void)
70787+{
70788+ return 0;
70789+}
70790+
70791+__u32
70792+gr_acl_handle_mknod(const struct dentry * new_dentry,
70793+ const struct dentry * parent_dentry,
70794+ const struct vfsmount * parent_mnt,
70795+ const int mode)
70796+{
70797+ return 1;
70798+}
70799+
70800+__u32
70801+gr_acl_handle_mkdir(const struct dentry * new_dentry,
70802+ const struct dentry * parent_dentry,
70803+ const struct vfsmount * parent_mnt)
70804+{
70805+ return 1;
70806+}
70807+
70808+__u32
70809+gr_acl_handle_symlink(const struct dentry * new_dentry,
70810+ const struct dentry * parent_dentry,
70811+ const struct vfsmount * parent_mnt, const struct filename *from)
70812+{
70813+ return 1;
70814+}
70815+
70816+__u32
70817+gr_acl_handle_link(const struct dentry * new_dentry,
70818+ const struct dentry * parent_dentry,
70819+ const struct vfsmount * parent_mnt,
70820+ const struct dentry * old_dentry,
70821+ const struct vfsmount * old_mnt, const struct filename *to)
70822+{
70823+ return 1;
70824+}
70825+
70826+int
70827+gr_acl_handle_rename(const struct dentry *new_dentry,
70828+ const struct dentry *parent_dentry,
70829+ const struct vfsmount *parent_mnt,
70830+ const struct dentry *old_dentry,
70831+ const struct inode *old_parent_inode,
70832+ const struct vfsmount *old_mnt, const struct filename *newname)
70833+{
70834+ return 0;
70835+}
70836+
70837+int
70838+gr_acl_handle_filldir(const struct file *file, const char *name,
70839+ const int namelen, const ino_t ino)
70840+{
70841+ return 1;
70842+}
70843+
70844+int
70845+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
70846+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
70847+{
70848+ return 1;
70849+}
70850+
70851+int
70852+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
70853+{
70854+ return 0;
70855+}
70856+
70857+int
70858+gr_search_accept(const struct socket *sock)
70859+{
70860+ return 0;
70861+}
70862+
70863+int
70864+gr_search_listen(const struct socket *sock)
70865+{
70866+ return 0;
70867+}
70868+
70869+int
70870+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
70871+{
70872+ return 0;
70873+}
70874+
70875+__u32
70876+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
70877+{
70878+ return 1;
70879+}
70880+
70881+__u32
70882+gr_acl_handle_creat(const struct dentry * dentry,
70883+ const struct dentry * p_dentry,
70884+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
70885+ const int imode)
70886+{
70887+ return 1;
70888+}
70889+
70890+void
70891+gr_acl_handle_exit(void)
70892+{
70893+ return;
70894+}
70895+
70896+int
70897+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
70898+{
70899+ return 1;
70900+}
70901+
70902+void
70903+gr_set_role_label(const kuid_t uid, const kgid_t gid)
70904+{
70905+ return;
70906+}
70907+
70908+int
70909+gr_acl_handle_procpidmem(const struct task_struct *task)
70910+{
70911+ return 0;
70912+}
70913+
70914+int
70915+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
70916+{
70917+ return 0;
70918+}
70919+
70920+int
70921+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
70922+{
70923+ return 0;
70924+}
70925+
70926+int
70927+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
70928+{
70929+ return 0;
70930+}
70931+
70932+int
70933+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
70934+{
70935+ return 0;
70936+}
70937+
70938+int gr_acl_enable_at_secure(void)
70939+{
70940+ return 0;
70941+}
70942+
70943+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
70944+{
70945+ return dentry->d_sb->s_dev;
70946+}
70947+
70948+void gr_put_exec_file(struct task_struct *task)
70949+{
70950+ return;
70951+}
70952+
70953+#ifdef CONFIG_SECURITY
70954+EXPORT_SYMBOL(gr_check_user_change);
70955+EXPORT_SYMBOL(gr_check_group_change);
70956+#endif
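
grsec_disabled.c exists so the rest of the kernel can call grsecurity hooks unconditionally: when CONFIG_GRKERNSEC is off, this file links in permissive no-ops instead of the real policy, and no call site needs an #ifdef. The shape of the pattern, with a made-up hook name:

    /* one call site, two implementations selected at build time;
     * hook_allowed is an illustrative example, not a symbol from the patch */
    #ifdef CONFIG_GRKERNSEC
    extern int hook_allowed(void);           /* real policy defined elsewhere */
    #else
    static inline int hook_allowed(void)
    {
        return 1;                            /* disabled build: always allow */
    }
    #endif
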
70957diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
70958new file mode 100644
70959index 0000000..387032b
70960--- /dev/null
70961+++ b/grsecurity/grsec_exec.c
70962@@ -0,0 +1,187 @@
70963+#include <linux/kernel.h>
70964+#include <linux/sched.h>
70965+#include <linux/file.h>
70966+#include <linux/binfmts.h>
70967+#include <linux/fs.h>
70968+#include <linux/types.h>
70969+#include <linux/grdefs.h>
70970+#include <linux/grsecurity.h>
70971+#include <linux/grinternal.h>
70972+#include <linux/capability.h>
70973+#include <linux/module.h>
70974+#include <linux/compat.h>
70975+
70976+#include <asm/uaccess.h>
70977+
70978+#ifdef CONFIG_GRKERNSEC_EXECLOG
70979+static char gr_exec_arg_buf[132];
70980+static DEFINE_MUTEX(gr_exec_arg_mutex);
70981+#endif
70982+
70983+struct user_arg_ptr {
70984+#ifdef CONFIG_COMPAT
70985+ bool is_compat;
70986+#endif
70987+ union {
70988+ const char __user *const __user *native;
70989+#ifdef CONFIG_COMPAT
70990+ const compat_uptr_t __user *compat;
70991+#endif
70992+ } ptr;
70993+};
70994+
70995+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
70996+
70997+void
70998+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
70999+{
71000+#ifdef CONFIG_GRKERNSEC_EXECLOG
71001+ char *grarg = gr_exec_arg_buf;
71002+ unsigned int i, x, execlen = 0;
71003+ char c;
71004+
71005+ if (!((grsec_enable_execlog && grsec_enable_group &&
71006+ in_group_p(grsec_audit_gid))
71007+ || (grsec_enable_execlog && !grsec_enable_group)))
71008+ return;
71009+
71010+ mutex_lock(&gr_exec_arg_mutex);
71011+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
71012+
71013+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
71014+ const char __user *p;
71015+ unsigned int len;
71016+
71017+ p = get_user_arg_ptr(argv, i);
71018+ if (IS_ERR(p))
71019+ goto log;
71020+
71021+ len = strnlen_user(p, 128 - execlen);
71022+ if (len > 128 - execlen)
71023+ len = 128 - execlen;
71024+ else if (len > 0)
71025+ len--;
71026+ if (copy_from_user(grarg + execlen, p, len))
71027+ goto log;
71028+
71029+ /* rewrite unprintable characters */
71030+ for (x = 0; x < len; x++) {
71031+ c = *(grarg + execlen + x);
71032+ if (c < 32 || c > 126)
71033+ *(grarg + execlen + x) = ' ';
71034+ }
71035+
71036+ execlen += len;
71037+ *(grarg + execlen) = ' ';
71038+ *(grarg + execlen + 1) = '\0';
71039+ execlen++;
71040+ }
71041+
71042+ log:
71043+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
71044+ bprm->file->f_path.mnt, grarg);
71045+ mutex_unlock(&gr_exec_arg_mutex);
71046+#endif
71047+ return;
71048+}
71049+
71050+#ifdef CONFIG_GRKERNSEC
71051+extern int gr_acl_is_capable(const int cap);
71052+extern int gr_acl_is_capable_nolog(const int cap);
71053+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
71054+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
71055+extern int gr_chroot_is_capable(const int cap);
71056+extern int gr_chroot_is_capable_nolog(const int cap);
71057+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
71058+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
71059+#endif
71060+
71061+const char *captab_log[] = {
71062+ "CAP_CHOWN",
71063+ "CAP_DAC_OVERRIDE",
71064+ "CAP_DAC_READ_SEARCH",
71065+ "CAP_FOWNER",
71066+ "CAP_FSETID",
71067+ "CAP_KILL",
71068+ "CAP_SETGID",
71069+ "CAP_SETUID",
71070+ "CAP_SETPCAP",
71071+ "CAP_LINUX_IMMUTABLE",
71072+ "CAP_NET_BIND_SERVICE",
71073+ "CAP_NET_BROADCAST",
71074+ "CAP_NET_ADMIN",
71075+ "CAP_NET_RAW",
71076+ "CAP_IPC_LOCK",
71077+ "CAP_IPC_OWNER",
71078+ "CAP_SYS_MODULE",
71079+ "CAP_SYS_RAWIO",
71080+ "CAP_SYS_CHROOT",
71081+ "CAP_SYS_PTRACE",
71082+ "CAP_SYS_PACCT",
71083+ "CAP_SYS_ADMIN",
71084+ "CAP_SYS_BOOT",
71085+ "CAP_SYS_NICE",
71086+ "CAP_SYS_RESOURCE",
71087+ "CAP_SYS_TIME",
71088+ "CAP_SYS_TTY_CONFIG",
71089+ "CAP_MKNOD",
71090+ "CAP_LEASE",
71091+ "CAP_AUDIT_WRITE",
71092+ "CAP_AUDIT_CONTROL",
71093+ "CAP_SETFCAP",
71094+ "CAP_MAC_OVERRIDE",
71095+ "CAP_MAC_ADMIN",
71096+ "CAP_SYSLOG",
71097+ "CAP_WAKE_ALARM"
71098+};
71099+
71100+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
71101+
71102+int gr_is_capable(const int cap)
71103+{
71104+#ifdef CONFIG_GRKERNSEC
71105+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
71106+ return 1;
71107+ return 0;
71108+#else
71109+ return 1;
71110+#endif
71111+}
71112+
71113+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
71114+{
71115+#ifdef CONFIG_GRKERNSEC
71116+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
71117+ return 1;
71118+ return 0;
71119+#else
71120+ return 1;
71121+#endif
71122+}
71123+
71124+int gr_is_capable_nolog(const int cap)
71125+{
71126+#ifdef CONFIG_GRKERNSEC
71127+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
71128+ return 1;
71129+ return 0;
71130+#else
71131+ return 1;
71132+#endif
71133+}
71134+
71135+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
71136+{
71137+#ifdef CONFIG_GRKERNSEC
71138+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
71139+ return 1;
71140+ return 0;
71141+#else
71142+ return 1;
71143+#endif
71144+}
71145+
71146+EXPORT_SYMBOL(gr_is_capable);
71147+EXPORT_SYMBOL(gr_is_capable_nolog);
71148+EXPORT_SYMBOL(gr_task_is_capable);
71149+EXPORT_SYMBOL(gr_task_is_capable_nolog);
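
gr_handle_exec_args copies at most 128 bytes of the exec arguments into a static buffer under a mutex and, before logging, rewrites every byte outside printable ASCII to a space, so an attacker-chosen argv cannot smuggle control characters into the log. The scrubbing step as a standalone helper (scrub_printable is an illustrative name):

    #include <stddef.h>

    /* replace anything outside printable ASCII (0x20..0x7e) with a space;
     * with a signed char, bytes >= 0x80 compare below 32 and are scrubbed
     * too, exactly as in the hook's loop */
    static void scrub_printable(char *buf, size_t len)
    {
        size_t i;

        for (i = 0; i < len; i++)
            if (buf[i] < 32 || buf[i] > 126)
                buf[i] = ' ';
    }
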
71150diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
71151new file mode 100644
71152index 0000000..06cc6ea
71153--- /dev/null
71154+++ b/grsecurity/grsec_fifo.c
71155@@ -0,0 +1,24 @@
71156+#include <linux/kernel.h>
71157+#include <linux/sched.h>
71158+#include <linux/fs.h>
71159+#include <linux/file.h>
71160+#include <linux/grinternal.h>
71161+
71162+int
71163+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
71164+ const struct dentry *dir, const int flag, const int acc_mode)
71165+{
71166+#ifdef CONFIG_GRKERNSEC_FIFO
71167+ const struct cred *cred = current_cred();
71168+
71169+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
71170+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
71171+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
71172+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
71173+ if (!inode_permission(dentry->d_inode, acc_mode))
71174+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
71175+ return -EACCES;
71176+ }
71177+#endif
71178+ return 0;
71179+}
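
gr_handle_fifo targets data-spoofing through FIFOs planted in shared sticky directories: the open is refused when the FIFO belongs to neither the directory owner nor the opener (O_EXCL opens are exempt, since they cannot land on a pre-planted FIFO). Mainline later adopted a close cousin of this rule as the fs.protected_fifos sysctl. The core predicate, extracted (fifo_is_suspect is an illustrative name):

    #include <sys/stat.h>
    #include <sys/types.h>

    /* true when opening this FIFO should be refused: it lives in a sticky
     * (world-shared) directory and is owned by neither the directory owner
     * nor the process doing the open */
    static int fifo_is_suspect(mode_t fifo_mode, uid_t fifo_uid,
                               mode_t dir_mode, uid_t dir_uid, uid_t fsuid)
    {
        return S_ISFIFO(fifo_mode) && (dir_mode & S_ISVTX) &&
               fifo_uid != dir_uid && fifo_uid != fsuid;
    }
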
71180diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
71181new file mode 100644
71182index 0000000..8ca18bf
71183--- /dev/null
71184+++ b/grsecurity/grsec_fork.c
71185@@ -0,0 +1,23 @@
71186+#include <linux/kernel.h>
71187+#include <linux/sched.h>
71188+#include <linux/grsecurity.h>
71189+#include <linux/grinternal.h>
71190+#include <linux/errno.h>
71191+
71192+void
71193+gr_log_forkfail(const int retval)
71194+{
71195+#ifdef CONFIG_GRKERNSEC_FORKFAIL
71196+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
71197+ switch (retval) {
71198+ case -EAGAIN:
71199+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
71200+ break;
71201+ case -ENOMEM:
71202+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
71203+ break;
71204+ }
71205+ }
71206+#endif
71207+ return;
71208+}
71209diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
71210new file mode 100644
71211index 0000000..a88e901
71212--- /dev/null
71213+++ b/grsecurity/grsec_init.c
71214@@ -0,0 +1,272 @@
71215+#include <linux/kernel.h>
71216+#include <linux/sched.h>
71217+#include <linux/mm.h>
71218+#include <linux/gracl.h>
71219+#include <linux/slab.h>
71220+#include <linux/vmalloc.h>
71221+#include <linux/percpu.h>
71222+#include <linux/module.h>
71223+
71224+int grsec_enable_ptrace_readexec;
71225+int grsec_enable_setxid;
71226+int grsec_enable_symlinkown;
71227+kgid_t grsec_symlinkown_gid;
71228+int grsec_enable_brute;
71229+int grsec_enable_link;
71230+int grsec_enable_dmesg;
71231+int grsec_enable_harden_ptrace;
71232+int grsec_enable_harden_ipc;
71233+int grsec_enable_fifo;
71234+int grsec_enable_execlog;
71235+int grsec_enable_signal;
71236+int grsec_enable_forkfail;
71237+int grsec_enable_audit_ptrace;
71238+int grsec_enable_time;
71239+int grsec_enable_group;
71240+kgid_t grsec_audit_gid;
71241+int grsec_enable_chdir;
71242+int grsec_enable_mount;
71243+int grsec_enable_rofs;
71244+int grsec_deny_new_usb;
71245+int grsec_enable_chroot_findtask;
71246+int grsec_enable_chroot_mount;
71247+int grsec_enable_chroot_shmat;
71248+int grsec_enable_chroot_fchdir;
71249+int grsec_enable_chroot_double;
71250+int grsec_enable_chroot_pivot;
71251+int grsec_enable_chroot_chdir;
71252+int grsec_enable_chroot_chmod;
71253+int grsec_enable_chroot_mknod;
71254+int grsec_enable_chroot_nice;
71255+int grsec_enable_chroot_execlog;
71256+int grsec_enable_chroot_caps;
71257+int grsec_enable_chroot_sysctl;
71258+int grsec_enable_chroot_unix;
71259+int grsec_enable_tpe;
71260+kgid_t grsec_tpe_gid;
71261+int grsec_enable_blackhole;
71262+#ifdef CONFIG_IPV6_MODULE
71263+EXPORT_SYMBOL(grsec_enable_blackhole);
71264+#endif
71265+int grsec_lastack_retries;
71266+int grsec_enable_tpe_all;
71267+int grsec_enable_tpe_invert;
71268+int grsec_enable_socket_all;
71269+kgid_t grsec_socket_all_gid;
71270+int grsec_enable_socket_client;
71271+kgid_t grsec_socket_client_gid;
71272+int grsec_enable_socket_server;
71273+kgid_t grsec_socket_server_gid;
71274+int grsec_resource_logging;
71275+int grsec_disable_privio;
71276+int grsec_enable_log_rwxmaps;
71277+int grsec_lock;
71278+
71279+DEFINE_SPINLOCK(grsec_alert_lock);
71280+unsigned long grsec_alert_wtime = 0;
71281+unsigned long grsec_alert_fyet = 0;
71282+
71283+DEFINE_SPINLOCK(grsec_audit_lock);
71284+
71285+DEFINE_RWLOCK(grsec_exec_file_lock);
71286+
71287+char *gr_shared_page[4];
71288+
71289+char *gr_alert_log_fmt;
71290+char *gr_audit_log_fmt;
71291+char *gr_alert_log_buf;
71292+char *gr_audit_log_buf;
71293+
71294+void __init
71295+grsecurity_init(void)
71296+{
71297+ int j;
71298+ /* create the per-cpu shared pages */
71299+
71300+#ifdef CONFIG_X86
71301+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
71302+#endif
71303+
71304+ for (j = 0; j < 4; j++) {
71305+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
71306+ if (gr_shared_page[j] == NULL) {
71307+ panic("Unable to allocate grsecurity shared page");
71308+ return;
71309+ }
71310+ }
71311+
71312+ /* allocate log buffers */
71313+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
71314+ if (!gr_alert_log_fmt) {
71315+ panic("Unable to allocate grsecurity alert log format buffer");
71316+ return;
71317+ }
71318+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
71319+ if (!gr_audit_log_fmt) {
71320+ panic("Unable to allocate grsecurity audit log format buffer");
71321+ return;
71322+ }
71323+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
71324+ if (!gr_alert_log_buf) {
71325+ panic("Unable to allocate grsecurity alert log buffer");
71326+ return;
71327+ }
71328+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
71329+ if (!gr_audit_log_buf) {
71330+ panic("Unable to allocate grsecurity audit log buffer");
71331+ return;
71332+ }
71333+
71334+#ifdef CONFIG_GRKERNSEC_IO
71335+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
71336+ grsec_disable_privio = 1;
71337+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
71338+ grsec_disable_privio = 1;
71339+#else
71340+ grsec_disable_privio = 0;
71341+#endif
71342+#endif
71343+
71344+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
71345+ /* for backward compatibility, tpe_invert always defaults to on if
71346+ enabled in the kernel
71347+ */
71348+ grsec_enable_tpe_invert = 1;
71349+#endif
71350+
71351+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
71352+#ifndef CONFIG_GRKERNSEC_SYSCTL
71353+ grsec_lock = 1;
71354+#endif
71355+
71356+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
71357+ grsec_enable_log_rwxmaps = 1;
71358+#endif
71359+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
71360+ grsec_enable_group = 1;
71361+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
71362+#endif
71363+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
71364+ grsec_enable_ptrace_readexec = 1;
71365+#endif
71366+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
71367+ grsec_enable_chdir = 1;
71368+#endif
71369+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
71370+ grsec_enable_harden_ptrace = 1;
71371+#endif
71372+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
71373+ grsec_enable_harden_ipc = 1;
71374+#endif
71375+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
71376+ grsec_enable_mount = 1;
71377+#endif
71378+#ifdef CONFIG_GRKERNSEC_LINK
71379+ grsec_enable_link = 1;
71380+#endif
71381+#ifdef CONFIG_GRKERNSEC_BRUTE
71382+ grsec_enable_brute = 1;
71383+#endif
71384+#ifdef CONFIG_GRKERNSEC_DMESG
71385+ grsec_enable_dmesg = 1;
71386+#endif
71387+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71388+ grsec_enable_blackhole = 1;
71389+ grsec_lastack_retries = 4;
71390+#endif
71391+#ifdef CONFIG_GRKERNSEC_FIFO
71392+ grsec_enable_fifo = 1;
71393+#endif
71394+#ifdef CONFIG_GRKERNSEC_EXECLOG
71395+ grsec_enable_execlog = 1;
71396+#endif
71397+#ifdef CONFIG_GRKERNSEC_SETXID
71398+ grsec_enable_setxid = 1;
71399+#endif
71400+#ifdef CONFIG_GRKERNSEC_SIGNAL
71401+ grsec_enable_signal = 1;
71402+#endif
71403+#ifdef CONFIG_GRKERNSEC_FORKFAIL
71404+ grsec_enable_forkfail = 1;
71405+#endif
71406+#ifdef CONFIG_GRKERNSEC_TIME
71407+ grsec_enable_time = 1;
71408+#endif
71409+#ifdef CONFIG_GRKERNSEC_RESLOG
71410+ grsec_resource_logging = 1;
71411+#endif
71412+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
71413+ grsec_enable_chroot_findtask = 1;
71414+#endif
71415+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
71416+ grsec_enable_chroot_unix = 1;
71417+#endif
71418+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
71419+ grsec_enable_chroot_mount = 1;
71420+#endif
71421+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
71422+ grsec_enable_chroot_fchdir = 1;
71423+#endif
71424+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
71425+ grsec_enable_chroot_shmat = 1;
71426+#endif
71427+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
71428+ grsec_enable_audit_ptrace = 1;
71429+#endif
71430+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
71431+ grsec_enable_chroot_double = 1;
71432+#endif
71433+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
71434+ grsec_enable_chroot_pivot = 1;
71435+#endif
71436+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
71437+ grsec_enable_chroot_chdir = 1;
71438+#endif
71439+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
71440+ grsec_enable_chroot_chmod = 1;
71441+#endif
71442+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
71443+ grsec_enable_chroot_mknod = 1;
71444+#endif
71445+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
71446+ grsec_enable_chroot_nice = 1;
71447+#endif
71448+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
71449+ grsec_enable_chroot_execlog = 1;
71450+#endif
71451+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
71452+ grsec_enable_chroot_caps = 1;
71453+#endif
71454+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
71455+ grsec_enable_chroot_sysctl = 1;
71456+#endif
71457+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
71458+ grsec_enable_symlinkown = 1;
71459+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
71460+#endif
71461+#ifdef CONFIG_GRKERNSEC_TPE
71462+ grsec_enable_tpe = 1;
71463+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
71464+#ifdef CONFIG_GRKERNSEC_TPE_ALL
71465+ grsec_enable_tpe_all = 1;
71466+#endif
71467+#endif
71468+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
71469+ grsec_enable_socket_all = 1;
71470+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
71471+#endif
71472+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
71473+ grsec_enable_socket_client = 1;
71474+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
71475+#endif
71476+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
71477+ grsec_enable_socket_server = 1;
71478+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
71479+#endif
71480+#endif
71481+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
71482+ grsec_deny_new_usb = 1;
71483+#endif
71484+
71485+ return;
71486+}
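
grsecurity_init turns Kconfig integers into kernel-internal GIDs with KGIDT_INIT; kgid_t is a one-member struct wrapper (include/linux/uidgid.h) so raw and namespaced IDs cannot be mixed up silently. A simplified shape of that plumbing, with a made-up config constant:

    /* simplified from include/linux/uidgid.h */
    typedef struct { unsigned int val; } kgid_t;
    #define KGIDT_INIT(value) ((kgid_t){ value })

    #define CONFIG_EXAMPLE_AUDIT_GID 1007    /* stand-in Kconfig value */

    static kgid_t audit_gid;

    static void init_example(void)
    {
        audit_gid = KGIDT_INIT(CONFIG_EXAMPLE_AUDIT_GID);
    }
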
71487diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
71488new file mode 100644
71489index 0000000..f365de0
71490--- /dev/null
71491+++ b/grsecurity/grsec_ipc.c
71492@@ -0,0 +1,22 @@
71493+#include <linux/kernel.h>
71494+#include <linux/mm.h>
71495+#include <linux/sched.h>
71496+#include <linux/file.h>
71497+#include <linux/ipc.h>
71498+#include <linux/ipc_namespace.h>
71499+#include <linux/grsecurity.h>
71500+#include <linux/grinternal.h>
71501+
71502+int
71503+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
71504+{
71505+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
71506+ int write = (requested_mode & 00002);
71507+
71508+ if (grsec_enable_harden_ipc && !(requested_mode & ~granted_mode & 0007) && !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
71509+ gr_log_str2_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", write ? "writ" : "read", GR_GLOBAL_UID(ipcp->cuid));
71510+ return 0;
71511+ }
71512+#endif
71513+ return 1;
71514+}
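
The deny condition in gr_ipc_permitted reads most easily from the innermost expression outward: 0007 masks the world ("other") rwx bits, and requested_mode & ~granted_mode keeps the bits that were asked for but not granted. When that intersection is empty, no requested world bit is missing, i.e. the request would succeed on the object's "other" permissions alone; that overly-permissive-object case is what HARDEN_IPC refuses to callers without CAP_IPC_OWNER. Unpacked:

    /* nonzero when the world bits of granted_mode cover everything the
     * caller asked for -- the case the hook denies unless the caller
     * holds CAP_IPC_OWNER in the IPC namespace */
    static int world_grants_request(int requested_mode, int granted_mode)
    {
        return !(requested_mode & ~granted_mode & 0007);
    }
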
71515diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
71516new file mode 100644
71517index 0000000..5e05e20
71518--- /dev/null
71519+++ b/grsecurity/grsec_link.c
71520@@ -0,0 +1,58 @@
71521+#include <linux/kernel.h>
71522+#include <linux/sched.h>
71523+#include <linux/fs.h>
71524+#include <linux/file.h>
71525+#include <linux/grinternal.h>
71526+
71527+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
71528+{
71529+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
71530+ const struct inode *link_inode = link->dentry->d_inode;
71531+
71532+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
71533+ /* ignore root-owned links, e.g. /proc/self */
71534+ gr_is_global_nonroot(link_inode->i_uid) && target &&
71535+ !uid_eq(link_inode->i_uid, target->i_uid)) {
71536+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
71537+ return 1;
71538+ }
71539+#endif
71540+ return 0;
71541+}
71542+
71543+int
71544+gr_handle_follow_link(const struct inode *parent,
71545+ const struct inode *inode,
71546+ const struct dentry *dentry, const struct vfsmount *mnt)
71547+{
71548+#ifdef CONFIG_GRKERNSEC_LINK
71549+ const struct cred *cred = current_cred();
71550+
71551+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
71552+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
71553+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
71554+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
71555+ return -EACCES;
71556+ }
71557+#endif
71558+ return 0;
71559+}
71560+
71561+int
71562+gr_handle_hardlink(const struct dentry *dentry,
71563+ const struct vfsmount *mnt,
71564+ struct inode *inode, const int mode, const struct filename *to)
71565+{
71566+#ifdef CONFIG_GRKERNSEC_LINK
71567+ const struct cred *cred = current_cred();
71568+
71569+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
71570+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
71571+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
71572+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
71573+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
71574+ return -EPERM;
71575+ }
71576+#endif
71577+ return 0;
71578+}
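
gr_handle_follow_link is the sticky-directory symlink defense that mainline later adopted as the fs.protected_symlinks sysctl: following a symlink found in a sticky, world-writable directory is refused unless the link belongs to the directory owner or to the follower. The predicate, extracted (symlink_is_suspect is an illustrative name):

    #include <sys/stat.h>
    #include <sys/types.h>

    /* true when following the link should be refused: sticky plus
     * world-writable parent (a /tmp-style directory), and the link is
     * owned by neither the directory owner nor the process following it */
    static int symlink_is_suspect(mode_t dir_mode, uid_t dir_uid,
                                  uid_t link_uid, uid_t fsuid)
    {
        return (dir_mode & S_ISVTX) && (dir_mode & S_IWOTH) &&
               dir_uid != link_uid && fsuid != link_uid;
    }
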
71579diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
71580new file mode 100644
71581index 0000000..dbe0a6b
71582--- /dev/null
71583+++ b/grsecurity/grsec_log.c
71584@@ -0,0 +1,341 @@
71585+#include <linux/kernel.h>
71586+#include <linux/sched.h>
71587+#include <linux/file.h>
71588+#include <linux/tty.h>
71589+#include <linux/fs.h>
71590+#include <linux/mm.h>
71591+#include <linux/grinternal.h>
71592+
71593+#ifdef CONFIG_TREE_PREEMPT_RCU
71594+#define DISABLE_PREEMPT() preempt_disable()
71595+#define ENABLE_PREEMPT() preempt_enable()
71596+#else
71597+#define DISABLE_PREEMPT()
71598+#define ENABLE_PREEMPT()
71599+#endif
71600+
71601+#define BEGIN_LOCKS(x) \
71602+ DISABLE_PREEMPT(); \
71603+ rcu_read_lock(); \
71604+ read_lock(&tasklist_lock); \
71605+ read_lock(&grsec_exec_file_lock); \
71606+ if (x != GR_DO_AUDIT) \
71607+ spin_lock(&grsec_alert_lock); \
71608+ else \
71609+ spin_lock(&grsec_audit_lock)
71610+
71611+#define END_LOCKS(x) \
71612+ if (x != GR_DO_AUDIT) \
71613+ spin_unlock(&grsec_alert_lock); \
71614+ else \
71615+ spin_unlock(&grsec_audit_lock); \
71616+ read_unlock(&grsec_exec_file_lock); \
71617+ read_unlock(&tasklist_lock); \
71618+ rcu_read_unlock(); \
71619+ ENABLE_PREEMPT(); \
71620+ if (x == GR_DONT_AUDIT) \
71621+ gr_handle_alertkill(current)
71622+
71623+enum {
71624+ FLOODING,
71625+ NO_FLOODING
71626+};
71627+
71628+extern char *gr_alert_log_fmt;
71629+extern char *gr_audit_log_fmt;
71630+extern char *gr_alert_log_buf;
71631+extern char *gr_audit_log_buf;
71632+
71633+static int gr_log_start(int audit)
71634+{
71635+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
71636+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
71637+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
71638+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
71639+ unsigned long curr_secs = get_seconds();
71640+
71641+ if (audit == GR_DO_AUDIT)
71642+ goto set_fmt;
71643+
71644+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
71645+ grsec_alert_wtime = curr_secs;
71646+ grsec_alert_fyet = 0;
71647+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
71648+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
71649+ grsec_alert_fyet++;
71650+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
71651+ grsec_alert_wtime = curr_secs;
71652+ grsec_alert_fyet++;
71653+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
71654+ return FLOODING;
71655+ }
71656+ else return FLOODING;
71657+
71658+set_fmt:
71659+#endif
71660+ memset(buf, 0, PAGE_SIZE);
71661+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
71662+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
71663+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
71664+ } else if (current->signal->curr_ip) {
71665+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
71666+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
71667+ } else if (gr_acl_is_enabled()) {
71668+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
71669+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
71670+ } else {
71671+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
71672+ strcpy(buf, fmt);
71673+ }
71674+
71675+ return NO_FLOODING;
71676+}
71677+
71678+static void gr_log_middle(int audit, const char *msg, va_list ap)
71679+ __attribute__ ((format (printf, 2, 0)));
71680+
71681+static void gr_log_middle(int audit, const char *msg, va_list ap)
71682+{
71683+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
71684+ unsigned int len = strlen(buf);
71685+
71686+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
71687+
71688+ return;
71689+}
71690+
71691+static void gr_log_middle_varargs(int audit, const char *msg, ...)
71692+ __attribute__ ((format (printf, 2, 3)));
71693+
71694+static void gr_log_middle_varargs(int audit, const char *msg, ...)
71695+{
71696+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
71697+ unsigned int len = strlen(buf);
71698+ va_list ap;
71699+
71700+ va_start(ap, msg);
71701+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
71702+ va_end(ap);
71703+
71704+ return;
71705+}
71706+
71707+static void gr_log_end(int audit, int append_default)
71708+{
71709+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
71710+ if (append_default) {
71711+ struct task_struct *task = current;
71712+ struct task_struct *parent = task->real_parent;
71713+ const struct cred *cred = __task_cred(task);
71714+ const struct cred *pcred = __task_cred(parent);
71715+ unsigned int len = strlen(buf);
71716+
71717+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
71718+ }
71719+
71720+ printk("%s\n", buf);
71721+
71722+ return;
71723+}
71724+
71725+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
71726+{
71727+ int logtype;
71728+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
71729+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
71730+ void *voidptr = NULL;
71731+ int num1 = 0, num2 = 0;
71732+ unsigned long ulong1 = 0, ulong2 = 0;
71733+ struct dentry *dentry = NULL;
71734+ struct vfsmount *mnt = NULL;
71735+ struct file *file = NULL;
71736+ struct task_struct *task = NULL;
71737+ struct vm_area_struct *vma = NULL;
71738+ const struct cred *cred, *pcred;
71739+ va_list ap;
71740+
71741+ BEGIN_LOCKS(audit);
71742+ logtype = gr_log_start(audit);
71743+ if (logtype == FLOODING) {
71744+ END_LOCKS(audit);
71745+ return;
71746+ }
71747+ va_start(ap, argtypes);
71748+ switch (argtypes) {
71749+ case GR_TTYSNIFF:
71750+ task = va_arg(ap, struct task_struct *);
71751+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
71752+ break;
71753+ case GR_SYSCTL_HIDDEN:
71754+ str1 = va_arg(ap, char *);
71755+ gr_log_middle_varargs(audit, msg, result, str1);
71756+ break;
71757+ case GR_RBAC:
71758+ dentry = va_arg(ap, struct dentry *);
71759+ mnt = va_arg(ap, struct vfsmount *);
71760+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
71761+ break;
71762+ case GR_RBAC_STR:
71763+ dentry = va_arg(ap, struct dentry *);
71764+ mnt = va_arg(ap, struct vfsmount *);
71765+ str1 = va_arg(ap, char *);
71766+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
71767+ break;
71768+ case GR_STR_RBAC:
71769+ str1 = va_arg(ap, char *);
71770+ dentry = va_arg(ap, struct dentry *);
71771+ mnt = va_arg(ap, struct vfsmount *);
71772+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
71773+ break;
71774+ case GR_RBAC_MODE2:
71775+ dentry = va_arg(ap, struct dentry *);
71776+ mnt = va_arg(ap, struct vfsmount *);
71777+ str1 = va_arg(ap, char *);
71778+ str2 = va_arg(ap, char *);
71779+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
71780+ break;
71781+ case GR_RBAC_MODE3:
71782+ dentry = va_arg(ap, struct dentry *);
71783+ mnt = va_arg(ap, struct vfsmount *);
71784+ str1 = va_arg(ap, char *);
71785+ str2 = va_arg(ap, char *);
71786+ str3 = va_arg(ap, char *);
71787+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
71788+ break;
71789+ case GR_FILENAME:
71790+ dentry = va_arg(ap, struct dentry *);
71791+ mnt = va_arg(ap, struct vfsmount *);
71792+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
71793+ break;
71794+ case GR_STR_FILENAME:
71795+ str1 = va_arg(ap, char *);
71796+ dentry = va_arg(ap, struct dentry *);
71797+ mnt = va_arg(ap, struct vfsmount *);
71798+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
71799+ break;
71800+ case GR_FILENAME_STR:
71801+ dentry = va_arg(ap, struct dentry *);
71802+ mnt = va_arg(ap, struct vfsmount *);
71803+ str1 = va_arg(ap, char *);
71804+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
71805+ break;
71806+ case GR_FILENAME_TWO_INT:
71807+ dentry = va_arg(ap, struct dentry *);
71808+ mnt = va_arg(ap, struct vfsmount *);
71809+ num1 = va_arg(ap, int);
71810+ num2 = va_arg(ap, int);
71811+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
71812+ break;
71813+ case GR_FILENAME_TWO_INT_STR:
71814+ dentry = va_arg(ap, struct dentry *);
71815+ mnt = va_arg(ap, struct vfsmount *);
71816+ num1 = va_arg(ap, int);
71817+ num2 = va_arg(ap, int);
71818+ str1 = va_arg(ap, char *);
71819+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
71820+ break;
71821+ case GR_TEXTREL:
71822+ file = va_arg(ap, struct file *);
71823+ ulong1 = va_arg(ap, unsigned long);
71824+ ulong2 = va_arg(ap, unsigned long);
71825+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
71826+ break;
71827+ case GR_PTRACE:
71828+ task = va_arg(ap, struct task_struct *);
71829+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
71830+ break;
71831+ case GR_RESOURCE:
71832+ task = va_arg(ap, struct task_struct *);
71833+ cred = __task_cred(task);
71834+ pcred = __task_cred(task->real_parent);
71835+ ulong1 = va_arg(ap, unsigned long);
71836+ str1 = va_arg(ap, char *);
71837+ ulong2 = va_arg(ap, unsigned long);
71838+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
71839+ break;
71840+ case GR_CAP:
71841+ task = va_arg(ap, struct task_struct *);
71842+ cred = __task_cred(task);
71843+ pcred = __task_cred(task->real_parent);
71844+ str1 = va_arg(ap, char *);
71845+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
71846+ break;
71847+ case GR_SIG:
71848+ str1 = va_arg(ap, char *);
71849+ voidptr = va_arg(ap, void *);
71850+ gr_log_middle_varargs(audit, msg, str1, voidptr);
71851+ break;
71852+ case GR_SIG2:
71853+ task = va_arg(ap, struct task_struct *);
71854+ cred = __task_cred(task);
71855+ pcred = __task_cred(task->real_parent);
71856+ num1 = va_arg(ap, int);
71857+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
71858+ break;
71859+ case GR_CRASH1:
71860+ task = va_arg(ap, struct task_struct *);
71861+ cred = __task_cred(task);
71862+ pcred = __task_cred(task->real_parent);
71863+ ulong1 = va_arg(ap, unsigned long);
71864+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
71865+ break;
71866+ case GR_CRASH2:
71867+ task = va_arg(ap, struct task_struct *);
71868+ cred = __task_cred(task);
71869+ pcred = __task_cred(task->real_parent);
71870+ ulong1 = va_arg(ap, unsigned long);
71871+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
71872+ break;
71873+ case GR_RWXMAP:
71874+ file = va_arg(ap, struct file *);
71875+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
71876+ break;
71877+ case GR_RWXMAPVMA:
71878+ vma = va_arg(ap, struct vm_area_struct *);
71879+ if (vma->vm_file)
71880+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
71881+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
71882+ str1 = "<stack>";
71883+ else if (vma->vm_start <= current->mm->brk &&
71884+ vma->vm_end >= current->mm->start_brk)
71885+ str1 = "<heap>";
71886+ else
71887+ str1 = "<anonymous mapping>";
71888+ gr_log_middle_varargs(audit, msg, str1);
71889+ break;
71890+ case GR_PSACCT:
71891+ {
71892+ unsigned int wday, cday;
71893+ __u8 whr, chr;
71894+ __u8 wmin, cmin;
71895+ __u8 wsec, csec;
71896+ char cur_tty[64] = { 0 };
71897+ char parent_tty[64] = { 0 };
71898+
71899+ task = va_arg(ap, struct task_struct *);
71900+ wday = va_arg(ap, unsigned int);
71901+ cday = va_arg(ap, unsigned int);
71902+ whr = va_arg(ap, int);
71903+ chr = va_arg(ap, int);
71904+ wmin = va_arg(ap, int);
71905+ cmin = va_arg(ap, int);
71906+ wsec = va_arg(ap, int);
71907+ csec = va_arg(ap, int);
71908+ ulong1 = va_arg(ap, unsigned long);
71909+ cred = __task_cred(task);
71910+ pcred = __task_cred(task->real_parent);
71911+
71912+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
71913+ }
71914+ break;
71915+ default:
71916+ gr_log_middle(audit, msg, ap);
71917+ }
71918+ va_end(ap);
71919+ // these don't need DEFAULTSECARGS printed on the end
71920+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
71921+ gr_log_end(audit, 0);
71922+ else
71923+ gr_log_end(audit, 1);
71924+ END_LOCKS(audit);
71925+}
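The switch above is the core of a type-dispatched variadic logger: each GR_* message type implies a fixed argument signature, so the call site and this function must agree on the number, order, and types of the values pushed, or va_arg() will misread the list. A minimal user-space sketch of the same pattern (demo_log and its message types are illustrative, not part of the patch):

#include <stdarg.h>
#include <stdio.h>

enum demo_argtypes { DEMO_STR_INT, DEMO_TWO_ULONG };

static void demo_log(enum demo_argtypes argtypes, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	switch (argtypes) {
	case DEMO_STR_INT: {
		/* pull into locals first: evaluation order of call
		 * arguments is unspecified, so va_arg() must not
		 * appear twice in one argument list */
		char *s = va_arg(ap, char *);
		int n = va_arg(ap, int);
		printf(fmt, s, n);
		break;
	}
	case DEMO_TWO_ULONG: {
		unsigned long a = va_arg(ap, unsigned long);
		unsigned long b = va_arg(ap, unsigned long);
		printf(fmt, a, b);
		break;
	}
	}
	va_end(ap);
}

int main(void)
{
	demo_log(DEMO_STR_INT, "%s: signal %d\n", "demo", 11);
	demo_log(DEMO_TWO_ULONG, "range %lx-%lx\n", 0x1000UL, 0x2000UL);
	return 0;
}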
71926diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
71927new file mode 100644
71928index 0000000..f536303
71929--- /dev/null
71930+++ b/grsecurity/grsec_mem.c
71931@@ -0,0 +1,40 @@
71932+#include <linux/kernel.h>
71933+#include <linux/sched.h>
71934+#include <linux/mm.h>
71935+#include <linux/mman.h>
71936+#include <linux/grinternal.h>
71937+
71938+void
71939+gr_handle_ioperm(void)
71940+{
71941+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
71942+ return;
71943+}
71944+
71945+void
71946+gr_handle_iopl(void)
71947+{
71948+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
71949+ return;
71950+}
71951+
71952+void
71953+gr_handle_mem_readwrite(u64 from, u64 to)
71954+{
71955+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
71956+ return;
71957+}
71958+
71959+void
71960+gr_handle_vm86(void)
71961+{
71962+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
71963+ return;
71964+}
71965+
71966+void
71967+gr_log_badprocpid(const char *entry)
71968+{
71969+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
71970+ return;
71971+}
71972diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
71973new file mode 100644
71974index 0000000..2131422
71975--- /dev/null
71976+++ b/grsecurity/grsec_mount.c
71977@@ -0,0 +1,62 @@
71978+#include <linux/kernel.h>
71979+#include <linux/sched.h>
71980+#include <linux/mount.h>
71981+#include <linux/grsecurity.h>
71982+#include <linux/grinternal.h>
71983+
71984+void
71985+gr_log_remount(const char *devname, const int retval)
71986+{
71987+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
71988+ if (grsec_enable_mount && (retval >= 0))
71989+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
71990+#endif
71991+ return;
71992+}
71993+
71994+void
71995+gr_log_unmount(const char *devname, const int retval)
71996+{
71997+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
71998+ if (grsec_enable_mount && (retval >= 0))
71999+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
72000+#endif
72001+ return;
72002+}
72003+
72004+void
72005+gr_log_mount(const char *from, const char *to, const int retval)
72006+{
72007+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
72008+ if (grsec_enable_mount && (retval >= 0))
72009+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
72010+#endif
72011+ return;
72012+}
72013+
72014+int
72015+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
72016+{
72017+#ifdef CONFIG_GRKERNSEC_ROFS
72018+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
72019+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
72020+ return -EPERM;
72021+ } else
72022+ return 0;
72023+#endif
72024+ return 0;
72025+}
72026+
72027+int
72028+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
72029+{
72030+#ifdef CONFIG_GRKERNSEC_ROFS
72031+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
72032+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
72033+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
72034+ return -EPERM;
72035+ } else
72036+ return 0;
72037+#endif
72038+ return 0;
72039+}
72040diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
72041new file mode 100644
72042index 0000000..6ee9d50
72043--- /dev/null
72044+++ b/grsecurity/grsec_pax.c
72045@@ -0,0 +1,45 @@
72046+#include <linux/kernel.h>
72047+#include <linux/sched.h>
72048+#include <linux/mm.h>
72049+#include <linux/file.h>
72050+#include <linux/grinternal.h>
72051+#include <linux/grsecurity.h>
72052+
72053+void
72054+gr_log_textrel(struct vm_area_struct * vma)
72055+{
72056+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
72057+ if (grsec_enable_log_rwxmaps)
72058+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
72059+#endif
72060+ return;
72061+}
72062+
72063+void gr_log_ptgnustack(struct file *file)
72064+{
72065+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
72066+ if (grsec_enable_log_rwxmaps)
72067+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
72068+#endif
72069+ return;
72070+}
72071+
72072+void
72073+gr_log_rwxmmap(struct file *file)
72074+{
72075+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
72076+ if (grsec_enable_log_rwxmaps)
72077+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
72078+#endif
72079+ return;
72080+}
72081+
72082+void
72083+gr_log_rwxmprotect(struct vm_area_struct *vma)
72084+{
72085+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
72086+ if (grsec_enable_log_rwxmaps)
72087+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
72088+#endif
72089+ return;
72090+}
72091diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
72092new file mode 100644
72093index 0000000..f7f29aa
72094--- /dev/null
72095+++ b/grsecurity/grsec_ptrace.c
72096@@ -0,0 +1,30 @@
72097+#include <linux/kernel.h>
72098+#include <linux/sched.h>
72099+#include <linux/grinternal.h>
72100+#include <linux/security.h>
72101+
72102+void
72103+gr_audit_ptrace(struct task_struct *task)
72104+{
72105+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
72106+ if (grsec_enable_audit_ptrace)
72107+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
72108+#endif
72109+ return;
72110+}
72111+
72112+int
72113+gr_ptrace_readexec(struct file *file, int unsafe_flags)
72114+{
72115+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
72116+ const struct dentry *dentry = file->f_path.dentry;
72117+ const struct vfsmount *mnt = file->f_path.mnt;
72118+
72119+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
72120+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
72121+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
72122+ return -EACCES;
72123+ }
72124+#endif
72125+ return 0;
72126+}
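gr_ptrace_readexec() closes a classic hole: a tracer can otherwise dump the text of a running binary it could not open itself, e.g. a mode-0711 setuid program. The check composes the ordinary DAC read check with the RBAC open check and refuses the unsafe attach if either fails. A rough user-space model of the DAC half, with access() standing in for inode_permission(..., MAY_READ):

#include <unistd.h>
#include <stdio.h>

/* would the tracer be allowed to read the target's binary directly? */
static int ptrace_readexec_ok(const char *binary_path)
{
	return access(binary_path, R_OK) == 0;
}

int main(void)
{
	printf("/bin/ls: %s\n",
	       ptrace_readexec_ok("/bin/ls") ? "attach ok" : "attach denied");
	return 0;
}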
72127diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
72128new file mode 100644
72129index 0000000..3860c7e
72130--- /dev/null
72131+++ b/grsecurity/grsec_sig.c
72132@@ -0,0 +1,236 @@
72133+#include <linux/kernel.h>
72134+#include <linux/sched.h>
72135+#include <linux/fs.h>
72136+#include <linux/delay.h>
72137+#include <linux/grsecurity.h>
72138+#include <linux/grinternal.h>
72139+#include <linux/hardirq.h>
72140+
72141+char *signames[] = {
72142+ [SIGSEGV] = "Segmentation fault",
72143+ [SIGILL] = "Illegal instruction",
72144+ [SIGABRT] = "Abort",
72145+ [SIGBUS] = "Invalid alignment/Bus error"
72146+};
72147+
72148+void
72149+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
72150+{
72151+#ifdef CONFIG_GRKERNSEC_SIGNAL
72152+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
72153+ (sig == SIGABRT) || (sig == SIGBUS))) {
72154+ if (task_pid_nr(t) == task_pid_nr(current)) {
72155+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
72156+ } else {
72157+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
72158+ }
72159+ }
72160+#endif
72161+ return;
72162+}
72163+
72164+int
72165+gr_handle_signal(const struct task_struct *p, const int sig)
72166+{
72167+#ifdef CONFIG_GRKERNSEC
72168+ /* ignore the 0 signal for protected task checks */
72169+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
72170+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
72171+ return -EPERM;
72172+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
72173+ return -EPERM;
72174+ }
72175+#endif
72176+ return 0;
72177+}
72178+
72179+#ifdef CONFIG_GRKERNSEC
72180+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
72181+
72182+int gr_fake_force_sig(int sig, struct task_struct *t)
72183+{
72184+ unsigned long int flags;
72185+ int ret, blocked, ignored;
72186+ struct k_sigaction *action;
72187+
72188+ spin_lock_irqsave(&t->sighand->siglock, flags);
72189+ action = &t->sighand->action[sig-1];
72190+ ignored = action->sa.sa_handler == SIG_IGN;
72191+ blocked = sigismember(&t->blocked, sig);
72192+ if (blocked || ignored) {
72193+ action->sa.sa_handler = SIG_DFL;
72194+ if (blocked) {
72195+ sigdelset(&t->blocked, sig);
72196+ recalc_sigpending_and_wake(t);
72197+ }
72198+ }
72199+ if (action->sa.sa_handler == SIG_DFL)
72200+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
72201+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
72202+
72203+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
72204+
72205+ return ret;
72206+}
72207+#endif
72208+
72209+#define GR_USER_BAN_TIME (15 * 60)
72210+#define GR_DAEMON_BRUTE_TIME (30 * 60)
72211+
72212+void gr_handle_brute_attach(int dumpable)
72213+{
72214+#ifdef CONFIG_GRKERNSEC_BRUTE
72215+ struct task_struct *p = current;
72216+ kuid_t uid = GLOBAL_ROOT_UID;
72217+ int daemon = 0;
72218+
72219+ if (!grsec_enable_brute)
72220+ return;
72221+
72222+ rcu_read_lock();
72223+ read_lock(&tasklist_lock);
72224+ read_lock(&grsec_exec_file_lock);
72225+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
72226+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
72227+ p->real_parent->brute = 1;
72228+ daemon = 1;
72229+ } else {
72230+ const struct cred *cred = __task_cred(p), *cred2;
72231+ struct task_struct *tsk, *tsk2;
72232+
72233+ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
72234+ struct user_struct *user;
72235+
72236+ uid = cred->uid;
72237+
72238+			/* this reference is dropped at exec time, once the ban expires */
72239+ user = find_user(uid);
72240+ if (user == NULL)
72241+ goto unlock;
72242+ user->suid_banned = 1;
72243+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
72244+ if (user->suid_ban_expires == ~0UL)
72245+ user->suid_ban_expires--;
72246+
72247+ /* only kill other threads of the same binary, from the same user */
72248+ do_each_thread(tsk2, tsk) {
72249+ cred2 = __task_cred(tsk);
72250+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
72251+ gr_fake_force_sig(SIGKILL, tsk);
72252+ } while_each_thread(tsk2, tsk);
72253+ }
72254+ }
72255+unlock:
72256+ read_unlock(&grsec_exec_file_lock);
72257+ read_unlock(&tasklist_lock);
72258+ rcu_read_unlock();
72259+
72260+ if (gr_is_global_nonroot(uid))
72261+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
72262+ else if (daemon)
72263+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
72264+
72265+#endif
72266+ return;
72267+}
72268+
72269+void gr_handle_brute_check(void)
72270+{
72271+#ifdef CONFIG_GRKERNSEC_BRUTE
72272+ struct task_struct *p = current;
72273+
72274+ if (unlikely(p->brute)) {
72275+ if (!grsec_enable_brute)
72276+ p->brute = 0;
72277+ else if (time_before(get_seconds(), p->brute_expires))
72278+ msleep(30 * 1000);
72279+ }
72280+#endif
72281+ return;
72282+}
72283+
72284+void gr_handle_kernel_exploit(void)
72285+{
72286+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
72287+ const struct cred *cred;
72288+ struct task_struct *tsk, *tsk2;
72289+ struct user_struct *user;
72290+ kuid_t uid;
72291+
72292+ if (in_irq() || in_serving_softirq() || in_nmi())
72293+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
72294+
72295+ uid = current_uid();
72296+
72297+ if (gr_is_global_root(uid))
72298+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
72299+ else {
72300+ /* kill all the processes of this user, hold a reference
72301+ to their creds struct, and prevent them from creating
72302+ another process until system reset
72303+ */
72304+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
72305+ GR_GLOBAL_UID(uid));
72306+ /* we intentionally leak this ref */
72307+ user = get_uid(current->cred->user);
72308+ if (user)
72309+ user->kernel_banned = 1;
72310+
72311+ /* kill all processes of this user */
72312+ read_lock(&tasklist_lock);
72313+ do_each_thread(tsk2, tsk) {
72314+ cred = __task_cred(tsk);
72315+ if (uid_eq(cred->uid, uid))
72316+ gr_fake_force_sig(SIGKILL, tsk);
72317+ } while_each_thread(tsk2, tsk);
72318+ read_unlock(&tasklist_lock);
72319+ }
72320+#endif
72321+}
72322+
72323+#ifdef CONFIG_GRKERNSEC_BRUTE
72324+static bool suid_ban_expired(struct user_struct *user)
72325+{
72326+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
72327+ user->suid_banned = 0;
72328+ user->suid_ban_expires = 0;
72329+ free_uid(user);
72330+ return true;
72331+ }
72332+
72333+ return false;
72334+}
72335+#endif
72336+
72337+int gr_process_kernel_exec_ban(void)
72338+{
72339+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
72340+ if (unlikely(current->cred->user->kernel_banned))
72341+ return -EPERM;
72342+#endif
72343+ return 0;
72344+}
72345+
72346+int gr_process_kernel_setuid_ban(struct user_struct *user)
72347+{
72348+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
72349+ if (unlikely(user->kernel_banned))
72350+ gr_fake_force_sig(SIGKILL, current);
72351+#endif
72352+ return 0;
72353+}
72354+
72355+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
72356+{
72357+#ifdef CONFIG_GRKERNSEC_BRUTE
72358+ struct user_struct *user = current->cred->user;
72359+ if (unlikely(user->suid_banned)) {
72360+ if (suid_ban_expired(user))
72361+ return 0;
72362+ /* disallow execution of suid binaries only */
72363+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
72364+ return -EPERM;
72365+ }
72366+#endif
72367+ return 0;
72368+}
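For reference, the ban windows above are plain second-granularity arithmetic: a crashing setuid binary bans its user for GR_USER_BAN_TIME (15 minutes), a crashing daemon marks its parent for GR_DAEMON_BRUTE_TIME (30 minutes), and ~0UL is reserved as a never-expires sentinel, which is why the computed expiry is backed off by one if it collides. A small sketch of the expiry test under the same clock model (the kernel's time_after_eq() additionally copes with counter wraparound):

#include <stdio.h>

#define GR_USER_BAN_TIME (15 * 60)	/* seconds */

static int suid_ban_active(unsigned long now, unsigned long expires)
{
	if (expires == ~0UL)	/* sentinel: never expires */
		return 1;
	return now < expires;
}

int main(void)
{
	unsigned long now = 1700000000UL;
	unsigned long expires = now + GR_USER_BAN_TIME;

	if (expires == ~0UL)	/* avoid the sentinel, as the patch does */
		expires--;

	printf("at +0s:   %s\n", suid_ban_active(now, expires) ? "banned" : "clear");
	printf("at +900s: %s\n", suid_ban_active(now + 900, expires) ? "banned" : "clear");
	return 0;
}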
72369diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
72370new file mode 100644
72371index 0000000..4030d57
72372--- /dev/null
72373+++ b/grsecurity/grsec_sock.c
72374@@ -0,0 +1,244 @@
72375+#include <linux/kernel.h>
72376+#include <linux/module.h>
72377+#include <linux/sched.h>
72378+#include <linux/file.h>
72379+#include <linux/net.h>
72380+#include <linux/in.h>
72381+#include <linux/ip.h>
72382+#include <net/sock.h>
72383+#include <net/inet_sock.h>
72384+#include <linux/grsecurity.h>
72385+#include <linux/grinternal.h>
72386+#include <linux/gracl.h>
72387+
72388+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
72389+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
72390+
72391+EXPORT_SYMBOL(gr_search_udp_recvmsg);
72392+EXPORT_SYMBOL(gr_search_udp_sendmsg);
72393+
72394+#ifdef CONFIG_UNIX_MODULE
72395+EXPORT_SYMBOL(gr_acl_handle_unix);
72396+EXPORT_SYMBOL(gr_acl_handle_mknod);
72397+EXPORT_SYMBOL(gr_handle_chroot_unix);
72398+EXPORT_SYMBOL(gr_handle_create);
72399+#endif
72400+
72401+#ifdef CONFIG_GRKERNSEC
72402+#define gr_conn_table_size 32749
72403+struct conn_table_entry {
72404+ struct conn_table_entry *next;
72405+ struct signal_struct *sig;
72406+};
72407+
72408+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
72409+DEFINE_SPINLOCK(gr_conn_table_lock);
72410+
72411+extern const char * gr_socktype_to_name(unsigned char type);
72412+extern const char * gr_proto_to_name(unsigned char proto);
72413+extern const char * gr_sockfamily_to_name(unsigned char family);
72414+
72415+static __inline__ int
72416+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
72417+{
72418+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
72419+}
72420+
72421+static __inline__ int
72422+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
72423+ __u16 sport, __u16 dport)
72424+{
72425+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
72426+ sig->gr_sport == sport && sig->gr_dport == dport))
72427+ return 1;
72428+ else
72429+ return 0;
72430+}
72431+
72432+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
72433+{
72434+ struct conn_table_entry **match;
72435+ unsigned int index;
72436+
72437+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
72438+ sig->gr_sport, sig->gr_dport,
72439+ gr_conn_table_size);
72440+
72441+ newent->sig = sig;
72442+
72443+ match = &gr_conn_table[index];
72444+ newent->next = *match;
72445+ *match = newent;
72446+
72447+ return;
72448+}
72449+
72450+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
72451+{
72452+ struct conn_table_entry *match, *last = NULL;
72453+ unsigned int index;
72454+
72455+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
72456+ sig->gr_sport, sig->gr_dport,
72457+ gr_conn_table_size);
72458+
72459+ match = gr_conn_table[index];
72460+ while (match && !conn_match(match->sig,
72461+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
72462+ sig->gr_dport)) {
72463+ last = match;
72464+ match = match->next;
72465+ }
72466+
72467+ if (match) {
72468+ if (last)
72469+ last->next = match->next;
72470+ else
72471+ gr_conn_table[index] = NULL;
72472+ kfree(match);
72473+ }
72474+
72475+ return;
72476+}
72477+
72478+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
72479+ __u16 sport, __u16 dport)
72480+{
72481+ struct conn_table_entry *match;
72482+ unsigned int index;
72483+
72484+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
72485+
72486+ match = gr_conn_table[index];
72487+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
72488+ match = match->next;
72489+
72490+ if (match)
72491+ return match->sig;
72492+ else
72493+ return NULL;
72494+}
72495+
72496+#endif
72497+
72498+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
72499+{
72500+#ifdef CONFIG_GRKERNSEC
72501+ struct signal_struct *sig = task->signal;
72502+ struct conn_table_entry *newent;
72503+
72504+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
72505+ if (newent == NULL)
72506+ return;
72507+ /* no bh lock needed since we are called with bh disabled */
72508+ spin_lock(&gr_conn_table_lock);
72509+ gr_del_task_from_ip_table_nolock(sig);
72510+ sig->gr_saddr = inet->inet_rcv_saddr;
72511+ sig->gr_daddr = inet->inet_daddr;
72512+ sig->gr_sport = inet->inet_sport;
72513+ sig->gr_dport = inet->inet_dport;
72514+ gr_add_to_task_ip_table_nolock(sig, newent);
72515+ spin_unlock(&gr_conn_table_lock);
72516+#endif
72517+ return;
72518+}
72519+
72520+void gr_del_task_from_ip_table(struct task_struct *task)
72521+{
72522+#ifdef CONFIG_GRKERNSEC
72523+ spin_lock_bh(&gr_conn_table_lock);
72524+ gr_del_task_from_ip_table_nolock(task->signal);
72525+ spin_unlock_bh(&gr_conn_table_lock);
72526+#endif
72527+ return;
72528+}
72529+
72530+void
72531+gr_attach_curr_ip(const struct sock *sk)
72532+{
72533+#ifdef CONFIG_GRKERNSEC
72534+ struct signal_struct *p, *set;
72535+ const struct inet_sock *inet = inet_sk(sk);
72536+
72537+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
72538+ return;
72539+
72540+ set = current->signal;
72541+
72542+ spin_lock_bh(&gr_conn_table_lock);
72543+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
72544+ inet->inet_dport, inet->inet_sport);
72545+ if (unlikely(p != NULL)) {
72546+ set->curr_ip = p->curr_ip;
72547+ set->used_accept = 1;
72548+ gr_del_task_from_ip_table_nolock(p);
72549+ spin_unlock_bh(&gr_conn_table_lock);
72550+ return;
72551+ }
72552+ spin_unlock_bh(&gr_conn_table_lock);
72553+
72554+ set->curr_ip = inet->inet_daddr;
72555+ set->used_accept = 1;
72556+#endif
72557+ return;
72558+}
72559+
72560+int
72561+gr_handle_sock_all(const int family, const int type, const int protocol)
72562+{
72563+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
72564+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
72565+ (family != AF_UNIX)) {
72566+ if (family == AF_INET)
72567+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
72568+ else
72569+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
72570+ return -EACCES;
72571+ }
72572+#endif
72573+ return 0;
72574+}
72575+
72576+int
72577+gr_handle_sock_server(const struct sockaddr *sck)
72578+{
72579+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
72580+ if (grsec_enable_socket_server &&
72581+ in_group_p(grsec_socket_server_gid) &&
72582+ sck && (sck->sa_family != AF_UNIX) &&
72583+ (sck->sa_family != AF_LOCAL)) {
72584+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
72585+ return -EACCES;
72586+ }
72587+#endif
72588+ return 0;
72589+}
72590+
72591+int
72592+gr_handle_sock_server_other(const struct sock *sck)
72593+{
72594+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
72595+ if (grsec_enable_socket_server &&
72596+ in_group_p(grsec_socket_server_gid) &&
72597+ sck && (sck->sk_family != AF_UNIX) &&
72598+ (sck->sk_family != AF_LOCAL)) {
72599+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
72600+ return -EACCES;
72601+ }
72602+#endif
72603+ return 0;
72604+}
72605+
72606+int
72607+gr_handle_sock_client(const struct sockaddr *sck)
72608+{
72609+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
72610+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
72611+ sck && (sck->sa_family != AF_UNIX) &&
72612+ (sck->sa_family != AF_LOCAL)) {
72613+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
72614+ return -EACCES;
72615+ }
72616+#endif
72617+ return 0;
72618+}
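The connection table above keys tasks by their TCP 4-tuple so that gr_attach_curr_ip() can recover the peer address of an accepted connection for logging. conn_hash() simply folds the tuple into one word and reduces it modulo the prime table size (32749 is prime), a cheap way to spread typical address/port patterns across buckets. A stand-alone version (the demo uses host byte order; the kernel fields are big-endian, but the hash does not care):

#include <stdio.h>
#include <stdint.h>

#define TABLE_SIZE 32749

static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
			      uint16_t sport, uint16_t dport)
{
	/* unsigned 32-bit math; overflow wraps harmlessly */
	return (daddr + saddr + ((uint32_t)sport << 8) +
		((uint32_t)dport << 16)) % TABLE_SIZE;
}

int main(void)
{
	/* 192.168.0.1:40000 -> 10.0.0.1:80 */
	printf("bucket = %u\n",
	       conn_hash(0xc0a80001u, 0x0a000001u, 40000, 80));
	return 0;
}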
72619diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
72620new file mode 100644
72621index 0000000..8159888
72622--- /dev/null
72623+++ b/grsecurity/grsec_sysctl.c
72624@@ -0,0 +1,479 @@
72625+#include <linux/kernel.h>
72626+#include <linux/sched.h>
72627+#include <linux/sysctl.h>
72628+#include <linux/grsecurity.h>
72629+#include <linux/grinternal.h>
72630+
72631+int
72632+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
72633+{
72634+#ifdef CONFIG_GRKERNSEC_SYSCTL
72635+ if (dirname == NULL || name == NULL)
72636+ return 0;
72637+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
72638+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
72639+ return -EACCES;
72640+ }
72641+#endif
72642+ return 0;
72643+}
72644+
72645+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
72646+static int __maybe_unused __read_only one = 1;
72647+#endif
72648+
72649+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
72650+ defined(CONFIG_GRKERNSEC_DENYUSB)
72651+struct ctl_table grsecurity_table[] = {
72652+#ifdef CONFIG_GRKERNSEC_SYSCTL
72653+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
72654+#ifdef CONFIG_GRKERNSEC_IO
72655+ {
72656+ .procname = "disable_priv_io",
72657+ .data = &grsec_disable_privio,
72658+ .maxlen = sizeof(int),
72659+ .mode = 0600,
72660+ .proc_handler = &proc_dointvec,
72661+ },
72662+#endif
72663+#endif
72664+#ifdef CONFIG_GRKERNSEC_LINK
72665+ {
72666+ .procname = "linking_restrictions",
72667+ .data = &grsec_enable_link,
72668+ .maxlen = sizeof(int),
72669+ .mode = 0600,
72670+ .proc_handler = &proc_dointvec,
72671+ },
72672+#endif
72673+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
72674+ {
72675+ .procname = "enforce_symlinksifowner",
72676+ .data = &grsec_enable_symlinkown,
72677+ .maxlen = sizeof(int),
72678+ .mode = 0600,
72679+ .proc_handler = &proc_dointvec,
72680+ },
72681+ {
72682+ .procname = "symlinkown_gid",
72683+ .data = &grsec_symlinkown_gid,
72684+ .maxlen = sizeof(int),
72685+ .mode = 0600,
72686+ .proc_handler = &proc_dointvec,
72687+ },
72688+#endif
72689+#ifdef CONFIG_GRKERNSEC_BRUTE
72690+ {
72691+ .procname = "deter_bruteforce",
72692+ .data = &grsec_enable_brute,
72693+ .maxlen = sizeof(int),
72694+ .mode = 0600,
72695+ .proc_handler = &proc_dointvec,
72696+ },
72697+#endif
72698+#ifdef CONFIG_GRKERNSEC_FIFO
72699+ {
72700+ .procname = "fifo_restrictions",
72701+ .data = &grsec_enable_fifo,
72702+ .maxlen = sizeof(int),
72703+ .mode = 0600,
72704+ .proc_handler = &proc_dointvec,
72705+ },
72706+#endif
72707+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
72708+ {
72709+ .procname = "ptrace_readexec",
72710+ .data = &grsec_enable_ptrace_readexec,
72711+ .maxlen = sizeof(int),
72712+ .mode = 0600,
72713+ .proc_handler = &proc_dointvec,
72714+ },
72715+#endif
72716+#ifdef CONFIG_GRKERNSEC_SETXID
72717+ {
72718+ .procname = "consistent_setxid",
72719+ .data = &grsec_enable_setxid,
72720+ .maxlen = sizeof(int),
72721+ .mode = 0600,
72722+ .proc_handler = &proc_dointvec,
72723+ },
72724+#endif
72725+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72726+ {
72727+ .procname = "ip_blackhole",
72728+ .data = &grsec_enable_blackhole,
72729+ .maxlen = sizeof(int),
72730+ .mode = 0600,
72731+ .proc_handler = &proc_dointvec,
72732+ },
72733+ {
72734+ .procname = "lastack_retries",
72735+ .data = &grsec_lastack_retries,
72736+ .maxlen = sizeof(int),
72737+ .mode = 0600,
72738+ .proc_handler = &proc_dointvec,
72739+ },
72740+#endif
72741+#ifdef CONFIG_GRKERNSEC_EXECLOG
72742+ {
72743+ .procname = "exec_logging",
72744+ .data = &grsec_enable_execlog,
72745+ .maxlen = sizeof(int),
72746+ .mode = 0600,
72747+ .proc_handler = &proc_dointvec,
72748+ },
72749+#endif
72750+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
72751+ {
72752+ .procname = "rwxmap_logging",
72753+ .data = &grsec_enable_log_rwxmaps,
72754+ .maxlen = sizeof(int),
72755+ .mode = 0600,
72756+ .proc_handler = &proc_dointvec,
72757+ },
72758+#endif
72759+#ifdef CONFIG_GRKERNSEC_SIGNAL
72760+ {
72761+ .procname = "signal_logging",
72762+ .data = &grsec_enable_signal,
72763+ .maxlen = sizeof(int),
72764+ .mode = 0600,
72765+ .proc_handler = &proc_dointvec,
72766+ },
72767+#endif
72768+#ifdef CONFIG_GRKERNSEC_FORKFAIL
72769+ {
72770+ .procname = "forkfail_logging",
72771+ .data = &grsec_enable_forkfail,
72772+ .maxlen = sizeof(int),
72773+ .mode = 0600,
72774+ .proc_handler = &proc_dointvec,
72775+ },
72776+#endif
72777+#ifdef CONFIG_GRKERNSEC_TIME
72778+ {
72779+ .procname = "timechange_logging",
72780+ .data = &grsec_enable_time,
72781+ .maxlen = sizeof(int),
72782+ .mode = 0600,
72783+ .proc_handler = &proc_dointvec,
72784+ },
72785+#endif
72786+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
72787+ {
72788+ .procname = "chroot_deny_shmat",
72789+ .data = &grsec_enable_chroot_shmat,
72790+ .maxlen = sizeof(int),
72791+ .mode = 0600,
72792+ .proc_handler = &proc_dointvec,
72793+ },
72794+#endif
72795+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
72796+ {
72797+ .procname = "chroot_deny_unix",
72798+ .data = &grsec_enable_chroot_unix,
72799+ .maxlen = sizeof(int),
72800+ .mode = 0600,
72801+ .proc_handler = &proc_dointvec,
72802+ },
72803+#endif
72804+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
72805+ {
72806+ .procname = "chroot_deny_mount",
72807+ .data = &grsec_enable_chroot_mount,
72808+ .maxlen = sizeof(int),
72809+ .mode = 0600,
72810+ .proc_handler = &proc_dointvec,
72811+ },
72812+#endif
72813+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
72814+ {
72815+ .procname = "chroot_deny_fchdir",
72816+ .data = &grsec_enable_chroot_fchdir,
72817+ .maxlen = sizeof(int),
72818+ .mode = 0600,
72819+ .proc_handler = &proc_dointvec,
72820+ },
72821+#endif
72822+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
72823+ {
72824+ .procname = "chroot_deny_chroot",
72825+ .data = &grsec_enable_chroot_double,
72826+ .maxlen = sizeof(int),
72827+ .mode = 0600,
72828+ .proc_handler = &proc_dointvec,
72829+ },
72830+#endif
72831+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
72832+ {
72833+ .procname = "chroot_deny_pivot",
72834+ .data = &grsec_enable_chroot_pivot,
72835+ .maxlen = sizeof(int),
72836+ .mode = 0600,
72837+ .proc_handler = &proc_dointvec,
72838+ },
72839+#endif
72840+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
72841+ {
72842+ .procname = "chroot_enforce_chdir",
72843+ .data = &grsec_enable_chroot_chdir,
72844+ .maxlen = sizeof(int),
72845+ .mode = 0600,
72846+ .proc_handler = &proc_dointvec,
72847+ },
72848+#endif
72849+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
72850+ {
72851+ .procname = "chroot_deny_chmod",
72852+ .data = &grsec_enable_chroot_chmod,
72853+ .maxlen = sizeof(int),
72854+ .mode = 0600,
72855+ .proc_handler = &proc_dointvec,
72856+ },
72857+#endif
72858+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
72859+ {
72860+ .procname = "chroot_deny_mknod",
72861+ .data = &grsec_enable_chroot_mknod,
72862+ .maxlen = sizeof(int),
72863+ .mode = 0600,
72864+ .proc_handler = &proc_dointvec,
72865+ },
72866+#endif
72867+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
72868+ {
72869+ .procname = "chroot_restrict_nice",
72870+ .data = &grsec_enable_chroot_nice,
72871+ .maxlen = sizeof(int),
72872+ .mode = 0600,
72873+ .proc_handler = &proc_dointvec,
72874+ },
72875+#endif
72876+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
72877+ {
72878+ .procname = "chroot_execlog",
72879+ .data = &grsec_enable_chroot_execlog,
72880+ .maxlen = sizeof(int),
72881+ .mode = 0600,
72882+ .proc_handler = &proc_dointvec,
72883+ },
72884+#endif
72885+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
72886+ {
72887+ .procname = "chroot_caps",
72888+ .data = &grsec_enable_chroot_caps,
72889+ .maxlen = sizeof(int),
72890+ .mode = 0600,
72891+ .proc_handler = &proc_dointvec,
72892+ },
72893+#endif
72894+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
72895+ {
72896+ .procname = "chroot_deny_sysctl",
72897+ .data = &grsec_enable_chroot_sysctl,
72898+ .maxlen = sizeof(int),
72899+ .mode = 0600,
72900+ .proc_handler = &proc_dointvec,
72901+ },
72902+#endif
72903+#ifdef CONFIG_GRKERNSEC_TPE
72904+ {
72905+ .procname = "tpe",
72906+ .data = &grsec_enable_tpe,
72907+ .maxlen = sizeof(int),
72908+ .mode = 0600,
72909+ .proc_handler = &proc_dointvec,
72910+ },
72911+ {
72912+ .procname = "tpe_gid",
72913+ .data = &grsec_tpe_gid,
72914+ .maxlen = sizeof(int),
72915+ .mode = 0600,
72916+ .proc_handler = &proc_dointvec,
72917+ },
72918+#endif
72919+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
72920+ {
72921+ .procname = "tpe_invert",
72922+ .data = &grsec_enable_tpe_invert,
72923+ .maxlen = sizeof(int),
72924+ .mode = 0600,
72925+ .proc_handler = &proc_dointvec,
72926+ },
72927+#endif
72928+#ifdef CONFIG_GRKERNSEC_TPE_ALL
72929+ {
72930+ .procname = "tpe_restrict_all",
72931+ .data = &grsec_enable_tpe_all,
72932+ .maxlen = sizeof(int),
72933+ .mode = 0600,
72934+ .proc_handler = &proc_dointvec,
72935+ },
72936+#endif
72937+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
72938+ {
72939+ .procname = "socket_all",
72940+ .data = &grsec_enable_socket_all,
72941+ .maxlen = sizeof(int),
72942+ .mode = 0600,
72943+ .proc_handler = &proc_dointvec,
72944+ },
72945+ {
72946+ .procname = "socket_all_gid",
72947+ .data = &grsec_socket_all_gid,
72948+ .maxlen = sizeof(int),
72949+ .mode = 0600,
72950+ .proc_handler = &proc_dointvec,
72951+ },
72952+#endif
72953+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
72954+ {
72955+ .procname = "socket_client",
72956+ .data = &grsec_enable_socket_client,
72957+ .maxlen = sizeof(int),
72958+ .mode = 0600,
72959+ .proc_handler = &proc_dointvec,
72960+ },
72961+ {
72962+ .procname = "socket_client_gid",
72963+ .data = &grsec_socket_client_gid,
72964+ .maxlen = sizeof(int),
72965+ .mode = 0600,
72966+ .proc_handler = &proc_dointvec,
72967+ },
72968+#endif
72969+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
72970+ {
72971+ .procname = "socket_server",
72972+ .data = &grsec_enable_socket_server,
72973+ .maxlen = sizeof(int),
72974+ .mode = 0600,
72975+ .proc_handler = &proc_dointvec,
72976+ },
72977+ {
72978+ .procname = "socket_server_gid",
72979+ .data = &grsec_socket_server_gid,
72980+ .maxlen = sizeof(int),
72981+ .mode = 0600,
72982+ .proc_handler = &proc_dointvec,
72983+ },
72984+#endif
72985+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
72986+ {
72987+ .procname = "audit_group",
72988+ .data = &grsec_enable_group,
72989+ .maxlen = sizeof(int),
72990+ .mode = 0600,
72991+ .proc_handler = &proc_dointvec,
72992+ },
72993+ {
72994+ .procname = "audit_gid",
72995+ .data = &grsec_audit_gid,
72996+ .maxlen = sizeof(int),
72997+ .mode = 0600,
72998+ .proc_handler = &proc_dointvec,
72999+ },
73000+#endif
73001+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
73002+ {
73003+ .procname = "audit_chdir",
73004+ .data = &grsec_enable_chdir,
73005+ .maxlen = sizeof(int),
73006+ .mode = 0600,
73007+ .proc_handler = &proc_dointvec,
73008+ },
73009+#endif
73010+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
73011+ {
73012+ .procname = "audit_mount",
73013+ .data = &grsec_enable_mount,
73014+ .maxlen = sizeof(int),
73015+ .mode = 0600,
73016+ .proc_handler = &proc_dointvec,
73017+ },
73018+#endif
73019+#ifdef CONFIG_GRKERNSEC_DMESG
73020+ {
73021+ .procname = "dmesg",
73022+ .data = &grsec_enable_dmesg,
73023+ .maxlen = sizeof(int),
73024+ .mode = 0600,
73025+ .proc_handler = &proc_dointvec,
73026+ },
73027+#endif
73028+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
73029+ {
73030+ .procname = "chroot_findtask",
73031+ .data = &grsec_enable_chroot_findtask,
73032+ .maxlen = sizeof(int),
73033+ .mode = 0600,
73034+ .proc_handler = &proc_dointvec,
73035+ },
73036+#endif
73037+#ifdef CONFIG_GRKERNSEC_RESLOG
73038+ {
73039+ .procname = "resource_logging",
73040+ .data = &grsec_resource_logging,
73041+ .maxlen = sizeof(int),
73042+ .mode = 0600,
73043+ .proc_handler = &proc_dointvec,
73044+ },
73045+#endif
73046+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
73047+ {
73048+ .procname = "audit_ptrace",
73049+ .data = &grsec_enable_audit_ptrace,
73050+ .maxlen = sizeof(int),
73051+ .mode = 0600,
73052+ .proc_handler = &proc_dointvec,
73053+ },
73054+#endif
73055+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
73056+ {
73057+ .procname = "harden_ptrace",
73058+ .data = &grsec_enable_harden_ptrace,
73059+ .maxlen = sizeof(int),
73060+ .mode = 0600,
73061+ .proc_handler = &proc_dointvec,
73062+ },
73063+#endif
73064+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
73065+ {
73066+ .procname = "harden_ipc",
73067+ .data = &grsec_enable_harden_ipc,
73068+ .maxlen = sizeof(int),
73069+ .mode = 0600,
73070+ .proc_handler = &proc_dointvec,
73071+ },
73072+#endif
73073+ {
73074+ .procname = "grsec_lock",
73075+ .data = &grsec_lock,
73076+ .maxlen = sizeof(int),
73077+ .mode = 0600,
73078+ .proc_handler = &proc_dointvec,
73079+ },
73080+#endif
73081+#ifdef CONFIG_GRKERNSEC_ROFS
73082+ {
73083+ .procname = "romount_protect",
73084+ .data = &grsec_enable_rofs,
73085+ .maxlen = sizeof(int),
73086+ .mode = 0600,
73087+ .proc_handler = &proc_dointvec_minmax,
73088+ .extra1 = &one,
73089+ .extra2 = &one,
73090+ },
73091+#endif
73092+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
73093+ {
73094+ .procname = "deny_new_usb",
73095+ .data = &grsec_deny_new_usb,
73096+ .maxlen = sizeof(int),
73097+ .mode = 0600,
73098+ .proc_handler = &proc_dointvec,
73099+ },
73100+#endif
73101+ { }
73102+};
73103+#endif
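One entry deserves a note: romount_protect is wired to proc_dointvec_minmax with extra1 and extra2 both pointing at the read-only constant one, so the only value a write can set is 1 — a deliberate one-way switch, in the same spirit as grsec_lock. A user-space model of that clamp, assuming the stock proc_dointvec_minmax() behavior of rejecting out-of-range writes with -EINVAL:

#include <stdio.h>
#include <errno.h>

static int one = 1;

static int dointvec_minmax(int *data, int newval, const int *min, const int *max)
{
	if ((min && newval < *min) || (max && newval > *max))
		return -EINVAL;	/* write refused, value unchanged */
	*data = newval;
	return 0;
}

int main(void)
{
	int romount_protect = 0;

	printf("write 1 -> %d\n", dointvec_minmax(&romount_protect, 1, &one, &one));
	printf("write 0 -> %d\n", dointvec_minmax(&romount_protect, 0, &one, &one));
	printf("value = %d\n", romount_protect);
	return 0;
}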
73104diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
73105new file mode 100644
73106index 0000000..0dc13c3
73107--- /dev/null
73108+++ b/grsecurity/grsec_time.c
73109@@ -0,0 +1,16 @@
73110+#include <linux/kernel.h>
73111+#include <linux/sched.h>
73112+#include <linux/grinternal.h>
73113+#include <linux/module.h>
73114+
73115+void
73116+gr_log_timechange(void)
73117+{
73118+#ifdef CONFIG_GRKERNSEC_TIME
73119+ if (grsec_enable_time)
73120+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
73121+#endif
73122+ return;
73123+}
73124+
73125+EXPORT_SYMBOL(gr_log_timechange);
73126diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
73127new file mode 100644
73128index 0000000..ee57dcf
73129--- /dev/null
73130+++ b/grsecurity/grsec_tpe.c
73131@@ -0,0 +1,73 @@
73132+#include <linux/kernel.h>
73133+#include <linux/sched.h>
73134+#include <linux/file.h>
73135+#include <linux/fs.h>
73136+#include <linux/grinternal.h>
73137+
73138+extern int gr_acl_tpe_check(void);
73139+
73140+int
73141+gr_tpe_allow(const struct file *file)
73142+{
73143+#ifdef CONFIG_GRKERNSEC
73144+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
73145+ const struct cred *cred = current_cred();
73146+ char *msg = NULL;
73147+ char *msg2 = NULL;
73148+
73149+ // never restrict root
73150+ if (gr_is_global_root(cred->uid))
73151+ return 1;
73152+
73153+ if (grsec_enable_tpe) {
73154+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
73155+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
73156+ msg = "not being in trusted group";
73157+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
73158+ msg = "being in untrusted group";
73159+#else
73160+ if (in_group_p(grsec_tpe_gid))
73161+ msg = "being in untrusted group";
73162+#endif
73163+ }
73164+ if (!msg && gr_acl_tpe_check())
73165+ msg = "being in untrusted role";
73166+
73167+ // not in any affected group/role
73168+ if (!msg)
73169+ goto next_check;
73170+
73171+ if (gr_is_global_nonroot(inode->i_uid))
73172+ msg2 = "file in non-root-owned directory";
73173+ else if (inode->i_mode & S_IWOTH)
73174+ msg2 = "file in world-writable directory";
73175+ else if (inode->i_mode & S_IWGRP)
73176+ msg2 = "file in group-writable directory";
73177+
73178+ if (msg && msg2) {
73179+ char fullmsg[70] = {0};
73180+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
73181+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
73182+ return 0;
73183+ }
73184+ msg = NULL;
73185+next_check:
73186+#ifdef CONFIG_GRKERNSEC_TPE_ALL
73187+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
73188+ return 1;
73189+
73190+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
73191+ msg = "directory not owned by user";
73192+ else if (inode->i_mode & S_IWOTH)
73193+ msg = "file in world-writable directory";
73194+ else if (inode->i_mode & S_IWGRP)
73195+ msg = "file in group-writable directory";
73196+
73197+ if (msg) {
73198+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
73199+ return 0;
73200+ }
73201+#endif
73202+#endif
73203+ return 1;
73204+}
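The policy above is classic trusted-path execution: an untrusted user may only execute files whose containing directory is root-owned and neither group- nor world-writable, since write access to the directory is enough to swap the binary out from under the path. A user-space model of the directory test, with stat() standing in for the d_parent inode checks:

#include <sys/stat.h>
#include <stdio.h>

static const char *tpe_dir_problem(const struct stat *st)
{
	if (st->st_uid != 0)
		return "file in non-root-owned directory";
	if (st->st_mode & S_IWOTH)
		return "file in world-writable directory";
	if (st->st_mode & S_IWGRP)
		return "file in group-writable directory";
	return NULL;	/* directory passes */
}

int main(void)
{
	struct stat st;

	if (stat("/tmp", &st) == 0) {
		const char *why = tpe_dir_problem(&st);
		printf("/tmp: %s\n", why ? why : "ok");
	}
	return 0;
}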
73205diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
73206new file mode 100644
73207index 0000000..ae02d8e
73208--- /dev/null
73209+++ b/grsecurity/grsec_usb.c
73210@@ -0,0 +1,15 @@
73211+#include <linux/kernel.h>
73212+#include <linux/grinternal.h>
73213+#include <linux/module.h>
73214+
73215+int gr_handle_new_usb(void)
73216+{
73217+#ifdef CONFIG_GRKERNSEC_DENYUSB
73218+ if (grsec_deny_new_usb) {
73219+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
73220+ return 1;
73221+ }
73222+#endif
73223+ return 0;
73224+}
73225+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
73226diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
73227new file mode 100644
73228index 0000000..9f7b1ac
73229--- /dev/null
73230+++ b/grsecurity/grsum.c
73231@@ -0,0 +1,61 @@
73232+#include <linux/err.h>
73233+#include <linux/kernel.h>
73234+#include <linux/sched.h>
73235+#include <linux/mm.h>
73236+#include <linux/scatterlist.h>
73237+#include <linux/crypto.h>
73238+#include <linux/gracl.h>
73239+
73240+
73241+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
73242+#error "crypto and sha256 must be built into the kernel"
73243+#endif
73244+
73245+int
73246+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
73247+{
73248+ char *p;
73249+ struct crypto_hash *tfm;
73250+ struct hash_desc desc;
73251+ struct scatterlist sg;
73252+ unsigned char temp_sum[GR_SHA_LEN];
73253+ volatile int retval = 0;
73254+ volatile int dummy = 0;
73255+ unsigned int i;
73256+
73257+ sg_init_table(&sg, 1);
73258+
73259+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
73260+ if (IS_ERR(tfm)) {
73261+ /* should never happen, since sha256 should be built in */
73262+ return 1;
73263+ }
73264+
73265+ desc.tfm = tfm;
73266+ desc.flags = 0;
73267+
73268+ crypto_hash_init(&desc);
73269+
73270+ p = salt;
73271+ sg_set_buf(&sg, p, GR_SALT_LEN);
73272+ crypto_hash_update(&desc, &sg, sg.length);
73273+
73274+ p = entry->pw;
73275+ sg_set_buf(&sg, p, strlen(p));
73276+
73277+ crypto_hash_update(&desc, &sg, sg.length);
73278+
73279+ crypto_hash_final(&desc, temp_sum);
73280+
73281+ memset(entry->pw, 0, GR_PW_LEN);
73282+
73283+ for (i = 0; i < GR_SHA_LEN; i++)
73284+ if (sum[i] != temp_sum[i])
73285+ retval = 1;
73286+ else
73287+			dummy = 1; // waste a cycle so both outcomes cost the same
73288+
73289+ crypto_free_hash(tfm);
73290+
73291+ return retval;
73292+}
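Note the shape of that final comparison: it scans all GR_SHA_LEN bytes and assigns through volatiles on both branches, so a wrong password costs the same time whether it diverges at byte 0 or byte 31, denying the caller a timing oracle. The common branch-free formulation of the same idea (later kernels provide crypto_memneq() for this purpose):

#include <stddef.h>
#include <stdio.h>

/* returns nonzero iff a and b differ over n bytes, in time
 * independent of where they first differ */
static int const_time_memcmp(const unsigned char *a,
			     const unsigned char *b, size_t n)
{
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < n; i++)
		diff |= a[i] ^ b[i];	/* accumulate, never branch */

	return diff != 0;
}

int main(void)
{
	unsigned char x[4] = {1, 2, 3, 4}, y[4] = {1, 2, 3, 5};

	printf("differ: %d\n", const_time_memcmp(x, y, sizeof(x)));
	return 0;
}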
73293diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
73294index 77ff547..181834f 100644
73295--- a/include/asm-generic/4level-fixup.h
73296+++ b/include/asm-generic/4level-fixup.h
73297@@ -13,8 +13,10 @@
73298 #define pmd_alloc(mm, pud, address) \
73299 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
73300 NULL: pmd_offset(pud, address))
73301+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
73302
73303 #define pud_alloc(mm, pgd, address) (pgd)
73304+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
73305 #define pud_offset(pgd, start) (pgd)
73306 #define pud_none(pud) 0
73307 #define pud_bad(pud) 0
73308diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
73309index b7babf0..97f4c4f 100644
73310--- a/include/asm-generic/atomic-long.h
73311+++ b/include/asm-generic/atomic-long.h
73312@@ -22,6 +22,12 @@
73313
73314 typedef atomic64_t atomic_long_t;
73315
73316+#ifdef CONFIG_PAX_REFCOUNT
73317+typedef atomic64_unchecked_t atomic_long_unchecked_t;
73318+#else
73319+typedef atomic64_t atomic_long_unchecked_t;
73320+#endif
73321+
73322 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
73323
73324 static inline long atomic_long_read(atomic_long_t *l)
73325@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
73326 return (long)atomic64_read(v);
73327 }
73328
73329+#ifdef CONFIG_PAX_REFCOUNT
73330+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
73331+{
73332+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
73333+
73334+ return (long)atomic64_read_unchecked(v);
73335+}
73336+#endif
73337+
73338 static inline void atomic_long_set(atomic_long_t *l, long i)
73339 {
73340 atomic64_t *v = (atomic64_t *)l;
73341@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
73342 atomic64_set(v, i);
73343 }
73344
73345+#ifdef CONFIG_PAX_REFCOUNT
73346+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
73347+{
73348+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
73349+
73350+ atomic64_set_unchecked(v, i);
73351+}
73352+#endif
73353+
73354 static inline void atomic_long_inc(atomic_long_t *l)
73355 {
73356 atomic64_t *v = (atomic64_t *)l;
73357@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
73358 atomic64_inc(v);
73359 }
73360
73361+#ifdef CONFIG_PAX_REFCOUNT
73362+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
73363+{
73364+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
73365+
73366+ atomic64_inc_unchecked(v);
73367+}
73368+#endif
73369+
73370 static inline void atomic_long_dec(atomic_long_t *l)
73371 {
73372 atomic64_t *v = (atomic64_t *)l;
73373@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
73374 atomic64_dec(v);
73375 }
73376
73377+#ifdef CONFIG_PAX_REFCOUNT
73378+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
73379+{
73380+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
73381+
73382+ atomic64_dec_unchecked(v);
73383+}
73384+#endif
73385+
73386 static inline void atomic_long_add(long i, atomic_long_t *l)
73387 {
73388 atomic64_t *v = (atomic64_t *)l;
73389@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
73390 atomic64_add(i, v);
73391 }
73392
73393+#ifdef CONFIG_PAX_REFCOUNT
73394+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
73395+{
73396+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
73397+
73398+ atomic64_add_unchecked(i, v);
73399+}
73400+#endif
73401+
73402 static inline void atomic_long_sub(long i, atomic_long_t *l)
73403 {
73404 atomic64_t *v = (atomic64_t *)l;
73405@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
73406 atomic64_sub(i, v);
73407 }
73408
73409+#ifdef CONFIG_PAX_REFCOUNT
73410+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
73411+{
73412+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
73413+
73414+ atomic64_sub_unchecked(i, v);
73415+}
73416+#endif
73417+
73418 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
73419 {
73420 atomic64_t *v = (atomic64_t *)l;
73421@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
73422 return atomic64_add_negative(i, v);
73423 }
73424
73425-static inline long atomic_long_add_return(long i, atomic_long_t *l)
73426+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
73427 {
73428 atomic64_t *v = (atomic64_t *)l;
73429
73430 return (long)atomic64_add_return(i, v);
73431 }
73432
73433+#ifdef CONFIG_PAX_REFCOUNT
73434+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
73435+{
73436+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
73437+
73438+ return (long)atomic64_add_return_unchecked(i, v);
73439+}
73440+#endif
73441+
73442 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
73443 {
73444 atomic64_t *v = (atomic64_t *)l;
73445@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
73446 return (long)atomic64_inc_return(v);
73447 }
73448
73449+#ifdef CONFIG_PAX_REFCOUNT
73450+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
73451+{
73452+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
73453+
73454+ return (long)atomic64_inc_return_unchecked(v);
73455+}
73456+#endif
73457+
73458 static inline long atomic_long_dec_return(atomic_long_t *l)
73459 {
73460 atomic64_t *v = (atomic64_t *)l;
73461@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
73462
73463 typedef atomic_t atomic_long_t;
73464
73465+#ifdef CONFIG_PAX_REFCOUNT
73466+typedef atomic_unchecked_t atomic_long_unchecked_t;
73467+#else
73468+typedef atomic_t atomic_long_unchecked_t;
73469+#endif
73470+
73471 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
73472 static inline long atomic_long_read(atomic_long_t *l)
73473 {
73474@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
73475 return (long)atomic_read(v);
73476 }
73477
73478+#ifdef CONFIG_PAX_REFCOUNT
73479+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
73480+{
73481+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
73482+
73483+ return (long)atomic_read_unchecked(v);
73484+}
73485+#endif
73486+
73487 static inline void atomic_long_set(atomic_long_t *l, long i)
73488 {
73489 atomic_t *v = (atomic_t *)l;
73490@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
73491 atomic_set(v, i);
73492 }
73493
73494+#ifdef CONFIG_PAX_REFCOUNT
73495+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
73496+{
73497+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
73498+
73499+ atomic_set_unchecked(v, i);
73500+}
73501+#endif
73502+
73503 static inline void atomic_long_inc(atomic_long_t *l)
73504 {
73505 atomic_t *v = (atomic_t *)l;
73506@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
73507 atomic_inc(v);
73508 }
73509
73510+#ifdef CONFIG_PAX_REFCOUNT
73511+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
73512+{
73513+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
73514+
73515+ atomic_inc_unchecked(v);
73516+}
73517+#endif
73518+
73519 static inline void atomic_long_dec(atomic_long_t *l)
73520 {
73521 atomic_t *v = (atomic_t *)l;
73522@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
73523 atomic_dec(v);
73524 }
73525
73526+#ifdef CONFIG_PAX_REFCOUNT
73527+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
73528+{
73529+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
73530+
73531+ atomic_dec_unchecked(v);
73532+}
73533+#endif
73534+
73535 static inline void atomic_long_add(long i, atomic_long_t *l)
73536 {
73537 atomic_t *v = (atomic_t *)l;
73538@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
73539 atomic_add(i, v);
73540 }
73541
73542+#ifdef CONFIG_PAX_REFCOUNT
73543+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
73544+{
73545+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
73546+
73547+ atomic_add_unchecked(i, v);
73548+}
73549+#endif
73550+
73551 static inline void atomic_long_sub(long i, atomic_long_t *l)
73552 {
73553 atomic_t *v = (atomic_t *)l;
73554@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
73555 atomic_sub(i, v);
73556 }
73557
73558+#ifdef CONFIG_PAX_REFCOUNT
73559+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
73560+{
73561+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
73562+
73563+ atomic_sub_unchecked(i, v);
73564+}
73565+#endif
73566+
73567 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
73568 {
73569 atomic_t *v = (atomic_t *)l;
73570@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
73571 return (long)atomic_add_return(i, v);
73572 }
73573
73574+#ifdef CONFIG_PAX_REFCOUNT
73575+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
73576+{
73577+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
73578+
73579+ return (long)atomic_add_return_unchecked(i, v);
73580+}
73581+
73582+#endif
73583+
73584 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
73585 {
73586 atomic_t *v = (atomic_t *)l;
73587@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
73588 return (long)atomic_inc_return(v);
73589 }
73590
73591+#ifdef CONFIG_PAX_REFCOUNT
73592+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
73593+{
73594+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
73595+
73596+ return (long)atomic_inc_return_unchecked(v);
73597+}
73598+#endif
73599+
73600 static inline long atomic_long_dec_return(atomic_long_t *l)
73601 {
73602 atomic_t *v = (atomic_t *)l;
73603@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
73604
73605 #endif /* BITS_PER_LONG == 64 */
73606
73607+#ifdef CONFIG_PAX_REFCOUNT
73608+static inline void pax_refcount_needs_these_functions(void)
73609+{
73610+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
73611+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
73612+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
73613+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
73614+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
73615+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
73616+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
73617+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
73618+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
73619+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
73620+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
73621+#ifdef CONFIG_X86
73622+ atomic_clear_mask_unchecked(0, NULL);
73623+ atomic_set_mask_unchecked(0, NULL);
73624+#endif
73625+
73626+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
73627+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
73628+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
73629+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
73630+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
73631+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
73632+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
73633+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
73634+}
73635+#else
73636+#define atomic_read_unchecked(v) atomic_read(v)
73637+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
73638+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
73639+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
73640+#define atomic_inc_unchecked(v) atomic_inc(v)
73641+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
73642+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
73643+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
73644+#define atomic_dec_unchecked(v) atomic_dec(v)
73645+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
73646+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
73647+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
73648+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
73649+
73650+#define atomic_long_read_unchecked(v) atomic_long_read(v)
73651+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
73652+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
73653+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
73654+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
73655+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
73656+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
73657+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
73658+#endif
73659+
73660 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
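The point of the parallel *_unchecked API: under CONFIG_PAX_REFCOUNT, plain atomic_long_t operations gain overflow detection (an overflowing reference count is a use-after-free primitive), while counters that may legitimately wrap — statistics, sequence numbers — are declared atomic_long_unchecked_t and opt out. Without the option, the #else block above maps both spellings onto the same stock operations. An illustrative kernel-style declaration pair (a sketch of intent, not a complete translation unit):

/* reference count: would trap on overflow under PAX_REFCOUNT */
static atomic_long_t obj_refs = ATOMIC_LONG_INIT(1);

/* statistics counter: wrapping is harmless, so it opts out */
static atomic_long_unchecked_t rx_packets = ATOMIC_LONG_INIT(0);

static void demo(void)
{
	atomic_long_inc(&obj_refs);		/* overflow-checked */
	atomic_long_inc_unchecked(&rx_packets);	/* never checked */
}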
73661diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
73662index 33bd2de..f31bff97 100644
73663--- a/include/asm-generic/atomic.h
73664+++ b/include/asm-generic/atomic.h
73665@@ -153,7 +153,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
73666 * Atomically clears the bits set in @mask from @v
73667 */
73668 #ifndef atomic_clear_mask
73669-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
73670+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
73671 {
73672 unsigned long flags;
73673
73674diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
73675index b18ce4f..2ee2843 100644
73676--- a/include/asm-generic/atomic64.h
73677+++ b/include/asm-generic/atomic64.h
73678@@ -16,6 +16,8 @@ typedef struct {
73679 long long counter;
73680 } atomic64_t;
73681
73682+typedef atomic64_t atomic64_unchecked_t;
73683+
73684 #define ATOMIC64_INIT(i) { (i) }
73685
73686 extern long long atomic64_read(const atomic64_t *v);
73687@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
73688 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
73689 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
73690
73691+#define atomic64_read_unchecked(v) atomic64_read(v)
73692+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
73693+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
73694+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
73695+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
73696+#define atomic64_inc_unchecked(v) atomic64_inc(v)
73697+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
73698+#define atomic64_dec_unchecked(v) atomic64_dec(v)
73699+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
73700+
73701 #endif /* _ASM_GENERIC_ATOMIC64_H */
73702diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
73703index a60a7cc..0fe12f2 100644
73704--- a/include/asm-generic/bitops/__fls.h
73705+++ b/include/asm-generic/bitops/__fls.h
73706@@ -9,7 +9,7 @@
73707 *
73708 * Undefined if no set bit exists, so code should check against 0 first.
73709 */
73710-static __always_inline unsigned long __fls(unsigned long word)
73711+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
73712 {
73713 int num = BITS_PER_LONG - 1;
73714
73715diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
73716index 0576d1f..dad6c71 100644
73717--- a/include/asm-generic/bitops/fls.h
73718+++ b/include/asm-generic/bitops/fls.h
73719@@ -9,7 +9,7 @@
73720 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
73721 */
73722
73723-static __always_inline int fls(int x)
73724+static __always_inline int __intentional_overflow(-1) fls(int x)
73725 {
73726 int r = 32;
73727
73728diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
73729index b097cf8..3d40e14 100644
73730--- a/include/asm-generic/bitops/fls64.h
73731+++ b/include/asm-generic/bitops/fls64.h
73732@@ -15,7 +15,7 @@
73733 * at position 64.
73734 */
73735 #if BITS_PER_LONG == 32
73736-static __always_inline int fls64(__u64 x)
73737+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
73738 {
73739 __u32 h = x >> 32;
73740 if (h)
73741@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
73742 return fls(x);
73743 }
73744 #elif BITS_PER_LONG == 64
73745-static __always_inline int fls64(__u64 x)
73746+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
73747 {
73748 if (x == 0)
73749 return 0;
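The three fls hunks above only annotate return values for the size_overflow plugin; for reference, a standalone C sketch of the generic find-last-set algorithm these headers implement (my_fls/my_fls64 are local stand-ins):

#include <stdio.h>
#include <stdint.h>

/* fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32 -- mirrors the generic helper */
static int my_fls(uint32_t x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) { x <<= 16; r -= 16; }
	if (!(x & 0xff000000u)) { x <<= 8;  r -= 8; }
	if (!(x & 0xf0000000u)) { x <<= 4;  r -= 4; }
	if (!(x & 0xc0000000u)) { x <<= 2;  r -= 2; }
	if (!(x & 0x80000000u)) { x <<= 1;  r -= 1; }
	return r;
}

/* the 32-bit fls64 path shown in the hunk: test the high word first */
static int my_fls64(uint64_t x)
{
	uint32_t h = x >> 32;

	return h ? my_fls(h) + 32 : my_fls((uint32_t)x);
}

int main(void)
{
	printf("%d %d %d\n", my_fls(0), my_fls(1), my_fls(0x80000000u)); /* 0 1 32 */
	printf("%d\n", my_fls64(1ULL << 40));                            /* 41 */
	return 0;
}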
73750diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
73751index 1bfcfe5..e04c5c9 100644
73752--- a/include/asm-generic/cache.h
73753+++ b/include/asm-generic/cache.h
73754@@ -6,7 +6,7 @@
73755 * cache lines need to provide their own cache.h.
73756 */
73757
73758-#define L1_CACHE_SHIFT 5
73759-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
73760+#define L1_CACHE_SHIFT 5UL
73761+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
73762
73763 #endif /* __ASM_GENERIC_CACHE_H */
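A hedged illustration of why the patch adds the UL suffix: L1_CACHE_BYTES is routinely used in alignment masks, and making it an unsigned long constant keeps that arithmetic at native word width. Sketch under that assumption (cache_align_up is a hypothetical helper):

#include <stdio.h>

#define L1_CACHE_SHIFT 5UL
#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)

/* round an address up to the next cache line boundary */
static unsigned long cache_align_up(unsigned long addr)
{
	return (addr + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
}

int main(void)
{
	printf("%lu\n", cache_align_up(33)); /* 64 */
	printf("%lu\n", cache_align_up(64)); /* 64 */
	return 0;
}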
73764diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
73765index 0d68a1e..b74a761 100644
73766--- a/include/asm-generic/emergency-restart.h
73767+++ b/include/asm-generic/emergency-restart.h
73768@@ -1,7 +1,7 @@
73769 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
73770 #define _ASM_GENERIC_EMERGENCY_RESTART_H
73771
73772-static inline void machine_emergency_restart(void)
73773+static inline __noreturn void machine_emergency_restart(void)
73774 {
73775 machine_restart(NULL);
73776 }
73777diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
73778index 90f99c7..00ce236 100644
73779--- a/include/asm-generic/kmap_types.h
73780+++ b/include/asm-generic/kmap_types.h
73781@@ -2,9 +2,9 @@
73782 #define _ASM_GENERIC_KMAP_TYPES_H
73783
73784 #ifdef __WITH_KM_FENCE
73785-# define KM_TYPE_NR 41
73786+# define KM_TYPE_NR 42
73787 #else
73788-# define KM_TYPE_NR 20
73789+# define KM_TYPE_NR 21
73790 #endif
73791
73792 #endif
73793diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
73794index 9ceb03b..62b0b8f 100644
73795--- a/include/asm-generic/local.h
73796+++ b/include/asm-generic/local.h
73797@@ -23,24 +23,37 @@ typedef struct
73798 atomic_long_t a;
73799 } local_t;
73800
73801+typedef struct {
73802+ atomic_long_unchecked_t a;
73803+} local_unchecked_t;
73804+
73805 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
73806
73807 #define local_read(l) atomic_long_read(&(l)->a)
73808+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
73809 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
73810+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
73811 #define local_inc(l) atomic_long_inc(&(l)->a)
73812+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
73813 #define local_dec(l) atomic_long_dec(&(l)->a)
73814+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
73815 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
73816+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
73817 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
73818+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
73819
73820 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
73821 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
73822 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
73823 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
73824 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
73825+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
73826 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
73827 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
73828+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
73829
73830 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
73831+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
73832 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
73833 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
73834 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
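The point of adding a separate local_unchecked_t rather than a #define alias is type safety: checked and unchecked counters cannot be mixed by accident. A toy model (plain longs instead of atomics, hypothetical names) showing how distinct struct types draw a compiler diagnostic on misuse:

#include <stdio.h>

typedef struct { long a; } toy_local_t;
typedef struct { long a; } toy_local_unchecked_t; /* deliberately distinct type */

static void toy_local_inc(toy_local_t *l)                      { l->a += 1; }
static long toy_local_read(const toy_local_t *l)               { return l->a; }
static void toy_local_inc_unchecked(toy_local_unchecked_t *l)  { l->a += 1; }
static long toy_local_read_unchecked(const toy_local_unchecked_t *l) { return l->a; }

int main(void)
{
	toy_local_t ref = { 0 };
	toy_local_unchecked_t stat = { 0 };

	toy_local_inc(&ref);
	toy_local_inc_unchecked(&stat);
	/* toy_local_inc(&stat); */ /* incompatible pointer type: diagnosed */
	printf("%ld %ld\n", toy_local_read(&ref), toy_local_read_unchecked(&stat)); /* 1 1 */
	return 0;
}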
73835diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
73836index 725612b..9cc513a 100644
73837--- a/include/asm-generic/pgtable-nopmd.h
73838+++ b/include/asm-generic/pgtable-nopmd.h
73839@@ -1,14 +1,19 @@
73840 #ifndef _PGTABLE_NOPMD_H
73841 #define _PGTABLE_NOPMD_H
73842
73843-#ifndef __ASSEMBLY__
73844-
73845 #include <asm-generic/pgtable-nopud.h>
73846
73847-struct mm_struct;
73848-
73849 #define __PAGETABLE_PMD_FOLDED
73850
73851+#define PMD_SHIFT PUD_SHIFT
73852+#define PTRS_PER_PMD 1
73853+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
73854+#define PMD_MASK (~(PMD_SIZE-1))
73855+
73856+#ifndef __ASSEMBLY__
73857+
73858+struct mm_struct;
73859+
73860 /*
73861 * Having the pmd type consist of a pud gets the size right, and allows
73862 * us to conceptually access the pud entry that this pmd is folded into
73863@@ -16,11 +21,6 @@ struct mm_struct;
73864 */
73865 typedef struct { pud_t pud; } pmd_t;
73866
73867-#define PMD_SHIFT PUD_SHIFT
73868-#define PTRS_PER_PMD 1
73869-#define PMD_SIZE (1UL << PMD_SHIFT)
73870-#define PMD_MASK (~(PMD_SIZE-1))
73871-
73872 /*
73873 * The "pud_xxx()" functions here are trivial for a folded two-level
73874 * setup: the pmd is never bad, and a pmd always exists (as it's folded
73875diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
73876index 810431d..0ec4804f 100644
73877--- a/include/asm-generic/pgtable-nopud.h
73878+++ b/include/asm-generic/pgtable-nopud.h
73879@@ -1,10 +1,15 @@
73880 #ifndef _PGTABLE_NOPUD_H
73881 #define _PGTABLE_NOPUD_H
73882
73883-#ifndef __ASSEMBLY__
73884-
73885 #define __PAGETABLE_PUD_FOLDED
73886
73887+#define PUD_SHIFT PGDIR_SHIFT
73888+#define PTRS_PER_PUD 1
73889+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
73890+#define PUD_MASK (~(PUD_SIZE-1))
73891+
73892+#ifndef __ASSEMBLY__
73893+
73894 /*
73895 * Having the pud type consist of a pgd gets the size right, and allows
73896 * us to conceptually access the pgd entry that this pud is folded into
73897@@ -12,11 +17,6 @@
73898 */
73899 typedef struct { pgd_t pgd; } pud_t;
73900
73901-#define PUD_SHIFT PGDIR_SHIFT
73902-#define PTRS_PER_PUD 1
73903-#define PUD_SIZE (1UL << PUD_SHIFT)
73904-#define PUD_MASK (~(PUD_SIZE-1))
73905-
73906 /*
73907 * The "pgd_xxx()" functions here are trivial for a folded two-level
73908 * setup: the pud is never bad, and a pud always exists (as it's folded
73909@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
73910 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
73911
73912 #define pgd_populate(mm, pgd, pud) do { } while (0)
73913+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
73914 /*
73915 * (puds are folded into pgds so this doesn't get actually called,
73916 * but the define is needed for a generic inline function.)
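Both hunks above hoist the folded-level constants (PMD_SHIFT, PUD_SHIFT, the sizes and masks) out of #ifndef __ASSEMBLY__ so assembly code can use them too, switching to _AC(1,UL) which expands correctly in both contexts. A userspace sketch of the folding arithmetic itself, with illustrative constants not taken from any real architecture:

#include <stdio.h>

/* toy layout with the PUD and PMD folded away: each folded level holds one
 * entry, so its SHIFT collapses onto the level above it */
#define PAGE_SHIFT  12
#define PGDIR_SHIFT 30
#define PUD_SHIFT   PGDIR_SHIFT /* folded: PTRS_PER_PUD == 1 */
#define PMD_SHIFT   PUD_SHIFT   /* folded: PTRS_PER_PMD == 1 */
#define PMD_SIZE    (1UL << PMD_SHIFT)
#define PMD_MASK    (~(PMD_SIZE - 1))

int main(void)
{
	unsigned long addr = 0x7f0012345678UL;

	printf("pmd-aligned base: %#lx\n", addr & PMD_MASK); /* clears low 30 bits */
	printf("PMD_SIZE: %lu MiB\n", PMD_SIZE >> 20);       /* 1024 MiB */
	return 0;
}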
73917diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
73918index f330d28..d93bba9 100644
73919--- a/include/asm-generic/pgtable.h
73920+++ b/include/asm-generic/pgtable.h
73921@@ -599,11 +599,10 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
73922 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
73923 barrier();
73924 #endif
73925- if (pmd_none(pmdval))
73926+ if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
73927 return 1;
73928 if (unlikely(pmd_bad(pmdval))) {
73929- if (!pmd_trans_huge(pmdval))
73930- pmd_clear_bad(pmd);
73931+ pmd_clear_bad(pmd);
73932 return 1;
73933 }
73934 return 0;
73935@@ -737,6 +736,22 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
73936 }
73937 #endif /* CONFIG_NUMA_BALANCING */
73938
73939+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
73940+#ifdef CONFIG_PAX_KERNEXEC
73941+#error KERNEXEC requires pax_open_kernel
73942+#else
73943+static inline unsigned long pax_open_kernel(void) { return 0; }
73944+#endif
73945+#endif
73946+
73947+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
73948+#ifdef CONFIG_PAX_KERNEXEC
73949+#error KERNEXEC requires pax_close_kernel
73950+#else
73951+static inline unsigned long pax_close_kernel(void) { return 0; }
73952+#endif
73953+#endif
73954+
73955 #endif /* CONFIG_MMU */
73956
73957 #endif /* !__ASSEMBLY__ */
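The pax_open_kernel/pax_close_kernel hunk uses a guard-or-stub idiom: if the hardening option is enabled but the architecture never supplied the hook, the build fails loudly via #error; otherwise a no-op stub is provided. A minimal sketch of the idiom with hypothetical names standing in for the PaX hooks:

#include <stdio.h>

#ifndef HAVE_ARCH_OPEN_KERNEL
# ifdef CONFIG_HARDENING_FEATURE
#  error "feature requires an arch-provided open_kernel()"
# else
/* feature off: stubs keep callers compiling with zero overhead */
static inline unsigned long open_kernel(void)  { return 0; }
static inline unsigned long close_kernel(void) { return 0; }
# endif
#endif

int main(void)
{
	unsigned long flags = open_kernel(); /* no-op on this configuration */

	/* ... briefly modify otherwise read-only data here ... */
	close_kernel();
	(void)flags;
	return 0;
}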
73958diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
73959index dc1269c..48a4f51 100644
73960--- a/include/asm-generic/uaccess.h
73961+++ b/include/asm-generic/uaccess.h
73962@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
73963 return __clear_user(to, n);
73964 }
73965
73966+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
73967+#ifdef CONFIG_PAX_MEMORY_UDEREF
73968+#error UDEREF requires pax_open_userland
73969+#else
73970+static inline unsigned long pax_open_userland(void) { return 0; }
73971+#endif
73972+#endif
73973+
73974+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
73975+#ifdef CONFIG_PAX_MEMORY_UDEREF
73976+#error UDEREF requires pax_close_userland
73977+#else
73978+static inline unsigned long pax_close_userland(void) { return 0; }
73979+#endif
73980+#endif
73981+
73982 #endif /* __ASM_GENERIC_UACCESS_H */
73983diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
73984index 83e2c31..eeb4a04 100644
73985--- a/include/asm-generic/vmlinux.lds.h
73986+++ b/include/asm-generic/vmlinux.lds.h
73987@@ -232,6 +232,7 @@
73988 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
73989 VMLINUX_SYMBOL(__start_rodata) = .; \
73990 *(.rodata) *(.rodata.*) \
73991+ *(.data..read_only) \
73992 *(__vermagic) /* Kernel version magic */ \
73993 . = ALIGN(8); \
73994 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
73995@@ -715,17 +716,18 @@
73996 * section in the linker script will go there too. @phdr should have
73997 * a leading colon.
73998 *
73999- * Note that this macros defines __per_cpu_load as an absolute symbol.
74000+ * Note that this macro defines per_cpu_load as an absolute symbol.
74001 * If there is no need to put the percpu section at a predetermined
74002 * address, use PERCPU_SECTION.
74003 */
74004 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
74005- VMLINUX_SYMBOL(__per_cpu_load) = .; \
74006- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
74007+ per_cpu_load = .; \
74008+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
74009 - LOAD_OFFSET) { \
74010+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
74011 PERCPU_INPUT(cacheline) \
74012 } phdr \
74013- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
74014+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
74015
74016 /**
74017 * PERCPU_SECTION - define output section for percpu area, simple version
74018diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
74019index 418d270..bfd2794 100644
74020--- a/include/crypto/algapi.h
74021+++ b/include/crypto/algapi.h
74022@@ -34,7 +34,7 @@ struct crypto_type {
74023 unsigned int maskclear;
74024 unsigned int maskset;
74025 unsigned int tfmsize;
74026-};
74027+} __do_const;
74028
74029 struct crypto_instance {
74030 struct crypto_alg alg;
74031diff --git a/include/drm/drmP.h b/include/drm/drmP.h
74032index b46fb45..b30d6d5 100644
74033--- a/include/drm/drmP.h
74034+++ b/include/drm/drmP.h
74035@@ -66,6 +66,7 @@
74036 #include <linux/workqueue.h>
74037 #include <linux/poll.h>
74038 #include <asm/pgalloc.h>
74039+#include <asm/local.h>
74040 #include <drm/drm.h>
74041 #include <drm/drm_sarea.h>
74042 #include <drm/drm_vma_manager.h>
74043@@ -277,10 +278,12 @@ do { \
74044 * \param cmd command.
74045 * \param arg argument.
74046 */
74047-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
74048+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
74049+ struct drm_file *file_priv);
74050+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
74051 struct drm_file *file_priv);
74052
74053-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
74054+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
74055 unsigned long arg);
74056
74057 #define DRM_IOCTL_NR(n) _IOC_NR(n)
74058@@ -296,10 +299,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
74059 struct drm_ioctl_desc {
74060 unsigned int cmd;
74061 int flags;
74062- drm_ioctl_t *func;
74063+ drm_ioctl_t func;
74064 unsigned int cmd_drv;
74065 const char *name;
74066-};
74067+} __do_const;
74068
74069 /**
74070 * Creates a driver or general drm_ioctl_desc array entry for the given
74071@@ -1027,7 +1030,7 @@ struct drm_info_list {
74072 int (*show)(struct seq_file*, void*); /** show callback */
74073 u32 driver_features; /**< Required driver features for this entry */
74074 void *data;
74075-};
74076+} __do_const;
74077
74078 /**
74079 * debugfs node structure. This structure represents a debugfs file.
74080@@ -1098,7 +1101,7 @@ struct drm_device {
74081
74082 /** \name Usage Counters */
74083 /*@{ */
74084- int open_count; /**< Outstanding files open */
74085+ local_t open_count; /**< Outstanding files open */
74086 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
74087 atomic_t vma_count; /**< Outstanding vma areas open */
74088 int buf_use; /**< Buffers in use -- cannot alloc */
74089@@ -1109,7 +1112,7 @@ struct drm_device {
74090 /*@{ */
74091 unsigned long counters;
74092 enum drm_stat_type types[15];
74093- atomic_t counts[15];
74094+ atomic_unchecked_t counts[15];
74095 /*@} */
74096
74097 struct list_head filelist;
74098diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
74099index f43d556..94d9343 100644
74100--- a/include/drm/drm_crtc_helper.h
74101+++ b/include/drm/drm_crtc_helper.h
74102@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
74103 struct drm_connector *connector);
74104 /* disable encoder when not in use - more explicit than dpms off */
74105 void (*disable)(struct drm_encoder *encoder);
74106-};
74107+} __no_const;
74108
74109 /**
74110 * drm_connector_helper_funcs - helper operations for connectors
74111diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
74112index 8a10f5c..5de7f5c 100644
74113--- a/include/drm/i915_pciids.h
74114+++ b/include/drm/i915_pciids.h
74115@@ -37,7 +37,7 @@
74116 */
74117 #define INTEL_VGA_DEVICE(id, info) { \
74118 0x8086, id, \
74119- ~0, ~0, \
74120+ PCI_ANY_ID, PCI_ANY_ID, \
74121 0x030000, 0xff0000, \
74122 (unsigned long) info }
74123
74124diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
74125index 72dcbe8..8db58d7 100644
74126--- a/include/drm/ttm/ttm_memory.h
74127+++ b/include/drm/ttm/ttm_memory.h
74128@@ -48,7 +48,7 @@
74129
74130 struct ttm_mem_shrink {
74131 int (*do_shrink) (struct ttm_mem_shrink *);
74132-};
74133+} __no_const;
74134
74135 /**
74136 * struct ttm_mem_global - Global memory accounting structure.
74137diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
74138index 4b840e8..155d235 100644
74139--- a/include/keys/asymmetric-subtype.h
74140+++ b/include/keys/asymmetric-subtype.h
74141@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
74142 /* Verify the signature on a key of this subtype (optional) */
74143 int (*verify_signature)(const struct key *key,
74144 const struct public_key_signature *sig);
74145-};
74146+} __do_const;
74147
74148 /**
74149 * asymmetric_key_subtype - Get the subtype from an asymmetric key
74150diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
74151index c1da539..1dcec55 100644
74152--- a/include/linux/atmdev.h
74153+++ b/include/linux/atmdev.h
74154@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
74155 #endif
74156
74157 struct k_atm_aal_stats {
74158-#define __HANDLE_ITEM(i) atomic_t i
74159+#define __HANDLE_ITEM(i) atomic_unchecked_t i
74160 __AAL_STAT_ITEMS
74161 #undef __HANDLE_ITEM
74162 };
74163@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
74164 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
74165 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
74166 struct module *owner;
74167-};
74168+} __do_const;
74169
74170 struct atmphy_ops {
74171 int (*start)(struct atm_dev *dev);
74172diff --git a/include/linux/audit.h b/include/linux/audit.h
74173index 729a4d1..9b304ae 100644
74174--- a/include/linux/audit.h
74175+++ b/include/linux/audit.h
74176@@ -193,7 +193,7 @@ static inline void audit_ptrace(struct task_struct *t)
74177 extern unsigned int audit_serial(void);
74178 extern int auditsc_get_stamp(struct audit_context *ctx,
74179 struct timespec *t, unsigned int *serial);
74180-extern int audit_set_loginuid(kuid_t loginuid);
74181+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
74182
74183 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
74184 {
74185diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h
74186index 669fef5..3e0fbe4 100644
74187--- a/include/linux/auxvec.h
74188+++ b/include/linux/auxvec.h
74189@@ -3,6 +3,6 @@
74190
74191 #include <uapi/linux/auxvec.h>
74192
74193-#define AT_VECTOR_SIZE_BASE 19 /* NEW_AUX_ENT entries in auxiliary table */
74194+#define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */
74195 /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */
74196 #endif /* _LINUX_AUXVEC_H */
74197diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
74198index 7554fd4..0f86379 100644
74199--- a/include/linux/binfmts.h
74200+++ b/include/linux/binfmts.h
74201@@ -73,8 +73,10 @@ struct linux_binfmt {
74202 int (*load_binary)(struct linux_binprm *);
74203 int (*load_shlib)(struct file *);
74204 int (*core_dump)(struct coredump_params *cprm);
74205+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
74206+ void (*handle_mmap)(struct file *);
74207 unsigned long min_coredump; /* minimal dump size */
74208-};
74209+} __do_const;
74210
74211 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
74212
74213diff --git a/include/linux/bitops.h b/include/linux/bitops.h
74214index a3b6b82..2a7d758 100644
74215--- a/include/linux/bitops.h
74216+++ b/include/linux/bitops.h
74217@@ -91,7 +91,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
74218 * @word: value to rotate
74219 * @shift: bits to roll
74220 */
74221-static inline __u32 rol32(__u32 word, unsigned int shift)
74222+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
74223 {
74224 return (word << shift) | (word >> (32 - shift));
74225 }
74226@@ -101,7 +101,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
74227 * @word: value to rotate
74228 * @shift: bits to roll
74229 */
74230-static inline __u32 ror32(__u32 word, unsigned int shift)
74231+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
74232 {
74233 return (word >> shift) | (word << (32 - shift));
74234 }
74235@@ -157,7 +157,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
74236 return (__s32)(value << shift) >> shift;
74237 }
74238
74239-static inline unsigned fls_long(unsigned long l)
74240+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
74241 {
74242 if (sizeof(l) == 4)
74243 return fls(l);
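rol32/ror32 wrap by construction, which is exactly what the __intentional_overflow(-1) annotation communicates to the size_overflow plugin. A self-contained copy of the two rotations (valid for 0 < shift < 32):

#include <stdio.h>
#include <stdint.h>

static uint32_t rol32(uint32_t word, unsigned int shift)
{
	return (word << shift) | (word >> (32 - shift)); /* bits wrap on purpose */
}

static uint32_t ror32(uint32_t word, unsigned int shift)
{
	return (word >> shift) | (word << (32 - shift));
}

int main(void)
{
	uint32_t x = 0x80000001u;

	printf("%#x\n", rol32(x, 1));           /* 0x3 */
	printf("%#x\n", ror32(rol32(x, 7), 7)); /* round trip: 0x80000001 */
	return 0;
}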
74244diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
74245index 0e6f765..885bb2b 100644
74246--- a/include/linux/blkdev.h
74247+++ b/include/linux/blkdev.h
74248@@ -1537,7 +1537,7 @@ struct block_device_operations {
74249 /* this callback is with swap_lock and sometimes page table lock held */
74250 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
74251 struct module *owner;
74252-};
74253+} __do_const;
74254
74255 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
74256 unsigned long);
74257diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
74258index 7c2e030..b72475d 100644
74259--- a/include/linux/blktrace_api.h
74260+++ b/include/linux/blktrace_api.h
74261@@ -23,7 +23,7 @@ struct blk_trace {
74262 struct dentry *dir;
74263 struct dentry *dropped_file;
74264 struct dentry *msg_file;
74265- atomic_t dropped;
74266+ atomic_unchecked_t dropped;
74267 };
74268
74269 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
74270diff --git a/include/linux/cache.h b/include/linux/cache.h
74271index 4c57065..40346da 100644
74272--- a/include/linux/cache.h
74273+++ b/include/linux/cache.h
74274@@ -16,6 +16,14 @@
74275 #define __read_mostly
74276 #endif
74277
74278+#ifndef __read_only
74279+#ifdef CONFIG_PAX_KERNEXEC
74280+#error KERNEXEC requires __read_only
74281+#else
74282+#define __read_only __read_mostly
74283+#endif
74284+#endif
74285+
74286 #ifndef ____cacheline_aligned
74287 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
74288 #endif
74289diff --git a/include/linux/capability.h b/include/linux/capability.h
74290index a6ee1f9..e1ca49d 100644
74291--- a/include/linux/capability.h
74292+++ b/include/linux/capability.h
74293@@ -212,8 +212,13 @@ extern bool capable(int cap);
74294 extern bool ns_capable(struct user_namespace *ns, int cap);
74295 extern bool inode_capable(const struct inode *inode, int cap);
74296 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
74297+extern bool capable_nolog(int cap);
74298+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
74299+extern bool inode_capable_nolog(const struct inode *inode, int cap);
74300
74301 /* audit system wants to get cap info from files as well */
74302 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
74303
74304+extern int is_privileged_binary(const struct dentry *dentry);
74305+
74306 #endif /* !_LINUX_CAPABILITY_H */
74307diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
74308index 8609d57..86e4d79 100644
74309--- a/include/linux/cdrom.h
74310+++ b/include/linux/cdrom.h
74311@@ -87,7 +87,6 @@ struct cdrom_device_ops {
74312
74313 /* driver specifications */
74314 const int capability; /* capability flags */
74315- int n_minors; /* number of active minor devices */
74316 /* handle uniform packets for scsi type devices (scsi,atapi) */
74317 int (*generic_packet) (struct cdrom_device_info *,
74318 struct packet_command *);
74319diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
74320index 4ce9056..86caac6 100644
74321--- a/include/linux/cleancache.h
74322+++ b/include/linux/cleancache.h
74323@@ -31,7 +31,7 @@ struct cleancache_ops {
74324 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
74325 void (*invalidate_inode)(int, struct cleancache_filekey);
74326 void (*invalidate_fs)(int);
74327-};
74328+} __no_const;
74329
74330 extern struct cleancache_ops *
74331 cleancache_register_ops(struct cleancache_ops *ops);
74332diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
74333index 73bdb69..d66d47a 100644
74334--- a/include/linux/clk-provider.h
74335+++ b/include/linux/clk-provider.h
74336@@ -141,6 +141,7 @@ struct clk_ops {
74337 unsigned long);
74338 void (*init)(struct clk_hw *hw);
74339 };
74340+typedef struct clk_ops __no_const clk_ops_no_const;
74341
74342 /**
74343 * struct clk_init_data - holds init data that's common to all clocks and is
74344diff --git a/include/linux/compat.h b/include/linux/compat.h
74345index 345da00..b6eff26 100644
74346--- a/include/linux/compat.h
74347+++ b/include/linux/compat.h
74348@@ -313,7 +313,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
74349 compat_size_t __user *len_ptr);
74350
74351 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
74352-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
74353+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
74354 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
74355 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
74356 compat_ssize_t msgsz, int msgflg);
74357@@ -420,7 +420,7 @@ extern int compat_ptrace_request(struct task_struct *child,
74358 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
74359 compat_ulong_t addr, compat_ulong_t data);
74360 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
74361- compat_long_t addr, compat_long_t data);
74362+ compat_ulong_t addr, compat_ulong_t data);
74363
74364 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, size_t);
74365 /*
74366diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
74367index ded4299..da50e3b 100644
74368--- a/include/linux/compiler-gcc4.h
74369+++ b/include/linux/compiler-gcc4.h
74370@@ -39,9 +39,29 @@
74371 # define __compiletime_warning(message) __attribute__((warning(message)))
74372 # define __compiletime_error(message) __attribute__((error(message)))
74373 #endif /* __CHECKER__ */
74374+
74375+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
74376+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
74377+#define __bos0(ptr) __bos((ptr), 0)
74378+#define __bos1(ptr) __bos((ptr), 1)
74379 #endif /* GCC_VERSION >= 40300 */
74380
74381 #if GCC_VERSION >= 40500
74382+
74383+#ifdef CONSTIFY_PLUGIN
74384+#define __no_const __attribute__((no_const))
74385+#define __do_const __attribute__((do_const))
74386+#endif
74387+
74388+#ifdef SIZE_OVERFLOW_PLUGIN
74389+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
74390+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
74391+#endif
74392+
74393+#ifdef LATENT_ENTROPY_PLUGIN
74394+#define __latent_entropy __attribute__((latent_entropy))
74395+#endif
74396+
74397 /*
74398 * Mark a position in code as unreachable. This can be used to
74399 * suppress control flow warnings after asm blocks that transfer
74400diff --git a/include/linux/compiler.h b/include/linux/compiler.h
74401index 92669cd..1771a15 100644
74402--- a/include/linux/compiler.h
74403+++ b/include/linux/compiler.h
74404@@ -5,11 +5,14 @@
74405
74406 #ifdef __CHECKER__
74407 # define __user __attribute__((noderef, address_space(1)))
74408+# define __force_user __force __user
74409 # define __kernel __attribute__((address_space(0)))
74410+# define __force_kernel __force __kernel
74411 # define __safe __attribute__((safe))
74412 # define __force __attribute__((force))
74413 # define __nocast __attribute__((nocast))
74414 # define __iomem __attribute__((noderef, address_space(2)))
74415+# define __force_iomem __force __iomem
74416 # define __must_hold(x) __attribute__((context(x,1,1)))
74417 # define __acquires(x) __attribute__((context(x,0,1)))
74418 # define __releases(x) __attribute__((context(x,1,0)))
74419@@ -17,20 +20,37 @@
74420 # define __release(x) __context__(x,-1)
74421 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
74422 # define __percpu __attribute__((noderef, address_space(3)))
74423+# define __force_percpu __force __percpu
74424 #ifdef CONFIG_SPARSE_RCU_POINTER
74425 # define __rcu __attribute__((noderef, address_space(4)))
74426+# define __force_rcu __force __rcu
74427 #else
74428 # define __rcu
74429+# define __force_rcu
74430 #endif
74431 extern void __chk_user_ptr(const volatile void __user *);
74432 extern void __chk_io_ptr(const volatile void __iomem *);
74433 #else
74434-# define __user
74435-# define __kernel
74436+# ifdef CHECKER_PLUGIN
74437+//# define __user
74438+//# define __force_user
74439+//# define __kernel
74440+//# define __force_kernel
74441+# else
74442+# ifdef STRUCTLEAK_PLUGIN
74443+# define __user __attribute__((user))
74444+# else
74445+# define __user
74446+# endif
74447+# define __force_user
74448+# define __kernel
74449+# define __force_kernel
74450+# endif
74451 # define __safe
74452 # define __force
74453 # define __nocast
74454 # define __iomem
74455+# define __force_iomem
74456 # define __chk_user_ptr(x) (void)0
74457 # define __chk_io_ptr(x) (void)0
74458 # define __builtin_warning(x, y...) (1)
74459@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
74460 # define __release(x) (void)0
74461 # define __cond_lock(x,c) (c)
74462 # define __percpu
74463+# define __force_percpu
74464 # define __rcu
74465+# define __force_rcu
74466 #endif
74467
74468 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
74469@@ -275,6 +297,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
74470 # define __attribute_const__ /* unimplemented */
74471 #endif
74472
74473+#ifndef __no_const
74474+# define __no_const
74475+#endif
74476+
74477+#ifndef __do_const
74478+# define __do_const
74479+#endif
74480+
74481+#ifndef __size_overflow
74482+# define __size_overflow(...)
74483+#endif
74484+
74485+#ifndef __intentional_overflow
74486+# define __intentional_overflow(...)
74487+#endif
74488+
74489+#ifndef __latent_entropy
74490+# define __latent_entropy
74491+#endif
74492+
74493 /*
74494 * Tell gcc if a function is cold. The compiler will assume any path
74495 * directly leading to the call is unlikely.
74496@@ -284,6 +326,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
74497 #define __cold
74498 #endif
74499
74500+#ifndef __alloc_size
74501+#define __alloc_size(...)
74502+#endif
74503+
74504+#ifndef __bos
74505+#define __bos(ptr, arg)
74506+#endif
74507+
74508+#ifndef __bos0
74509+#define __bos0(ptr)
74510+#endif
74511+
74512+#ifndef __bos1
74513+#define __bos1(ptr)
74514+#endif
74515+
74516 /* Simple shorthand for a section definition */
74517 #ifndef __section
74518 # define __section(S) __attribute__ ((__section__(#S)))
74519@@ -349,7 +407,8 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
74520 * use is to mediate communication between process-level code and irq/NMI
74521 * handlers, all running on the same CPU.
74522 */
74523-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
74524+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
74525+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
74526
74527 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
74528 #ifdef CONFIG_KPROBES
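The ACCESS_ONCE change is the notable one in this hunk: the default macro now reads through a const volatile pointer, so writes must opt in via the new ACCESS_ONCE_RW. A standalone sketch of the two macros and their intended use:

#include <stdio.h>

/* plain ACCESS_ONCE yields a const lvalue: accidental stores fail to compile */
#define ACCESS_ONCE(x)    (*(volatile const __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

int main(void)
{
	int flag = 0;

	ACCESS_ONCE_RW(flag) = 1;          /* explicit read-write access */
	printf("%d\n", ACCESS_ONCE(flag)); /* volatile read: 1 */
	/* ACCESS_ONCE(flag) = 2; */       /* would not compile: lvalue is const */
	return 0;
}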
74529diff --git a/include/linux/completion.h b/include/linux/completion.h
74530index 3cd574d..240dcb0 100644
74531--- a/include/linux/completion.h
74532+++ b/include/linux/completion.h
74533@@ -78,16 +78,16 @@ static inline void init_completion(struct completion *x)
74534
74535 extern void wait_for_completion(struct completion *);
74536 extern void wait_for_completion_io(struct completion *);
74537-extern int wait_for_completion_interruptible(struct completion *x);
74538-extern int wait_for_completion_killable(struct completion *x);
74539+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
74540+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
74541 extern unsigned long wait_for_completion_timeout(struct completion *x,
74542- unsigned long timeout);
74543+ unsigned long timeout) __intentional_overflow(-1);
74544 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
74545- unsigned long timeout);
74546+ unsigned long timeout) __intentional_overflow(-1);
74547 extern long wait_for_completion_interruptible_timeout(
74548- struct completion *x, unsigned long timeout);
74549+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
74550 extern long wait_for_completion_killable_timeout(
74551- struct completion *x, unsigned long timeout);
74552+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
74553 extern bool try_wait_for_completion(struct completion *x);
74554 extern bool completion_done(struct completion *x);
74555
74556diff --git a/include/linux/configfs.h b/include/linux/configfs.h
74557index 34025df..d94bbbc 100644
74558--- a/include/linux/configfs.h
74559+++ b/include/linux/configfs.h
74560@@ -125,7 +125,7 @@ struct configfs_attribute {
74561 const char *ca_name;
74562 struct module *ca_owner;
74563 umode_t ca_mode;
74564-};
74565+} __do_const;
74566
74567 /*
74568 * Users often need to create attribute structures for their configurable
74569diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
74570index fcabc42..cba5d93 100644
74571--- a/include/linux/cpufreq.h
74572+++ b/include/linux/cpufreq.h
74573@@ -167,6 +167,7 @@ struct global_attr {
74574 ssize_t (*store)(struct kobject *a, struct attribute *b,
74575 const char *c, size_t count);
74576 };
74577+typedef struct global_attr __no_const global_attr_no_const;
74578
74579 #define define_one_global_ro(_name) \
74580 static struct global_attr _name = \
74581@@ -208,7 +209,7 @@ struct cpufreq_driver {
74582 int (*suspend) (struct cpufreq_policy *policy);
74583 int (*resume) (struct cpufreq_policy *policy);
74584 struct freq_attr **attr;
74585-};
74586+} __do_const;
74587
74588 /* flags */
74589 #define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if
74590diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
74591index 781addc..d1e1fe6 100644
74592--- a/include/linux/cpuidle.h
74593+++ b/include/linux/cpuidle.h
74594@@ -50,7 +50,8 @@ struct cpuidle_state {
74595 int index);
74596
74597 int (*enter_dead) (struct cpuidle_device *dev, int index);
74598-};
74599+} __do_const;
74600+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
74601
74602 /* Idle State Flags */
74603 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
74604@@ -192,7 +193,7 @@ struct cpuidle_governor {
74605 void (*reflect) (struct cpuidle_device *dev, int index);
74606
74607 struct module *owner;
74608-};
74609+} __do_const;
74610
74611 #ifdef CONFIG_CPU_IDLE
74612
74613diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
74614index d08e4d2..95fad61 100644
74615--- a/include/linux/cpumask.h
74616+++ b/include/linux/cpumask.h
74617@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
74618 }
74619
74620 /* Valid inputs for n are -1 and 0. */
74621-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
74622+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
74623 {
74624 return n+1;
74625 }
74626
74627-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
74628+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
74629 {
74630 return n+1;
74631 }
74632
74633-static inline unsigned int cpumask_next_and(int n,
74634+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
74635 const struct cpumask *srcp,
74636 const struct cpumask *andp)
74637 {
74638@@ -167,7 +167,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
74639 *
74640 * Returns >= nr_cpu_ids if no further cpus set.
74641 */
74642-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
74643+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
74644 {
74645 /* -1 is a legal arg here. */
74646 if (n != -1)
74647@@ -182,7 +182,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
74648 *
74649 * Returns >= nr_cpu_ids if no further cpus unset.
74650 */
74651-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
74652+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
74653 {
74654 /* -1 is a legal arg here. */
74655 if (n != -1)
74656@@ -190,7 +190,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
74657 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
74658 }
74659
74660-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
74661+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
74662 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
74663
74664 /**
74665diff --git a/include/linux/cred.h b/include/linux/cred.h
74666index 04421e8..6bce4ef 100644
74667--- a/include/linux/cred.h
74668+++ b/include/linux/cred.h
74669@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
74670 static inline void validate_process_creds(void)
74671 {
74672 }
74673+static inline void validate_task_creds(struct task_struct *task)
74674+{
74675+}
74676 #endif
74677
74678 /**
74679diff --git a/include/linux/crypto.h b/include/linux/crypto.h
74680index b92eadf..b4ecdc1 100644
74681--- a/include/linux/crypto.h
74682+++ b/include/linux/crypto.h
74683@@ -373,7 +373,7 @@ struct cipher_tfm {
74684 const u8 *key, unsigned int keylen);
74685 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
74686 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
74687-};
74688+} __no_const;
74689
74690 struct hash_tfm {
74691 int (*init)(struct hash_desc *desc);
74692@@ -394,13 +394,13 @@ struct compress_tfm {
74693 int (*cot_decompress)(struct crypto_tfm *tfm,
74694 const u8 *src, unsigned int slen,
74695 u8 *dst, unsigned int *dlen);
74696-};
74697+} __no_const;
74698
74699 struct rng_tfm {
74700 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
74701 unsigned int dlen);
74702 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
74703-};
74704+} __no_const;
74705
74706 #define crt_ablkcipher crt_u.ablkcipher
74707 #define crt_aead crt_u.aead
74708diff --git a/include/linux/ctype.h b/include/linux/ctype.h
74709index 653589e..4ef254a 100644
74710--- a/include/linux/ctype.h
74711+++ b/include/linux/ctype.h
74712@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
74713 * Fast implementation of tolower() for internal usage. Do not use in your
74714 * code.
74715 */
74716-static inline char _tolower(const char c)
74717+static inline unsigned char _tolower(const unsigned char c)
74718 {
74719 return c | 0x20;
74720 }
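Widening _tolower to unsigned char avoids sign-extension surprises on platforms where plain char is signed; the OR-0x20 trick itself is only valid for ASCII 'A'..'Z', hence the header's warning. A quick demonstration (my_tolower is a local stand-in):

#include <stdio.h>

static unsigned char my_tolower(const unsigned char c)
{
	return c | 0x20; /* sets the lowercase bit; correct only for A-Z */
}

int main(void)
{
	printf("%c %c\n", my_tolower('A'), my_tolower('z')); /* a z */
	printf("%#x\n", my_tolower('@'));                    /* 0x60: garbage for non-letters */
	return 0;
}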
74721diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
74722index 7925bf0..d5143d2 100644
74723--- a/include/linux/decompress/mm.h
74724+++ b/include/linux/decompress/mm.h
74725@@ -77,7 +77,7 @@ static void free(void *where)
74726 * warnings when not needed (indeed large_malloc / large_free are not
74727 * needed by inflate */
74728
74729-#define malloc(a) kmalloc(a, GFP_KERNEL)
74730+#define malloc(a) kmalloc((a), GFP_KERNEL)
74731 #define free(a) kfree(a)
74732
74733 #define large_malloc(a) vmalloc(a)
74734diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
74735index 5f1ab92..39c35ae 100644
74736--- a/include/linux/devfreq.h
74737+++ b/include/linux/devfreq.h
74738@@ -114,7 +114,7 @@ struct devfreq_governor {
74739 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
74740 int (*event_handler)(struct devfreq *devfreq,
74741 unsigned int event, void *data);
74742-};
74743+} __do_const;
74744
74745 /**
74746 * struct devfreq - Device devfreq structure
74747diff --git a/include/linux/device.h b/include/linux/device.h
74748index 2a9d6ed..d14551e3 100644
74749--- a/include/linux/device.h
74750+++ b/include/linux/device.h
74751@@ -313,7 +313,7 @@ struct subsys_interface {
74752 struct list_head node;
74753 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
74754 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
74755-};
74756+} __do_const;
74757
74758 int subsys_interface_register(struct subsys_interface *sif);
74759 void subsys_interface_unregister(struct subsys_interface *sif);
74760@@ -501,7 +501,7 @@ struct device_type {
74761 void (*release)(struct device *dev);
74762
74763 const struct dev_pm_ops *pm;
74764-};
74765+} __do_const;
74766
74767 /* interface for exporting device attributes */
74768 struct device_attribute {
74769@@ -511,11 +511,12 @@ struct device_attribute {
74770 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
74771 const char *buf, size_t count);
74772 };
74773+typedef struct device_attribute __no_const device_attribute_no_const;
74774
74775 struct dev_ext_attribute {
74776 struct device_attribute attr;
74777 void *var;
74778-};
74779+} __do_const;
74780
74781 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
74782 char *buf);
74783diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
74784index 3a8d0a2..c762be2 100644
74785--- a/include/linux/dma-mapping.h
74786+++ b/include/linux/dma-mapping.h
74787@@ -54,7 +54,7 @@ struct dma_map_ops {
74788 u64 (*get_required_mask)(struct device *dev);
74789 #endif
74790 int is_phys;
74791-};
74792+} __do_const;
74793
74794 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
74795
74796diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
74797index 0bc7275..4ccbf11 100644
74798--- a/include/linux/dmaengine.h
74799+++ b/include/linux/dmaengine.h
74800@@ -1078,9 +1078,9 @@ struct dma_pinned_list {
74801 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
74802 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
74803
74804-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
74805+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
74806 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
74807-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
74808+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
74809 struct dma_pinned_list *pinned_list, struct page *page,
74810 unsigned int offset, size_t len);
74811
74812diff --git a/include/linux/efi.h b/include/linux/efi.h
74813index 094ddd0..f1dfcd3 100644
74814--- a/include/linux/efi.h
74815+++ b/include/linux/efi.h
74816@@ -745,6 +745,7 @@ struct efivar_operations {
74817 efi_set_variable_t *set_variable;
74818 efi_query_variable_store_t *query_variable_store;
74819 };
74820+typedef struct efivar_operations __no_const efivar_operations_no_const;
74821
74822 struct efivars {
74823 /*
74824diff --git a/include/linux/elf.h b/include/linux/elf.h
74825index 40a3c0e0..4c45a38 100644
74826--- a/include/linux/elf.h
74827+++ b/include/linux/elf.h
74828@@ -24,6 +24,7 @@ extern Elf32_Dyn _DYNAMIC [];
74829 #define elf_note elf32_note
74830 #define elf_addr_t Elf32_Off
74831 #define Elf_Half Elf32_Half
74832+#define elf_dyn Elf32_Dyn
74833
74834 #else
74835
74836@@ -34,6 +35,7 @@ extern Elf64_Dyn _DYNAMIC [];
74837 #define elf_note elf64_note
74838 #define elf_addr_t Elf64_Off
74839 #define Elf_Half Elf64_Half
74840+#define elf_dyn Elf64_Dyn
74841
74842 #endif
74843
74844diff --git a/include/linux/err.h b/include/linux/err.h
74845index 15f92e0..e825a8e 100644
74846--- a/include/linux/err.h
74847+++ b/include/linux/err.h
74848@@ -19,12 +19,12 @@
74849
74850 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
74851
74852-static inline void * __must_check ERR_PTR(long error)
74853+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
74854 {
74855 return (void *) error;
74856 }
74857
74858-static inline long __must_check PTR_ERR(__force const void *ptr)
74859+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
74860 {
74861 return (long) ptr;
74862 }
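ERR_PTR/PTR_ERR deliberately round-trip small negative errnos through a pointer, overflowing the conversion by design — which is what the added __intentional_overflow(-1) records. A userspace restatement of the encoding (err_ptr/ptr_err/is_err are local stand-ins):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

/* errnos live in the top page of the address space, never a valid pointer */
static void *err_ptr(long error)      { return (void *)error; }
static long  ptr_err(const void *ptr) { return (long)ptr; }
static int   is_err(const void *ptr)  { return IS_ERR_VALUE((unsigned long)ptr); }

int main(void)
{
	void *p = err_ptr(-ENOMEM);

	if (is_err(p))
		printf("error: %ld\n", ptr_err(p)); /* error: -12 */
	return 0;
}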
74863diff --git a/include/linux/extcon.h b/include/linux/extcon.h
74864index fcb51c8..bdafcf6 100644
74865--- a/include/linux/extcon.h
74866+++ b/include/linux/extcon.h
74867@@ -134,7 +134,7 @@ struct extcon_dev {
74868 /* /sys/class/extcon/.../mutually_exclusive/... */
74869 struct attribute_group attr_g_muex;
74870 struct attribute **attrs_muex;
74871- struct device_attribute *d_attrs_muex;
74872+ device_attribute_no_const *d_attrs_muex;
74873 };
74874
74875 /**
74876diff --git a/include/linux/fb.h b/include/linux/fb.h
74877index ffac70a..ca3e711 100644
74878--- a/include/linux/fb.h
74879+++ b/include/linux/fb.h
74880@@ -304,7 +304,7 @@ struct fb_ops {
74881 /* called at KDB enter and leave time to prepare the console */
74882 int (*fb_debug_enter)(struct fb_info *info);
74883 int (*fb_debug_leave)(struct fb_info *info);
74884-};
74885+} __do_const;
74886
74887 #ifdef CONFIG_FB_TILEBLITTING
74888 #define FB_TILE_CURSOR_NONE 0
74889diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
74890index 085197b..0fa6f0b 100644
74891--- a/include/linux/fdtable.h
74892+++ b/include/linux/fdtable.h
74893@@ -95,7 +95,7 @@ struct files_struct *get_files_struct(struct task_struct *);
74894 void put_files_struct(struct files_struct *fs);
74895 void reset_files_struct(struct files_struct *);
74896 int unshare_files(struct files_struct **);
74897-struct files_struct *dup_fd(struct files_struct *, int *);
74898+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
74899 void do_close_on_exec(struct files_struct *);
74900 int iterate_fd(struct files_struct *, unsigned,
74901 int (*)(const void *, struct file *, unsigned),
74902diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
74903index 8293262..2b3b8bd 100644
74904--- a/include/linux/frontswap.h
74905+++ b/include/linux/frontswap.h
74906@@ -11,7 +11,7 @@ struct frontswap_ops {
74907 int (*load)(unsigned, pgoff_t, struct page *);
74908 void (*invalidate_page)(unsigned, pgoff_t);
74909 void (*invalidate_area)(unsigned);
74910-};
74911+} __no_const;
74912
74913 extern bool frontswap_enabled;
74914 extern struct frontswap_ops *
74915diff --git a/include/linux/fs.h b/include/linux/fs.h
74916index 164d2a9..0ffa41d0 100644
74917--- a/include/linux/fs.h
74918+++ b/include/linux/fs.h
74919@@ -1552,7 +1552,8 @@ struct file_operations {
74920 long (*fallocate)(struct file *file, int mode, loff_t offset,
74921 loff_t len);
74922 int (*show_fdinfo)(struct seq_file *m, struct file *f);
74923-};
74924+} __do_const;
74925+typedef struct file_operations __no_const file_operations_no_const;
74926
74927 struct inode_operations {
74928 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
74929@@ -2747,4 +2748,14 @@ static inline bool dir_relax(struct inode *inode)
74930 return !IS_DEADDIR(inode);
74931 }
74932
74933+static inline bool is_sidechannel_device(const struct inode *inode)
74934+{
74935+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
74936+ umode_t mode = inode->i_mode;
74937+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
74938+#else
74939+ return false;
74940+#endif
74941+}
74942+
74943 #endif /* _LINUX_FS_H */
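The new is_sidechannel_device() helper flags world-accessible character or block devices so the fsnotify paths patched further below can skip them. A userspace restatement of the mode test, checked against a real device node:

#include <stdio.h>
#include <sys/stat.h>

/* a chr/blk device readable or writable by "other" is treated as a
 * potential side channel */
static int is_sidechannel_mode(mode_t mode)
{
	return (S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH));
}

int main(void)
{
	struct stat st;

	if (stat("/dev/null", &st) == 0)
		printf("/dev/null: %s\n",
		       is_sidechannel_mode(st.st_mode) ? "side-channel candidate" : "ok");
	return 0;
}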
74944diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
74945index 0efc3e6..e0e1e5f 100644
74946--- a/include/linux/fs_struct.h
74947+++ b/include/linux/fs_struct.h
74948@@ -6,7 +6,7 @@
74949 #include <linux/seqlock.h>
74950
74951 struct fs_struct {
74952- int users;
74953+ atomic_t users;
74954 spinlock_t lock;
74955 seqcount_t seq;
74956 int umask;
74957diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
74958index 7823e9e..56b6f2f 100644
74959--- a/include/linux/fscache-cache.h
74960+++ b/include/linux/fscache-cache.h
74961@@ -113,7 +113,7 @@ struct fscache_operation {
74962 fscache_operation_release_t release;
74963 };
74964
74965-extern atomic_t fscache_op_debug_id;
74966+extern atomic_unchecked_t fscache_op_debug_id;
74967 extern void fscache_op_work_func(struct work_struct *work);
74968
74969 extern void fscache_enqueue_operation(struct fscache_operation *);
74970@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
74971 INIT_WORK(&op->work, fscache_op_work_func);
74972 atomic_set(&op->usage, 1);
74973 op->state = FSCACHE_OP_ST_INITIALISED;
74974- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
74975+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
74976 op->processor = processor;
74977 op->release = release;
74978 INIT_LIST_HEAD(&op->pend_link);
74979diff --git a/include/linux/fscache.h b/include/linux/fscache.h
74980index 19b4645..3b73dfc 100644
74981--- a/include/linux/fscache.h
74982+++ b/include/linux/fscache.h
74983@@ -152,7 +152,7 @@ struct fscache_cookie_def {
74984 * - this is mandatory for any object that may have data
74985 */
74986 void (*now_uncached)(void *cookie_netfs_data);
74987-};
74988+} __do_const;
74989
74990 /*
74991 * fscache cached network filesystem type
74992diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
74993index 1c804b0..1432c2b 100644
74994--- a/include/linux/fsnotify.h
74995+++ b/include/linux/fsnotify.h
74996@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
74997 struct inode *inode = file_inode(file);
74998 __u32 mask = FS_ACCESS;
74999
75000+ if (is_sidechannel_device(inode))
75001+ return;
75002+
75003 if (S_ISDIR(inode->i_mode))
75004 mask |= FS_ISDIR;
75005
75006@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
75007 struct inode *inode = file_inode(file);
75008 __u32 mask = FS_MODIFY;
75009
75010+ if (is_sidechannel_device(inode))
75011+ return;
75012+
75013 if (S_ISDIR(inode->i_mode))
75014 mask |= FS_ISDIR;
75015
75016@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
75017 */
75018 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
75019 {
75020- return kstrdup(name, GFP_KERNEL);
75021+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
75022 }
75023
75024 /*
75025diff --git a/include/linux/genhd.h b/include/linux/genhd.h
75026index 9f3c275..8bdff5d 100644
75027--- a/include/linux/genhd.h
75028+++ b/include/linux/genhd.h
75029@@ -194,7 +194,7 @@ struct gendisk {
75030 struct kobject *slave_dir;
75031
75032 struct timer_rand_state *random;
75033- atomic_t sync_io; /* RAID */
75034+ atomic_unchecked_t sync_io; /* RAID */
75035 struct disk_events *ev;
75036 #ifdef CONFIG_BLK_DEV_INTEGRITY
75037 struct blk_integrity *integrity;
75038@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
75039 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
75040
75041 /* drivers/char/random.c */
75042-extern void add_disk_randomness(struct gendisk *disk);
75043+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
75044 extern void rand_initialize_disk(struct gendisk *disk);
75045
75046 static inline sector_t get_start_sect(struct block_device *bdev)
75047diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
75048index 023bc34..b02b46a 100644
75049--- a/include/linux/genl_magic_func.h
75050+++ b/include/linux/genl_magic_func.h
75051@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
75052 },
75053
75054 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
75055-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
75056+static struct genl_ops ZZZ_genl_ops[] = {
75057 #include GENL_MAGIC_INCLUDE_FILE
75058 };
75059
75060diff --git a/include/linux/gfp.h b/include/linux/gfp.h
75061index 9b4dd49..61fd41d 100644
75062--- a/include/linux/gfp.h
75063+++ b/include/linux/gfp.h
75064@@ -35,6 +35,13 @@ struct vm_area_struct;
75065 #define ___GFP_NO_KSWAPD 0x400000u
75066 #define ___GFP_OTHER_NODE 0x800000u
75067 #define ___GFP_WRITE 0x1000000u
75068+
75069+#ifdef CONFIG_PAX_USERCOPY_SLABS
75070+#define ___GFP_USERCOPY 0x2000000u
75071+#else
75072+#define ___GFP_USERCOPY 0
75073+#endif
75074+
75075 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
75076
75077 /*
75078@@ -92,6 +99,7 @@ struct vm_area_struct;
75079 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
75080 #define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
75081 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
75082+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
75083
75084 /*
75085 * This may seem redundant, but it's a way of annotating false positives vs.
75086@@ -99,7 +107,7 @@ struct vm_area_struct;
75087 */
75088 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
75089
75090-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
75091+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
75092 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
75093
75094 /* This equals 0, but use constants in case they ever change */
75095@@ -153,6 +161,8 @@ struct vm_area_struct;
75096 /* 4GB DMA on some platforms */
75097 #define GFP_DMA32 __GFP_DMA32
75098
75099+#define GFP_USERCOPY __GFP_USERCOPY
75100+
75101 /* Convert GFP flags to their corresponding migrate type */
75102 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
75103 {
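Adding ___GFP_USERCOPY consumes one more flag bit, so __GFP_BITS_SHIFT grows from 25 to 26 to keep __GFP_BITS_MASK covering every defined bit. A toy version of that bookkeeping with illustrative values:

#include <stdio.h>

#define ___GFP_WRITE     0x1000000u
#define ___GFP_USERCOPY  0x2000000u /* the new bit: bit 25 */
#define __GFP_BITS_SHIFT 26         /* was 25 before the new bit */
#define __GFP_BITS_MASK  ((1u << __GFP_BITS_SHIFT) - 1)

int main(void)
{
	unsigned int flags = ___GFP_WRITE | ___GFP_USERCOPY;

	/* with the old shift of 25, bit 25 would fall outside the mask */
	printf("in mask: %s\n", (flags & ~__GFP_BITS_MASK) ? "no" : "yes"); /* yes */
	return 0;
}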
75104diff --git a/include/linux/gracl.h b/include/linux/gracl.h
75105new file mode 100644
75106index 0000000..edb2cb6
75107--- /dev/null
75108+++ b/include/linux/gracl.h
75109@@ -0,0 +1,340 @@
75110+#ifndef GR_ACL_H
75111+#define GR_ACL_H
75112+
75113+#include <linux/grdefs.h>
75114+#include <linux/resource.h>
75115+#include <linux/capability.h>
75116+#include <linux/dcache.h>
75117+#include <asm/resource.h>
75118+
75119+/* Major status information */
75120+
75121+#define GR_VERSION "grsecurity 3.0"
75122+#define GRSECURITY_VERSION 0x3000
75123+
75124+enum {
75125+ GR_SHUTDOWN = 0,
75126+ GR_ENABLE = 1,
75127+ GR_SPROLE = 2,
75128+ GR_OLDRELOAD = 3,
75129+ GR_SEGVMOD = 4,
75130+ GR_STATUS = 5,
75131+ GR_UNSPROLE = 6,
75132+ GR_PASSSET = 7,
75133+ GR_SPROLEPAM = 8,
75134+ GR_RELOAD = 9,
75135+};
75136+
75137+/* Password setup definitions
75138+ * kernel/grhash.c */
75139+enum {
75140+ GR_PW_LEN = 128,
75141+ GR_SALT_LEN = 16,
75142+ GR_SHA_LEN = 32,
75143+};
75144+
75145+enum {
75146+ GR_SPROLE_LEN = 64,
75147+};
75148+
75149+enum {
75150+ GR_NO_GLOB = 0,
75151+ GR_REG_GLOB,
75152+ GR_CREATE_GLOB
75153+};
75154+
75155+#define GR_NLIMITS 32
75156+
75157+/* Begin Data Structures */
75158+
75159+struct sprole_pw {
75160+ unsigned char *rolename;
75161+ unsigned char salt[GR_SALT_LEN];
75162+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
75163+};
75164+
75165+struct name_entry {
75166+ __u32 key;
75167+ ino_t inode;
75168+ dev_t device;
75169+ char *name;
75170+ __u16 len;
75171+ __u8 deleted;
75172+ struct name_entry *prev;
75173+ struct name_entry *next;
75174+};
75175+
75176+struct inodev_entry {
75177+ struct name_entry *nentry;
75178+ struct inodev_entry *prev;
75179+ struct inodev_entry *next;
75180+};
75181+
75182+struct acl_role_db {
75183+ struct acl_role_label **r_hash;
75184+ __u32 r_size;
75185+};
75186+
75187+struct inodev_db {
75188+ struct inodev_entry **i_hash;
75189+ __u32 i_size;
75190+};
75191+
75192+struct name_db {
75193+ struct name_entry **n_hash;
75194+ __u32 n_size;
75195+};
75196+
75197+struct crash_uid {
75198+ uid_t uid;
75199+ unsigned long expires;
75200+};
75201+
75202+struct gr_hash_struct {
75203+ void **table;
75204+ void **nametable;
75205+ void *first;
75206+ __u32 table_size;
75207+ __u32 used_size;
75208+ int type;
75209+};
75210+
75211+/* Userspace Grsecurity ACL data structures */
75212+
75213+struct acl_subject_label {
75214+ char *filename;
75215+ ino_t inode;
75216+ dev_t device;
75217+ __u32 mode;
75218+ kernel_cap_t cap_mask;
75219+ kernel_cap_t cap_lower;
75220+ kernel_cap_t cap_invert_audit;
75221+
75222+ struct rlimit res[GR_NLIMITS];
75223+ __u32 resmask;
75224+
75225+ __u8 user_trans_type;
75226+ __u8 group_trans_type;
75227+ uid_t *user_transitions;
75228+ gid_t *group_transitions;
75229+ __u16 user_trans_num;
75230+ __u16 group_trans_num;
75231+
75232+ __u32 sock_families[2];
75233+ __u32 ip_proto[8];
75234+ __u32 ip_type;
75235+ struct acl_ip_label **ips;
75236+ __u32 ip_num;
75237+ __u32 inaddr_any_override;
75238+
75239+ __u32 crashes;
75240+ unsigned long expires;
75241+
75242+ struct acl_subject_label *parent_subject;
75243+ struct gr_hash_struct *hash;
75244+ struct acl_subject_label *prev;
75245+ struct acl_subject_label *next;
75246+
75247+ struct acl_object_label **obj_hash;
75248+ __u32 obj_hash_size;
75249+ __u16 pax_flags;
75250+};
75251+
75252+struct role_allowed_ip {
75253+ __u32 addr;
75254+ __u32 netmask;
75255+
75256+ struct role_allowed_ip *prev;
75257+ struct role_allowed_ip *next;
75258+};
75259+
75260+struct role_transition {
75261+ char *rolename;
75262+
75263+ struct role_transition *prev;
75264+ struct role_transition *next;
75265+};
75266+
75267+struct acl_role_label {
75268+ char *rolename;
75269+ uid_t uidgid;
75270+ __u16 roletype;
75271+
75272+ __u16 auth_attempts;
75273+ unsigned long expires;
75274+
75275+ struct acl_subject_label *root_label;
75276+ struct gr_hash_struct *hash;
75277+
75278+ struct acl_role_label *prev;
75279+ struct acl_role_label *next;
75280+
75281+ struct role_transition *transitions;
75282+ struct role_allowed_ip *allowed_ips;
75283+ uid_t *domain_children;
75284+ __u16 domain_child_num;
75285+
75286+ umode_t umask;
75287+
75288+ struct acl_subject_label **subj_hash;
75289+ __u32 subj_hash_size;
75290+};
75291+
75292+struct user_acl_role_db {
75293+ struct acl_role_label **r_table;
75294+ __u32 num_pointers; /* Number of allocations to track */
75295+ __u32 num_roles; /* Number of roles */
75296+ __u32 num_domain_children; /* Number of domain children */
75297+ __u32 num_subjects; /* Number of subjects */
75298+ __u32 num_objects; /* Number of objects */
75299+};
75300+
75301+struct acl_object_label {
75302+ char *filename;
75303+ ino_t inode;
75304+ dev_t device;
75305+ __u32 mode;
75306+
75307+ struct acl_subject_label *nested;
75308+ struct acl_object_label *globbed;
75309+
75310+	/* next two pointers not used */
75311+
75312+ struct acl_object_label *prev;
75313+ struct acl_object_label *next;
75314+};
75315+
75316+struct acl_ip_label {
75317+ char *iface;
75318+ __u32 addr;
75319+ __u32 netmask;
75320+ __u16 low, high;
75321+ __u8 mode;
75322+ __u32 type;
75323+ __u32 proto[8];
75324+
75325+	/* next two pointers not used */
75326+
75327+ struct acl_ip_label *prev;
75328+ struct acl_ip_label *next;
75329+};
75330+
75331+struct gr_arg {
75332+ struct user_acl_role_db role_db;
75333+ unsigned char pw[GR_PW_LEN];
75334+ unsigned char salt[GR_SALT_LEN];
75335+ unsigned char sum[GR_SHA_LEN];
75336+ unsigned char sp_role[GR_SPROLE_LEN];
75337+ struct sprole_pw *sprole_pws;
75338+ dev_t segv_device;
75339+ ino_t segv_inode;
75340+ uid_t segv_uid;
75341+ __u16 num_sprole_pws;
75342+ __u16 mode;
75343+};
75344+
75345+struct gr_arg_wrapper {
75346+ struct gr_arg *arg;
75347+ __u32 version;
75348+ __u32 size;
75349+};
75350+
75351+struct subject_map {
75352+ struct acl_subject_label *user;
75353+ struct acl_subject_label *kernel;
75354+ struct subject_map *prev;
75355+ struct subject_map *next;
75356+};
75357+
75358+struct acl_subj_map_db {
75359+ struct subject_map **s_hash;
75360+ __u32 s_size;
75361+};
75362+
75363+struct gr_policy_state {
75364+ struct sprole_pw **acl_special_roles;
75365+ __u16 num_sprole_pws;
75366+ struct acl_role_label *kernel_role;
75367+ struct acl_role_label *role_list;
75368+ struct acl_role_label *default_role;
75369+ struct acl_role_db acl_role_set;
75370+ struct acl_subj_map_db subj_map_set;
75371+ struct name_db name_set;
75372+ struct inodev_db inodev_set;
75373+};
75374+
75375+struct gr_alloc_state {
75376+ unsigned long alloc_stack_next;
75377+ unsigned long alloc_stack_size;
75378+ void **alloc_stack;
75379+};
75380+
75381+struct gr_reload_state {
75382+ struct gr_policy_state oldpolicy;
75383+ struct gr_alloc_state oldalloc;
75384+ struct gr_policy_state newpolicy;
75385+ struct gr_alloc_state newalloc;
75386+ struct gr_policy_state *oldpolicy_ptr;
75387+ struct gr_alloc_state *oldalloc_ptr;
75388+ unsigned char oldmode;
75389+};
75390+
75391+/* End Data Structures Section */
75392+
75393+/* Hash functions generated by empirical testing by Brad Spengler.
75394+   They make good use of the low bits of the inode; the lookup loop
75395+   generally runs 0-1 iterations for a successful match and 0-3 for an
75396+   unsuccessful one.  Shift/add algorithm with modulus of table size and an XOR. */
75397+
75398+static __inline__ unsigned int
75399+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
75400+{
75401+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
75402+}
75403+
75404+static __inline__ unsigned int
75405+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
75406+{
75407+ return ((const unsigned long)userp % sz);
75408+}
75409+
75410+static __inline__ unsigned int
75411+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
75412+{
75413+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
75414+}
75415+
75416+static __inline__ unsigned int
75417+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
75418+{
75419+ return full_name_hash((const unsigned char *)name, len) % sz;
75420+}
75421+
75422+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
75423+ subj = NULL; \
75424+ iter = 0; \
75425+ while (iter < role->subj_hash_size) { \
75426+ if (subj == NULL) \
75427+ subj = role->subj_hash[iter]; \
75428+ if (subj == NULL) { \
75429+ iter++; \
75430+ continue; \
75431+ }
75432+
75433+#define FOR_EACH_SUBJECT_END(subj,iter) \
75434+ subj = subj->next; \
75435+ if (subj == NULL) \
75436+ iter++; \
75437+ }
75438+
75439+
75440+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
75441+ subj = role->hash->first; \
75442+ while (subj != NULL) {
75443+
75444+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
75445+ subj = subj->next; \
75446+ }
75447+
75448+#endif
75449+
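A minimal sketch (not part of the patch; the helper is hypothetical) of how the iteration macros above are used: the START/END pair open-codes a walk over the role's subject hash table, running the body once per chained acl_subject_label.

	static unsigned long example_count_subjects(struct acl_role_label *role)
	{
		struct acl_subject_label *subj;
		unsigned int iter;
		unsigned long count = 0;

		FOR_EACH_SUBJECT_START(role, subj, iter)
			count++;	/* body executes once per subject */
		FOR_EACH_SUBJECT_END(subj, iter)

		return count;
	}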
75450diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
75451new file mode 100644
75452index 0000000..33ebd1f
75453--- /dev/null
75454+++ b/include/linux/gracl_compat.h
75455@@ -0,0 +1,156 @@
75456+#ifndef GR_ACL_COMPAT_H
75457+#define GR_ACL_COMPAT_H
75458+
75459+#include <linux/resource.h>
75460+#include <asm/resource.h>
75461+
75462+struct sprole_pw_compat {
75463+ compat_uptr_t rolename;
75464+ unsigned char salt[GR_SALT_LEN];
75465+ unsigned char sum[GR_SHA_LEN];
75466+};
75467+
75468+struct gr_hash_struct_compat {
75469+ compat_uptr_t table;
75470+ compat_uptr_t nametable;
75471+ compat_uptr_t first;
75472+ __u32 table_size;
75473+ __u32 used_size;
75474+ int type;
75475+};
75476+
75477+struct acl_subject_label_compat {
75478+ compat_uptr_t filename;
75479+ compat_ino_t inode;
75480+ __u32 device;
75481+ __u32 mode;
75482+ kernel_cap_t cap_mask;
75483+ kernel_cap_t cap_lower;
75484+ kernel_cap_t cap_invert_audit;
75485+
75486+ struct compat_rlimit res[GR_NLIMITS];
75487+ __u32 resmask;
75488+
75489+ __u8 user_trans_type;
75490+ __u8 group_trans_type;
75491+ compat_uptr_t user_transitions;
75492+ compat_uptr_t group_transitions;
75493+ __u16 user_trans_num;
75494+ __u16 group_trans_num;
75495+
75496+ __u32 sock_families[2];
75497+ __u32 ip_proto[8];
75498+ __u32 ip_type;
75499+ compat_uptr_t ips;
75500+ __u32 ip_num;
75501+ __u32 inaddr_any_override;
75502+
75503+ __u32 crashes;
75504+ compat_ulong_t expires;
75505+
75506+ compat_uptr_t parent_subject;
75507+ compat_uptr_t hash;
75508+ compat_uptr_t prev;
75509+ compat_uptr_t next;
75510+
75511+ compat_uptr_t obj_hash;
75512+ __u32 obj_hash_size;
75513+ __u16 pax_flags;
75514+};
75515+
75516+struct role_allowed_ip_compat {
75517+ __u32 addr;
75518+ __u32 netmask;
75519+
75520+ compat_uptr_t prev;
75521+ compat_uptr_t next;
75522+};
75523+
75524+struct role_transition_compat {
75525+ compat_uptr_t rolename;
75526+
75527+ compat_uptr_t prev;
75528+ compat_uptr_t next;
75529+};
75530+
75531+struct acl_role_label_compat {
75532+ compat_uptr_t rolename;
75533+ uid_t uidgid;
75534+ __u16 roletype;
75535+
75536+ __u16 auth_attempts;
75537+ compat_ulong_t expires;
75538+
75539+ compat_uptr_t root_label;
75540+ compat_uptr_t hash;
75541+
75542+ compat_uptr_t prev;
75543+ compat_uptr_t next;
75544+
75545+ compat_uptr_t transitions;
75546+ compat_uptr_t allowed_ips;
75547+ compat_uptr_t domain_children;
75548+ __u16 domain_child_num;
75549+
75550+ umode_t umask;
75551+
75552+ compat_uptr_t subj_hash;
75553+ __u32 subj_hash_size;
75554+};
75555+
75556+struct user_acl_role_db_compat {
75557+ compat_uptr_t r_table;
75558+ __u32 num_pointers;
75559+ __u32 num_roles;
75560+ __u32 num_domain_children;
75561+ __u32 num_subjects;
75562+ __u32 num_objects;
75563+};
75564+
75565+struct acl_object_label_compat {
75566+ compat_uptr_t filename;
75567+ compat_ino_t inode;
75568+ __u32 device;
75569+ __u32 mode;
75570+
75571+ compat_uptr_t nested;
75572+ compat_uptr_t globbed;
75573+
75574+ compat_uptr_t prev;
75575+ compat_uptr_t next;
75576+};
75577+
75578+struct acl_ip_label_compat {
75579+ compat_uptr_t iface;
75580+ __u32 addr;
75581+ __u32 netmask;
75582+ __u16 low, high;
75583+ __u8 mode;
75584+ __u32 type;
75585+ __u32 proto[8];
75586+
75587+ compat_uptr_t prev;
75588+ compat_uptr_t next;
75589+};
75590+
75591+struct gr_arg_compat {
75592+ struct user_acl_role_db_compat role_db;
75593+ unsigned char pw[GR_PW_LEN];
75594+ unsigned char salt[GR_SALT_LEN];
75595+ unsigned char sum[GR_SHA_LEN];
75596+ unsigned char sp_role[GR_SPROLE_LEN];
75597+ compat_uptr_t sprole_pws;
75598+ __u32 segv_device;
75599+ compat_ino_t segv_inode;
75600+ uid_t segv_uid;
75601+ __u16 num_sprole_pws;
75602+ __u16 mode;
75603+};
75604+
75605+struct gr_arg_wrapper_compat {
75606+ compat_uptr_t arg;
75607+ __u32 version;
75608+ __u32 size;
75609+};
75610+
75611+#endif
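A sketch (assumed, not in the patch) of how the compat layouts above would be consumed on a 64-bit kernel: fixed-width members copy across unchanged, while each compat_uptr_t is widened back to a native pointer with compat_ptr() from <linux/compat.h>.

	static void example_widen_wrapper(struct gr_arg_wrapper *dst,
					  const struct gr_arg_wrapper_compat *src)
	{
		/* address-space annotations elided in this sketch */
		dst->arg     = (struct gr_arg *)compat_ptr(src->arg);
		dst->version = src->version;
		dst->size    = src->size;
	}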
75612diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
75613new file mode 100644
75614index 0000000..323ecf2
75615--- /dev/null
75616+++ b/include/linux/gralloc.h
75617@@ -0,0 +1,9 @@
75618+#ifndef __GRALLOC_H
75619+#define __GRALLOC_H
75620+
75621+void acl_free_all(void);
75622+int acl_alloc_stack_init(unsigned long size);
75623+void *acl_alloc(unsigned long len);
75624+void *acl_alloc_num(unsigned long num, unsigned long len);
75625+
75626+#endif
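The interface above suggests a simple arena-style lifecycle; a hypothetical sketch, assuming acl_alloc_stack_init() returns nonzero on success and acl_free_all() releases every allocation made since (the tracking stack would be sized from something like gr_arg.role_db.num_pointers above):

	static int example_load(unsigned long expected_allocs)
	{
		void *buf;

		if (!acl_alloc_stack_init(expected_allocs))
			return -ENOMEM;

		buf = acl_alloc(128);		/* tracked on the alloc stack */
		if (!buf) {
			acl_free_all();
			return -ENOMEM;
		}
		/* ... populate policy objects ... */
		acl_free_all();			/* frees buf and all other allocations */
		return 0;
	}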
75627diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
75628new file mode 100644
75629index 0000000..be66033
75630--- /dev/null
75631+++ b/include/linux/grdefs.h
75632@@ -0,0 +1,140 @@
75633+#ifndef GRDEFS_H
75634+#define GRDEFS_H
75635+
75636+/* Begin grsecurity status declarations */
75637+
75638+enum {
75639+ GR_READY = 0x01,
75640+ GR_STATUS_INIT = 0x00 // disabled state
75641+};
75642+
75643+/* Begin ACL declarations */
75644+
75645+/* Role flags */
75646+
75647+enum {
75648+ GR_ROLE_USER = 0x0001,
75649+ GR_ROLE_GROUP = 0x0002,
75650+ GR_ROLE_DEFAULT = 0x0004,
75651+ GR_ROLE_SPECIAL = 0x0008,
75652+ GR_ROLE_AUTH = 0x0010,
75653+ GR_ROLE_NOPW = 0x0020,
75654+ GR_ROLE_GOD = 0x0040,
75655+ GR_ROLE_LEARN = 0x0080,
75656+ GR_ROLE_TPE = 0x0100,
75657+ GR_ROLE_DOMAIN = 0x0200,
75658+ GR_ROLE_PAM = 0x0400,
75659+ GR_ROLE_PERSIST = 0x0800
75660+};
75661+
75662+/* ACL Subject and Object mode flags */
75663+enum {
75664+ GR_DELETED = 0x80000000
75665+};
75666+
75667+/* ACL Object-only mode flags */
75668+enum {
75669+ GR_READ = 0x00000001,
75670+ GR_APPEND = 0x00000002,
75671+ GR_WRITE = 0x00000004,
75672+ GR_EXEC = 0x00000008,
75673+ GR_FIND = 0x00000010,
75674+ GR_INHERIT = 0x00000020,
75675+ GR_SETID = 0x00000040,
75676+ GR_CREATE = 0x00000080,
75677+ GR_DELETE = 0x00000100,
75678+ GR_LINK = 0x00000200,
75679+ GR_AUDIT_READ = 0x00000400,
75680+ GR_AUDIT_APPEND = 0x00000800,
75681+ GR_AUDIT_WRITE = 0x00001000,
75682+ GR_AUDIT_EXEC = 0x00002000,
75683+ GR_AUDIT_FIND = 0x00004000,
75684+ GR_AUDIT_INHERIT= 0x00008000,
75685+ GR_AUDIT_SETID = 0x00010000,
75686+ GR_AUDIT_CREATE = 0x00020000,
75687+ GR_AUDIT_DELETE = 0x00040000,
75688+ GR_AUDIT_LINK = 0x00080000,
75689+ GR_PTRACERD = 0x00100000,
75690+ GR_NOPTRACE = 0x00200000,
75691+ GR_SUPPRESS = 0x00400000,
75692+ GR_NOLEARN = 0x00800000,
75693+ GR_INIT_TRANSFER= 0x01000000
75694+};
75695+
75696+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
75697+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
75698+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
75699+
75700+/* ACL subject-only mode flags */
75701+enum {
75702+ GR_KILL = 0x00000001,
75703+ GR_VIEW = 0x00000002,
75704+ GR_PROTECTED = 0x00000004,
75705+ GR_LEARN = 0x00000008,
75706+ GR_OVERRIDE = 0x00000010,
75707+	/* just a placeholder; this mode is only used in userspace */
75708+ GR_DUMMY = 0x00000020,
75709+ GR_PROTSHM = 0x00000040,
75710+ GR_KILLPROC = 0x00000080,
75711+ GR_KILLIPPROC = 0x00000100,
75712+	/* just a placeholder; this mode is only used in userspace */
75713+ GR_NOTROJAN = 0x00000200,
75714+ GR_PROTPROCFD = 0x00000400,
75715+ GR_PROCACCT = 0x00000800,
75716+ GR_RELAXPTRACE = 0x00001000,
75717+ //GR_NESTED = 0x00002000,
75718+ GR_INHERITLEARN = 0x00004000,
75719+ GR_PROCFIND = 0x00008000,
75720+ GR_POVERRIDE = 0x00010000,
75721+ GR_KERNELAUTH = 0x00020000,
75722+ GR_ATSECURE = 0x00040000,
75723+ GR_SHMEXEC = 0x00080000
75724+};
75725+
75726+enum {
75727+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
75728+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
75729+ GR_PAX_ENABLE_MPROTECT = 0x0004,
75730+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
75731+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
75732+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
75733+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
75734+ GR_PAX_DISABLE_MPROTECT = 0x0400,
75735+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
75736+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
75737+};
75738+
75739+enum {
75740+ GR_ID_USER = 0x01,
75741+ GR_ID_GROUP = 0x02,
75742+};
75743+
75744+enum {
75745+ GR_ID_ALLOW = 0x01,
75746+ GR_ID_DENY = 0x02,
75747+};
75748+
75749+#define GR_CRASH_RES 31
75750+#define GR_UIDTABLE_MAX 500
75751+
75752+/* begin resource learning section */
75753+enum {
75754+ GR_RLIM_CPU_BUMP = 60,
75755+ GR_RLIM_FSIZE_BUMP = 50000,
75756+ GR_RLIM_DATA_BUMP = 10000,
75757+ GR_RLIM_STACK_BUMP = 1000,
75758+ GR_RLIM_CORE_BUMP = 10000,
75759+ GR_RLIM_RSS_BUMP = 500000,
75760+ GR_RLIM_NPROC_BUMP = 1,
75761+ GR_RLIM_NOFILE_BUMP = 5,
75762+ GR_RLIM_MEMLOCK_BUMP = 50000,
75763+ GR_RLIM_AS_BUMP = 500000,
75764+ GR_RLIM_LOCKS_BUMP = 2,
75765+ GR_RLIM_SIGPENDING_BUMP = 5,
75766+ GR_RLIM_MSGQUEUE_BUMP = 10000,
75767+ GR_RLIM_NICE_BUMP = 1,
75768+ GR_RLIM_RTPRIO_BUMP = 1,
75769+ GR_RLIM_RTTIME_BUMP = 1000000
75770+};
75771+
75772+#endif
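Since the object modes above are disjoint bits, a permission test reduces to mask arithmetic. A minimal sketch (hypothetical helper, not in the patch):

	static int example_obj_allows(__u32 obj_mode, __u32 requested)
	{
		if (obj_mode & GR_DELETED)
			return 0;	/* object was deleted out from under the policy */
		/* grant only if every requested bit (e.g. GR_READ | GR_EXEC) is set */
		return (obj_mode & requested) == requested;
	}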
75773diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
75774new file mode 100644
75775index 0000000..d25522e
75776--- /dev/null
75777+++ b/include/linux/grinternal.h
75778@@ -0,0 +1,229 @@
75779+#ifndef __GRINTERNAL_H
75780+#define __GRINTERNAL_H
75781+
75782+#ifdef CONFIG_GRKERNSEC
75783+
75784+#include <linux/fs.h>
75785+#include <linux/mnt_namespace.h>
75786+#include <linux/nsproxy.h>
75787+#include <linux/gracl.h>
75788+#include <linux/grdefs.h>
75789+#include <linux/grmsg.h>
75790+
75791+void gr_add_learn_entry(const char *fmt, ...)
75792+ __attribute__ ((format (printf, 1, 2)));
75793+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
75794+ const struct vfsmount *mnt);
75795+__u32 gr_check_create(const struct dentry *new_dentry,
75796+ const struct dentry *parent,
75797+ const struct vfsmount *mnt, const __u32 mode);
75798+int gr_check_protected_task(const struct task_struct *task);
75799+__u32 to_gr_audit(const __u32 reqmode);
75800+int gr_set_acls(const int type);
75801+int gr_acl_is_enabled(void);
75802+char gr_roletype_to_char(void);
75803+
75804+void gr_handle_alertkill(struct task_struct *task);
75805+char *gr_to_filename(const struct dentry *dentry,
75806+ const struct vfsmount *mnt);
75807+char *gr_to_filename1(const struct dentry *dentry,
75808+ const struct vfsmount *mnt);
75809+char *gr_to_filename2(const struct dentry *dentry,
75810+ const struct vfsmount *mnt);
75811+char *gr_to_filename3(const struct dentry *dentry,
75812+ const struct vfsmount *mnt);
75813+
75814+extern int grsec_enable_ptrace_readexec;
75815+extern int grsec_enable_harden_ptrace;
75816+extern int grsec_enable_link;
75817+extern int grsec_enable_fifo;
75818+extern int grsec_enable_execve;
75819+extern int grsec_enable_shm;
75820+extern int grsec_enable_execlog;
75821+extern int grsec_enable_signal;
75822+extern int grsec_enable_audit_ptrace;
75823+extern int grsec_enable_forkfail;
75824+extern int grsec_enable_time;
75825+extern int grsec_enable_rofs;
75826+extern int grsec_deny_new_usb;
75827+extern int grsec_enable_chroot_shmat;
75828+extern int grsec_enable_chroot_mount;
75829+extern int grsec_enable_chroot_double;
75830+extern int grsec_enable_chroot_pivot;
75831+extern int grsec_enable_chroot_chdir;
75832+extern int grsec_enable_chroot_chmod;
75833+extern int grsec_enable_chroot_mknod;
75834+extern int grsec_enable_chroot_fchdir;
75835+extern int grsec_enable_chroot_nice;
75836+extern int grsec_enable_chroot_execlog;
75837+extern int grsec_enable_chroot_caps;
75838+extern int grsec_enable_chroot_sysctl;
75839+extern int grsec_enable_chroot_unix;
75840+extern int grsec_enable_symlinkown;
75841+extern kgid_t grsec_symlinkown_gid;
75842+extern int grsec_enable_tpe;
75843+extern kgid_t grsec_tpe_gid;
75844+extern int grsec_enable_tpe_all;
75845+extern int grsec_enable_tpe_invert;
75846+extern int grsec_enable_socket_all;
75847+extern kgid_t grsec_socket_all_gid;
75848+extern int grsec_enable_socket_client;
75849+extern kgid_t grsec_socket_client_gid;
75850+extern int grsec_enable_socket_server;
75851+extern kgid_t grsec_socket_server_gid;
75852+extern kgid_t grsec_audit_gid;
75853+extern int grsec_enable_group;
75854+extern int grsec_enable_log_rwxmaps;
75855+extern int grsec_enable_mount;
75856+extern int grsec_enable_chdir;
75857+extern int grsec_resource_logging;
75858+extern int grsec_enable_blackhole;
75859+extern int grsec_lastack_retries;
75860+extern int grsec_enable_brute;
75861+extern int grsec_enable_harden_ipc;
75862+extern int grsec_lock;
75863+
75864+extern spinlock_t grsec_alert_lock;
75865+extern unsigned long grsec_alert_wtime;
75866+extern unsigned long grsec_alert_fyet;
75867+
75868+extern spinlock_t grsec_audit_lock;
75869+
75870+extern rwlock_t grsec_exec_file_lock;
75871+
75872+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
75873+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
75874+ (tsk)->exec_file->f_path.mnt) : "/")
75875+
75876+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
75877+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
75878+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
75879+
75880+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
75881+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
75882+ (tsk)->exec_file->f_path.mnt) : "/")
75883+
75884+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
75885+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
75886+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
75887+
75888+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
75889+
75890+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
75891+
75892+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
75893+{
75894+ if (file1 && file2) {
75895+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
75896+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
75897+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
75898+ return true;
75899+ }
75900+
75901+ return false;
75902+}
75903+
75904+#define GR_CHROOT_CAPS {{ \
75905+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
75906+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
75907+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
75908+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
75909+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
75910+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
75911+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
75912+
75913+#define security_learn(normal_msg,args...) \
75914+({ \
75915+ read_lock(&grsec_exec_file_lock); \
75916+ gr_add_learn_entry(normal_msg "\n", ## args); \
75917+ read_unlock(&grsec_exec_file_lock); \
75918+})
75919+
75920+enum {
75921+ GR_DO_AUDIT,
75922+ GR_DONT_AUDIT,
75923+	/* used for non-audit messages that should not get the task killed */
75924+ GR_DONT_AUDIT_GOOD
75925+};
75926+
75927+enum {
75928+ GR_TTYSNIFF,
75929+ GR_RBAC,
75930+ GR_RBAC_STR,
75931+ GR_STR_RBAC,
75932+ GR_RBAC_MODE2,
75933+ GR_RBAC_MODE3,
75934+ GR_FILENAME,
75935+ GR_SYSCTL_HIDDEN,
75936+ GR_NOARGS,
75937+ GR_ONE_INT,
75938+ GR_ONE_INT_TWO_STR,
75939+ GR_ONE_STR,
75940+ GR_STR_INT,
75941+ GR_TWO_STR_INT,
75942+ GR_TWO_INT,
75943+ GR_TWO_U64,
75944+ GR_THREE_INT,
75945+ GR_FIVE_INT_TWO_STR,
75946+ GR_TWO_STR,
75947+ GR_THREE_STR,
75948+ GR_FOUR_STR,
75949+ GR_STR_FILENAME,
75950+ GR_FILENAME_STR,
75951+ GR_FILENAME_TWO_INT,
75952+ GR_FILENAME_TWO_INT_STR,
75953+ GR_TEXTREL,
75954+ GR_PTRACE,
75955+ GR_RESOURCE,
75956+ GR_CAP,
75957+ GR_SIG,
75958+ GR_SIG2,
75959+ GR_CRASH1,
75960+ GR_CRASH2,
75961+ GR_PSACCT,
75962+ GR_RWXMAP,
75963+ GR_RWXMAPVMA
75964+};
75965+
75966+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
75967+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
75968+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
75969+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
75970+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
75971+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
75972+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
75973+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
75974+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
75975+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
75976+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
75977+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
75978+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
75979+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
75980+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
75981+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
75982+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
75983+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
75984+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
75985+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
75986+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
75987+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
75988+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
75989+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
75990+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
75991+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
75992+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
75993+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
75994+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
75995+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
75996+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
75997+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
75998+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
75999+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
76000+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
76001+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
76002+
76003+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
76004+
76005+#endif
76006+
76007+#endif
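A sketch of an assumed call site for the wrapper macros above (the surrounding function is hypothetical): each wrapper pairs a format string from grmsg.h (added just below) with the argument-type tag that tells gr_log_varargs() how to consume its variadic list.

	static void example_log_mount_denied(const char *dev_name, const char *target)
	{
		/* expands to gr_log_varargs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG,
		 *                           GR_TWO_STR, dev_name, target) */
		gr_log_str_str(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name, target);
	}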
76008diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
76009new file mode 100644
76010index 0000000..2b07594
76011--- /dev/null
76012+++ b/include/linux/grmsg.h
76013@@ -0,0 +1,115 @@
76014+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
76015+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
76016+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
76017+#define GR_STOPMOD_MSG "denied modification of module state by "
76018+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
76019+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
76020+#define GR_IOPERM_MSG "denied use of ioperm() by "
76021+#define GR_IOPL_MSG "denied use of iopl() by "
76022+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
76023+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
76024+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
76025+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
76026+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
76027+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
76028+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
76029+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
76030+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
76031+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
76032+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
76033+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
76034+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
76035+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
76036+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
76037+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
76038+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
76039+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
76040+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
76041+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
76042+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
76043+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
76044+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
76045+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
76046+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
76047+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
76048+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
76049+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
76050+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
76051+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
76052+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
76053+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
76054+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
76055+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
76056+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
76057+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
76058+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
76059+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
76060+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
76061+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
76062+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
76063+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
76064+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
76065+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
76066+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
76067+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
76068+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
76069+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
76070+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
76071+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
76072+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
76073+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
76074+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
76075+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
76076+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
76077+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
76078+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
76079+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
76080+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
76081+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
76082+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
76083+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
76084+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
76085+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
76086+#define GR_FAILFORK_MSG "failed fork with errno %s by "
76087+#define GR_NICE_CHROOT_MSG "denied priority change by "
76088+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
76089+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
76090+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
76091+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
76092+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
76093+#define GR_TIME_MSG "time set by "
76094+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
76095+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
76096+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
76097+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
76098+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
76099+#define GR_BIND_MSG "denied bind() by "
76100+#define GR_CONNECT_MSG "denied connect() by "
76101+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
76102+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
76103+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
76104+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
76105+#define GR_CAP_ACL_MSG "use of %s denied for "
76106+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
76107+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
76108+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
76109+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
76110+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
76111+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
76112+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
76113+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
76114+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
76115+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
76116+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
76117+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
76118+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
76119+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
76120+#define GR_VM86_MSG "denied use of vm86 by "
76121+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
76122+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
76123+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
76124+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
76125+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
76126+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
76127+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
76128+#define GR_IPC_DENIED_MSG "denied %s of globally-%sable IPC with creator uid %u by "
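The compound messages rely on C's adjacent-string-literal concatenation; for example, after preprocessing, GR_DUALSIGLOG_MSG above is the single format string shown in this sketch:

	/* "signal %d sent to " DEFAULTSECMSG " by " concatenates to: */
	static const char example_dualsig_fmt[] =
		"signal %d sent to %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u,"
		" parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u by ";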
76129diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
76130new file mode 100644
76131index 0000000..d8b5b48
76132--- /dev/null
76133+++ b/include/linux/grsecurity.h
76134@@ -0,0 +1,245 @@
76135+#ifndef GR_SECURITY_H
76136+#define GR_SECURITY_H
76137+#include <linux/fs.h>
76138+#include <linux/fs_struct.h>
76139+#include <linux/binfmts.h>
76140+#include <linux/gracl.h>
76141+
76142+/* notify of brain-dead configs */
76143+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
76144+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
76145+#endif
76146+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
76147+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
76148+#endif
76149+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
76150+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
76151+#endif
76152+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
76153+#error "CONFIG_PAX enabled, but no PaX options are enabled."
76154+#endif
76155+
76156+int gr_handle_new_usb(void);
76157+
76158+void gr_handle_brute_attach(int dumpable);
76159+void gr_handle_brute_check(void);
76160+void gr_handle_kernel_exploit(void);
76161+
76162+char gr_roletype_to_char(void);
76163+
76164+int gr_acl_enable_at_secure(void);
76165+
76166+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
76167+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
76168+
76169+void gr_del_task_from_ip_table(struct task_struct *p);
76170+
76171+int gr_pid_is_chrooted(struct task_struct *p);
76172+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
76173+int gr_handle_chroot_nice(void);
76174+int gr_handle_chroot_sysctl(const int op);
76175+int gr_handle_chroot_setpriority(struct task_struct *p,
76176+ const int niceval);
76177+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
76178+int gr_handle_chroot_chroot(const struct dentry *dentry,
76179+ const struct vfsmount *mnt);
76180+void gr_handle_chroot_chdir(const struct path *path);
76181+int gr_handle_chroot_chmod(const struct dentry *dentry,
76182+ const struct vfsmount *mnt, const int mode);
76183+int gr_handle_chroot_mknod(const struct dentry *dentry,
76184+ const struct vfsmount *mnt, const int mode);
76185+int gr_handle_chroot_mount(const struct dentry *dentry,
76186+ const struct vfsmount *mnt,
76187+ const char *dev_name);
76188+int gr_handle_chroot_pivot(void);
76189+int gr_handle_chroot_unix(const pid_t pid);
76190+
76191+int gr_handle_rawio(const struct inode *inode);
76192+
76193+void gr_handle_ioperm(void);
76194+void gr_handle_iopl(void);
76195+
76196+umode_t gr_acl_umask(void);
76197+
76198+int gr_tpe_allow(const struct file *file);
76199+
76200+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
76201+void gr_clear_chroot_entries(struct task_struct *task);
76202+
76203+void gr_log_forkfail(const int retval);
76204+void gr_log_timechange(void);
76205+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
76206+void gr_log_chdir(const struct dentry *dentry,
76207+ const struct vfsmount *mnt);
76208+void gr_log_chroot_exec(const struct dentry *dentry,
76209+ const struct vfsmount *mnt);
76210+void gr_log_remount(const char *devname, const int retval);
76211+void gr_log_unmount(const char *devname, const int retval);
76212+void gr_log_mount(const char *from, const char *to, const int retval);
76213+void gr_log_textrel(struct vm_area_struct *vma);
76214+void gr_log_ptgnustack(struct file *file);
76215+void gr_log_rwxmmap(struct file *file);
76216+void gr_log_rwxmprotect(struct vm_area_struct *vma);
76217+
76218+int gr_handle_follow_link(const struct inode *parent,
76219+ const struct inode *inode,
76220+ const struct dentry *dentry,
76221+ const struct vfsmount *mnt);
76222+int gr_handle_fifo(const struct dentry *dentry,
76223+ const struct vfsmount *mnt,
76224+ const struct dentry *dir, const int flag,
76225+ const int acc_mode);
76226+int gr_handle_hardlink(const struct dentry *dentry,
76227+ const struct vfsmount *mnt,
76228+ struct inode *inode,
76229+ const int mode, const struct filename *to);
76230+
76231+int gr_is_capable(const int cap);
76232+int gr_is_capable_nolog(const int cap);
76233+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
76234+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
76235+
76236+void gr_copy_label(struct task_struct *tsk);
76237+void gr_handle_crash(struct task_struct *task, const int sig);
76238+int gr_handle_signal(const struct task_struct *p, const int sig);
76239+int gr_check_crash_uid(const kuid_t uid);
76240+int gr_check_protected_task(const struct task_struct *task);
76241+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
76242+int gr_acl_handle_mmap(const struct file *file,
76243+ const unsigned long prot);
76244+int gr_acl_handle_mprotect(const struct file *file,
76245+ const unsigned long prot);
76246+int gr_check_hidden_task(const struct task_struct *tsk);
76247+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
76248+ const struct vfsmount *mnt);
76249+__u32 gr_acl_handle_utime(const struct dentry *dentry,
76250+ const struct vfsmount *mnt);
76251+__u32 gr_acl_handle_access(const struct dentry *dentry,
76252+ const struct vfsmount *mnt, const int fmode);
76253+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
76254+ const struct vfsmount *mnt, umode_t *mode);
76255+__u32 gr_acl_handle_chown(const struct dentry *dentry,
76256+ const struct vfsmount *mnt);
76257+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
76258+ const struct vfsmount *mnt);
76259+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
76260+ const struct vfsmount *mnt);
76261+int gr_handle_ptrace(struct task_struct *task, const long request);
76262+int gr_handle_proc_ptrace(struct task_struct *task);
76263+__u32 gr_acl_handle_execve(const struct dentry *dentry,
76264+ const struct vfsmount *mnt);
76265+int gr_check_crash_exec(const struct file *filp);
76266+int gr_acl_is_enabled(void);
76267+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
76268+ const kgid_t gid);
76269+int gr_set_proc_label(const struct dentry *dentry,
76270+ const struct vfsmount *mnt,
76271+ const int unsafe_flags);
76272+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
76273+ const struct vfsmount *mnt);
76274+__u32 gr_acl_handle_open(const struct dentry *dentry,
76275+ const struct vfsmount *mnt, int acc_mode);
76276+__u32 gr_acl_handle_creat(const struct dentry *dentry,
76277+ const struct dentry *p_dentry,
76278+ const struct vfsmount *p_mnt,
76279+ int open_flags, int acc_mode, const int imode);
76280+void gr_handle_create(const struct dentry *dentry,
76281+ const struct vfsmount *mnt);
76282+void gr_handle_proc_create(const struct dentry *dentry,
76283+ const struct inode *inode);
76284+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
76285+ const struct dentry *parent_dentry,
76286+ const struct vfsmount *parent_mnt,
76287+ const int mode);
76288+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
76289+ const struct dentry *parent_dentry,
76290+ const struct vfsmount *parent_mnt);
76291+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
76292+ const struct vfsmount *mnt);
76293+void gr_handle_delete(const ino_t ino, const dev_t dev);
76294+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
76295+ const struct vfsmount *mnt);
76296+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
76297+ const struct dentry *parent_dentry,
76298+ const struct vfsmount *parent_mnt,
76299+ const struct filename *from);
76300+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
76301+ const struct dentry *parent_dentry,
76302+ const struct vfsmount *parent_mnt,
76303+ const struct dentry *old_dentry,
76304+ const struct vfsmount *old_mnt, const struct filename *to);
76305+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
76306+int gr_acl_handle_rename(struct dentry *new_dentry,
76307+ struct dentry *parent_dentry,
76308+ const struct vfsmount *parent_mnt,
76309+ struct dentry *old_dentry,
76310+ struct inode *old_parent_inode,
76311+ struct vfsmount *old_mnt, const struct filename *newname);
76312+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
76313+ struct dentry *old_dentry,
76314+ struct dentry *new_dentry,
76315+ struct vfsmount *mnt, const __u8 replace);
76316+__u32 gr_check_link(const struct dentry *new_dentry,
76317+ const struct dentry *parent_dentry,
76318+ const struct vfsmount *parent_mnt,
76319+ const struct dentry *old_dentry,
76320+ const struct vfsmount *old_mnt);
76321+int gr_acl_handle_filldir(const struct file *file, const char *name,
76322+ const unsigned int namelen, const ino_t ino);
76323+
76324+__u32 gr_acl_handle_unix(const struct dentry *dentry,
76325+ const struct vfsmount *mnt);
76326+void gr_acl_handle_exit(void);
76327+void gr_acl_handle_psacct(struct task_struct *task, const long code);
76328+int gr_acl_handle_procpidmem(const struct task_struct *task);
76329+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
76330+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
76331+void gr_audit_ptrace(struct task_struct *task);
76332+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
76333+void gr_put_exec_file(struct task_struct *task);
76334+
76335+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
76336+
76337+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
76338+extern void gr_learn_resource(const struct task_struct *task, const int res,
76339+ const unsigned long wanted, const int gt);
76340+#else
76341+static inline void gr_learn_resource(const struct task_struct *task, const int res,
76342+ const unsigned long wanted, const int gt)
76343+{
76344+}
76345+#endif
76346+
76347+#ifdef CONFIG_GRKERNSEC_RESLOG
76348+extern void gr_log_resource(const struct task_struct *task, const int res,
76349+ const unsigned long wanted, const int gt);
76350+#else
76351+static inline void gr_log_resource(const struct task_struct *task, const int res,
76352+ const unsigned long wanted, const int gt)
76353+{
76354+}
76355+#endif
76356+
76357+#ifdef CONFIG_GRKERNSEC
76358+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
76359+void gr_handle_vm86(void);
76360+void gr_handle_mem_readwrite(u64 from, u64 to);
76361+
76362+void gr_log_badprocpid(const char *entry);
76363+
76364+extern int grsec_enable_dmesg;
76365+extern int grsec_disable_privio;
76366+
76367+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
76368+extern kgid_t grsec_proc_gid;
76369+#endif
76370+
76371+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
76372+extern int grsec_enable_chroot_findtask;
76373+#endif
76374+#ifdef CONFIG_GRKERNSEC_SETXID
76375+extern int grsec_enable_setxid;
76376+#endif
76377+#endif
76378+
76379+#endif
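A hypothetical call site illustrating the extern/stub split above: because an empty static inline is provided when the learning support is compiled out, callers invoke the hook unconditionally and the compiler discards the no-op entirely.

	static void example_rlimit_check(struct task_struct *task, unsigned long wanted)
	{
		/* no #ifdef needed at the call site; the stub version is a no-op */
		gr_learn_resource(task, RLIMIT_NOFILE, wanted, 1);
	}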
76380diff --git a/include/linux/grsock.h b/include/linux/grsock.h
76381new file mode 100644
76382index 0000000..e7ffaaf
76383--- /dev/null
76384+++ b/include/linux/grsock.h
76385@@ -0,0 +1,19 @@
76386+#ifndef __GRSOCK_H
76387+#define __GRSOCK_H
76388+
76389+extern void gr_attach_curr_ip(const struct sock *sk);
76390+extern int gr_handle_sock_all(const int family, const int type,
76391+ const int protocol);
76392+extern int gr_handle_sock_server(const struct sockaddr *sck);
76393+extern int gr_handle_sock_server_other(const struct sock *sck);
76394+extern int gr_handle_sock_client(const struct sockaddr *sck);
76395+extern int gr_search_connect(struct socket * sock,
76396+ struct sockaddr_in * addr);
76397+extern int gr_search_bind(struct socket * sock,
76398+ struct sockaddr_in * addr);
76399+extern int gr_search_listen(struct socket * sock);
76400+extern int gr_search_accept(struct socket * sock);
76401+extern int gr_search_socket(const int domain, const int type,
76402+ const int protocol);
76403+
76404+#endif
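A sketch of the assumed calling convention for the hooks above (gr_handle_* returning nonzero to deny, gr_search_* returning nonzero to allow); the wrapper function is hypothetical:

	static int example_socket_gate(int family, int type, int protocol)
	{
		if (gr_handle_sock_all(family, type, protocol))
			return -EACCES;		/* per-group socket policy denied it */
		if (!gr_search_socket(family, type, protocol))
			return -EACCES;		/* RBAC subject lacks this socket family/type */
		return 0;
	}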
76405diff --git a/include/linux/highmem.h b/include/linux/highmem.h
76406index 7fb31da..08b5114 100644
76407--- a/include/linux/highmem.h
76408+++ b/include/linux/highmem.h
76409@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
76410 kunmap_atomic(kaddr);
76411 }
76412
76413+static inline void sanitize_highpage(struct page *page)
76414+{
76415+ void *kaddr;
76416+ unsigned long flags;
76417+
76418+ local_irq_save(flags);
76419+ kaddr = kmap_atomic(page);
76420+ clear_page(kaddr);
76421+ kunmap_atomic(kaddr);
76422+ local_irq_restore(flags);
76423+}
76424+
76425 static inline void zero_user_segments(struct page *page,
76426 unsigned start1, unsigned end1,
76427 unsigned start2, unsigned end2)
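A hypothetical free-path caller of the new helper: sanitize_highpage() behaves like clear_highpage() but wraps the kmap_atomic/clear_page pair in local_irq_save/restore, so the scrub cannot be interleaved with an interrupt on the same CPU.

	static void example_scrub_and_free(struct page *page)
	{
		sanitize_highpage(page);	/* zero the page contents */
		__free_page(page);		/* then return it to the allocator */
	}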
76428diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
76429index 1c7b89a..7dda400 100644
76430--- a/include/linux/hwmon-sysfs.h
76431+++ b/include/linux/hwmon-sysfs.h
76432@@ -25,7 +25,8 @@
76433 struct sensor_device_attribute{
76434 struct device_attribute dev_attr;
76435 int index;
76436-};
76437+} __do_const;
76438+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
76439 #define to_sensor_dev_attr(_dev_attr) \
76440 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
76441
76442@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
76443 struct device_attribute dev_attr;
76444 u8 index;
76445 u8 nr;
76446-};
76447+} __do_const;
76448+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
76449 #define to_sensor_dev_attr_2(_dev_attr) \
76450 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
76451
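The __do_const/__no_const pairing above follows a pattern used throughout this patch: the structure itself becomes read-only once the constify plugin runs, while the _no_const typedef opts out for the rare instance that must stay writable. A hypothetical example:

	/* a driver that must patch its attribute at runtime uses the
	 * writable typedef; everything else gets the constified struct */
	static sensor_device_attribute_no_const example_attr;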
76452diff --git a/include/linux/i2c.h b/include/linux/i2c.h
76453index 2ab11dc..663a3f2 100644
76454--- a/include/linux/i2c.h
76455+++ b/include/linux/i2c.h
76456@@ -366,6 +366,7 @@ struct i2c_algorithm {
76457 /* To determine what the adapter supports */
76458 u32 (*functionality) (struct i2c_adapter *);
76459 };
76460+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
76461
76462 /**
76463 * struct i2c_bus_recovery_info - I2C bus recovery information
76464diff --git a/include/linux/i2o.h b/include/linux/i2o.h
76465index d23c3c2..eb63c81 100644
76466--- a/include/linux/i2o.h
76467+++ b/include/linux/i2o.h
76468@@ -565,7 +565,7 @@ struct i2o_controller {
76469 struct i2o_device *exec; /* Executive */
76470 #if BITS_PER_LONG == 64
76471 spinlock_t context_list_lock; /* lock for context_list */
76472- atomic_t context_list_counter; /* needed for unique contexts */
76473+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
76474 struct list_head context_list; /* list of context id's
76475 and pointers */
76476 #endif
76477diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
76478index aff7ad8..3942bbd 100644
76479--- a/include/linux/if_pppox.h
76480+++ b/include/linux/if_pppox.h
76481@@ -76,7 +76,7 @@ struct pppox_proto {
76482 int (*ioctl)(struct socket *sock, unsigned int cmd,
76483 unsigned long arg);
76484 struct module *owner;
76485-};
76486+} __do_const;
76487
76488 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
76489 extern void unregister_pppox_proto(int proto_num);
76490diff --git a/include/linux/init.h b/include/linux/init.h
76491index f1c27a71..7d6010e 100644
76492--- a/include/linux/init.h
76493+++ b/include/linux/init.h
76494@@ -39,9 +39,17 @@
76495 * Also note, that this data cannot be "const".
76496 */
76497
76498+#define add_init_latent_entropy __latent_entropy
76499+
76500+#ifdef CONFIG_MEMORY_HOTPLUG
76501+#define add_meminit_latent_entropy
76502+#else
76503+#define add_meminit_latent_entropy __latent_entropy
76504+#endif
76505+
76506 /* These are for everybody (although not all archs will actually
76507 discard it in modules) */
76508-#define __init __section(.init.text) __cold notrace
76509+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
76510 #define __initdata __section(.init.data)
76511 #define __initconst __constsection(.init.rodata)
76512 #define __exitdata __section(.exit.data)
76513@@ -102,7 +110,7 @@
76514 #define __cpuexitconst
76515
76516 /* Used for MEMORY_HOTPLUG */
76517-#define __meminit __section(.meminit.text) __cold notrace
76518+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
76519 #define __meminitdata __section(.meminit.data)
76520 #define __meminitconst __constsection(.meminit.rodata)
76521 #define __memexit __section(.memexit.text) __exitused __cold notrace
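A sketch of what the __init redefinition above means for ordinary code (the function is hypothetical): every __init function now carries __latent_entropy, so when the latent_entropy gcc plugin is enabled it instruments init-time control flow without per-function annotations.

	/* expands to: __section(.init.text) __cold notrace __latent_entropy */
	static int __init example_driver_init(void)
	{
		return 0;	/* instrumented automatically when the plugin is on */
	}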
76522diff --git a/include/linux/init_task.h b/include/linux/init_task.h
76523index 5cd0f09..c9f67cc 100644
76524--- a/include/linux/init_task.h
76525+++ b/include/linux/init_task.h
76526@@ -154,6 +154,12 @@ extern struct task_group root_task_group;
76527
76528 #define INIT_TASK_COMM "swapper"
76529
76530+#ifdef CONFIG_X86
76531+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
76532+#else
76533+#define INIT_TASK_THREAD_INFO
76534+#endif
76535+
76536 /*
76537 * INIT_TASK is used to set up the first task table, touch at
76538 * your own risk!. Base=0, limit=0x1fffff (=2MB)
76539@@ -193,6 +199,7 @@ extern struct task_group root_task_group;
76540 RCU_POINTER_INITIALIZER(cred, &init_cred), \
76541 .comm = INIT_TASK_COMM, \
76542 .thread = INIT_THREAD, \
76543+ INIT_TASK_THREAD_INFO \
76544 .fs = &init_fs, \
76545 .files = &init_files, \
76546 .signal = &init_signals, \
76547diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
76548index 5e865b5..71bd258 100644
76549--- a/include/linux/interrupt.h
76550+++ b/include/linux/interrupt.h
76551@@ -361,7 +361,7 @@ enum
76552 /* map softirq index to softirq name. update 'softirq_to_name' in
76553 * kernel/softirq.c when adding a new softirq.
76554 */
76555-extern char *softirq_to_name[NR_SOFTIRQS];
76556+extern const char * const softirq_to_name[NR_SOFTIRQS];
76557
76558 /* softirq mask and active fields moved to irq_cpustat_t in
76559 * asm/hardirq.h to get better cache usage. KAO
76560@@ -369,12 +369,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
76561
76562 struct softirq_action
76563 {
76564- void (*action)(struct softirq_action *);
76565-};
76566+ void (*action)(void);
76567+} __no_const;
76568
76569 asmlinkage void do_softirq(void);
76570 asmlinkage void __do_softirq(void);
76571-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
76572+extern void open_softirq(int nr, void (*action)(void));
76573 extern void softirq_init(void);
76574 extern void __raise_softirq_irqoff(unsigned int nr);
76575
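Under the changed prototype above, softirq handlers drop their unused softirq_action argument; a minimal sketch of a registration call site (the handler is hypothetical, reusing TASKLET_SOFTIRQ purely for illustration):

	static void example_softirq_action(void)
	{
		/* process work queued for this softirq */
	}

	static void example_register(void)
	{
		open_softirq(TASKLET_SOFTIRQ, example_softirq_action);
	}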
76576diff --git a/include/linux/iommu.h b/include/linux/iommu.h
76577index 7ea319e..f9e971d 100644
76578--- a/include/linux/iommu.h
76579+++ b/include/linux/iommu.h
76580@@ -129,7 +129,7 @@ struct iommu_ops {
76581 u32 (*domain_get_windows)(struct iommu_domain *domain);
76582
76583 unsigned long pgsize_bitmap;
76584-};
76585+} __do_const;
76586
76587 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
76588 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
76589diff --git a/include/linux/ioport.h b/include/linux/ioport.h
76590index 89b7c24..382af74 100644
76591--- a/include/linux/ioport.h
76592+++ b/include/linux/ioport.h
76593@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
76594 int adjust_resource(struct resource *res, resource_size_t start,
76595 resource_size_t size);
76596 resource_size_t resource_alignment(struct resource *res);
76597-static inline resource_size_t resource_size(const struct resource *res)
76598+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
76599 {
76600 return res->end - res->start + 1;
76601 }
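A worked example of why resource_size() is tagged __intentional_overflow(-1): for a resource spanning the entire address space, end - start + 1 wraps to 0, which the size_overflow plugin would otherwise report; the annotation marks the wrap as deliberate.

	static resource_size_t example_full_range_size(void)
	{
		struct resource full = { .start = 0, .end = ~(resource_size_t)0 };

		/* ~0 - 0 + 1 wraps to 0; intentional, hence the annotation */
		return resource_size(&full);
	}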
76602diff --git a/include/linux/irq.h b/include/linux/irq.h
76603index 56bb0dc..8ae94d62 100644
76604--- a/include/linux/irq.h
76605+++ b/include/linux/irq.h
76606@@ -333,7 +333,8 @@ struct irq_chip {
76607 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
76608
76609 unsigned long flags;
76610-};
76611+} __do_const;
76612+typedef struct irq_chip __no_const irq_chip_no_const;
76613
76614 /*
76615 * irq_chip specific flags
76616diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
76617index 0e5d9ec..46acb3a 100644
76618--- a/include/linux/irqchip/arm-gic.h
76619+++ b/include/linux/irqchip/arm-gic.h
76620@@ -59,9 +59,11 @@
76621
76622 #ifndef __ASSEMBLY__
76623
76624+#include <linux/irq.h>
76625+
76626 struct device_node;
76627
76628-extern struct irq_chip gic_arch_extn;
76629+extern irq_chip_no_const gic_arch_extn;
76630
76631 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
76632 u32 offset, struct device_node *);
76633diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
76634index d235e88..8ccbe74 100644
76635--- a/include/linux/jiffies.h
76636+++ b/include/linux/jiffies.h
76637@@ -292,14 +292,14 @@ extern unsigned long preset_lpj;
76638 /*
76639 * Convert various time units to each other:
76640 */
76641-extern unsigned int jiffies_to_msecs(const unsigned long j);
76642-extern unsigned int jiffies_to_usecs(const unsigned long j);
76643-extern unsigned long msecs_to_jiffies(const unsigned int m);
76644-extern unsigned long usecs_to_jiffies(const unsigned int u);
76645-extern unsigned long timespec_to_jiffies(const struct timespec *value);
76646+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
76647+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
76648+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
76649+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
76650+extern unsigned long timespec_to_jiffies(const struct timespec *value) __intentional_overflow(-1);
76651 extern void jiffies_to_timespec(const unsigned long jiffies,
76652 struct timespec *value);
76653-extern unsigned long timeval_to_jiffies(const struct timeval *value);
76654+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
76655 extern void jiffies_to_timeval(const unsigned long jiffies,
76656 struct timeval *value);
76657
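
Note: the jiffies converters are tagged because jiffies arithmetic wraps by design, and time_after()-style comparisons remain correct across the wrap, which is exactly the class of overflow the plugin must not report. Compilable illustration:

    #include <stdio.h>

    int main(void)
    {
        unsigned long a = (unsigned long)-3;    /* jiffies just before the wrap */
        unsigned long b = a + 10;               /* wraps past zero */
        /* the signed view of the difference still orders the two correctly */
        printf("elapsed = %ld\n", (long)(b - a));   /* prints 10 */
        return 0;
    }
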
76658diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
76659index 6883e19..e854fcb 100644
76660--- a/include/linux/kallsyms.h
76661+++ b/include/linux/kallsyms.h
76662@@ -15,7 +15,8 @@
76663
76664 struct module;
76665
76666-#ifdef CONFIG_KALLSYMS
76667+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
76668+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
76669 /* Lookup the address for a symbol. Returns 0 if not found. */
76670 unsigned long kallsyms_lookup_name(const char *name);
76671
76672@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
76673 /* Stupid that this does nothing, but I didn't create this mess. */
76674 #define __print_symbol(fmt, addr)
76675 #endif /*CONFIG_KALLSYMS*/
76676+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
76677+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
76678+extern unsigned long kallsyms_lookup_name(const char *name);
76679+extern void __print_symbol(const char *fmt, unsigned long address);
76680+extern int sprint_backtrace(char *buffer, unsigned long address);
76681+extern int sprint_symbol(char *buffer, unsigned long address);
76682+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
76683+const char *kallsyms_lookup(unsigned long addr,
76684+ unsigned long *symbolsize,
76685+ unsigned long *offset,
76686+ char **modname, char *namebuf);
76687+extern int kallsyms_lookup_size_offset(unsigned long addr,
76688+ unsigned long *symbolsize,
76689+ unsigned long *offset);
76690+#endif
76691
76692 /* This macro allows us to keep printk typechecking */
76693 static __printf(1, 2)
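
Note: per the comment added in the #else branch, the few files that must keep real symbol lookups under GRKERNSEC_HIDESYM opt in explicitly; every other includer gets the stubs, keeping kernel addresses out of reach. Hedged usage sketch, with placement inferred from that comment:

    /* e.g. near the top of kallsyms.c or arch/x86/kernel/dumpstack.c */
    #define __INCLUDED_BY_HIDESYM 1
    #include <linux/kallsyms.h>
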
76694diff --git a/include/linux/key-type.h b/include/linux/key-type.h
76695index 518a53a..5e28358 100644
76696--- a/include/linux/key-type.h
76697+++ b/include/linux/key-type.h
76698@@ -125,7 +125,7 @@ struct key_type {
76699 /* internal fields */
76700 struct list_head link; /* link in types list */
76701 struct lock_class_key lock_class; /* key->sem lock class */
76702-};
76703+} __do_const;
76704
76705 extern struct key_type key_type_keyring;
76706
76707diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
76708index c6e091b..a940adf 100644
76709--- a/include/linux/kgdb.h
76710+++ b/include/linux/kgdb.h
76711@@ -52,7 +52,7 @@ extern int kgdb_connected;
76712 extern int kgdb_io_module_registered;
76713
76714 extern atomic_t kgdb_setting_breakpoint;
76715-extern atomic_t kgdb_cpu_doing_single_step;
76716+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
76717
76718 extern struct task_struct *kgdb_usethread;
76719 extern struct task_struct *kgdb_contthread;
76720@@ -254,7 +254,7 @@ struct kgdb_arch {
76721 void (*correct_hw_break)(void);
76722
76723 void (*enable_nmi)(bool on);
76724-};
76725+} __do_const;
76726
76727 /**
76728 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
76729@@ -279,7 +279,7 @@ struct kgdb_io {
76730 void (*pre_exception) (void);
76731 void (*post_exception) (void);
76732 int is_console;
76733-};
76734+} __do_const;
76735
76736 extern struct kgdb_arch arch_kgdb_ops;
76737
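
Note: atomic_unchecked_t, used here for kgdb_cpu_doing_single_step, is the PaX REFCOUNT companion type: same storage as atomic_t, but its operations skip the overflow trap, for counters and cursors where wraparound is harmless. A user-space model, assuming the layouts match (the per-arch asm headers in this patch keep them identical):

    /* assumption for the sketch: layout identical to atomic_t */
    typedef struct { int counter; } atomic_unchecked_t;

    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        __sync_fetch_and_add(&v->counter, 1);   /* no overflow trap emitted */
    }

    static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
    {
        return v->counter;
    }
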
76738diff --git a/include/linux/kmod.h b/include/linux/kmod.h
76739index 0555cc6..40116ce 100644
76740--- a/include/linux/kmod.h
76741+++ b/include/linux/kmod.h
76742@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
76743 * usually useless though. */
76744 extern __printf(2, 3)
76745 int __request_module(bool wait, const char *name, ...);
76746+extern __printf(3, 4)
76747+int ___request_module(bool wait, char *param_name, const char *name, ...);
76748 #define request_module(mod...) __request_module(true, mod)
76749 #define request_module_nowait(mod...) __request_module(false, mod)
76750 #define try_then_request_module(x, mod...) \
76751@@ -57,6 +59,9 @@ struct subprocess_info {
76752 struct work_struct work;
76753 struct completion *complete;
76754 char *path;
76755+#ifdef CONFIG_GRKERNSEC
76756+ char *origpath;
76757+#endif
76758 char **argv;
76759 char **envp;
76760 int wait;
76761diff --git a/include/linux/kobject.h b/include/linux/kobject.h
76762index de6dcbcc..4735f88 100644
76763--- a/include/linux/kobject.h
76764+++ b/include/linux/kobject.h
76765@@ -115,7 +115,7 @@ struct kobj_type {
76766 struct attribute **default_attrs;
76767 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
76768 const void *(*namespace)(struct kobject *kobj);
76769-};
76770+} __do_const;
76771
76772 struct kobj_uevent_env {
76773 char *envp[UEVENT_NUM_ENVP];
76774@@ -138,6 +138,7 @@ struct kobj_attribute {
76775 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
76776 const char *buf, size_t count);
76777 };
76778+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
76779
76780 extern const struct sysfs_ops kobj_sysfs_ops;
76781
76782diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
76783index df32d25..fb52e27 100644
76784--- a/include/linux/kobject_ns.h
76785+++ b/include/linux/kobject_ns.h
76786@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
76787 const void *(*netlink_ns)(struct sock *sk);
76788 const void *(*initial_ns)(void);
76789 void (*drop_ns)(void *);
76790-};
76791+} __do_const;
76792
76793 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
76794 int kobj_ns_type_registered(enum kobj_ns_type type);
76795diff --git a/include/linux/kref.h b/include/linux/kref.h
76796index 484604d..0f6c5b6 100644
76797--- a/include/linux/kref.h
76798+++ b/include/linux/kref.h
76799@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
76800 static inline int kref_sub(struct kref *kref, unsigned int count,
76801 void (*release)(struct kref *kref))
76802 {
76803- WARN_ON(release == NULL);
76804+ BUG_ON(release == NULL);
76805
76806 if (atomic_sub_and_test((int) count, &kref->refcount)) {
76807 release(kref);
76808diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
76809index 0fbbc7a..db081e3 100644
76810--- a/include/linux/kvm_host.h
76811+++ b/include/linux/kvm_host.h
76812@@ -458,7 +458,7 @@ static inline void kvm_irqfd_exit(void)
76813 {
76814 }
76815 #endif
76816-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
76817+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
76818 struct module *module);
76819 void kvm_exit(void);
76820
76821@@ -632,7 +632,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
76822 struct kvm_guest_debug *dbg);
76823 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
76824
76825-int kvm_arch_init(void *opaque);
76826+int kvm_arch_init(const void *opaque);
76827 void kvm_arch_exit(void);
76828
76829 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
76830diff --git a/include/linux/libata.h b/include/linux/libata.h
76831index 0e23c26..6ad8c33 100644
76832--- a/include/linux/libata.h
76833+++ b/include/linux/libata.h
76834@@ -972,7 +972,7 @@ struct ata_port_operations {
76835 * fields must be pointers.
76836 */
76837 const struct ata_port_operations *inherits;
76838-};
76839+} __do_const;
76840
76841 struct ata_port_info {
76842 unsigned long flags;
76843diff --git a/include/linux/linkage.h b/include/linux/linkage.h
76844index d3e8ad2..a949f68 100644
76845--- a/include/linux/linkage.h
76846+++ b/include/linux/linkage.h
76847@@ -31,6 +31,7 @@
76848 #endif
76849
76850 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
76851+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
76852 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
76853
76854 /*
76855diff --git a/include/linux/list.h b/include/linux/list.h
76856index f4d8a2f..38e6e46 100644
76857--- a/include/linux/list.h
76858+++ b/include/linux/list.h
76859@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
76860 extern void list_del(struct list_head *entry);
76861 #endif
76862
76863+extern void __pax_list_add(struct list_head *new,
76864+ struct list_head *prev,
76865+ struct list_head *next);
76866+static inline void pax_list_add(struct list_head *new, struct list_head *head)
76867+{
76868+ __pax_list_add(new, head, head->next);
76869+}
76870+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
76871+{
76872+ __pax_list_add(new, head->prev, head);
76873+}
76874+extern void pax_list_del(struct list_head *entry);
76875+
76876 /**
76877 * list_replace - replace old entry by new one
76878 * @old : the element to be replaced
76879@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
76880 INIT_LIST_HEAD(entry);
76881 }
76882
76883+extern void pax_list_del_init(struct list_head *entry);
76884+
76885 /**
76886 * list_move - delete from one list and add as another's head
76887 * @list: the entry to move
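
Note: the pax_list_* variants exist because objects constified elsewhere in this patch can sit in read-only memory yet still need occasional relinking. A hedged sketch of the likely shape of __pax_list_add, assuming the pax_open_kernel()/pax_close_kernel() write-window helpers that KERNEXEC provides (the real body lives elsewhere in the patch and adds sanity checks):

    void __pax_list_add(struct list_head *new, struct list_head *prev,
                        struct list_head *next)
    {
        pax_open_kernel();      /* briefly allow writes to read-only data */
        next->prev = new;
        new->next  = next;
        new->prev  = prev;
        prev->next = new;
        pax_close_kernel();
    }
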
76888diff --git a/include/linux/math64.h b/include/linux/math64.h
76889index 69ed5f5..243ed51 100644
76890--- a/include/linux/math64.h
76891+++ b/include/linux/math64.h
76892@@ -15,7 +15,7 @@
76893 * This is commonly provided by 32bit archs to provide an optimized 64bit
76894 * divide.
76895 */
76896-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
76897+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
76898 {
76899 *remainder = dividend % divisor;
76900 return dividend / divisor;
76901@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
76902 /**
76903 * div64_u64 - unsigned 64bit divide with 64bit divisor
76904 */
76905-static inline u64 div64_u64(u64 dividend, u64 divisor)
76906+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
76907 {
76908 return dividend / divisor;
76909 }
76910@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
76911 #define div64_ul(x, y) div_u64((x), (y))
76912
76913 #ifndef div_u64_rem
76914-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
76915+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
76916 {
76917 *remainder = do_div(dividend, divisor);
76918 return dividend;
76919@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
76920 #endif
76921
76922 #ifndef div64_u64
76923-extern u64 div64_u64(u64 dividend, u64 divisor);
76924+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
76925 #endif
76926
76927 #ifndef div64_s64
76928@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
76929 * divide.
76930 */
76931 #ifndef div_u64
76932-static inline u64 div_u64(u64 dividend, u32 divisor)
76933+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
76934 {
76935 u32 remainder;
76936 return div_u64_rem(dividend, divisor, &remainder);
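
Note: div_u64_rem() is the primitive that 32-bit architectures optimize; one 64/32 division yields both quotient and remainder, and the helpers above merely forward to it. User-space model:

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t div_u64_rem(uint64_t dividend, uint32_t divisor,
                                uint32_t *remainder)
    {
        *remainder = dividend % divisor;
        return dividend / divisor;
    }

    int main(void)
    {
        uint32_t r;
        uint64_t q = div_u64_rem(1000000007ULL, 3, &r);
        printf("%llu rem %u\n", (unsigned long long)q, r); /* 333333335 rem 2 */
        return 0;
    }
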
76937diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
76938index da6716b..2e31db3 100644
76939--- a/include/linux/mempolicy.h
76940+++ b/include/linux/mempolicy.h
76941@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
76942 }
76943
76944 #define vma_policy(vma) ((vma)->vm_policy)
76945+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
76946+{
76947+ vma->vm_policy = pol;
76948+}
76949
76950 static inline void mpol_get(struct mempolicy *pol)
76951 {
76952@@ -240,6 +244,9 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
76953 }
76954
76955 #define vma_policy(vma) NULL
76956+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
76957+{
76958+}
76959
76960 static inline int
76961 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
76962diff --git a/include/linux/mm.h b/include/linux/mm.h
76963index 8b6e55e..c4edf39 100644
76964--- a/include/linux/mm.h
76965+++ b/include/linux/mm.h
76966@@ -113,6 +113,11 @@ extern unsigned int kobjsize(const void *objp);
76967 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
76968 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
76969 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
76970+
76971+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
76972+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
76973+#endif
76974+
76975 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
76976
76977 #ifdef CONFIG_MEM_SOFT_DIRTY
76978@@ -215,8 +220,8 @@ struct vm_operations_struct {
76979 /* called by access_process_vm when get_user_pages() fails, typically
76980 * for use by special VMAs that can switch between memory and hardware
76981 */
76982- int (*access)(struct vm_area_struct *vma, unsigned long addr,
76983- void *buf, int len, int write);
76984+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
76985+ void *buf, size_t len, int write);
76986 #ifdef CONFIG_NUMA
76987 /*
76988 * set_policy() op must add a reference to any non-NULL @new mempolicy
76989@@ -246,6 +251,7 @@ struct vm_operations_struct {
76990 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
76991 unsigned long size, pgoff_t pgoff);
76992 };
76993+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
76994
76995 struct mmu_gather;
76996 struct inode;
76997@@ -977,8 +983,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
76998 unsigned long *pfn);
76999 int follow_phys(struct vm_area_struct *vma, unsigned long address,
77000 unsigned int flags, unsigned long *prot, resource_size_t *phys);
77001-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
77002- void *buf, int len, int write);
77003+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
77004+ void *buf, size_t len, int write);
77005
77006 static inline void unmap_shared_mapping_range(struct address_space *mapping,
77007 loff_t const holebegin, loff_t const holelen)
77008@@ -1017,9 +1023,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
77009 }
77010 #endif
77011
77012-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
77013-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
77014- void *buf, int len, int write);
77015+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
77016+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
77017+ void *buf, size_t len, int write);
77018
77019 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
77020 unsigned long start, unsigned long nr_pages,
77021@@ -1051,34 +1057,6 @@ int set_page_dirty(struct page *page);
77022 int set_page_dirty_lock(struct page *page);
77023 int clear_page_dirty_for_io(struct page *page);
77024
77025-/* Is the vma a continuation of the stack vma above it? */
77026-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
77027-{
77028- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
77029-}
77030-
77031-static inline int stack_guard_page_start(struct vm_area_struct *vma,
77032- unsigned long addr)
77033-{
77034- return (vma->vm_flags & VM_GROWSDOWN) &&
77035- (vma->vm_start == addr) &&
77036- !vma_growsdown(vma->vm_prev, addr);
77037-}
77038-
77039-/* Is the vma a continuation of the stack vma below it? */
77040-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
77041-{
77042- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
77043-}
77044-
77045-static inline int stack_guard_page_end(struct vm_area_struct *vma,
77046- unsigned long addr)
77047-{
77048- return (vma->vm_flags & VM_GROWSUP) &&
77049- (vma->vm_end == addr) &&
77050- !vma_growsup(vma->vm_next, addr);
77051-}
77052-
77053 extern pid_t
77054 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
77055
77056@@ -1178,6 +1156,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
77057 }
77058 #endif
77059
77060+#ifdef CONFIG_MMU
77061+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
77062+#else
77063+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
77064+{
77065+ return __pgprot(0);
77066+}
77067+#endif
77068+
77069 int vma_wants_writenotify(struct vm_area_struct *vma);
77070
77071 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
77072@@ -1196,8 +1183,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
77073 {
77074 return 0;
77075 }
77076+
77077+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
77078+ unsigned long address)
77079+{
77080+ return 0;
77081+}
77082 #else
77083 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
77084+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
77085 #endif
77086
77087 #ifdef __PAGETABLE_PMD_FOLDED
77088@@ -1206,8 +1200,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
77089 {
77090 return 0;
77091 }
77092+
77093+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
77094+ unsigned long address)
77095+{
77096+ return 0;
77097+}
77098 #else
77099 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
77100+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
77101 #endif
77102
77103 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
77104@@ -1225,11 +1226,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
77105 NULL: pud_offset(pgd, address);
77106 }
77107
77108+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
77109+{
77110+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
77111+ NULL: pud_offset(pgd, address);
77112+}
77113+
77114 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
77115 {
77116 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
77117 NULL: pmd_offset(pud, address);
77118 }
77119+
77120+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
77121+{
77122+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
77123+ NULL: pmd_offset(pud, address);
77124+}
77125 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
77126
77127 #if USE_SPLIT_PTLOCKS
77128@@ -1517,7 +1530,7 @@ extern int install_special_mapping(struct mm_struct *mm,
77129 unsigned long addr, unsigned long len,
77130 unsigned long flags, struct page **pages);
77131
77132-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
77133+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
77134
77135 extern unsigned long mmap_region(struct file *file, unsigned long addr,
77136 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
77137@@ -1525,6 +1538,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77138 unsigned long len, unsigned long prot, unsigned long flags,
77139 unsigned long pgoff, unsigned long *populate);
77140 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
77141+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
77142
77143 #ifdef CONFIG_MMU
77144 extern int __mm_populate(unsigned long addr, unsigned long len,
77145@@ -1553,10 +1567,11 @@ struct vm_unmapped_area_info {
77146 unsigned long high_limit;
77147 unsigned long align_mask;
77148 unsigned long align_offset;
77149+ unsigned long threadstack_offset;
77150 };
77151
77152-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
77153-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
77154+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
77155+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
77156
77157 /*
77158 * Search for an unmapped address range.
77159@@ -1568,7 +1583,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
77160 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
77161 */
77162 static inline unsigned long
77163-vm_unmapped_area(struct vm_unmapped_area_info *info)
77164+vm_unmapped_area(const struct vm_unmapped_area_info *info)
77165 {
77166 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
77167 return unmapped_area(info);
77168@@ -1631,6 +1646,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
77169 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
77170 struct vm_area_struct **pprev);
77171
77172+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
77173+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
77174+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
77175+
77176 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
77177 NULL if none. Assume start_addr < end_addr. */
77178 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
77179@@ -1659,15 +1678,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
77180 return vma;
77181 }
77182
77183-#ifdef CONFIG_MMU
77184-pgprot_t vm_get_page_prot(unsigned long vm_flags);
77185-#else
77186-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
77187-{
77188- return __pgprot(0);
77189-}
77190-#endif
77191-
77192 #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
77193 unsigned long change_prot_numa(struct vm_area_struct *vma,
77194 unsigned long start, unsigned long end);
77195@@ -1719,6 +1729,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
77196 static inline void vm_stat_account(struct mm_struct *mm,
77197 unsigned long flags, struct file *file, long pages)
77198 {
77199+
77200+#ifdef CONFIG_PAX_RANDMMAP
77201+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
77202+#endif
77203+
77204 mm->total_vm += pages;
77205 }
77206 #endif /* CONFIG_PROC_FS */
77207@@ -1800,7 +1815,7 @@ extern int unpoison_memory(unsigned long pfn);
77208 extern int sysctl_memory_failure_early_kill;
77209 extern int sysctl_memory_failure_recovery;
77210 extern void shake_page(struct page *p, int access);
77211-extern atomic_long_t num_poisoned_pages;
77212+extern atomic_long_unchecked_t num_poisoned_pages;
77213 extern int soft_offline_page(struct page *page, int flags);
77214
77215 extern void dump_page(struct page *page);
77216@@ -1837,5 +1852,11 @@ void __init setup_nr_node_ids(void);
77217 static inline void setup_nr_node_ids(void) {}
77218 #endif
77219
77220+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
77221+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
77222+#else
77223+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
77224+#endif
77225+
77226 #endif /* __KERNEL__ */
77227 #endif /* _LINUX_MM_H */
77228diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
77229index d9851ee..619492d 100644
77230--- a/include/linux/mm_types.h
77231+++ b/include/linux/mm_types.h
77232@@ -289,6 +289,8 @@ struct vm_area_struct {
77233 #ifdef CONFIG_NUMA
77234 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
77235 #endif
77236+
77237+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
77238 };
77239
77240 struct core_thread {
77241@@ -436,6 +438,24 @@ struct mm_struct {
77242 int first_nid;
77243 #endif
77244 struct uprobes_state uprobes_state;
77245+
77246+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
77247+ unsigned long pax_flags;
77248+#endif
77249+
77250+#ifdef CONFIG_PAX_DLRESOLVE
77251+ unsigned long call_dl_resolve;
77252+#endif
77253+
77254+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
77255+ unsigned long call_syscall;
77256+#endif
77257+
77258+#ifdef CONFIG_PAX_ASLR
77259+ unsigned long delta_mmap; /* randomized offset */
77260+ unsigned long delta_stack; /* randomized offset */
77261+#endif
77262+
77263 };
77264
77265 /* first nid will either be a valid NID or one of these values */
77266diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
77267index c5d5278..f0b68c8 100644
77268--- a/include/linux/mmiotrace.h
77269+++ b/include/linux/mmiotrace.h
77270@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
77271 /* Called from ioremap.c */
77272 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
77273 void __iomem *addr);
77274-extern void mmiotrace_iounmap(volatile void __iomem *addr);
77275+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
77276
77277 /* For anyone to insert markers. Remember trailing newline. */
77278 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
77279@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
77280 {
77281 }
77282
77283-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
77284+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
77285 {
77286 }
77287
77288diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
77289index bd791e4..8617c34f 100644
77290--- a/include/linux/mmzone.h
77291+++ b/include/linux/mmzone.h
77292@@ -396,7 +396,7 @@ struct zone {
77293 unsigned long flags; /* zone flags, see below */
77294
77295 /* Zone statistics */
77296- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
77297+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
77298
77299 /*
77300 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
77301diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
77302index 45e9214..a7227d6 100644
77303--- a/include/linux/mod_devicetable.h
77304+++ b/include/linux/mod_devicetable.h
77305@@ -13,7 +13,7 @@
77306 typedef unsigned long kernel_ulong_t;
77307 #endif
77308
77309-#define PCI_ANY_ID (~0)
77310+#define PCI_ANY_ID ((__u16)~0)
77311
77312 struct pci_device_id {
77313 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
77314@@ -139,7 +139,7 @@ struct usb_device_id {
77315 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
77316 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
77317
77318-#define HID_ANY_ID (~0)
77319+#define HID_ANY_ID (~0U)
77320 #define HID_BUS_ANY 0xffff
77321 #define HID_GROUP_ANY 0x0000
77322
77323@@ -467,7 +467,7 @@ struct dmi_system_id {
77324 const char *ident;
77325 struct dmi_strmatch matches[4];
77326 void *driver_data;
77327-};
77328+} __do_const;
77329 /*
77330 * struct dmi_device_id appears during expansion of
77331 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
77332diff --git a/include/linux/module.h b/include/linux/module.h
77333index 05f2447..2aee07c 100644
77334--- a/include/linux/module.h
77335+++ b/include/linux/module.h
77336@@ -17,9 +17,11 @@
77337 #include <linux/moduleparam.h>
77338 #include <linux/tracepoint.h>
77339 #include <linux/export.h>
77340+#include <linux/fs.h>
77341
77342 #include <linux/percpu.h>
77343 #include <asm/module.h>
77344+#include <asm/pgtable.h>
77345
77346 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
77347 #define MODULE_SIG_STRING "~Module signature appended~\n"
77348@@ -55,12 +57,13 @@ struct module_attribute {
77349 int (*test)(struct module *);
77350 void (*free)(struct module *);
77351 };
77352+typedef struct module_attribute __no_const module_attribute_no_const;
77353
77354 struct module_version_attribute {
77355 struct module_attribute mattr;
77356 const char *module_name;
77357 const char *version;
77358-} __attribute__ ((__aligned__(sizeof(void *))));
77359+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
77360
77361 extern ssize_t __modver_version_show(struct module_attribute *,
77362 struct module_kobject *, char *);
77363@@ -238,7 +241,7 @@ struct module
77364
77365 /* Sysfs stuff. */
77366 struct module_kobject mkobj;
77367- struct module_attribute *modinfo_attrs;
77368+ module_attribute_no_const *modinfo_attrs;
77369 const char *version;
77370 const char *srcversion;
77371 struct kobject *holders_dir;
77372@@ -287,19 +290,16 @@ struct module
77373 int (*init)(void);
77374
77375 /* If this is non-NULL, vfree after init() returns */
77376- void *module_init;
77377+ void *module_init_rx, *module_init_rw;
77378
77379 /* Here is the actual code + data, vfree'd on unload. */
77380- void *module_core;
77381+ void *module_core_rx, *module_core_rw;
77382
77383 /* Here are the sizes of the init and core sections */
77384- unsigned int init_size, core_size;
77385+ unsigned int init_size_rw, core_size_rw;
77386
77387 /* The size of the executable code in each section. */
77388- unsigned int init_text_size, core_text_size;
77389-
77390- /* Size of RO sections of the module (text+rodata) */
77391- unsigned int init_ro_size, core_ro_size;
77392+ unsigned int init_size_rx, core_size_rx;
77393
77394 /* Arch-specific module values */
77395 struct mod_arch_specific arch;
77396@@ -355,6 +355,10 @@ struct module
77397 #ifdef CONFIG_EVENT_TRACING
77398 struct ftrace_event_call **trace_events;
77399 unsigned int num_trace_events;
77400+ struct file_operations trace_id;
77401+ struct file_operations trace_enable;
77402+ struct file_operations trace_format;
77403+ struct file_operations trace_filter;
77404 #endif
77405 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
77406 unsigned int num_ftrace_callsites;
77407@@ -402,16 +406,46 @@ bool is_module_address(unsigned long addr);
77408 bool is_module_percpu_address(unsigned long addr);
77409 bool is_module_text_address(unsigned long addr);
77410
77411+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
77412+{
77413+
77414+#ifdef CONFIG_PAX_KERNEXEC
77415+ if (ktla_ktva(addr) >= (unsigned long)start &&
77416+ ktla_ktva(addr) < (unsigned long)start + size)
77417+ return 1;
77418+#endif
77419+
77420+ return ((void *)addr >= start && (void *)addr < start + size);
77421+}
77422+
77423+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
77424+{
77425+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
77426+}
77427+
77428+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
77429+{
77430+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
77431+}
77432+
77433+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
77434+{
77435+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
77436+}
77437+
77438+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
77439+{
77440+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
77441+}
77442+
77443 static inline int within_module_core(unsigned long addr, const struct module *mod)
77444 {
77445- return (unsigned long)mod->module_core <= addr &&
77446- addr < (unsigned long)mod->module_core + mod->core_size;
77447+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
77448 }
77449
77450 static inline int within_module_init(unsigned long addr, const struct module *mod)
77451 {
77452- return (unsigned long)mod->module_init <= addr &&
77453- addr < (unsigned long)mod->module_init + mod->init_size;
77454+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
77455 }
77456
77457 /* Search for module by name: must hold module_mutex. */
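
Note: splitting module_core/module_init into _rx/_rw halves is the KERNEXEC module layout: code plus rodata are allocated read-execute, data read-write no-exec, and the old membership tests become the union of both ranges (plus the ktla_ktva() translation under KERNEXEC). A user-space model of the new check:

    #include <stdbool.h>
    #include <stddef.h>

    struct mod_layout {
        void *core_rx; size_t size_rx;   /* text + rodata */
        void *core_rw; size_t size_rw;   /* data + bss    */
    };

    static bool within(unsigned long addr, void *start, size_t size)
    {
        return (char *)addr >= (char *)start &&
               (char *)addr <  (char *)start + size;
    }

    static bool within_module_core(unsigned long addr,
                                   const struct mod_layout *m)
    {
        return within(addr, m->core_rx, m->size_rx) ||
               within(addr, m->core_rw, m->size_rw);
    }
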
77458diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
77459index 560ca53..ef621ef 100644
77460--- a/include/linux/moduleloader.h
77461+++ b/include/linux/moduleloader.h
77462@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
77463 sections. Returns NULL on failure. */
77464 void *module_alloc(unsigned long size);
77465
77466+#ifdef CONFIG_PAX_KERNEXEC
77467+void *module_alloc_exec(unsigned long size);
77468+#else
77469+#define module_alloc_exec(x) module_alloc(x)
77470+#endif
77471+
77472 /* Free memory returned from module_alloc. */
77473 void module_free(struct module *mod, void *module_region);
77474
77475+#ifdef CONFIG_PAX_KERNEXEC
77476+void module_free_exec(struct module *mod, void *module_region);
77477+#else
77478+#define module_free_exec(x, y) module_free((x), (y))
77479+#endif
77480+
77481 /*
77482 * Apply the given relocation to the (simplified) ELF. Return -error
77483 * or 0.
77484@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
77485 unsigned int relsec,
77486 struct module *me)
77487 {
77488+#ifdef CONFIG_MODULES
77489 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
77490+#endif
77491 return -ENOEXEC;
77492 }
77493 #endif
77494@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
77495 unsigned int relsec,
77496 struct module *me)
77497 {
77498+#ifdef CONFIG_MODULES
77499 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
77500+#endif
77501 return -ENOEXEC;
77502 }
77503 #endif
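
Note: with PAX_KERNEXEC the loader draws the two halves from separate pools; without it both macros collapse to module_alloc()/module_free(), so callers need no #ifdef. A hedged sketch of the pairing implied by the module.h hunks earlier (field names taken from there; not the loader's literal code):

    mod->module_core_rx = module_alloc_exec(mod->core_size_rx); /* RX pool */
    mod->module_core_rw = module_alloc(mod->core_size_rw);      /* RW, NX  */
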
77504diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
77505index c3eb102..073c4a6 100644
77506--- a/include/linux/moduleparam.h
77507+++ b/include/linux/moduleparam.h
77508@@ -295,7 +295,7 @@ static inline void __kernel_param_unlock(void)
77509 * @len is usually just sizeof(string).
77510 */
77511 #define module_param_string(name, string, len, perm) \
77512- static const struct kparam_string __param_string_##name \
77513+ static const struct kparam_string __param_string_##name __used \
77514 = { len, string }; \
77515 __module_param_call(MODULE_PARAM_PREFIX, name, \
77516 &param_ops_string, \
77517@@ -434,7 +434,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
77518 */
77519 #define module_param_array_named(name, array, type, nump, perm) \
77520 param_check_##type(name, &(array)[0]); \
77521- static const struct kparam_array __param_arr_##name \
77522+ static const struct kparam_array __param_arr_##name __used \
77523 = { .max = ARRAY_SIZE(array), .num = nump, \
77524 .ops = &param_ops_##type, \
77525 .elemsize = sizeof(array[0]), .elem = array }; \
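
Note: the __used additions matter once the param structs are constified: their only references are emitted into linker sections behind __module_param_call, which gcc cannot see, so without the attribute the now-const objects could be discarded as dead. Minimal illustration:

    #define __used __attribute__((used))

    /* survives in the object file although no C code names it again */
    static const int magic __used = 42;
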
77526diff --git a/include/linux/namei.h b/include/linux/namei.h
77527index 8e47bc7..c70fd73 100644
77528--- a/include/linux/namei.h
77529+++ b/include/linux/namei.h
77530@@ -19,7 +19,7 @@ struct nameidata {
77531 unsigned seq;
77532 int last_type;
77533 unsigned depth;
77534- char *saved_names[MAX_NESTED_LINKS + 1];
77535+ const char *saved_names[MAX_NESTED_LINKS + 1];
77536 };
77537
77538 /*
77539@@ -83,12 +83,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
77540
77541 extern void nd_jump_link(struct nameidata *nd, struct path *path);
77542
77543-static inline void nd_set_link(struct nameidata *nd, char *path)
77544+static inline void nd_set_link(struct nameidata *nd, const char *path)
77545 {
77546 nd->saved_names[nd->depth] = path;
77547 }
77548
77549-static inline char *nd_get_link(struct nameidata *nd)
77550+static inline const char *nd_get_link(const struct nameidata *nd)
77551 {
77552 return nd->saved_names[nd->depth];
77553 }
77554diff --git a/include/linux/net.h b/include/linux/net.h
77555index 8bd9d92..08b1c20 100644
77556--- a/include/linux/net.h
77557+++ b/include/linux/net.h
77558@@ -191,7 +191,7 @@ struct net_proto_family {
77559 int (*create)(struct net *net, struct socket *sock,
77560 int protocol, int kern);
77561 struct module *owner;
77562-};
77563+} __do_const;
77564
77565 struct iovec;
77566 struct kvec;
77567diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
77568index 25f5d2d1..5cf2120 100644
77569--- a/include/linux/netdevice.h
77570+++ b/include/linux/netdevice.h
77571@@ -1098,6 +1098,7 @@ struct net_device_ops {
77572 sa_family_t sa_family,
77573 __be16 port);
77574 };
77575+typedef struct net_device_ops __no_const net_device_ops_no_const;
77576
77577 /*
77578 * The DEVICE structure.
77579@@ -1169,7 +1170,7 @@ struct net_device {
77580 int iflink;
77581
77582 struct net_device_stats stats;
77583- atomic_long_t rx_dropped; /* dropped packets by core network
77584+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
77585 * Do not use this in drivers.
77586 */
77587
77588diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
77589index 708fe72ab9..77084a3 100644
77590--- a/include/linux/netfilter.h
77591+++ b/include/linux/netfilter.h
77592@@ -82,7 +82,7 @@ struct nf_sockopt_ops {
77593 #endif
77594 /* Use the module struct to lock set/get code in place */
77595 struct module *owner;
77596-};
77597+} __do_const;
77598
77599 /* Function to register/unregister hook points. */
77600 int nf_register_hook(struct nf_hook_ops *reg);
77601diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
77602index cadb740..d7c37c0 100644
77603--- a/include/linux/netfilter/nfnetlink.h
77604+++ b/include/linux/netfilter/nfnetlink.h
77605@@ -16,7 +16,7 @@ struct nfnl_callback {
77606 const struct nlattr * const cda[]);
77607 const struct nla_policy *policy; /* netlink attribute policy */
77608 const u_int16_t attr_count; /* number of nlattr's */
77609-};
77610+} __do_const;
77611
77612 struct nfnetlink_subsystem {
77613 const char *name;
77614diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
77615new file mode 100644
77616index 0000000..33f4af8
77617--- /dev/null
77618+++ b/include/linux/netfilter/xt_gradm.h
77619@@ -0,0 +1,9 @@
77620+#ifndef _LINUX_NETFILTER_XT_GRADM_H
77621+#define _LINUX_NETFILTER_XT_GRADM_H 1
77622+
77623+struct xt_gradm_mtinfo {
77624+ __u16 flags;
77625+ __u16 invflags;
77626+};
77627+
77628+#endif
77629diff --git a/include/linux/nls.h b/include/linux/nls.h
77630index 5dc635f..35f5e11 100644
77631--- a/include/linux/nls.h
77632+++ b/include/linux/nls.h
77633@@ -31,7 +31,7 @@ struct nls_table {
77634 const unsigned char *charset2upper;
77635 struct module *owner;
77636 struct nls_table *next;
77637-};
77638+} __do_const;
77639
77640 /* this value hold the maximum octet of charset */
77641 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
77642diff --git a/include/linux/notifier.h b/include/linux/notifier.h
77643index d14a4c3..a078786 100644
77644--- a/include/linux/notifier.h
77645+++ b/include/linux/notifier.h
77646@@ -54,7 +54,8 @@ struct notifier_block {
77647 notifier_fn_t notifier_call;
77648 struct notifier_block __rcu *next;
77649 int priority;
77650-};
77651+} __do_const;
77652+typedef struct notifier_block __no_const notifier_block_no_const;
77653
77654 struct atomic_notifier_head {
77655 spinlock_t lock;
77656diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
77657index b2a0f15..4d7da32 100644
77658--- a/include/linux/oprofile.h
77659+++ b/include/linux/oprofile.h
77660@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
77661 int oprofilefs_create_ro_ulong(struct dentry * root,
77662 char const * name, ulong * val);
77663
77664-/** Create a file for read-only access to an atomic_t. */
77665+/** Create a file for read-only access to an atomic_unchecked_t. */
77666 int oprofilefs_create_ro_atomic(struct dentry * root,
77667- char const * name, atomic_t * val);
77668+ char const * name, atomic_unchecked_t * val);
77669
77670 /** create a directory */
77671 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
77672diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
77673index 430dd96..544e26e 100644
77674--- a/include/linux/pci_hotplug.h
77675+++ b/include/linux/pci_hotplug.h
77676@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
77677 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
77678 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
77679 int (*reset_slot) (struct hotplug_slot *slot, int probe);
77680-};
77681+} __do_const;
77682+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
77683
77684 /**
77685 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
77686diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
77687index c8ba627..24bdfa8 100644
77688--- a/include/linux/perf_event.h
77689+++ b/include/linux/perf_event.h
77690@@ -327,8 +327,8 @@ struct perf_event {
77691
77692 enum perf_event_active_state state;
77693 unsigned int attach_state;
77694- local64_t count;
77695- atomic64_t child_count;
77696+ local64_t count; /* PaX: fix it one day */
77697+ atomic64_unchecked_t child_count;
77698
77699 /*
77700 * These are the total time in nanoseconds that the event
77701@@ -379,8 +379,8 @@ struct perf_event {
77702 * These accumulate total time (in nanoseconds) that children
77703 * events have been enabled and running, respectively.
77704 */
77705- atomic64_t child_total_time_enabled;
77706- atomic64_t child_total_time_running;
77707+ atomic64_unchecked_t child_total_time_enabled;
77708+ atomic64_unchecked_t child_total_time_running;
77709
77710 /*
77711 * Protect attach/detach and child_list:
77712@@ -702,7 +702,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
77713 entry->ip[entry->nr++] = ip;
77714 }
77715
77716-extern int sysctl_perf_event_paranoid;
77717+extern int sysctl_perf_event_legitimately_concerned;
77718 extern int sysctl_perf_event_mlock;
77719 extern int sysctl_perf_event_sample_rate;
77720 extern int sysctl_perf_cpu_time_max_percent;
77721@@ -717,19 +717,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
77722 loff_t *ppos);
77723
77724
77725+static inline bool perf_paranoid_any(void)
77726+{
77727+ return sysctl_perf_event_legitimately_concerned > 2;
77728+}
77729+
77730 static inline bool perf_paranoid_tracepoint_raw(void)
77731 {
77732- return sysctl_perf_event_paranoid > -1;
77733+ return sysctl_perf_event_legitimately_concerned > -1;
77734 }
77735
77736 static inline bool perf_paranoid_cpu(void)
77737 {
77738- return sysctl_perf_event_paranoid > 0;
77739+ return sysctl_perf_event_legitimately_concerned > 0;
77740 }
77741
77742 static inline bool perf_paranoid_kernel(void)
77743 {
77744- return sysctl_perf_event_paranoid > 1;
77745+ return sysctl_perf_event_legitimately_concerned > 1;
77746 }
77747
77748 extern void perf_event_init(void);
77749@@ -845,7 +850,7 @@ struct perf_pmu_events_attr {
77750 struct device_attribute attr;
77751 u64 id;
77752 const char *event_str;
77753-};
77754+} __do_const;
77755
77756 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
77757 static struct perf_pmu_events_attr _var = { \
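
Note: besides the tongue-in-cheek rename of sysctl_perf_event_paranoid, the hunk adds a fourth tier: perf_paranoid_any() is true above 2, so kernel.perf_event_paranoid=3 shuts perf_event_open() off for unprivileged users entirely. The new check, restated from the hunk above:

    static inline bool perf_paranoid_any(void)
    {
        return sysctl_perf_event_legitimately_concerned > 2;
    }
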
77758diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
77759index b8809fe..ae4ccd0 100644
77760--- a/include/linux/pipe_fs_i.h
77761+++ b/include/linux/pipe_fs_i.h
77762@@ -47,10 +47,10 @@ struct pipe_inode_info {
77763 struct mutex mutex;
77764 wait_queue_head_t wait;
77765 unsigned int nrbufs, curbuf, buffers;
77766- unsigned int readers;
77767- unsigned int writers;
77768- unsigned int files;
77769- unsigned int waiting_writers;
77770+ atomic_t readers;
77771+ atomic_t writers;
77772+ atomic_t files;
77773+ atomic_t waiting_writers;
77774 unsigned int r_counter;
77775 unsigned int w_counter;
77776 struct page *tmp_page;
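
Note: turning the pipe reader/writer/file counts into atomic_t gives them atomic read-modify-write and, under PaX REFCOUNT, overflow trapping (a reading of the motivation, since the patch leaves it uncommented). Call sites change mechanically, sketched here:

    atomic_inc(&pipe->readers);         /* was: pipe->readers++;    */
    if (!atomic_read(&pipe->writers))   /* was: if (!pipe->writers) */
        wake_up_interruptible(&pipe->wait);
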
77777diff --git a/include/linux/platform_data/usb-ehci-s5p.h b/include/linux/platform_data/usb-ehci-s5p.h
77778index 5f28cae..3d23723 100644
77779--- a/include/linux/platform_data/usb-ehci-s5p.h
77780+++ b/include/linux/platform_data/usb-ehci-s5p.h
77781@@ -14,7 +14,7 @@
77782 struct s5p_ehci_platdata {
77783 int (*phy_init)(struct platform_device *pdev, int type);
77784 int (*phy_exit)(struct platform_device *pdev, int type);
77785-};
77786+} __no_const;
77787
77788 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
77789
77790diff --git a/include/linux/platform_data/usb-ohci-exynos.h b/include/linux/platform_data/usb-ohci-exynos.h
77791index c256c59..8ea94c7 100644
77792--- a/include/linux/platform_data/usb-ohci-exynos.h
77793+++ b/include/linux/platform_data/usb-ohci-exynos.h
77794@@ -14,7 +14,7 @@
77795 struct exynos4_ohci_platdata {
77796 int (*phy_init)(struct platform_device *pdev, int type);
77797 int (*phy_exit)(struct platform_device *pdev, int type);
77798-};
77799+} __no_const;
77800
77801 extern void exynos4_ohci_set_platdata(struct exynos4_ohci_platdata *pd);
77802
77803diff --git a/include/linux/pm.h b/include/linux/pm.h
77804index a224c7f..92d8a97 100644
77805--- a/include/linux/pm.h
77806+++ b/include/linux/pm.h
77807@@ -576,6 +576,7 @@ extern int dev_pm_put_subsys_data(struct device *dev);
77808 struct dev_pm_domain {
77809 struct dev_pm_ops ops;
77810 };
77811+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
77812
77813 /*
77814 * The PM_EVENT_ messages are also used by drivers implementing the legacy
77815diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
77816index 7c1d252..c5c773e 100644
77817--- a/include/linux/pm_domain.h
77818+++ b/include/linux/pm_domain.h
77819@@ -48,7 +48,7 @@ struct gpd_dev_ops {
77820
77821 struct gpd_cpu_data {
77822 unsigned int saved_exit_latency;
77823- struct cpuidle_state *idle_state;
77824+ cpuidle_state_no_const *idle_state;
77825 };
77826
77827 struct generic_pm_domain {
77828diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
77829index 6fa7cea..7bf6415 100644
77830--- a/include/linux/pm_runtime.h
77831+++ b/include/linux/pm_runtime.h
77832@@ -103,7 +103,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
77833
77834 static inline void pm_runtime_mark_last_busy(struct device *dev)
77835 {
77836- ACCESS_ONCE(dev->power.last_busy) = jiffies;
77837+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
77838 }
77839
77840 #else /* !CONFIG_PM_RUNTIME */
77841diff --git a/include/linux/pnp.h b/include/linux/pnp.h
77842index 195aafc..49a7bc2 100644
77843--- a/include/linux/pnp.h
77844+++ b/include/linux/pnp.h
77845@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
77846 struct pnp_fixup {
77847 char id[7];
77848 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
77849-};
77850+} __do_const;
77851
77852 /* config parameters */
77853 #define PNP_CONFIG_NORMAL 0x0001
77854diff --git a/include/linux/poison.h b/include/linux/poison.h
77855index 2110a81..13a11bb 100644
77856--- a/include/linux/poison.h
77857+++ b/include/linux/poison.h
77858@@ -19,8 +19,8 @@
77859 * under normal circumstances, used to verify that nobody uses
77860 * non-initialized list entries.
77861 */
77862-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
77863-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
77864+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
77865+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
77866
77867 /********** include/linux/timer.h **********/
77868 /*
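
Note: on i386 the stock list poisons (0x00100100 and 0x00200200) fall inside the user-mappable range, so following a poisoned pointer can read attacker-controlled data; the replacements sit above TASK_SIZE and fault instead. Compile-time check of that claim for 32-bit x86, where TASK_SIZE defaults to 0xC0000000:

    _Static_assert(0x00100100u <  0xC0000000u, "old poison is user-mappable");
    _Static_assert(0xFFFFFF01u >= 0xC0000000u, "new poison faults from userland");
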
77869diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
77870index d8b187c3..9a9257a 100644
77871--- a/include/linux/power/smartreflex.h
77872+++ b/include/linux/power/smartreflex.h
77873@@ -238,7 +238,7 @@ struct omap_sr_class_data {
77874 int (*notify)(struct omap_sr *sr, u32 status);
77875 u8 notify_flags;
77876 u8 class_type;
77877-};
77878+} __do_const;
77879
77880 /**
77881 * struct omap_sr_nvalue_table - Smartreflex n-target value info
77882diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
77883index 4ea1d37..80f4b33 100644
77884--- a/include/linux/ppp-comp.h
77885+++ b/include/linux/ppp-comp.h
77886@@ -84,7 +84,7 @@ struct compressor {
77887 struct module *owner;
77888 /* Extra skb space needed by the compressor algorithm */
77889 unsigned int comp_extra;
77890-};
77891+} __do_const;
77892
77893 /*
77894 * The return value from decompress routine is the length of the
77895diff --git a/include/linux/preempt.h b/include/linux/preempt.h
77896index f5d4723..a6ea2fa 100644
77897--- a/include/linux/preempt.h
77898+++ b/include/linux/preempt.h
77899@@ -18,8 +18,13 @@
77900 # define sub_preempt_count(val) do { preempt_count() -= (val); } while (0)
77901 #endif
77902
77903+#define raw_add_preempt_count(val) do { preempt_count() += (val); } while (0)
77904+#define raw_sub_preempt_count(val) do { preempt_count() -= (val); } while (0)
77905+
77906 #define inc_preempt_count() add_preempt_count(1)
77907+#define raw_inc_preempt_count() raw_add_preempt_count(1)
77908 #define dec_preempt_count() sub_preempt_count(1)
77909+#define raw_dec_preempt_count() raw_sub_preempt_count(1)
77910
77911 #define preempt_count() (current_thread_info()->preempt_count)
77912
77913@@ -64,6 +69,12 @@ do { \
77914 barrier(); \
77915 } while (0)
77916
77917+#define raw_preempt_disable() \
77918+do { \
77919+ raw_inc_preempt_count(); \
77920+ barrier(); \
77921+} while (0)
77922+
77923 #define sched_preempt_enable_no_resched() \
77924 do { \
77925 barrier(); \
77926@@ -72,6 +83,12 @@ do { \
77927
77928 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
77929
77930+#define raw_preempt_enable_no_resched() \
77931+do { \
77932+ barrier(); \
77933+ raw_dec_preempt_count(); \
77934+} while (0)
77935+
77936 #define preempt_enable() \
77937 do { \
77938 preempt_enable_no_resched(); \
77939@@ -116,8 +133,10 @@ do { \
77940 * region.
77941 */
77942 #define preempt_disable() barrier()
77943+#define raw_preempt_disable() barrier()
77944 #define sched_preempt_enable_no_resched() barrier()
77945 #define preempt_enable_no_resched() barrier()
77946+#define raw_preempt_enable_no_resched() barrier()
77947 #define preempt_enable() barrier()
77948
77949 #define preempt_disable_notrace() barrier()
77950diff --git a/include/linux/printk.h b/include/linux/printk.h
77951index e6131a78..8e9fb61 100644
77952--- a/include/linux/printk.h
77953+++ b/include/linux/printk.h
77954@@ -106,6 +106,8 @@ static inline __printf(1, 2) __cold
77955 void early_printk(const char *s, ...) { }
77956 #endif
77957
77958+extern int kptr_restrict;
77959+
77960 #ifdef CONFIG_PRINTK
77961 asmlinkage __printf(5, 0)
77962 int vprintk_emit(int facility, int level,
77963@@ -140,7 +142,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
77964
77965 extern int printk_delay_msec;
77966 extern int dmesg_restrict;
77967-extern int kptr_restrict;
77968
77969 extern void wake_up_klogd(void);
77970
77971diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
77972index 608e60a..c26f864 100644
77973--- a/include/linux/proc_fs.h
77974+++ b/include/linux/proc_fs.h
77975@@ -34,6 +34,19 @@ static inline struct proc_dir_entry *proc_create(
77976 return proc_create_data(name, mode, parent, proc_fops, NULL);
77977 }
77978
77979+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
77980+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
77981+{
77982+#ifdef CONFIG_GRKERNSEC_PROC_USER
77983+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
77984+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
77985+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
77986+#else
77987+ return proc_create_data(name, mode, parent, proc_fops, NULL);
77988+#endif
77989+}
77990+
77991+
77992 extern void proc_set_size(struct proc_dir_entry *, loff_t);
77993 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
77994 extern void *PDE_DATA(const struct inode *);
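
Note: proc_create_grsec() is a drop-in for proc_create() that tightens the mode when the /proc restriction options are enabled: owner-only under GRKERNSEC_PROC_USER, owner-plus-group under GRKERNSEC_PROC_USERGROUP, and the caller's mode otherwise. Hedged usage (the entry name and fops are placeholders):

    /* world-readable on a stock config, 0400/0440 on hardened ones */
    pde = proc_create_grsec("example_stats", S_IRUGO, NULL, &example_fops);
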
77995diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
77996index 34a1e10..03a6d03 100644
77997--- a/include/linux/proc_ns.h
77998+++ b/include/linux/proc_ns.h
77999@@ -14,7 +14,7 @@ struct proc_ns_operations {
78000 void (*put)(void *ns);
78001 int (*install)(struct nsproxy *nsproxy, void *ns);
78002 unsigned int (*inum)(void *ns);
78003-};
78004+} __do_const;
78005
78006 struct proc_ns {
78007 void *ns;
78008diff --git a/include/linux/quota.h b/include/linux/quota.h
78009index cc7494a..1e27036 100644
78010--- a/include/linux/quota.h
78011+++ b/include/linux/quota.h
78012@@ -70,7 +70,7 @@ struct kqid { /* Type in which we store the quota identifier */
78013
78014 extern bool qid_eq(struct kqid left, struct kqid right);
78015 extern bool qid_lt(struct kqid left, struct kqid right);
78016-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
78017+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
78018 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
78019 extern bool qid_valid(struct kqid qid);
78020
78021diff --git a/include/linux/random.h b/include/linux/random.h
78022index bf9085e..1e8bbcf 100644
78023--- a/include/linux/random.h
78024+++ b/include/linux/random.h
78025@@ -10,9 +10,19 @@
78026
78027
78028 extern void add_device_randomness(const void *, unsigned int);
78029+
78030+static inline void add_latent_entropy(void)
78031+{
78032+
78033+#ifdef LATENT_ENTROPY_PLUGIN
78034+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
78035+#endif
78036+
78037+}
78038+
78039 extern void add_input_randomness(unsigned int type, unsigned int code,
78040- unsigned int value);
78041-extern void add_interrupt_randomness(int irq, int irq_flags);
78042+ unsigned int value) __latent_entropy;
78043+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
78044
78045 extern void get_random_bytes(void *buf, int nbytes);
78046 extern void get_random_bytes_arch(void *buf, int nbytes);
78047@@ -23,16 +33,21 @@ extern int random_int_secret_init(void);
78048 extern const struct file_operations random_fops, urandom_fops;
78049 #endif
78050
78051-unsigned int get_random_int(void);
78052+unsigned int __intentional_overflow(-1) get_random_int(void);
78053 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
78054
78055-u32 prandom_u32(void);
78056+u32 prandom_u32(void) __intentional_overflow(-1);
78057 void prandom_bytes(void *buf, int nbytes);
78058 void prandom_seed(u32 seed);
78059
78060 u32 prandom_u32_state(struct rnd_state *);
78061 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
78062
78063+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
78064+{
78065+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
78066+}
78067+
78068 /*
78069 * Handle minimum values for seeds
78070 */
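
Note: pax_get_random_long() widens two PRNG draws into a full unsigned long on 64-bit targets (a single draw suffices on 32-bit), and the __intentional_overflow(-1) tags once more silence the plugin on deliberate wraps. A user-space model with rand() standing in for prandom_u32():

    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long model_get_random_long(void)
    {
        unsigned long r = (unsigned int)rand();
        /* <<16<<16 instead of <<32: avoids an undefined shift on 32-bit */
        if (sizeof(long) > 4)
            r += (unsigned long)(unsigned int)rand() << 16 << 16;
        return r;
    }

    int main(void)
    {
        printf("%#lx\n", model_get_random_long());
        return 0;
    }
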
78071diff --git a/include/linux/rculist.h b/include/linux/rculist.h
78072index 4106721..132d42c 100644
78073--- a/include/linux/rculist.h
78074+++ b/include/linux/rculist.h
78075@@ -44,6 +44,9 @@ extern void __list_add_rcu(struct list_head *new,
78076 struct list_head *prev, struct list_head *next);
78077 #endif
78078
78079+extern void __pax_list_add_rcu(struct list_head *new,
78080+ struct list_head *prev, struct list_head *next);
78081+
78082 /**
78083 * list_add_rcu - add a new entry to rcu-protected list
78084 * @new: new entry to be added
78085@@ -65,6 +68,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
78086 __list_add_rcu(new, head, head->next);
78087 }
78088
78089+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
78090+{
78091+ __pax_list_add_rcu(new, head, head->next);
78092+}
78093+
78094 /**
78095 * list_add_tail_rcu - add a new entry to rcu-protected list
78096 * @new: new entry to be added
78097@@ -87,6 +95,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
78098 __list_add_rcu(new, head->prev, head);
78099 }
78100
78101+static inline void pax_list_add_tail_rcu(struct list_head *new,
78102+ struct list_head *head)
78103+{
78104+ __pax_list_add_rcu(new, head->prev, head);
78105+}
78106+
78107 /**
78108 * list_del_rcu - deletes entry from list without re-initialization
78109 * @entry: the element to delete from the list.
78110@@ -117,6 +131,8 @@ static inline void list_del_rcu(struct list_head *entry)
78111 entry->prev = LIST_POISON2;
78112 }
78113
78114+extern void pax_list_del_rcu(struct list_head *entry);
78115+
78116 /**
78117 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
78118 * @n: the element to delete from the hash list.
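
The pax_list_* variants exist because other parts of this patch map certain structures (and the list nodes embedded in them) read-only; __pax_list_add_rcu() and pax_list_del_rcu() therefore perform the pointer updates inside a briefly opened writable window. A userspace model of that bracket using mprotect(), with the helpers standing in for pax_open_kernel()/pax_close_kernel() (page size and helper names are illustrative):

    #include <stdint.h>
    #include <sys/mman.h>

    struct list_head { struct list_head *next, *prev; };

    #define PAGE_SZ 4096UL

    static void open_writable(void *p)      /* ~ pax_open_kernel() */
    {
            mprotect((void *)((uintptr_t)p & ~(PAGE_SZ - 1)), PAGE_SZ,
                     PROT_READ | PROT_WRITE);
    }

    static void close_writable(void *p)     /* ~ pax_close_kernel() */
    {
            mprotect((void *)((uintptr_t)p & ~(PAGE_SZ - 1)), PAGE_SZ,
                     PROT_READ);
    }

    static void pax_list_add_sketch(struct list_head *new,
                                    struct list_head *prev,
                                    struct list_head *next)
    {
            open_writable(new);
            open_writable(prev);
            open_writable(next);
            new->next = next;       /* ordinary __list_add() body; the RCU */
            new->prev = prev;       /* variant would publish prev->next    */
            prev->next = new;       /* with rcu_assign_pointer()           */
            next->prev = new;
            close_writable(next);
            close_writable(prev);
            close_writable(new);
    }
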
78119diff --git a/include/linux/reboot.h b/include/linux/reboot.h
78120index 8e00f9f..9449b55 100644
78121--- a/include/linux/reboot.h
78122+++ b/include/linux/reboot.h
78123@@ -43,9 +43,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
78124 * Architecture-specific implementations of sys_reboot commands.
78125 */
78126
78127-extern void machine_restart(char *cmd);
78128-extern void machine_halt(void);
78129-extern void machine_power_off(void);
78130+extern void machine_restart(char *cmd) __noreturn;
78131+extern void machine_halt(void) __noreturn;
78132+extern void machine_power_off(void) __noreturn;
78133
78134 extern void machine_shutdown(void);
78135 struct pt_regs;
78136@@ -56,9 +56,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
78137 */
78138
78139 extern void kernel_restart_prepare(char *cmd);
78140-extern void kernel_restart(char *cmd);
78141-extern void kernel_halt(void);
78142-extern void kernel_power_off(void);
78143+extern void kernel_restart(char *cmd) __noreturn;
78144+extern void kernel_halt(void) __noreturn;
78145+extern void kernel_power_off(void) __noreturn;
78146
78147 extern int C_A_D; /* for sysctl */
78148 void ctrl_alt_del(void);
78149@@ -72,7 +72,7 @@ extern int orderly_poweroff(bool force);
78150 * Emergency restart, callable from an interrupt handler.
78151 */
78152
78153-extern void emergency_restart(void);
78154+extern void emergency_restart(void) __noreturn;
78155 #include <asm/emergency-restart.h>
78156
78157 #endif /* _LINUX_REBOOT_H */
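
The reboot.h hunk is an annotation-only change: the machine_* and kernel_* shutdown entry points are promised never to return, which lets the compiler prune dead code after call sites and flag any accidental return path inside the implementations. What the attribute buys, in standalone C:

    #include <stdio.h>
    #include <stdlib.h>

    __attribute__((noreturn)) static void die(const char *msg)
    {
            fprintf(stderr, "fatal: %s\n", msg);
            exit(1);        /* must not return: the attribute is a promise
                             * the compiler both exploits and verifies */
    }

    int checked_div(int a, int b)
    {
            if (b == 0)
                    die("division by zero");
            return a / b;   /* no "control reaches end of non-void
                             * function" warning: die() never returns */
    }
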
78158diff --git a/include/linux/regset.h b/include/linux/regset.h
78159index 8e0c9fe..ac4d221 100644
78160--- a/include/linux/regset.h
78161+++ b/include/linux/regset.h
78162@@ -161,7 +161,8 @@ struct user_regset {
78163 unsigned int align;
78164 unsigned int bias;
78165 unsigned int core_note_type;
78166-};
78167+} __do_const;
78168+typedef struct user_regset __no_const user_regset_no_const;
78169
78170 /**
78171 * struct user_regset_view - available regsets
78172diff --git a/include/linux/relay.h b/include/linux/relay.h
78173index d7c8359..818daf5 100644
78174--- a/include/linux/relay.h
78175+++ b/include/linux/relay.h
78176@@ -157,7 +157,7 @@ struct rchan_callbacks
78177 * The callback should return 0 if successful, negative if not.
78178 */
78179 int (*remove_buf_file)(struct dentry *dentry);
78180-};
78181+} __no_const;
78182
78183 /*
78184 * CONFIG_RELAY kernel API, kernel/relay.c
78185diff --git a/include/linux/rio.h b/include/linux/rio.h
78186index b71d573..2f940bd 100644
78187--- a/include/linux/rio.h
78188+++ b/include/linux/rio.h
78189@@ -355,7 +355,7 @@ struct rio_ops {
78190 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
78191 u64 rstart, u32 size, u32 flags);
78192 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
78193-};
78194+} __no_const;
78195
78196 #define RIO_RESOURCE_MEM 0x00000100
78197 #define RIO_RESOURCE_DOORBELL 0x00000200
78198diff --git a/include/linux/rmap.h b/include/linux/rmap.h
78199index 6dacb93..6174423 100644
78200--- a/include/linux/rmap.h
78201+++ b/include/linux/rmap.h
78202@@ -145,8 +145,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
78203 void anon_vma_init(void); /* create anon_vma_cachep */
78204 int anon_vma_prepare(struct vm_area_struct *);
78205 void unlink_anon_vmas(struct vm_area_struct *);
78206-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
78207-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
78208+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
78209+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
78210
78211 static inline void anon_vma_merge(struct vm_area_struct *vma,
78212 struct vm_area_struct *next)
78213diff --git a/include/linux/sched.h b/include/linux/sched.h
78214index b1e963e..114b8fd 100644
78215--- a/include/linux/sched.h
78216+++ b/include/linux/sched.h
78217@@ -62,6 +62,7 @@ struct bio_list;
78218 struct fs_struct;
78219 struct perf_event_context;
78220 struct blk_plug;
78221+struct linux_binprm;
78222
78223 /*
78224 * List of flags we want to share for kernel threads,
78225@@ -295,7 +296,7 @@ extern char __sched_text_start[], __sched_text_end[];
78226 extern int in_sched_functions(unsigned long addr);
78227
78228 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
78229-extern signed long schedule_timeout(signed long timeout);
78230+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
78231 extern signed long schedule_timeout_interruptible(signed long timeout);
78232 extern signed long schedule_timeout_killable(signed long timeout);
78233 extern signed long schedule_timeout_uninterruptible(signed long timeout);
78234@@ -306,6 +307,19 @@ struct nsproxy;
78235 struct user_namespace;
78236
78237 #ifdef CONFIG_MMU
78238+
78239+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
78240+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
78241+#else
78242+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
78243+{
78244+ return 0;
78245+}
78246+#endif
78247+
78248+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
78249+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
78250+
78251 extern void arch_pick_mmap_layout(struct mm_struct *mm);
78252 extern unsigned long
78253 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
78254@@ -585,6 +599,17 @@ struct signal_struct {
78255 #ifdef CONFIG_TASKSTATS
78256 struct taskstats *stats;
78257 #endif
78258+
78259+#ifdef CONFIG_GRKERNSEC
78260+ u32 curr_ip;
78261+ u32 saved_ip;
78262+ u32 gr_saddr;
78263+ u32 gr_daddr;
78264+ u16 gr_sport;
78265+ u16 gr_dport;
78266+ u8 used_accept:1;
78267+#endif
78268+
78269 #ifdef CONFIG_AUDIT
78270 unsigned audit_tty;
78271 unsigned audit_tty_log_passwd;
78272@@ -665,6 +690,14 @@ struct user_struct {
78273 struct key *session_keyring; /* UID's default session keyring */
78274 #endif
78275
78276+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78277+ unsigned char kernel_banned;
78278+#endif
78279+#ifdef CONFIG_GRKERNSEC_BRUTE
78280+ unsigned char suid_banned;
78281+ unsigned long suid_ban_expires;
78282+#endif
78283+
78284 /* Hash table maintenance information */
78285 struct hlist_node uidhash_node;
78286 kuid_t uid;
78287@@ -1150,8 +1183,8 @@ struct task_struct {
78288 struct list_head thread_group;
78289
78290 struct completion *vfork_done; /* for vfork() */
78291- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
78292- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
78293+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
78294+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
78295
78296 cputime_t utime, stime, utimescaled, stimescaled;
78297 cputime_t gtime;
78298@@ -1176,11 +1209,6 @@ struct task_struct {
78299 struct task_cputime cputime_expires;
78300 struct list_head cpu_timers[3];
78301
78302-/* process credentials */
78303- const struct cred __rcu *real_cred; /* objective and real subjective task
78304- * credentials (COW) */
78305- const struct cred __rcu *cred; /* effective (overridable) subjective task
78306- * credentials (COW) */
78307 char comm[TASK_COMM_LEN]; /* executable name excluding path
78308 - access with [gs]et_task_comm (which lock
78309 it with task_lock())
78310@@ -1197,6 +1225,10 @@ struct task_struct {
78311 #endif
78312 /* CPU-specific state of this task */
78313 struct thread_struct thread;
78314+/* thread_info moved to task_struct */
78315+#ifdef CONFIG_X86
78316+ struct thread_info tinfo;
78317+#endif
78318 /* filesystem information */
78319 struct fs_struct *fs;
78320 /* open file information */
78321@@ -1270,6 +1302,10 @@ struct task_struct {
78322 gfp_t lockdep_reclaim_gfp;
78323 #endif
78324
78325+/* process credentials */
78326+ const struct cred __rcu *real_cred; /* objective and real subjective task
78327+ * credentials (COW) */
78328+
78329 /* journalling filesystem info */
78330 void *journal_info;
78331
78332@@ -1308,6 +1344,10 @@ struct task_struct {
78333 /* cg_list protected by css_set_lock and tsk->alloc_lock */
78334 struct list_head cg_list;
78335 #endif
78336+
78337+ const struct cred __rcu *cred; /* effective (overridable) subjective task
78338+ * credentials (COW) */
78339+
78340 #ifdef CONFIG_FUTEX
78341 struct robust_list_head __user *robust_list;
78342 #ifdef CONFIG_COMPAT
78343@@ -1411,8 +1451,78 @@ struct task_struct {
78344 unsigned int sequential_io;
78345 unsigned int sequential_io_avg;
78346 #endif
78347+
78348+#ifdef CONFIG_GRKERNSEC
78349+ /* grsecurity */
78350+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
78351+ u64 exec_id;
78352+#endif
78353+#ifdef CONFIG_GRKERNSEC_SETXID
78354+ const struct cred *delayed_cred;
78355+#endif
78356+ struct dentry *gr_chroot_dentry;
78357+ struct acl_subject_label *acl;
78358+ struct acl_subject_label *tmpacl;
78359+ struct acl_role_label *role;
78360+ struct file *exec_file;
78361+ unsigned long brute_expires;
78362+ u16 acl_role_id;
78363+ u8 inherited;
78364+ /* is this the task that authenticated to the special role */
78365+ u8 acl_sp_role;
78366+ u8 is_writable;
78367+ u8 brute;
78368+ u8 gr_is_chrooted;
78369+#endif
78370+
78371 };
78372
78373+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
78374+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
78375+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
78376+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
78377+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
78378+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
78379+
78380+#ifdef CONFIG_PAX_SOFTMODE
78381+extern int pax_softmode;
78382+#endif
78383+
78384+extern int pax_check_flags(unsigned long *);
78385+
78386+/* if tsk != current then task_lock must be held on it */
78387+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
78388+static inline unsigned long pax_get_flags(struct task_struct *tsk)
78389+{
78390+ if (likely(tsk->mm))
78391+ return tsk->mm->pax_flags;
78392+ else
78393+ return 0UL;
78394+}
78395+
78396+/* if tsk != current then task_lock must be held on it */
78397+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
78398+{
78399+ if (likely(tsk->mm)) {
78400+ tsk->mm->pax_flags = flags;
78401+ return 0;
78402+ }
78403+ return -EINVAL;
78404+}
78405+#endif
78406+
78407+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
78408+extern void pax_set_initial_flags(struct linux_binprm *bprm);
78409+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
78410+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
78411+#endif
78412+
78413+struct path;
78414+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
78415+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
78416+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
78417+extern void pax_report_refcount_overflow(struct pt_regs *regs);
78418+
78419 /* Future-safe accessor for struct task_struct's cpus_allowed. */
78420 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
78421
78422@@ -1471,7 +1581,7 @@ struct pid_namespace;
78423 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
78424 struct pid_namespace *ns);
78425
78426-static inline pid_t task_pid_nr(struct task_struct *tsk)
78427+static inline pid_t task_pid_nr(const struct task_struct *tsk)
78428 {
78429 return tsk->pid;
78430 }
78431@@ -1921,7 +2031,9 @@ void yield(void);
78432 extern struct exec_domain default_exec_domain;
78433
78434 union thread_union {
78435+#ifndef CONFIG_X86
78436 struct thread_info thread_info;
78437+#endif
78438 unsigned long stack[THREAD_SIZE/sizeof(long)];
78439 };
78440
78441@@ -1954,6 +2066,7 @@ extern struct pid_namespace init_pid_ns;
78442 */
78443
78444 extern struct task_struct *find_task_by_vpid(pid_t nr);
78445+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
78446 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
78447 struct pid_namespace *ns);
78448
78449@@ -2118,7 +2231,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
78450 extern void exit_itimers(struct signal_struct *);
78451 extern void flush_itimer_signals(void);
78452
78453-extern void do_group_exit(int);
78454+extern __noreturn void do_group_exit(int);
78455
78456 extern int allow_signal(int);
78457 extern int disallow_signal(int);
78458@@ -2309,9 +2422,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
78459
78460 #endif
78461
78462-static inline int object_is_on_stack(void *obj)
78463+static inline int object_starts_on_stack(void *obj)
78464 {
78465- void *stack = task_stack_page(current);
78466+ const void *stack = task_stack_page(current);
78467
78468 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
78469 }
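
Besides the grsecurity bookkeeping fields, the sched.h hunk renames object_is_on_stack() to object_starts_on_stack(): the test inspects only the object's first byte, and the new name keeps callers from assuming the whole range was validated. The stricter check a copy-bounds validator actually needs looks like the sketch below (userspace model; the stack base and THREAD_SIZE stand-ins are illustrative):

    #include <stdbool.h>
    #include <stddef.h>

    #define THREAD_SIZE_SKETCH 8192         /* illustrative stack size */

    static bool object_fits_on_stack(const void *stack_base,
                                     const void *obj, size_t len)
    {
            const char *stack = stack_base;
            const char *p = obj;

            /* must start on the stack AND end on it, without the
             * remaining-space computation going negative */
            return p >= stack &&
                   p < stack + THREAD_SIZE_SKETCH &&
                   len <= (size_t)(stack + THREAD_SIZE_SKETCH - p);
    }
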
78470diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
78471index bf8086b..962b035 100644
78472--- a/include/linux/sched/sysctl.h
78473+++ b/include/linux/sched/sysctl.h
78474@@ -30,6 +30,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
78475 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
78476
78477 extern int sysctl_max_map_count;
78478+extern unsigned long sysctl_heap_stack_gap;
78479
78480 extern unsigned int sysctl_sched_latency;
78481 extern unsigned int sysctl_sched_min_granularity;
78482diff --git a/include/linux/security.h b/include/linux/security.h
78483index 9d37e2b..43368e4 100644
78484--- a/include/linux/security.h
78485+++ b/include/linux/security.h
78486@@ -27,6 +27,7 @@
78487 #include <linux/slab.h>
78488 #include <linux/err.h>
78489 #include <linux/string.h>
78490+#include <linux/grsecurity.h>
78491
78492 struct linux_binprm;
78493 struct cred;
78494@@ -116,8 +117,6 @@ struct seq_file;
78495
78496 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
78497
78498-void reset_security_ops(void);
78499-
78500 #ifdef CONFIG_MMU
78501 extern unsigned long mmap_min_addr;
78502 extern unsigned long dac_mmap_min_addr;
78503diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
78504index dc368b8..e895209 100644
78505--- a/include/linux/semaphore.h
78506+++ b/include/linux/semaphore.h
78507@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
78508 }
78509
78510 extern void down(struct semaphore *sem);
78511-extern int __must_check down_interruptible(struct semaphore *sem);
78512+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
78513 extern int __must_check down_killable(struct semaphore *sem);
78514 extern int __must_check down_trylock(struct semaphore *sem);
78515 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
78516diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
78517index 4e32edc..f8f2d18 100644
78518--- a/include/linux/seq_file.h
78519+++ b/include/linux/seq_file.h
78520@@ -26,6 +26,9 @@ struct seq_file {
78521 struct mutex lock;
78522 const struct seq_operations *op;
78523 int poll_event;
78524+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
78525+ u64 exec_id;
78526+#endif
78527 #ifdef CONFIG_USER_NS
78528 struct user_namespace *user_ns;
78529 #endif
78530@@ -38,6 +41,7 @@ struct seq_operations {
78531 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
78532 int (*show) (struct seq_file *m, void *v);
78533 };
78534+typedef struct seq_operations __no_const seq_operations_no_const;
78535
78536 #define SEQ_SKIP 1
78537
78538diff --git a/include/linux/shm.h b/include/linux/shm.h
78539index 429c199..4d42e38 100644
78540--- a/include/linux/shm.h
78541+++ b/include/linux/shm.h
78542@@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
78543
78544 /* The task created the shm object. NULL if the task is dead. */
78545 struct task_struct *shm_creator;
78546+#ifdef CONFIG_GRKERNSEC
78547+ time_t shm_createtime;
78548+ pid_t shm_lapid;
78549+#endif
78550 };
78551
78552 /* shm_mode upper byte flags */
78553diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
78554index f66f346..2e304d5 100644
78555--- a/include/linux/skbuff.h
78556+++ b/include/linux/skbuff.h
78557@@ -639,7 +639,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
78558 extern struct sk_buff *__alloc_skb(unsigned int size,
78559 gfp_t priority, int flags, int node);
78560 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
78561-static inline struct sk_buff *alloc_skb(unsigned int size,
78562+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
78563 gfp_t priority)
78564 {
78565 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
78566@@ -755,7 +755,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
78567 */
78568 static inline int skb_queue_empty(const struct sk_buff_head *list)
78569 {
78570- return list->next == (struct sk_buff *)list;
78571+ return list->next == (const struct sk_buff *)list;
78572 }
78573
78574 /**
78575@@ -768,7 +768,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
78576 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
78577 const struct sk_buff *skb)
78578 {
78579- return skb->next == (struct sk_buff *)list;
78580+ return skb->next == (const struct sk_buff *)list;
78581 }
78582
78583 /**
78584@@ -781,7 +781,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
78585 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
78586 const struct sk_buff *skb)
78587 {
78588- return skb->prev == (struct sk_buff *)list;
78589+ return skb->prev == (const struct sk_buff *)list;
78590 }
78591
78592 /**
78593@@ -1741,7 +1741,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
78594 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
78595 */
78596 #ifndef NET_SKB_PAD
78597-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
78598+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
78599 #endif
78600
78601 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
78602@@ -2339,7 +2339,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
78603 int noblock, int *err);
78604 extern unsigned int datagram_poll(struct file *file, struct socket *sock,
78605 struct poll_table_struct *wait);
78606-extern int skb_copy_datagram_iovec(const struct sk_buff *from,
78607+extern int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
78608 int offset, struct iovec *to,
78609 int size);
78610 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
78611@@ -2618,6 +2618,9 @@ static inline void nf_reset(struct sk_buff *skb)
78612 nf_bridge_put(skb->nf_bridge);
78613 skb->nf_bridge = NULL;
78614 #endif
78615+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
78616+ skb->nf_trace = 0;
78617+#endif
78618 }
78619
78620 static inline void nf_reset_trace(struct sk_buff *skb)
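
The skb_queue_empty()/skb_queue_is_last()/skb_queue_is_first() changes above adjust casts for const-correctness only; the underlying idiom is a circular list whose head doubles as a sentinel, valid because the head's first two members mirror an sk_buff's next/prev. The same emptiness test in generic form:

    #include <stdbool.h>

    struct node  { struct node *next, *prev; };
    struct queue { struct node *next, *prev; }; /* layout-compatible head */

    static bool queue_empty_sketch(const struct queue *q)
    {
            /* empty iff the first element points back at the head itself */
            return q->next == (const struct node *)q;
    }
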
78621diff --git a/include/linux/slab.h b/include/linux/slab.h
78622index 74f1058..914b7da 100644
78623--- a/include/linux/slab.h
78624+++ b/include/linux/slab.h
78625@@ -14,15 +14,29 @@
78626 #include <linux/gfp.h>
78627 #include <linux/types.h>
78628 #include <linux/workqueue.h>
78629-
78630+#include <linux/err.h>
78631
78632 /*
78633 * Flags to pass to kmem_cache_create().
78634 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
78635 */
78636 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
78637+
78638+#ifdef CONFIG_PAX_USERCOPY_SLABS
78639+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
78640+#else
78641+#define SLAB_USERCOPY 0x00000000UL
78642+#endif
78643+
78644 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
78645 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
78646+
78647+#ifdef CONFIG_PAX_MEMORY_SANITIZE
78648+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
78649+#else
78650+#define SLAB_NO_SANITIZE 0x00000000UL
78651+#endif
78652+
78653 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
78654 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
78655 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
78656@@ -91,10 +105,13 @@
78657 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
78658 * Both make kfree a no-op.
78659 */
78660-#define ZERO_SIZE_PTR ((void *)16)
78661+#define ZERO_SIZE_PTR \
78662+({ \
78663+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
78664+ (void *)(-MAX_ERRNO-1L); \
78665+})
78666
78667-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
78668- (unsigned long)ZERO_SIZE_PTR)
78669+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
78670
78671 #include <linux/kmemleak.h>
78672
78673@@ -135,6 +152,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
78674 void kfree(const void *);
78675 void kzfree(const void *);
78676 size_t ksize(const void *);
78677+const char *check_heap_object(const void *ptr, unsigned long n);
78678+bool is_usercopy_object(const void *ptr);
78679
78680 /*
78681 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
78682@@ -167,7 +186,7 @@ struct kmem_cache {
78683 unsigned int align; /* Alignment as calculated */
78684 unsigned long flags; /* Active flags on the slab */
78685 const char *name; /* Slab name for sysfs */
78686- int refcount; /* Use counter */
78687+ atomic_t refcount; /* Use counter */
78688 void (*ctor)(void *); /* Called on object slot creation */
78689 struct list_head list; /* List of all slab caches on the system */
78690 };
78691@@ -241,6 +260,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
78692 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
78693 #endif
78694
78695+#ifdef CONFIG_PAX_USERCOPY_SLABS
78696+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
78697+#endif
78698+
78699 /*
78700 * Figure out which kmalloc slab an allocation of a certain size
78701 * belongs to.
78702@@ -249,7 +272,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
78703 * 2 = 120 .. 192 bytes
78704 * n = 2^(n-1) .. 2^n -1
78705 */
78706-static __always_inline int kmalloc_index(size_t size)
78707+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
78708 {
78709 if (!size)
78710 return 0;
78711@@ -292,11 +315,11 @@ static __always_inline int kmalloc_index(size_t size)
78712 }
78713 #endif /* !CONFIG_SLOB */
78714
78715-void *__kmalloc(size_t size, gfp_t flags);
78716+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
78717 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
78718
78719 #ifdef CONFIG_NUMA
78720-void *__kmalloc_node(size_t size, gfp_t flags, int node);
78721+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
78722 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
78723 #else
78724 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
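
The slab.h hunk moves ZERO_SIZE_PTR from the stock (void *)16 to just below the ERR_PTR range (MAX_ERRNO is 4095 in the kernel), so no valid small pointer sits near it, and rewrites ZERO_OR_NULL_PTR() as a single unsigned compare that exploits wraparound: NULL minus one wraps to ULONG_MAX, which also lands above the bound. A runnable userspace demonstration with the same constants:

    #include <assert.h>
    #include <stddef.h>

    #define MAX_ERRNO 4095L
    #define ZERO_SIZE_PTR ((void *)(-MAX_ERRNO - 1L))
    #define ZERO_OR_NULL_PTR(x) \
            ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)

    int main(void)
    {
            int obj;

            assert(ZERO_OR_NULL_PTR(NULL));          /* 0 - 1 wraps high   */
            assert(ZERO_OR_NULL_PTR(ZERO_SIZE_PTR)); /* the marker itself  */
            assert(!ZERO_OR_NULL_PTR(&obj));         /* real pointers pass */
            return 0;
    }
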
78725diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
78726index e9346b4..1494959 100644
78727--- a/include/linux/slab_def.h
78728+++ b/include/linux/slab_def.h
78729@@ -36,7 +36,7 @@ struct kmem_cache {
78730 /* 4) cache creation/removal */
78731 const char *name;
78732 struct list_head list;
78733- int refcount;
78734+ atomic_t refcount;
78735 int object_size;
78736 int align;
78737
78738@@ -52,10 +52,14 @@ struct kmem_cache {
78739 unsigned long node_allocs;
78740 unsigned long node_frees;
78741 unsigned long node_overflow;
78742- atomic_t allochit;
78743- atomic_t allocmiss;
78744- atomic_t freehit;
78745- atomic_t freemiss;
78746+ atomic_unchecked_t allochit;
78747+ atomic_unchecked_t allocmiss;
78748+ atomic_unchecked_t freehit;
78749+ atomic_unchecked_t freemiss;
78750+#ifdef CONFIG_PAX_MEMORY_SANITIZE
78751+ atomic_unchecked_t sanitized;
78752+ atomic_unchecked_t not_sanitized;
78753+#endif
78754
78755 /*
78756 * If debugging is enabled, then the allocator can add additional
78757diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
78758index cc0b67e..a0329b1 100644
78759--- a/include/linux/slub_def.h
78760+++ b/include/linux/slub_def.h
78761@@ -74,7 +74,7 @@ struct kmem_cache {
78762 struct kmem_cache_order_objects max;
78763 struct kmem_cache_order_objects min;
78764 gfp_t allocflags; /* gfp flags to use on each alloc */
78765- int refcount; /* Refcount for slab cache destroy */
78766+ atomic_t refcount; /* Refcount for slab cache destroy */
78767 void (*ctor)(void *);
78768 int inuse; /* Offset to metadata */
78769 int align; /* Alignment */
78770diff --git a/include/linux/smp.h b/include/linux/smp.h
78771index 731f523..3340268 100644
78772--- a/include/linux/smp.h
78773+++ b/include/linux/smp.h
78774@@ -186,7 +186,9 @@ static inline void __smp_call_function_single(int cpuid,
78775 #endif
78776
78777 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
78778+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
78779 #define put_cpu() preempt_enable()
78780+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
78781
78782 /*
78783 * Callback to arch code if there's nosmp or maxcpus=0 on the
78784diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
78785index 54f91d3..be2c379 100644
78786--- a/include/linux/sock_diag.h
78787+++ b/include/linux/sock_diag.h
78788@@ -11,7 +11,7 @@ struct sock;
78789 struct sock_diag_handler {
78790 __u8 family;
78791 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
78792-};
78793+} __do_const;
78794
78795 int sock_diag_register(const struct sock_diag_handler *h);
78796 void sock_diag_unregister(const struct sock_diag_handler *h);
78797diff --git a/include/linux/sonet.h b/include/linux/sonet.h
78798index 680f9a3..f13aeb0 100644
78799--- a/include/linux/sonet.h
78800+++ b/include/linux/sonet.h
78801@@ -7,7 +7,7 @@
78802 #include <uapi/linux/sonet.h>
78803
78804 struct k_sonet_stats {
78805-#define __HANDLE_ITEM(i) atomic_t i
78806+#define __HANDLE_ITEM(i) atomic_unchecked_t i
78807 __SONET_ITEMS
78808 #undef __HANDLE_ITEM
78809 };
78810diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
78811index 07d8e53..dc934c9 100644
78812--- a/include/linux/sunrpc/addr.h
78813+++ b/include/linux/sunrpc/addr.h
78814@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
78815 {
78816 switch (sap->sa_family) {
78817 case AF_INET:
78818- return ntohs(((struct sockaddr_in *)sap)->sin_port);
78819+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
78820 case AF_INET6:
78821- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
78822+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
78823 }
78824 return 0;
78825 }
78826@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
78827 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
78828 const struct sockaddr *src)
78829 {
78830- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
78831+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
78832 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
78833
78834 dsin->sin_family = ssin->sin_family;
78835@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
78836 if (sa->sa_family != AF_INET6)
78837 return 0;
78838
78839- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
78840+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
78841 }
78842
78843 #endif /* _LINUX_SUNRPC_ADDR_H */
78844diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
78845index 6740801..c535f27 100644
78846--- a/include/linux/sunrpc/clnt.h
78847+++ b/include/linux/sunrpc/clnt.h
78848@@ -96,7 +96,7 @@ struct rpc_procinfo {
78849 unsigned int p_timer; /* Which RTT timer to use */
78850 u32 p_statidx; /* Which procedure to account */
78851 const char * p_name; /* name of procedure */
78852-};
78853+} __do_const;
78854
78855 #ifdef __KERNEL__
78856
78857diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
78858index 6eecfc2..7ada79d 100644
78859--- a/include/linux/sunrpc/svc.h
78860+++ b/include/linux/sunrpc/svc.h
78861@@ -410,7 +410,7 @@ struct svc_procedure {
78862 unsigned int pc_count; /* call count */
78863 unsigned int pc_cachetype; /* cache info (NFS) */
78864 unsigned int pc_xdrressize; /* maximum size of XDR reply */
78865-};
78866+} __do_const;
78867
78868 /*
78869 * Function prototypes.
78870diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
78871index 0b8e3e6..33e0a01 100644
78872--- a/include/linux/sunrpc/svc_rdma.h
78873+++ b/include/linux/sunrpc/svc_rdma.h
78874@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
78875 extern unsigned int svcrdma_max_requests;
78876 extern unsigned int svcrdma_max_req_size;
78877
78878-extern atomic_t rdma_stat_recv;
78879-extern atomic_t rdma_stat_read;
78880-extern atomic_t rdma_stat_write;
78881-extern atomic_t rdma_stat_sq_starve;
78882-extern atomic_t rdma_stat_rq_starve;
78883-extern atomic_t rdma_stat_rq_poll;
78884-extern atomic_t rdma_stat_rq_prod;
78885-extern atomic_t rdma_stat_sq_poll;
78886-extern atomic_t rdma_stat_sq_prod;
78887+extern atomic_unchecked_t rdma_stat_recv;
78888+extern atomic_unchecked_t rdma_stat_read;
78889+extern atomic_unchecked_t rdma_stat_write;
78890+extern atomic_unchecked_t rdma_stat_sq_starve;
78891+extern atomic_unchecked_t rdma_stat_rq_starve;
78892+extern atomic_unchecked_t rdma_stat_rq_poll;
78893+extern atomic_unchecked_t rdma_stat_rq_prod;
78894+extern atomic_unchecked_t rdma_stat_sq_poll;
78895+extern atomic_unchecked_t rdma_stat_sq_prod;
78896
78897 #define RPCRDMA_VERSION 1
78898
78899diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
78900index 8d71d65..f79586e 100644
78901--- a/include/linux/sunrpc/svcauth.h
78902+++ b/include/linux/sunrpc/svcauth.h
78903@@ -120,7 +120,7 @@ struct auth_ops {
78904 int (*release)(struct svc_rqst *rq);
78905 void (*domain_release)(struct auth_domain *);
78906 int (*set_client)(struct svc_rqst *rq);
78907-};
78908+} __do_const;
78909
78910 #define SVC_GARBAGE 1
78911 #define SVC_SYSERR 2
78912diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
78913index a5ffd32..0935dea 100644
78914--- a/include/linux/swiotlb.h
78915+++ b/include/linux/swiotlb.h
78916@@ -60,7 +60,8 @@ extern void
78917
78918 extern void
78919 swiotlb_free_coherent(struct device *hwdev, size_t size,
78920- void *vaddr, dma_addr_t dma_handle);
78921+ void *vaddr, dma_addr_t dma_handle,
78922+ struct dma_attrs *attrs);
78923
78924 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
78925 unsigned long offset, size_t size,
78926diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
78927index 7fac04e..de57300 100644
78928--- a/include/linux/syscalls.h
78929+++ b/include/linux/syscalls.h
78930@@ -97,8 +97,14 @@ struct sigaltstack;
78931 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
78932
78933 #define __SC_DECL(t, a) t a
78934+#define __TYPE_IS_U(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
78935 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
78936-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
78937+#define __SC_LONG(t, a) __typeof( \
78938+ __builtin_choose_expr( \
78939+ sizeof(t) > sizeof(int), \
78940+ (t) 0, \
78941+ __builtin_choose_expr(__TYPE_IS_U(t), 0UL, 0L) \
78942+ )) a
78943 #define __SC_CAST(t, a) (t) a
78944 #define __SC_ARGS(t, a) a
78945 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
78946@@ -363,11 +369,11 @@ asmlinkage long sys_sync(void);
78947 asmlinkage long sys_fsync(unsigned int fd);
78948 asmlinkage long sys_fdatasync(unsigned int fd);
78949 asmlinkage long sys_bdflush(int func, long data);
78950-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
78951- char __user *type, unsigned long flags,
78952+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
78953+ const char __user *type, unsigned long flags,
78954 void __user *data);
78955-asmlinkage long sys_umount(char __user *name, int flags);
78956-asmlinkage long sys_oldumount(char __user *name);
78957+asmlinkage long sys_umount(const char __user *name, int flags);
78958+asmlinkage long sys_oldumount(const char __user *name);
78959 asmlinkage long sys_truncate(const char __user *path, long length);
78960 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
78961 asmlinkage long sys_stat(const char __user *filename,
78962@@ -579,7 +585,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
78963 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
78964 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
78965 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
78966- struct sockaddr __user *, int);
78967+ struct sockaddr __user *, int) __intentional_overflow(0);
78968 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
78969 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
78970 unsigned int vlen, unsigned flags);
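
The rewritten __SC_LONG above changes how sub-register-width syscall arguments are widened: the stock macro funnels everything through signed long, while the patched version picks an unsigned widening type for the types enumerated by __TYPE_IS_U, avoiding surprise sign-extension of large unsigned values. The difference in one line of userspace C:

    #include <stdio.h>

    int main(void)
    {
            unsigned int arg = 0x80000000u;
            long as_signed = (int)arg;       /* stock-style signed widening */
            unsigned long as_unsigned = arg; /* patched-style widening      */

            /* on LP64 this prints: -2147483648 vs 2147483648 */
            printf("%ld vs %lu\n", as_signed, as_unsigned);
            return 0;
    }
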
78971diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
78972index 27b3b0b..e093dd9 100644
78973--- a/include/linux/syscore_ops.h
78974+++ b/include/linux/syscore_ops.h
78975@@ -16,7 +16,7 @@ struct syscore_ops {
78976 int (*suspend)(void);
78977 void (*resume)(void);
78978 void (*shutdown)(void);
78979-};
78980+} __do_const;
78981
78982 extern void register_syscore_ops(struct syscore_ops *ops);
78983 extern void unregister_syscore_ops(struct syscore_ops *ops);
78984diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
78985index 14a8ff2..af52bad 100644
78986--- a/include/linux/sysctl.h
78987+++ b/include/linux/sysctl.h
78988@@ -34,13 +34,13 @@ struct ctl_table_root;
78989 struct ctl_table_header;
78990 struct ctl_dir;
78991
78992-typedef struct ctl_table ctl_table;
78993-
78994 typedef int proc_handler (struct ctl_table *ctl, int write,
78995 void __user *buffer, size_t *lenp, loff_t *ppos);
78996
78997 extern int proc_dostring(struct ctl_table *, int,
78998 void __user *, size_t *, loff_t *);
78999+extern int proc_dostring_modpriv(struct ctl_table *, int,
79000+ void __user *, size_t *, loff_t *);
79001 extern int proc_dointvec(struct ctl_table *, int,
79002 void __user *, size_t *, loff_t *);
79003 extern int proc_dointvec_minmax(struct ctl_table *, int,
79004@@ -115,7 +115,9 @@ struct ctl_table
79005 struct ctl_table_poll *poll;
79006 void *extra1;
79007 void *extra2;
79008-};
79009+} __do_const;
79010+typedef struct ctl_table __no_const ctl_table_no_const;
79011+typedef struct ctl_table ctl_table;
79012
79013 struct ctl_node {
79014 struct rb_node node;
79015diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
79016index 11baec7..706f99f 100644
79017--- a/include/linux/sysfs.h
79018+++ b/include/linux/sysfs.h
79019@@ -33,7 +33,8 @@ struct attribute {
79020 struct lock_class_key *key;
79021 struct lock_class_key skey;
79022 #endif
79023-};
79024+} __do_const;
79025+typedef struct attribute __no_const attribute_no_const;
79026
79027 /**
79028 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
79029@@ -62,7 +63,8 @@ struct attribute_group {
79030 struct attribute *, int);
79031 struct attribute **attrs;
79032 struct bin_attribute **bin_attrs;
79033-};
79034+} __do_const;
79035+typedef struct attribute_group __no_const attribute_group_no_const;
79036
79037 /**
79038 * Use these macros to make defining attributes easier. See include/linux/device.h
79039@@ -126,7 +128,8 @@ struct bin_attribute {
79040 char *, loff_t, size_t);
79041 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
79042 struct vm_area_struct *vma);
79043-};
79044+} __do_const;
79045+typedef struct bin_attribute __no_const bin_attribute_no_const;
79046
79047 /**
79048 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
79049diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
79050index 7faf933..9b85a0c 100644
79051--- a/include/linux/sysrq.h
79052+++ b/include/linux/sysrq.h
79053@@ -16,6 +16,7 @@
79054
79055 #include <linux/errno.h>
79056 #include <linux/types.h>
79057+#include <linux/compiler.h>
79058
79059 /* Enable/disable SYSRQ support by default (0==no, 1==yes). */
79060 #define SYSRQ_DEFAULT_ENABLE 1
79061@@ -36,7 +37,7 @@ struct sysrq_key_op {
79062 char *help_msg;
79063 char *action_msg;
79064 int enable_mask;
79065-};
79066+} __do_const;
79067
79068 #ifdef CONFIG_MAGIC_SYSRQ
79069
79070diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
79071index 4ae6f32..425d3e1 100644
79072--- a/include/linux/thread_info.h
79073+++ b/include/linux/thread_info.h
79074@@ -150,6 +150,15 @@ static inline bool test_and_clear_restore_sigmask(void)
79075 #error "no set_restore_sigmask() provided and default one won't work"
79076 #endif
79077
79078+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user);
79079+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
79080+{
79081+#ifndef CONFIG_PAX_USERCOPY_DEBUG
79082+ if (!__builtin_constant_p(n))
79083+#endif
79084+ __check_object_size(ptr, n, to_user);
79085+}
79086+
79087 #endif /* __KERNEL__ */
79088
79089 #endif /* _LINUX_THREAD_INFO_H */
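
check_object_size() above is the front door of the PAX_USERCOPY checking added elsewhere in this patch: the runtime walk in __check_object_size() is skipped when the copy length is a compile-time constant, since those call sites are validated statically, unless PAX_USERCOPY_DEBUG forces full checking. The gate's shape as a userspace sketch (validate_object() is an illustrative stand-in for the real bounds walk):

    #include <stdbool.h>

    static void validate_object(const void *ptr, unsigned long n,
                                bool to_user)
    {
            /* the heap/stack bounds walk would live here */
            (void)ptr; (void)n; (void)to_user;
    }

    /* constant sizes were proven safe at compile time; only variable
     * sizes pay the runtime cost */
    #define check_object_size_sketch(ptr, n, to_user)               \
            do {                                                    \
                    if (!__builtin_constant_p(n))                   \
                            validate_object((ptr), (n), (to_user)); \
            } while (0)
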
79090diff --git a/include/linux/tty.h b/include/linux/tty.h
79091index 64f8646..1515fc7 100644
79092--- a/include/linux/tty.h
79093+++ b/include/linux/tty.h
79094@@ -197,7 +197,7 @@ struct tty_port {
79095 const struct tty_port_operations *ops; /* Port operations */
79096 spinlock_t lock; /* Lock protecting tty field */
79097 int blocked_open; /* Waiting to open */
79098- int count; /* Usage count */
79099+ atomic_t count; /* Usage count */
79100 wait_queue_head_t open_wait; /* Open waiters */
79101 wait_queue_head_t close_wait; /* Close waiters */
79102 wait_queue_head_t delta_msr_wait; /* Modem status change */
79103@@ -546,7 +546,7 @@ extern int tty_port_open(struct tty_port *port,
79104 struct tty_struct *tty, struct file *filp);
79105 static inline int tty_port_users(struct tty_port *port)
79106 {
79107- return port->count + port->blocked_open;
79108+ return atomic_read(&port->count) + port->blocked_open;
79109 }
79110
79111 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
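
tty_port.count becomes an atomic_t above because concurrent open/close paths can race on it; plain int increments can lose updates, and under PAX_REFCOUNT the atomic version is also overflow-checked. The C11 userspace equivalent of the patched tty_port_users() read:

    #include <stdatomic.h>

    struct port_sketch {
            atomic_int count;       /* plays the role of tty_port.count */
            int blocked_open;
    };

    static int port_users_sketch(struct port_sketch *port)
    {
            /* mirrors: atomic_read(&port->count) + port->blocked_open */
            return atomic_load(&port->count) + port->blocked_open;
    }
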
79112diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
79113index 756a609..b302dd6 100644
79114--- a/include/linux/tty_driver.h
79115+++ b/include/linux/tty_driver.h
79116@@ -285,7 +285,7 @@ struct tty_operations {
79117 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
79118 #endif
79119 const struct file_operations *proc_fops;
79120-};
79121+} __do_const;
79122
79123 struct tty_driver {
79124 int magic; /* magic number for this structure */
79125diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
79126index f15c898..207b7d1 100644
79127--- a/include/linux/tty_ldisc.h
79128+++ b/include/linux/tty_ldisc.h
79129@@ -211,7 +211,7 @@ struct tty_ldisc_ops {
79130
79131 struct module *owner;
79132
79133- int refcount;
79134+ atomic_t refcount;
79135 };
79136
79137 struct tty_ldisc {
79138diff --git a/include/linux/types.h b/include/linux/types.h
79139index 4d118ba..c3ee9bf 100644
79140--- a/include/linux/types.h
79141+++ b/include/linux/types.h
79142@@ -176,10 +176,26 @@ typedef struct {
79143 int counter;
79144 } atomic_t;
79145
79146+#ifdef CONFIG_PAX_REFCOUNT
79147+typedef struct {
79148+ int counter;
79149+} atomic_unchecked_t;
79150+#else
79151+typedef atomic_t atomic_unchecked_t;
79152+#endif
79153+
79154 #ifdef CONFIG_64BIT
79155 typedef struct {
79156 long counter;
79157 } atomic64_t;
79158+
79159+#ifdef CONFIG_PAX_REFCOUNT
79160+typedef struct {
79161+ long counter;
79162+} atomic64_unchecked_t;
79163+#else
79164+typedef atomic64_t atomic64_unchecked_t;
79165+#endif
79166 #endif
79167
79168 struct list_head {
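
This types.h hunk is the root of the many atomic_unchecked_t conversions in this patch: under PAX_REFCOUNT, arithmetic on plain atomic_t traps on overflow to stop refcount-overflow exploits, so counters that are allowed to wrap (statistics, sequence numbers, IDs) move to the unchecked twin, which keeps ordinary modular semantics. A single-threaded userspace model of the distinction:

    #include <stdio.h>
    #include <stdlib.h>

    static void checked_inc(int *counter)   /* ~ atomic_inc() w/ REFCOUNT */
    {
            int next;

            if (__builtin_add_overflow(*counter, 1, &next)) {
                    fprintf(stderr, "refcount overflow detected\n");
                    abort(); /* the kernel raises a refcount report here */
            }
            *counter = next;
    }

    static void unchecked_inc(int *counter) /* ~ atomic_inc_unchecked() */
    {
            /* wraps silently from INT_MAX to INT_MIN */
            *counter = (int)((unsigned int)*counter + 1);
    }
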
79169diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
79170index 5ca0951..ab496a5 100644
79171--- a/include/linux/uaccess.h
79172+++ b/include/linux/uaccess.h
79173@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
79174 long ret; \
79175 mm_segment_t old_fs = get_fs(); \
79176 \
79177- set_fs(KERNEL_DS); \
79178 pagefault_disable(); \
79179- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
79180- pagefault_enable(); \
79181+ set_fs(KERNEL_DS); \
79182+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
79183 set_fs(old_fs); \
79184+ pagefault_enable(); \
79185 ret; \
79186 })
79187
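
The probe-read macro above is reordered so its two brackets nest properly: the KERNEL_DS address-limit override now opens after pagefault_disable() and closes before pagefault_enable(), confining the widened limit to the non-faulting window. The nesting discipline, reduced to a skeleton with illustrative no-op helpers:

    static void pagefault_disable_(void) { /* outer: no faults allowed */ }
    static void pagefault_enable_(void)  { }
    static void set_fs_kernel_(void)     { /* inner: widen address limit */ }
    static void set_fs_restore_(void)    { }

    static void probe_kernel_read_sketch(void)
    {
            pagefault_disable_();   /* 1. outer bracket opens  */
            set_fs_kernel_();       /* 2. inner bracket opens  */
            /* __copy_from_user_inatomic() would run here */
            set_fs_restore_();      /* 3. inner bracket closes */
            pagefault_enable_();    /* 4. outer bracket closes */
    }
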
79188diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
79189index 8e522cbc..aa8572d 100644
79190--- a/include/linux/uidgid.h
79191+++ b/include/linux/uidgid.h
79192@@ -197,4 +197,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
79193
79194 #endif /* CONFIG_USER_NS */
79195
79196+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
79197+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
79198+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
79199+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
79200+
79201 #endif /* _LINUX_UIDGID_H */
79202diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
79203index 99c1b4d..562e6f3 100644
79204--- a/include/linux/unaligned/access_ok.h
79205+++ b/include/linux/unaligned/access_ok.h
79206@@ -4,34 +4,34 @@
79207 #include <linux/kernel.h>
79208 #include <asm/byteorder.h>
79209
79210-static inline u16 get_unaligned_le16(const void *p)
79211+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
79212 {
79213- return le16_to_cpup((__le16 *)p);
79214+ return le16_to_cpup((const __le16 *)p);
79215 }
79216
79217-static inline u32 get_unaligned_le32(const void *p)
79218+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
79219 {
79220- return le32_to_cpup((__le32 *)p);
79221+ return le32_to_cpup((const __le32 *)p);
79222 }
79223
79224-static inline u64 get_unaligned_le64(const void *p)
79225+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
79226 {
79227- return le64_to_cpup((__le64 *)p);
79228+ return le64_to_cpup((const __le64 *)p);
79229 }
79230
79231-static inline u16 get_unaligned_be16(const void *p)
79232+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
79233 {
79234- return be16_to_cpup((__be16 *)p);
79235+ return be16_to_cpup((const __be16 *)p);
79236 }
79237
79238-static inline u32 get_unaligned_be32(const void *p)
79239+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
79240 {
79241- return be32_to_cpup((__be32 *)p);
79242+ return be32_to_cpup((const __be32 *)p);
79243 }
79244
79245-static inline u64 get_unaligned_be64(const void *p)
79246+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
79247 {
79248- return be64_to_cpup((__be64 *)p);
79249+ return be64_to_cpup((const __be64 *)p);
79250 }
79251
79252 static inline void put_unaligned_le16(u16 val, void *p)
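
The get_unaligned_* accessors above pick up const-correct casts plus the __intentional_overflow(-1) marker (byte-swapped loads naturally trip the plugin's value tracking). A portable model of the LE32 accessor that makes the no-alignment-assumption explicit by assembling the value bytewise, where the kernel version byte-swaps a potentially unaligned load instead:

    #include <stdint.h>

    static uint32_t get_unaligned_le32_sketch(const void *p)
    {
            const uint8_t *b = p;

            /* little-endian: lowest-addressed byte is least significant */
            return (uint32_t)b[0]
                 | (uint32_t)b[1] << 8
                 | (uint32_t)b[2] << 16
                 | (uint32_t)b[3] << 24;
    }
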
79253diff --git a/include/linux/usb.h b/include/linux/usb.h
79254index 6b02370..2355ffa 100644
79255--- a/include/linux/usb.h
79256+++ b/include/linux/usb.h
79257@@ -563,7 +563,7 @@ struct usb_device {
79258 int maxchild;
79259
79260 u32 quirks;
79261- atomic_t urbnum;
79262+ atomic_unchecked_t urbnum;
79263
79264 unsigned long active_duration;
79265
79266@@ -1639,7 +1639,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
79267
79268 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
79269 __u8 request, __u8 requesttype, __u16 value, __u16 index,
79270- void *data, __u16 size, int timeout);
79271+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
79272 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
79273 void *data, int len, int *actual_length, int timeout);
79274 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
79275diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
79276index e452ba6..78f8e80 100644
79277--- a/include/linux/usb/renesas_usbhs.h
79278+++ b/include/linux/usb/renesas_usbhs.h
79279@@ -39,7 +39,7 @@ enum {
79280 */
79281 struct renesas_usbhs_driver_callback {
79282 int (*notify_hotplug)(struct platform_device *pdev);
79283-};
79284+} __no_const;
79285
79286 /*
79287 * callback functions for platform
79288diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
79289index 6f8fbcf..8259001 100644
79290--- a/include/linux/vermagic.h
79291+++ b/include/linux/vermagic.h
79292@@ -25,9 +25,35 @@
79293 #define MODULE_ARCH_VERMAGIC ""
79294 #endif
79295
79296+#ifdef CONFIG_PAX_REFCOUNT
79297+#define MODULE_PAX_REFCOUNT "REFCOUNT "
79298+#else
79299+#define MODULE_PAX_REFCOUNT ""
79300+#endif
79301+
79302+#ifdef CONSTIFY_PLUGIN
79303+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
79304+#else
79305+#define MODULE_CONSTIFY_PLUGIN ""
79306+#endif
79307+
79308+#ifdef STACKLEAK_PLUGIN
79309+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
79310+#else
79311+#define MODULE_STACKLEAK_PLUGIN ""
79312+#endif
79313+
79314+#ifdef CONFIG_GRKERNSEC
79315+#define MODULE_GRSEC "GRSEC "
79316+#else
79317+#define MODULE_GRSEC ""
79318+#endif
79319+
79320 #define VERMAGIC_STRING \
79321 UTS_RELEASE " " \
79322 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
79323 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
79324- MODULE_ARCH_VERMAGIC
79325+ MODULE_ARCH_VERMAGIC \
79326+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
79327+ MODULE_GRSEC
79328
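
The vermagic.h change folds the active hardening features into the module version magic, so a module built without (say) the constify plugin is rejected at load time by a kernel built with it, rather than misbehaving later. How the adjacent-string-literal concatenation plays out, with made-up version components:

    #include <stdio.h>

    #define UTS_RELEASE_X  "3.12.6-grsec"      /* illustrative */
    #define MODULE_FLAGS_X "SMP mod_unload "   /* illustrative */
    #define MODULE_GRSEC_X "REFCOUNT CONSTIFY_PLUGIN GRSEC "

    int main(void)
    {
            /* adjacent literals concatenate, as in VERMAGIC_STRING */
            puts(UTS_RELEASE_X " " MODULE_FLAGS_X MODULE_GRSEC_X);
            return 0;
    }
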
79329diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
79330index 502073a..a7de024 100644
79331--- a/include/linux/vga_switcheroo.h
79332+++ b/include/linux/vga_switcheroo.h
79333@@ -63,8 +63,8 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
79334
79335 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
79336
79337-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
79338-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
79339+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
79340+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
79341 #else
79342
79343 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
79344@@ -81,8 +81,8 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
79345
79346 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
79347
79348-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
79349-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
79350+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
79351+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
79352
79353 #endif
79354 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
79355diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
79356index 4b8a891..cb8df6e 100644
79357--- a/include/linux/vmalloc.h
79358+++ b/include/linux/vmalloc.h
79359@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
79360 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
79361 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
79362 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
79363+
79364+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
79365+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
79366+#endif
79367+
79368 /* bits [20..32] reserved for arch specific ioremap internals */
79369
79370 /*
79371@@ -142,7 +147,7 @@ extern void free_vm_area(struct vm_struct *area);
79372
79373 /* for /dev/kmem */
79374 extern long vread(char *buf, char *addr, unsigned long count);
79375-extern long vwrite(char *buf, char *addr, unsigned long count);
79376+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
79377
79378 /*
79379 * Internals. Dont't use..
79380diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
79381index e4b9480..5a5f65a 100644
79382--- a/include/linux/vmstat.h
79383+++ b/include/linux/vmstat.h
79384@@ -90,18 +90,18 @@ static inline void vm_events_fold_cpu(int cpu)
79385 /*
79386 * Zone based page accounting with per cpu differentials.
79387 */
79388-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
79389+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
79390
79391 static inline void zone_page_state_add(long x, struct zone *zone,
79392 enum zone_stat_item item)
79393 {
79394- atomic_long_add(x, &zone->vm_stat[item]);
79395- atomic_long_add(x, &vm_stat[item]);
79396+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
79397+ atomic_long_add_unchecked(x, &vm_stat[item]);
79398 }
79399
79400-static inline unsigned long global_page_state(enum zone_stat_item item)
79401+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
79402 {
79403- long x = atomic_long_read(&vm_stat[item]);
79404+ long x = atomic_long_read_unchecked(&vm_stat[item]);
79405 #ifdef CONFIG_SMP
79406 if (x < 0)
79407 x = 0;
79408@@ -109,10 +109,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
79409 return x;
79410 }
79411
79412-static inline unsigned long zone_page_state(struct zone *zone,
79413+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
79414 enum zone_stat_item item)
79415 {
79416- long x = atomic_long_read(&zone->vm_stat[item]);
79417+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
79418 #ifdef CONFIG_SMP
79419 if (x < 0)
79420 x = 0;
79421@@ -129,7 +129,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
79422 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
79423 enum zone_stat_item item)
79424 {
79425- long x = atomic_long_read(&zone->vm_stat[item]);
79426+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
79427
79428 #ifdef CONFIG_SMP
79429 int cpu;
79430@@ -220,8 +220,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
79431
79432 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
79433 {
79434- atomic_long_inc(&zone->vm_stat[item]);
79435- atomic_long_inc(&vm_stat[item]);
79436+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
79437+ atomic_long_inc_unchecked(&vm_stat[item]);
79438 }
79439
79440 static inline void __inc_zone_page_state(struct page *page,
79441@@ -232,8 +232,8 @@ static inline void __inc_zone_page_state(struct page *page,
79442
79443 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
79444 {
79445- atomic_long_dec(&zone->vm_stat[item]);
79446- atomic_long_dec(&vm_stat[item]);
79447+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
79448+ atomic_long_dec_unchecked(&vm_stat[item]);
79449 }
79450
79451 static inline void __dec_zone_page_state(struct page *page,
79452diff --git a/include/linux/xattr.h b/include/linux/xattr.h
79453index 91b0a68..0e9adf6 100644
79454--- a/include/linux/xattr.h
79455+++ b/include/linux/xattr.h
79456@@ -28,7 +28,7 @@ struct xattr_handler {
79457 size_t size, int handler_flags);
79458 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
79459 size_t size, int flags, int handler_flags);
79460-};
79461+} __do_const;
79462
79463 struct xattr {
79464 const char *name;
79465@@ -37,6 +37,9 @@ struct xattr {
79466 };
79467
79468 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
79469+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
79470+ssize_t pax_getxattr(struct dentry *, void *, size_t);
79471+#endif
79472 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
79473 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
79474 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
79475diff --git a/include/linux/zlib.h b/include/linux/zlib.h
79476index 9c5a6b4..09c9438 100644
79477--- a/include/linux/zlib.h
79478+++ b/include/linux/zlib.h
79479@@ -31,6 +31,7 @@
79480 #define _ZLIB_H
79481
79482 #include <linux/zconf.h>
79483+#include <linux/compiler.h>
79484
79485 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
79486 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
79487@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
79488
79489 /* basic functions */
79490
79491-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
79492+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
79493 /*
79494 Returns the number of bytes that needs to be allocated for a per-
79495 stream workspace with the specified parameters. A pointer to this
79496diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
79497index c768c9f..bdcaa5a 100644
79498--- a/include/media/v4l2-dev.h
79499+++ b/include/media/v4l2-dev.h
79500@@ -76,7 +76,7 @@ struct v4l2_file_operations {
79501 int (*mmap) (struct file *, struct vm_area_struct *);
79502 int (*open) (struct file *);
79503 int (*release) (struct file *);
79504-};
79505+} __do_const;
79506
79507 /*
79508 * Newer version of video_device, handled by videodev2.c
79509diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
79510index c9b1593..a572459 100644
79511--- a/include/media/v4l2-device.h
79512+++ b/include/media/v4l2-device.h
79513@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
79514 this function returns 0. If the name ends with a digit (e.g. cx18),
79515 then the name will be set to cx18-0 since cx180 looks really odd. */
79516 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
79517- atomic_t *instance);
79518+ atomic_unchecked_t *instance);
79519
79520 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
79521 Since the parent disappears this ensures that v4l2_dev doesn't have an
79522diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
79523index 9a36d92..0aafe2a 100644
79524--- a/include/net/9p/transport.h
79525+++ b/include/net/9p/transport.h
79526@@ -60,7 +60,7 @@ struct p9_trans_module {
79527 int (*cancel) (struct p9_client *, struct p9_req_t *req);
79528 int (*zc_request)(struct p9_client *, struct p9_req_t *,
79529 char *, char *, int , int, int, int);
79530-};
79531+} __do_const;
79532
79533 void v9fs_register_trans(struct p9_trans_module *m);
79534 void v9fs_unregister_trans(struct p9_trans_module *m);
79535diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
79536index 1a966af..2767cf6 100644
79537--- a/include/net/bluetooth/l2cap.h
79538+++ b/include/net/bluetooth/l2cap.h
79539@@ -551,7 +551,7 @@ struct l2cap_ops {
79540 void (*defer) (struct l2cap_chan *chan);
79541 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
79542 unsigned long len, int nb);
79543-};
79544+} __do_const;
79545
79546 struct l2cap_conn {
79547 struct hci_conn *hcon;
79548diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
79549index f2ae33d..c457cf0 100644
79550--- a/include/net/caif/cfctrl.h
79551+++ b/include/net/caif/cfctrl.h
79552@@ -52,7 +52,7 @@ struct cfctrl_rsp {
79553 void (*radioset_rsp)(void);
79554 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
79555 struct cflayer *client_layer);
79556-};
79557+} __no_const;
79558
79559 /* Link Setup Parameters for CAIF-Links. */
79560 struct cfctrl_link_param {
79561@@ -101,8 +101,8 @@ struct cfctrl_request_info {
79562 struct cfctrl {
79563 struct cfsrvl serv;
79564 struct cfctrl_rsp res;
79565- atomic_t req_seq_no;
79566- atomic_t rsp_seq_no;
79567+ atomic_unchecked_t req_seq_no;
79568+ atomic_unchecked_t rsp_seq_no;
79569 struct list_head list;
79570 /* Protects from simultaneous access to first_req list */
79571 spinlock_t info_list_lock;
79572diff --git a/include/net/flow.h b/include/net/flow.h
79573index 628e11b..4c475df 100644
79574--- a/include/net/flow.h
79575+++ b/include/net/flow.h
79576@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
79577
79578 extern void flow_cache_flush(void);
79579 extern void flow_cache_flush_deferred(void);
79580-extern atomic_t flow_cache_genid;
79581+extern atomic_unchecked_t flow_cache_genid;
79582
79583 #endif
79584diff --git a/include/net/genetlink.h b/include/net/genetlink.h
79585index 8e0b6c8..73cf605 100644
79586--- a/include/net/genetlink.h
79587+++ b/include/net/genetlink.h
79588@@ -120,7 +120,7 @@ struct genl_ops {
79589 struct netlink_callback *cb);
79590 int (*done)(struct netlink_callback *cb);
79591 struct list_head ops_list;
79592-};
79593+} __do_const;
79594
79595 extern int __genl_register_family(struct genl_family *family);
79596
79597diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
79598index 734d9b5..48a9a4b 100644
79599--- a/include/net/gro_cells.h
79600+++ b/include/net/gro_cells.h
79601@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
79602 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
79603
79604 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
79605- atomic_long_inc(&dev->rx_dropped);
79606+ atomic_long_inc_unchecked(&dev->rx_dropped);
79607 kfree_skb(skb);
79608 return;
79609 }
79610diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
79611index de2c785..0588a6b 100644
79612--- a/include/net/inet_connection_sock.h
79613+++ b/include/net/inet_connection_sock.h
79614@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
79615 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
79616 int (*bind_conflict)(const struct sock *sk,
79617 const struct inet_bind_bucket *tb, bool relax);
79618-};
79619+} __do_const;
79620
79621 /** inet_connection_sock - INET connection oriented sock
79622 *
79623diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
79624index 53f464d..0bd0b49 100644
79625--- a/include/net/inetpeer.h
79626+++ b/include/net/inetpeer.h
79627@@ -47,8 +47,8 @@ struct inet_peer {
79628 */
79629 union {
79630 struct {
79631- atomic_t rid; /* Frag reception counter */
79632- atomic_t ip_id_count; /* IP ID for the next packet */
79633+ atomic_unchecked_t rid; /* Frag reception counter */
79634+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
79635 };
79636 struct rcu_head rcu;
79637 struct inet_peer *gc_next;
79638@@ -178,16 +178,13 @@ static inline void inet_peer_refcheck(const struct inet_peer *p)
79639 /* can be called with or without local BH being disabled */
79640 static inline int inet_getid(struct inet_peer *p, int more)
79641 {
79642- int old, new;
79643+ int id;
79644 more++;
79645 inet_peer_refcheck(p);
79646- do {
79647- old = atomic_read(&p->ip_id_count);
79648- new = old + more;
79649- if (!new)
79650- new = 1;
79651- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
79652- return new;
79653+ id = atomic_add_return_unchecked(more, &p->ip_id_count);
79654+ if (!id)
79655+ id = atomic_inc_return_unchecked(&p->ip_id_count);
79656+ return id;
79657 }
79658
79659 #endif /* _NET_INETPEER_H */
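[annotation] The inet_getid() rewrite above collapses a cmpxchg retry loop into one unchecked add: ip_id_count is a counter that is supposed to wrap, so under PaX REFCOUNT (which traps on atomic_t overflow) it becomes atomic_unchecked_t and the loop is unnecessary. A user-space sketch of the same wrap-tolerant allocation, substituting GCC builtins for the kernel's atomic_*_unchecked() helpers:

    #include <stdio.h>

    static int ip_id_count;    /* stands in for p->ip_id_count */

    static int inet_getid_sketch(int more)
    {
        more++;
        /* atomic_add_return_unchecked(): add and fetch, wrap allowed */
        int id = __atomic_add_fetch(&ip_id_count, more, __ATOMIC_SEQ_CST);
        if (!id)    /* preserve the "never return 0" property */
            id = __atomic_add_fetch(&ip_id_count, 1, __ATOMIC_SEQ_CST);
        return id;
    }

    int main(void)
    {
        printf("%d %d\n", inet_getid_sketch(0), inet_getid_sketch(0));
        return 0;
    }

One behavioral nuance: the old loop substituted 1 for 0 without publishing 0, while the new code increments past it; both keep 0 out of the return value.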
79660diff --git a/include/net/ip.h b/include/net/ip.h
79661index 301f10c..b52cdaf 100644
79662--- a/include/net/ip.h
79663+++ b/include/net/ip.h
79664@@ -212,7 +212,7 @@ extern struct local_ports {
79665 } sysctl_local_ports;
79666 extern void inet_get_local_port_range(int *low, int *high);
79667
79668-extern unsigned long *sysctl_local_reserved_ports;
79669+extern unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
79670 static inline int inet_is_reserved_local_port(int port)
79671 {
79672 return test_bit(port, sysctl_local_reserved_ports);
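[annotation] sysctl_local_reserved_ports changes from a heap-allocated pointer into a statically sized bitmap, one bit per 16-bit port: 65536 / 8 bytes packed into unsigned longs (1024 longs, 8 KiB, on LP64), so the object's bounds are visible to the compiler and the hardening plugins. The arithmetic, with a test_bit()-style lookup for illustration:

    #include <limits.h>    /* CHAR_BIT */

    #define NUM_PORTS 65536UL
    static unsigned long reserved_ports[NUM_PORTS / CHAR_BIT / sizeof(unsigned long)];

    static int port_is_reserved(unsigned int port)
    {
        const unsigned int bits = CHAR_BIT * sizeof(unsigned long);
        return (reserved_ports[port / bits] >> (port % bits)) & 1UL;
    }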
79673diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
79674index cbf2be3..3683f6d 100644
79675--- a/include/net/ip_fib.h
79676+++ b/include/net/ip_fib.h
79677@@ -169,7 +169,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
79678
79679 #define FIB_RES_SADDR(net, res) \
79680 ((FIB_RES_NH(res).nh_saddr_genid == \
79681- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
79682+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
79683 FIB_RES_NH(res).nh_saddr : \
79684 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
79685 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
79686diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
79687index 772252d..1e69799 100644
79688--- a/include/net/ip_vs.h
79689+++ b/include/net/ip_vs.h
79690@@ -558,7 +558,7 @@ struct ip_vs_conn {
79691 struct ip_vs_conn *control; /* Master control connection */
79692 atomic_t n_control; /* Number of controlled ones */
79693 struct ip_vs_dest *dest; /* real server */
79694- atomic_t in_pkts; /* incoming packet counter */
79695+ atomic_unchecked_t in_pkts; /* incoming packet counter */
79696
79697 /* packet transmitter for different forwarding methods. If it
79698 mangles the packet, it must return NF_DROP or better NF_STOLEN,
79699@@ -705,7 +705,7 @@ struct ip_vs_dest {
79700 __be16 port; /* port number of the server */
79701 union nf_inet_addr addr; /* IP address of the server */
79702 volatile unsigned int flags; /* dest status flags */
79703- atomic_t conn_flags; /* flags to copy to conn */
79704+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
79705 atomic_t weight; /* server weight */
79706
79707 atomic_t refcnt; /* reference counter */
79708@@ -960,11 +960,11 @@ struct netns_ipvs {
79709 /* ip_vs_lblc */
79710 int sysctl_lblc_expiration;
79711 struct ctl_table_header *lblc_ctl_header;
79712- struct ctl_table *lblc_ctl_table;
79713+ ctl_table_no_const *lblc_ctl_table;
79714 /* ip_vs_lblcr */
79715 int sysctl_lblcr_expiration;
79716 struct ctl_table_header *lblcr_ctl_header;
79717- struct ctl_table *lblcr_ctl_table;
79718+ ctl_table_no_const *lblcr_ctl_table;
79719 /* ip_vs_est */
79720 struct list_head est_list; /* estimator list */
79721 spinlock_t est_lock;
79722diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
79723index 80ffde3..968b0f4 100644
79724--- a/include/net/irda/ircomm_tty.h
79725+++ b/include/net/irda/ircomm_tty.h
79726@@ -35,6 +35,7 @@
79727 #include <linux/termios.h>
79728 #include <linux/timer.h>
79729 #include <linux/tty.h> /* struct tty_struct */
79730+#include <asm/local.h>
79731
79732 #include <net/irda/irias_object.h>
79733 #include <net/irda/ircomm_core.h>
79734diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
79735index 714cc9a..ea05f3e 100644
79736--- a/include/net/iucv/af_iucv.h
79737+++ b/include/net/iucv/af_iucv.h
79738@@ -149,7 +149,7 @@ struct iucv_skb_cb {
79739 struct iucv_sock_list {
79740 struct hlist_head head;
79741 rwlock_t lock;
79742- atomic_t autobind_name;
79743+ atomic_unchecked_t autobind_name;
79744 };
79745
79746 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
79747diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
79748index df83f69..9b640b8 100644
79749--- a/include/net/llc_c_ac.h
79750+++ b/include/net/llc_c_ac.h
79751@@ -87,7 +87,7 @@
79752 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
79753 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
79754
79755-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
79756+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
79757
79758 extern int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
79759 extern int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
79760diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
79761index 6ca3113..f8026dd 100644
79762--- a/include/net/llc_c_ev.h
79763+++ b/include/net/llc_c_ev.h
79764@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
79765 return (struct llc_conn_state_ev *)skb->cb;
79766 }
79767
79768-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
79769-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
79770+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
79771+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
79772
79773 extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
79774 extern int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
79775diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
79776index 0e79cfb..f46db31 100644
79777--- a/include/net/llc_c_st.h
79778+++ b/include/net/llc_c_st.h
79779@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
79780 u8 next_state;
79781 llc_conn_ev_qfyr_t *ev_qualifiers;
79782 llc_conn_action_t *ev_actions;
79783-};
79784+} __do_const;
79785
79786 struct llc_conn_state {
79787 u8 current_state;
79788diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
79789index 37a3bbd..55a4241 100644
79790--- a/include/net/llc_s_ac.h
79791+++ b/include/net/llc_s_ac.h
79792@@ -23,7 +23,7 @@
79793 #define SAP_ACT_TEST_IND 9
79794
79795 /* All action functions must look like this */
79796-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
79797+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
79798
79799 extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
79800 struct sk_buff *skb);
79801diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
79802index 567c681..cd73ac0 100644
79803--- a/include/net/llc_s_st.h
79804+++ b/include/net/llc_s_st.h
79805@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
79806 llc_sap_ev_t ev;
79807 u8 next_state;
79808 llc_sap_action_t *ev_actions;
79809-};
79810+} __do_const;
79811
79812 struct llc_sap_state {
79813 u8 curr_state;
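[annotation] The llc_conn_action_t / llc_conn_ev_t / llc_sap_action_t typedefs above gain a top-level const on the function pointer, and the state-transition structs gain __do_const: together the LLC state machine tables become fully read-only. The typedef trick in isolation:

    /* a const-qualified function-pointer typedef: every object declared
     * through it is read-only, so handler tables can live in .rodata */
    typedef int (* const handler_t)(int ev);

    static int on_open(int ev)  { return ev + 1; }
    static int on_close(int ev) { return ev - 1; }

    static handler_t handlers[] = { on_open, on_close };

    /* handlers[0] = on_close;  -- would not compile: read-only location */
    int dispatch(unsigned int i, int ev) { return handlers[i](ev); }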
79814diff --git a/include/net/mac80211.h b/include/net/mac80211.h
79815index cc6035f..a8406fc 100644
79816--- a/include/net/mac80211.h
79817+++ b/include/net/mac80211.h
79818@@ -4361,7 +4361,7 @@ struct rate_control_ops {
79819 void (*add_sta_debugfs)(void *priv, void *priv_sta,
79820 struct dentry *dir);
79821 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
79822-};
79823+} __do_const;
79824
79825 static inline int rate_supported(struct ieee80211_sta *sta,
79826 enum ieee80211_band band,
79827diff --git a/include/net/neighbour.h b/include/net/neighbour.h
79828index 536501a..7c6193c 100644
79829--- a/include/net/neighbour.h
79830+++ b/include/net/neighbour.h
79831@@ -123,7 +123,7 @@ struct neigh_ops {
79832 void (*error_report)(struct neighbour *, struct sk_buff *);
79833 int (*output)(struct neighbour *, struct sk_buff *);
79834 int (*connected_output)(struct neighbour *, struct sk_buff *);
79835-};
79836+} __do_const;
79837
79838 struct pneigh_entry {
79839 struct pneigh_entry *next;
79840diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
79841index 9d22f08..980fbf8 100644
79842--- a/include/net/net_namespace.h
79843+++ b/include/net/net_namespace.h
79844@@ -120,7 +120,7 @@ struct net {
79845 struct netns_ipvs *ipvs;
79846 #endif
79847 struct sock *diag_nlsk;
79848- atomic_t fnhe_genid;
79849+ atomic_unchecked_t fnhe_genid;
79850 };
79851
79852 /*
79853@@ -277,7 +277,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
79854 #define __net_init __init
79855 #define __net_exit __exit_refok
79856 #define __net_initdata __initdata
79857+#ifdef CONSTIFY_PLUGIN
79858 #define __net_initconst __initconst
79859+#else
79860+#define __net_initconst __initdata
79861+#endif
79862 #endif
79863
79864 struct pernet_operations {
79865@@ -287,7 +291,7 @@ struct pernet_operations {
79866 void (*exit_batch)(struct list_head *net_exit_list);
79867 int *id;
79868 size_t size;
79869-};
79870+} __do_const;
79871
79872 /*
79873 * Use these carefully. If you implement a network device and it
79874@@ -335,23 +339,23 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
79875
79876 static inline int rt_genid_ipv4(struct net *net)
79877 {
79878- return atomic_read(&net->ipv4.rt_genid);
79879+ return atomic_read_unchecked(&net->ipv4.rt_genid);
79880 }
79881
79882 static inline void rt_genid_bump_ipv4(struct net *net)
79883 {
79884- atomic_inc(&net->ipv4.rt_genid);
79885+ atomic_inc_unchecked(&net->ipv4.rt_genid);
79886 }
79887
79888 #if IS_ENABLED(CONFIG_IPV6)
79889 static inline int rt_genid_ipv6(struct net *net)
79890 {
79891- return atomic_read(&net->ipv6.rt_genid);
79892+ return atomic_read_unchecked(&net->ipv6.rt_genid);
79893 }
79894
79895 static inline void rt_genid_bump_ipv6(struct net *net)
79896 {
79897- atomic_inc(&net->ipv6.rt_genid);
79898+ atomic_inc_unchecked(&net->ipv6.rt_genid);
79899 }
79900 #else
79901 static inline int rt_genid_ipv6(struct net *net)
79902@@ -373,12 +377,12 @@ static inline void rt_genid_bump_all(struct net *net)
79903
79904 static inline int fnhe_genid(struct net *net)
79905 {
79906- return atomic_read(&net->fnhe_genid);
79907+ return atomic_read_unchecked(&net->fnhe_genid);
79908 }
79909
79910 static inline void fnhe_genid_bump(struct net *net)
79911 {
79912- atomic_inc(&net->fnhe_genid);
79913+ atomic_inc_unchecked(&net->fnhe_genid);
79914 }
79915
79916 #endif /* __NET_NET_NAMESPACE_H */
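[annotation] The rt_genid/fnhe_genid accessors above show the canonical atomic_unchecked_t conversion: generation counters are meant to wrap, so they opt out of the overflow trap that PaX REFCOUNT adds to plain atomic_t. The CONSTIFY_PLUGIN guard in the same hunk downgrades __net_initconst to writable __initdata when the plugin is not active. A rough model of the unchecked type (assumption: the real definitions are per-arch assembly; this only conveys the intent):

    typedef struct { int counter; } atomic_unchecked_t;

    static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
    {
        return *(volatile const int *)&v->counter;
    }

    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        /* same increment as atomic_inc(), minus the overflow check */
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
    }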
79917diff --git a/include/net/netdma.h b/include/net/netdma.h
79918index 8ba8ce2..99b7fff 100644
79919--- a/include/net/netdma.h
79920+++ b/include/net/netdma.h
79921@@ -24,7 +24,7 @@
79922 #include <linux/dmaengine.h>
79923 #include <linux/skbuff.h>
79924
79925-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
79926+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
79927 struct sk_buff *skb, int offset, struct iovec *to,
79928 size_t len, struct dma_pinned_list *pinned_list);
79929
79930diff --git a/include/net/netlink.h b/include/net/netlink.h
79931index 9690b0f..87aded7 100644
79932--- a/include/net/netlink.h
79933+++ b/include/net/netlink.h
79934@@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
79935 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
79936 {
79937 if (mark)
79938- skb_trim(skb, (unsigned char *) mark - skb->data);
79939+ skb_trim(skb, (const unsigned char *) mark - skb->data);
79940 }
79941
79942 /**
79943diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
79944index c9c0c53..53f24c3 100644
79945--- a/include/net/netns/conntrack.h
79946+++ b/include/net/netns/conntrack.h
79947@@ -12,10 +12,10 @@ struct nf_conntrack_ecache;
79948 struct nf_proto_net {
79949 #ifdef CONFIG_SYSCTL
79950 struct ctl_table_header *ctl_table_header;
79951- struct ctl_table *ctl_table;
79952+ ctl_table_no_const *ctl_table;
79953 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
79954 struct ctl_table_header *ctl_compat_header;
79955- struct ctl_table *ctl_compat_table;
79956+ ctl_table_no_const *ctl_compat_table;
79957 #endif
79958 #endif
79959 unsigned int users;
79960@@ -58,7 +58,7 @@ struct nf_ip_net {
79961 struct nf_icmp_net icmpv6;
79962 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
79963 struct ctl_table_header *ctl_table_header;
79964- struct ctl_table *ctl_table;
79965+ ctl_table_no_const *ctl_table;
79966 #endif
79967 };
79968
79969diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
79970index bf2ec22..5e7f9d9 100644
79971--- a/include/net/netns/ipv4.h
79972+++ b/include/net/netns/ipv4.h
79973@@ -67,7 +67,7 @@ struct netns_ipv4 {
79974 kgid_t sysctl_ping_group_range[2];
79975 long sysctl_tcp_mem[3];
79976
79977- atomic_t dev_addr_genid;
79978+ atomic_unchecked_t dev_addr_genid;
79979
79980 #ifdef CONFIG_IP_MROUTE
79981 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
79982@@ -77,6 +77,6 @@ struct netns_ipv4 {
79983 struct fib_rules_ops *mr_rules_ops;
79984 #endif
79985 #endif
79986- atomic_t rt_genid;
79987+ atomic_unchecked_t rt_genid;
79988 };
79989 #endif
79990diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
79991index 0fb2401..477d81c 100644
79992--- a/include/net/netns/ipv6.h
79993+++ b/include/net/netns/ipv6.h
79994@@ -71,8 +71,8 @@ struct netns_ipv6 {
79995 struct fib_rules_ops *mr6_rules_ops;
79996 #endif
79997 #endif
79998- atomic_t dev_addr_genid;
79999- atomic_t rt_genid;
80000+ atomic_unchecked_t dev_addr_genid;
80001+ atomic_unchecked_t rt_genid;
80002 };
80003
80004 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
80005diff --git a/include/net/ping.h b/include/net/ping.h
80006index 2b496e9..935fd8d 100644
80007--- a/include/net/ping.h
80008+++ b/include/net/ping.h
80009@@ -56,7 +56,7 @@ struct ping_iter_state {
80010 extern struct proto ping_prot;
80011 extern struct ping_table ping_table;
80012 #if IS_ENABLED(CONFIG_IPV6)
80013-extern struct pingv6_ops pingv6_ops;
80014+extern struct pingv6_ops *pingv6_ops;
80015 #endif
80016
80017 struct pingfakehdr {
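[annotation] pingv6_ops turns from a writable global object into a pointer: the IPv4 ping code can then dereference a const ops instance that the IPv6 side installs at load time, instead of the module writing callbacks into shared mutable storage. The shape of the pattern (names here are illustrative):

    struct pingv6_ops_sketch { int (*probe)(void); };

    static int v6_probe(void) { return 6; }
    static const struct pingv6_ops_sketch v6_ops = { .probe = v6_probe };

    /* NULL until the v6 side registers; the instances themselves stay const */
    const struct pingv6_ops_sketch *pingv6_ops_ptr;

    void register_v6(void)   { pingv6_ops_ptr = &v6_ops; }
    void unregister_v6(void) { pingv6_ops_ptr = 0; }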
80018diff --git a/include/net/protocol.h b/include/net/protocol.h
80019index 047c047..b9dad15 100644
80020--- a/include/net/protocol.h
80021+++ b/include/net/protocol.h
80022@@ -44,7 +44,7 @@ struct net_protocol {
80023 void (*err_handler)(struct sk_buff *skb, u32 info);
80024 unsigned int no_policy:1,
80025 netns_ok:1;
80026-};
80027+} __do_const;
80028
80029 #if IS_ENABLED(CONFIG_IPV6)
80030 struct inet6_protocol {
80031@@ -57,7 +57,7 @@ struct inet6_protocol {
80032 u8 type, u8 code, int offset,
80033 __be32 info);
80034 unsigned int flags; /* INET6_PROTO_xxx */
80035-};
80036+} __do_const;
80037
80038 #define INET6_PROTO_NOPOLICY 0x1
80039 #define INET6_PROTO_FINAL 0x2
80040diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
80041index 7026648..584cc8c 100644
80042--- a/include/net/rtnetlink.h
80043+++ b/include/net/rtnetlink.h
80044@@ -81,7 +81,7 @@ struct rtnl_link_ops {
80045 const struct net_device *dev);
80046 unsigned int (*get_num_tx_queues)(void);
80047 unsigned int (*get_num_rx_queues)(void);
80048-};
80049+} __do_const;
80050
80051 extern int __rtnl_link_register(struct rtnl_link_ops *ops);
80052 extern void __rtnl_link_unregister(struct rtnl_link_ops *ops);
80053diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
80054index 4ef75af..5aa073a 100644
80055--- a/include/net/sctp/sm.h
80056+++ b/include/net/sctp/sm.h
80057@@ -81,7 +81,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
80058 typedef struct {
80059 sctp_state_fn_t *fn;
80060 const char *name;
80061-} sctp_sm_table_entry_t;
80062+} __do_const sctp_sm_table_entry_t;
80063
80064 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
80065 * currently in use.
80066@@ -293,7 +293,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
80067 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
80068
80069 /* Extern declarations for major data structures. */
80070-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
80071+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
80072
80073
80074 /* Get the size of a DATA chunk payload. */
80075diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
80076index 2174d8d..71d5257 100644
80077--- a/include/net/sctp/structs.h
80078+++ b/include/net/sctp/structs.h
80079@@ -508,7 +508,7 @@ struct sctp_pf {
80080 struct sctp_association *asoc);
80081 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
80082 struct sctp_af *af;
80083-};
80084+} __do_const;
80085
80086
80087 /* Structure to track chunk fragments that have been acked, but peer
80088diff --git a/include/net/sock.h b/include/net/sock.h
80089index 808cbc2..8617e9c 100644
80090--- a/include/net/sock.h
80091+++ b/include/net/sock.h
80092@@ -332,7 +332,7 @@ struct sock {
80093 unsigned int sk_napi_id;
80094 unsigned int sk_ll_usec;
80095 #endif
80096- atomic_t sk_drops;
80097+ atomic_unchecked_t sk_drops;
80098 int sk_rcvbuf;
80099
80100 struct sk_filter __rcu *sk_filter;
80101@@ -1194,7 +1194,7 @@ static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
80102 return ret >> PAGE_SHIFT;
80103 }
80104
80105-static inline long
80106+static inline long __intentional_overflow(-1)
80107 sk_memory_allocated(const struct sock *sk)
80108 {
80109 struct proto *prot = sk->sk_prot;
80110@@ -1821,7 +1821,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
80111 }
80112
80113 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
80114- char __user *from, char *to,
80115+ char __user *from, unsigned char *to,
80116 int copy, int offset)
80117 {
80118 if (skb->ip_summed == CHECKSUM_NONE) {
80119@@ -2083,7 +2083,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
80120 }
80121 }
80122
80123-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
80124+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
80125
80126 /**
80127 * sk_page_frag - return an appropriate page_frag
80128diff --git a/include/net/tcp.h b/include/net/tcp.h
80129index b1aa324..b8530ea 100644
80130--- a/include/net/tcp.h
80131+++ b/include/net/tcp.h
80132@@ -527,7 +527,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
80133 extern void tcp_xmit_retransmit_queue(struct sock *);
80134 extern void tcp_simple_retransmit(struct sock *);
80135 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
80136-extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
80137+extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
80138
80139 extern void tcp_send_probe0(struct sock *);
80140 extern void tcp_send_partial(struct sock *);
80141@@ -699,8 +699,8 @@ struct tcp_skb_cb {
80142 struct inet6_skb_parm h6;
80143 #endif
80144 } header; /* For incoming frames */
80145- __u32 seq; /* Starting sequence number */
80146- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
80147+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
80148+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
80149 __u32 when; /* used to compute rtt's */
80150 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
80151
80152@@ -714,7 +714,7 @@ struct tcp_skb_cb {
80153
80154 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
80155 /* 1 byte hole */
80156- __u32 ack_seq; /* Sequence number ACK'd */
80157+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
80158 };
80159
80160 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
80161diff --git a/include/net/xfrm.h b/include/net/xfrm.h
80162index e253bf0..2278b4b 100644
80163--- a/include/net/xfrm.h
80164+++ b/include/net/xfrm.h
80165@@ -287,7 +287,6 @@ struct xfrm_dst;
80166 struct xfrm_policy_afinfo {
80167 unsigned short family;
80168 struct dst_ops *dst_ops;
80169- void (*garbage_collect)(struct net *net);
80170 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
80171 const xfrm_address_t *saddr,
80172 const xfrm_address_t *daddr);
80173@@ -305,7 +304,7 @@ struct xfrm_policy_afinfo {
80174 struct net_device *dev,
80175 const struct flowi *fl);
80176 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
80177-};
80178+} __do_const;
80179
80180 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
80181 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
80182@@ -342,7 +341,7 @@ struct xfrm_state_afinfo {
80183 int (*transport_finish)(struct sk_buff *skb,
80184 int async);
80185 void (*local_error)(struct sk_buff *skb, u32 mtu);
80186-};
80187+} __do_const;
80188
80189 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
80190 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
80191@@ -427,7 +426,7 @@ struct xfrm_mode {
80192 struct module *owner;
80193 unsigned int encap;
80194 int flags;
80195-};
80196+} __do_const;
80197
80198 /* Flags for xfrm_mode. */
80199 enum {
80200@@ -524,7 +523,7 @@ struct xfrm_policy {
80201 struct timer_list timer;
80202
80203 struct flow_cache_object flo;
80204- atomic_t genid;
80205+ atomic_unchecked_t genid;
80206 u32 priority;
80207 u32 index;
80208 struct xfrm_mark mark;
80209@@ -1164,6 +1163,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
80210 }
80211
80212 extern void xfrm_garbage_collect(struct net *net);
80213+extern void xfrm_garbage_collect_deferred(struct net *net);
80214
80215 #else
80216
80217@@ -1202,6 +1202,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
80218 static inline void xfrm_garbage_collect(struct net *net)
80219 {
80220 }
80221+static inline void xfrm_garbage_collect_deferred(struct net *net)
80222+{
80223+}
80224 #endif
80225
80226 static __inline__
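[annotation] Because xfrm_policy_afinfo becomes __do_const, its garbage_collect member can no longer be filled in per-family at registration; the patch removes the member and exports xfrm_garbage_collect_deferred() instead, with an empty inline stub for !CONFIG_XFRM builds. Constifying an ops struct generally forces any runtime-assigned member out into a dedicated function or pointer. The config-dependent stub pattern in isolation:

    struct net;    /* opaque here */

    #ifdef CONFIG_XFRM
    extern void xfrm_garbage_collect_deferred(struct net *net);
    #else
    static inline void xfrm_garbage_collect_deferred(struct net *net)
    {
        (void)net;    /* nothing to collect without XFRM */
    }
    #endif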
80227diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
80228index 1017e0b..227aa4d 100644
80229--- a/include/rdma/iw_cm.h
80230+++ b/include/rdma/iw_cm.h
80231@@ -122,7 +122,7 @@ struct iw_cm_verbs {
80232 int backlog);
80233
80234 int (*destroy_listen)(struct iw_cm_id *cm_id);
80235-};
80236+} __no_const;
80237
80238 /**
80239 * iw_create_cm_id - Create an IW CM identifier.
80240diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
80241index e1379b4..67eafbe 100644
80242--- a/include/scsi/libfc.h
80243+++ b/include/scsi/libfc.h
80244@@ -762,6 +762,7 @@ struct libfc_function_template {
80245 */
80246 void (*disc_stop_final) (struct fc_lport *);
80247 };
80248+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
80249
80250 /**
80251 * struct fc_disc - Discovery context
80252@@ -866,7 +867,7 @@ struct fc_lport {
80253 struct fc_vport *vport;
80254
80255 /* Operational Information */
80256- struct libfc_function_template tt;
80257+ libfc_function_template_no_const tt;
80258 u8 link_up;
80259 u8 qfull;
80260 enum fc_lport_state state;
80261diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
80262index d65fbec..f80fef2 100644
80263--- a/include/scsi/scsi_device.h
80264+++ b/include/scsi/scsi_device.h
80265@@ -180,9 +180,9 @@ struct scsi_device {
80266 unsigned int max_device_blocked; /* what device_blocked counts down from */
80267 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
80268
80269- atomic_t iorequest_cnt;
80270- atomic_t iodone_cnt;
80271- atomic_t ioerr_cnt;
80272+ atomic_unchecked_t iorequest_cnt;
80273+ atomic_unchecked_t iodone_cnt;
80274+ atomic_unchecked_t ioerr_cnt;
80275
80276 struct device sdev_gendev,
80277 sdev_dev;
80278diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
80279index b797e8f..8e2c3aa 100644
80280--- a/include/scsi/scsi_transport_fc.h
80281+++ b/include/scsi/scsi_transport_fc.h
80282@@ -751,7 +751,8 @@ struct fc_function_template {
80283 unsigned long show_host_system_hostname:1;
80284
80285 unsigned long disable_target_scan:1;
80286-};
80287+} __do_const;
80288+typedef struct fc_function_template __no_const fc_function_template_no_const;
80289
80290
80291 /**
80292diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
80293index ae6c3b8..fd748ac 100644
80294--- a/include/sound/compress_driver.h
80295+++ b/include/sound/compress_driver.h
80296@@ -128,7 +128,7 @@ struct snd_compr_ops {
80297 struct snd_compr_caps *caps);
80298 int (*get_codec_caps) (struct snd_compr_stream *stream,
80299 struct snd_compr_codec_caps *codec);
80300-};
80301+} __no_const;
80302
80303 /**
80304 * struct snd_compr: Compressed device
80305diff --git a/include/sound/soc.h b/include/sound/soc.h
80306index d22cb0a..c6ba150 100644
80307--- a/include/sound/soc.h
80308+++ b/include/sound/soc.h
80309@@ -780,7 +780,7 @@ struct snd_soc_codec_driver {
80310 /* probe ordering - for components with runtime dependencies */
80311 int probe_order;
80312 int remove_order;
80313-};
80314+} __do_const;
80315
80316 /* SoC platform interface */
80317 struct snd_soc_platform_driver {
80318@@ -826,7 +826,7 @@ struct snd_soc_platform_driver {
80319 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
80320 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
80321 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
80322-};
80323+} __do_const;
80324
80325 struct snd_soc_platform {
80326 const char *name;
80327diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
80328index 5bdb8b7..bb1096c 100644
80329--- a/include/target/target_core_base.h
80330+++ b/include/target/target_core_base.h
80331@@ -663,7 +663,7 @@ struct se_device {
80332 spinlock_t stats_lock;
80333 /* Active commands on this virtual SE device */
80334 atomic_t simple_cmds;
80335- atomic_t dev_ordered_id;
80336+ atomic_unchecked_t dev_ordered_id;
80337 atomic_t dev_ordered_sync;
80338 atomic_t dev_qf_count;
80339 int export_count;
80340diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
80341new file mode 100644
80342index 0000000..fb634b7
80343--- /dev/null
80344+++ b/include/trace/events/fs.h
80345@@ -0,0 +1,53 @@
80346+#undef TRACE_SYSTEM
80347+#define TRACE_SYSTEM fs
80348+
80349+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
80350+#define _TRACE_FS_H
80351+
80352+#include <linux/fs.h>
80353+#include <linux/tracepoint.h>
80354+
80355+TRACE_EVENT(do_sys_open,
80356+
80357+ TP_PROTO(const char *filename, int flags, int mode),
80358+
80359+ TP_ARGS(filename, flags, mode),
80360+
80361+ TP_STRUCT__entry(
80362+ __string( filename, filename )
80363+ __field( int, flags )
80364+ __field( int, mode )
80365+ ),
80366+
80367+ TP_fast_assign(
80368+ __assign_str(filename, filename);
80369+ __entry->flags = flags;
80370+ __entry->mode = mode;
80371+ ),
80372+
80373+ TP_printk("\"%s\" %x %o",
80374+ __get_str(filename), __entry->flags, __entry->mode)
80375+);
80376+
80377+TRACE_EVENT(open_exec,
80378+
80379+ TP_PROTO(const char *filename),
80380+
80381+ TP_ARGS(filename),
80382+
80383+ TP_STRUCT__entry(
80384+ __string( filename, filename )
80385+ ),
80386+
80387+ TP_fast_assign(
80388+ __assign_str(filename, filename);
80389+ ),
80390+
80391+ TP_printk("\"%s\"",
80392+ __get_str(filename))
80393+);
80394+
80395+#endif /* _TRACE_FS_H */
80396+
80397+/* This part must be outside protection */
80398+#include <trace/define_trace.h>
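[annotation] The new trace/events/fs.h follows the standard TRACE_EVENT anatomy: TP_PROTO/TP_ARGS give the hook's C prototype, TP_STRUCT__entry the ring-buffer record layout, TP_fast_assign the copy into that record, and TP_printk the textual rendering. A sketch of the call side (assumption: exactly one .c file defines CREATE_TRACE_POINTS before including the header, which is what generates the trace_*() bodies; the hook sites themselves are added elsewhere in this patch):

    /* in exactly one compilation unit: */
    #define CREATE_TRACE_POINTS
    #include <trace/events/fs.h>

    static long open_path_sketch(const char *filename, int flags, int mode)
    {
        trace_do_sys_open(filename, flags, mode);  /* "<name>" <flags> <mode> */
        /* ... the real open path would run here ... */
        return 0;
    }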
80399diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
80400index 1c09820..7f5ec79 100644
80401--- a/include/trace/events/irq.h
80402+++ b/include/trace/events/irq.h
80403@@ -36,7 +36,7 @@ struct softirq_action;
80404 */
80405 TRACE_EVENT(irq_handler_entry,
80406
80407- TP_PROTO(int irq, struct irqaction *action),
80408+ TP_PROTO(int irq, const struct irqaction *action),
80409
80410 TP_ARGS(irq, action),
80411
80412@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
80413 */
80414 TRACE_EVENT(irq_handler_exit,
80415
80416- TP_PROTO(int irq, struct irqaction *action, int ret),
80417+ TP_PROTO(int irq, const struct irqaction *action, int ret),
80418
80419 TP_ARGS(irq, action, ret),
80420
80421diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
80422index 7caf44c..23c6f27 100644
80423--- a/include/uapi/linux/a.out.h
80424+++ b/include/uapi/linux/a.out.h
80425@@ -39,6 +39,14 @@ enum machine_type {
80426 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
80427 };
80428
80429+/* Constants for the N_FLAGS field */
80430+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
80431+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
80432+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
80433+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
80434+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
80435+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
80436+
80437 #if !defined (N_MAGIC)
80438 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
80439 #endif
80440diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
80441index d876736..ccce5c0 100644
80442--- a/include/uapi/linux/byteorder/little_endian.h
80443+++ b/include/uapi/linux/byteorder/little_endian.h
80444@@ -42,51 +42,51 @@
80445
80446 static inline __le64 __cpu_to_le64p(const __u64 *p)
80447 {
80448- return (__force __le64)*p;
80449+ return (__force const __le64)*p;
80450 }
80451-static inline __u64 __le64_to_cpup(const __le64 *p)
80452+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
80453 {
80454- return (__force __u64)*p;
80455+ return (__force const __u64)*p;
80456 }
80457 static inline __le32 __cpu_to_le32p(const __u32 *p)
80458 {
80459- return (__force __le32)*p;
80460+ return (__force const __le32)*p;
80461 }
80462 static inline __u32 __le32_to_cpup(const __le32 *p)
80463 {
80464- return (__force __u32)*p;
80465+ return (__force const __u32)*p;
80466 }
80467 static inline __le16 __cpu_to_le16p(const __u16 *p)
80468 {
80469- return (__force __le16)*p;
80470+ return (__force const __le16)*p;
80471 }
80472 static inline __u16 __le16_to_cpup(const __le16 *p)
80473 {
80474- return (__force __u16)*p;
80475+ return (__force const __u16)*p;
80476 }
80477 static inline __be64 __cpu_to_be64p(const __u64 *p)
80478 {
80479- return (__force __be64)__swab64p(p);
80480+ return (__force const __be64)__swab64p(p);
80481 }
80482 static inline __u64 __be64_to_cpup(const __be64 *p)
80483 {
80484- return __swab64p((__u64 *)p);
80485+ return __swab64p((const __u64 *)p);
80486 }
80487 static inline __be32 __cpu_to_be32p(const __u32 *p)
80488 {
80489- return (__force __be32)__swab32p(p);
80490+ return (__force const __be32)__swab32p(p);
80491 }
80492-static inline __u32 __be32_to_cpup(const __be32 *p)
80493+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
80494 {
80495- return __swab32p((__u32 *)p);
80496+ return __swab32p((const __u32 *)p);
80497 }
80498 static inline __be16 __cpu_to_be16p(const __u16 *p)
80499 {
80500- return (__force __be16)__swab16p(p);
80501+ return (__force const __be16)__swab16p(p);
80502 }
80503 static inline __u16 __be16_to_cpup(const __be16 *p)
80504 {
80505- return __swab16p((__u16 *)p);
80506+ return __swab16p((const __u16 *)p);
80507 }
80508 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
80509 #define __le64_to_cpus(x) do { (void)(x); } while (0)
80510diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
80511index ef6103b..d4e65dd 100644
80512--- a/include/uapi/linux/elf.h
80513+++ b/include/uapi/linux/elf.h
80514@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
80515 #define PT_GNU_EH_FRAME 0x6474e550
80516
80517 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
80518+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
80519+
80520+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
80521+
80522+/* Constants for the e_flags field */
80523+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
80524+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
80525+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
80526+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
80527+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
80528+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
80529
80530 /*
80531 * Extended Numbering
80532@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
80533 #define DT_DEBUG 21
80534 #define DT_TEXTREL 22
80535 #define DT_JMPREL 23
80536+#define DT_FLAGS 30
80537+ #define DF_TEXTREL 0x00000004
80538 #define DT_ENCODING 32
80539 #define OLD_DT_LOOS 0x60000000
80540 #define DT_LOOS 0x6000000d
80541@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
80542 #define PF_W 0x2
80543 #define PF_X 0x1
80544
80545+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
80546+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
80547+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
80548+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
80549+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
80550+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
80551+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
80552+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
80553+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
80554+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
80555+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
80556+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
80557+
80558 typedef struct elf32_phdr{
80559 Elf32_Word p_type;
80560 Elf32_Off p_offset;
80561@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
80562 #define EI_OSABI 7
80563 #define EI_PAD 8
80564
80565+#define EI_PAX 14
80566+
80567 #define ELFMAG0 0x7f /* EI_MAG */
80568 #define ELFMAG1 'E'
80569 #define ELFMAG2 'L'
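[annotation] The loader can now read per-binary PaX policy from three legacy channels defined above: the EI_PAX byte in e_ident, EF_PAX_* bits in e_flags, and a dedicated PT_PAX_FLAGS program header whose p_flags carry paired enable/disable bits (PF_MPROTECT / PF_NOMPROTECT and so on; as I read the pairs, neither bit set means the system default applies). A user-space sketch that locates the PaX program header (assumption: a valid, fully mapped 64-bit ELF image):

    #include <elf.h>
    #include <stdio.h>

    #define PT_PAX_FLAGS_SK 0x65041580U  /* PT_LOOS + 0x5041580 */
    #define PF_MPROTECT_SK  (1U << 8)

    static void print_pax_flags(const unsigned char *image)
    {
        const Elf64_Ehdr *eh = (const Elf64_Ehdr *)image;
        const Elf64_Phdr *ph = (const Elf64_Phdr *)(image + eh->e_phoff);
        int i;

        for (i = 0; i < eh->e_phnum; i++)
            if (ph[i].p_type == PT_PAX_FLAGS_SK)
                printf("PF_MPROTECT %s\n",
                       (ph[i].p_flags & PF_MPROTECT_SK) ? "set" : "clear");
    }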
80570diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
80571index aa169c4..6a2771d 100644
80572--- a/include/uapi/linux/personality.h
80573+++ b/include/uapi/linux/personality.h
80574@@ -30,6 +30,7 @@ enum {
80575 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
80576 ADDR_NO_RANDOMIZE | \
80577 ADDR_COMPAT_LAYOUT | \
80578+ ADDR_LIMIT_3GB | \
80579 MMAP_PAGE_ZERO)
80580
80581 /*
80582diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
80583index 7530e74..e714828 100644
80584--- a/include/uapi/linux/screen_info.h
80585+++ b/include/uapi/linux/screen_info.h
80586@@ -43,7 +43,8 @@ struct screen_info {
80587 __u16 pages; /* 0x32 */
80588 __u16 vesa_attributes; /* 0x34 */
80589 __u32 capabilities; /* 0x36 */
80590- __u8 _reserved[6]; /* 0x3a */
80591+ __u16 vesapm_size; /* 0x3a */
80592+ __u8 _reserved[4]; /* 0x3c */
80593 } __attribute__((packed));
80594
80595 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
80596diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
80597index 0e011eb..82681b1 100644
80598--- a/include/uapi/linux/swab.h
80599+++ b/include/uapi/linux/swab.h
80600@@ -43,7 +43,7 @@
80601 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
80602 */
80603
80604-static inline __attribute_const__ __u16 __fswab16(__u16 val)
80605+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
80606 {
80607 #ifdef __HAVE_BUILTIN_BSWAP16__
80608 return __builtin_bswap16(val);
80609@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
80610 #endif
80611 }
80612
80613-static inline __attribute_const__ __u32 __fswab32(__u32 val)
80614+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
80615 {
80616 #ifdef __HAVE_BUILTIN_BSWAP32__
80617 return __builtin_bswap32(val);
80618@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
80619 #endif
80620 }
80621
80622-static inline __attribute_const__ __u64 __fswab64(__u64 val)
80623+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
80624 {
80625 #ifdef __HAVE_BUILTIN_BSWAP64__
80626 return __builtin_bswap64(val);
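[annotation] The __intentional_overflow(-1) markers feed PaX's size_overflow gcc plugin; my reading of the convention is that -1 exempts the return value from overflow instrumentation (positive arguments name parameter positions). Byte swaps reinterpret bits rather than compute magnitudes, so "overflow" has no meaning here. What __fswab16() computes on the non-builtin fallback path:

    static unsigned short swab16_sketch(unsigned short v)
    {
        /* lossless byte reinterpretation: 0x1234 -> 0x3412 */
        return (unsigned short)((v << 8) | (v >> 8));
    }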
80627diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
80628index 6d67213..8dab561 100644
80629--- a/include/uapi/linux/sysctl.h
80630+++ b/include/uapi/linux/sysctl.h
80631@@ -155,7 +155,11 @@ enum
80632 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
80633 };
80634
80635-
80636+#ifdef CONFIG_PAX_SOFTMODE
80637+enum {
80638+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
80639+};
80640+#endif
80641
80642 /* CTL_VM names: */
80643 enum
80644diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
80645index e4629b9..6958086 100644
80646--- a/include/uapi/linux/xattr.h
80647+++ b/include/uapi/linux/xattr.h
80648@@ -63,5 +63,9 @@
80649 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
80650 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
80651
80652+/* User namespace */
80653+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
80654+#define XATTR_PAX_FLAGS_SUFFIX "flags"
80655+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
80656
80657 #endif /* _UAPI_LINUX_XATTR_H */
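[annotation] With CONFIG_PAX_XATTR_PAX_FLAGS (see the pax_getxattr() declaration earlier in this patch), per-file PaX policy can be carried in the user.pax.flags extended attribute assembled from these macros, instead of ELF header bits. A sketch of the raw syscall path; the value format is the marking tool's convention (setfattr(1), paxctl-ng), not defined in this header, so the flags string below is only a placeholder:

    #include <string.h>
    #include <sys/xattr.h>

    static int mark_binary_sketch(const char *path, const char *flags)
    {
        /* e.g. flags = "pm" as a hypothetical "disable PAGEEXEC/MPROTECT" */
        return setxattr(path, "user.pax.flags", flags, strlen(flags), 0);
    }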
80658diff --git a/include/video/udlfb.h b/include/video/udlfb.h
80659index f9466fa..f4e2b81 100644
80660--- a/include/video/udlfb.h
80661+++ b/include/video/udlfb.h
80662@@ -53,10 +53,10 @@ struct dlfb_data {
80663 u32 pseudo_palette[256];
80664 int blank_mode; /*one of FB_BLANK_ */
80665 /* blit-only rendering path metrics, exposed through sysfs */
80666- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
80667- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
80668- atomic_t bytes_sent; /* to usb, after compression including overhead */
80669- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
80670+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
80671+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
80672+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
80673+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
80674 };
80675
80676 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
80677diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
80678index 30f5362..8ed8ac9 100644
80679--- a/include/video/uvesafb.h
80680+++ b/include/video/uvesafb.h
80681@@ -122,6 +122,7 @@ struct uvesafb_par {
80682 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
80683 u8 pmi_setpal; /* PMI for palette changes */
80684 u16 *pmi_base; /* protected mode interface location */
80685+ u8 *pmi_code; /* protected mode code location */
80686 void *pmi_start;
80687 void *pmi_pal;
80688 u8 *vbe_state_orig; /*
80689diff --git a/init/Kconfig b/init/Kconfig
80690index 3ecd8a1..627843f 100644
80691--- a/init/Kconfig
80692+++ b/init/Kconfig
80693@@ -1086,6 +1086,7 @@ endif # CGROUPS
80694
80695 config CHECKPOINT_RESTORE
80696 bool "Checkpoint/restore support" if EXPERT
80697+ depends on !GRKERNSEC
80698 default n
80699 help
80700 Enables additional kernel features in a sake of checkpoint/restore.
80701@@ -1557,7 +1558,7 @@ config SLUB_DEBUG
80702
80703 config COMPAT_BRK
80704 bool "Disable heap randomization"
80705- default y
80706+ default n
80707 help
80708 Randomizing heap placement makes heap exploits harder, but it
80709 also breaks ancient binaries (including anything libc5 based).
80710@@ -1832,7 +1833,7 @@ config INIT_ALL_POSSIBLE
80711 config STOP_MACHINE
80712 bool
80713 default y
80714- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
80715+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
80716 help
80717 Need stop_machine() primitive.
80718
80719diff --git a/init/Makefile b/init/Makefile
80720index 7bc47ee..6da2dc7 100644
80721--- a/init/Makefile
80722+++ b/init/Makefile
80723@@ -2,6 +2,9 @@
80724 # Makefile for the linux kernel.
80725 #
80726
80727+ccflags-y := $(GCC_PLUGINS_CFLAGS)
80728+asflags-y := $(GCC_PLUGINS_AFLAGS)
80729+
80730 obj-y := main.o version.o mounts.o
80731 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
80732 obj-y += noinitramfs.o
80733diff --git a/init/do_mounts.c b/init/do_mounts.c
80734index a51cddc..25c2768 100644
80735--- a/init/do_mounts.c
80736+++ b/init/do_mounts.c
80737@@ -357,11 +357,11 @@ static void __init get_fs_names(char *page)
80738 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
80739 {
80740 struct super_block *s;
80741- int err = sys_mount(name, "/root", fs, flags, data);
80742+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
80743 if (err)
80744 return err;
80745
80746- sys_chdir("/root");
80747+ sys_chdir((const char __force_user *)"/root");
80748 s = current->fs->pwd.dentry->d_sb;
80749 ROOT_DEV = s->s_dev;
80750 printk(KERN_INFO
80751@@ -482,18 +482,18 @@ void __init change_floppy(char *fmt, ...)
80752 va_start(args, fmt);
80753 vsprintf(buf, fmt, args);
80754 va_end(args);
80755- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
80756+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
80757 if (fd >= 0) {
80758 sys_ioctl(fd, FDEJECT, 0);
80759 sys_close(fd);
80760 }
80761 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
80762- fd = sys_open("/dev/console", O_RDWR, 0);
80763+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
80764 if (fd >= 0) {
80765 sys_ioctl(fd, TCGETS, (long)&termios);
80766 termios.c_lflag &= ~ICANON;
80767 sys_ioctl(fd, TCSETSF, (long)&termios);
80768- sys_read(fd, &c, 1);
80769+ sys_read(fd, (char __user *)&c, 1);
80770 termios.c_lflag |= ICANON;
80771 sys_ioctl(fd, TCSETSF, (long)&termios);
80772 sys_close(fd);
80773@@ -587,8 +587,8 @@ void __init prepare_namespace(void)
80774 mount_root();
80775 out:
80776 devtmpfs_mount("dev");
80777- sys_mount(".", "/", NULL, MS_MOVE, NULL);
80778- sys_chroot(".");
80779+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
80780+ sys_chroot((const char __force_user *)".");
80781 }
80782
80783 static bool is_tmpfs;
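[annotation] The sys_mount()/sys_chdir() call sites above illustrate the __force_user casts added throughout early-boot code: under PaX UDEREF/USERCOPY the syscall prototypes take __user pointers, and boot code passing kernel strings must document the address-space crossing for sparse. Assumption on spelling: __force_user composes __force with __user, roughly as below:

    /* sparse-only attributes; empty for normal compiles */
    #ifdef __CHECKER__
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force))
    #else
    # define __user
    # define __force
    #endif
    #define __force_user __force __user

    static long sys_unlink_sketch(const char __user *pathname)
    {
        (void)pathname;    /* stand-in for the real syscall body */
        return 0;
    }

    void early_boot_sketch(void)
    {
        /* kernel string handed to a user-pointer API: the cast silences
         * sparse and flags the crossing for human readers */
        sys_unlink_sketch((const char __force_user *)"/initrd.image");
    }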
80784diff --git a/init/do_mounts.h b/init/do_mounts.h
80785index f5b978a..69dbfe8 100644
80786--- a/init/do_mounts.h
80787+++ b/init/do_mounts.h
80788@@ -15,15 +15,15 @@ extern int root_mountflags;
80789
80790 static inline int create_dev(char *name, dev_t dev)
80791 {
80792- sys_unlink(name);
80793- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
80794+ sys_unlink((char __force_user *)name);
80795+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
80796 }
80797
80798 #if BITS_PER_LONG == 32
80799 static inline u32 bstat(char *name)
80800 {
80801 struct stat64 stat;
80802- if (sys_stat64(name, &stat) != 0)
80803+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
80804 return 0;
80805 if (!S_ISBLK(stat.st_mode))
80806 return 0;
80807@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
80808 static inline u32 bstat(char *name)
80809 {
80810 struct stat stat;
80811- if (sys_newstat(name, &stat) != 0)
80812+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
80813 return 0;
80814 if (!S_ISBLK(stat.st_mode))
80815 return 0;
80816diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
80817index 3e0878e..8a9d7a0 100644
80818--- a/init/do_mounts_initrd.c
80819+++ b/init/do_mounts_initrd.c
80820@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
80821 {
80822 sys_unshare(CLONE_FS | CLONE_FILES);
80823 /* stdin/stdout/stderr for /linuxrc */
80824- sys_open("/dev/console", O_RDWR, 0);
80825+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
80826 sys_dup(0);
80827 sys_dup(0);
80828 /* move initrd over / and chdir/chroot in initrd root */
80829- sys_chdir("/root");
80830- sys_mount(".", "/", NULL, MS_MOVE, NULL);
80831- sys_chroot(".");
80832+ sys_chdir((const char __force_user *)"/root");
80833+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
80834+ sys_chroot((const char __force_user *)".");
80835 sys_setsid();
80836 return 0;
80837 }
80838@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
80839 create_dev("/dev/root.old", Root_RAM0);
80840 /* mount initrd on rootfs' /root */
80841 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
80842- sys_mkdir("/old", 0700);
80843- sys_chdir("/old");
80844+ sys_mkdir((const char __force_user *)"/old", 0700);
80845+ sys_chdir((const char __force_user *)"/old");
80846
80847 /* try loading default modules from initrd */
80848 load_default_modules();
80849@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
80850 current->flags &= ~PF_FREEZER_SKIP;
80851
80852 /* move initrd to rootfs' /old */
80853- sys_mount("..", ".", NULL, MS_MOVE, NULL);
80854+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
80855 /* switch root and cwd back to / of rootfs */
80856- sys_chroot("..");
80857+ sys_chroot((const char __force_user *)"..");
80858
80859 if (new_decode_dev(real_root_dev) == Root_RAM0) {
80860- sys_chdir("/old");
80861+ sys_chdir((const char __force_user *)"/old");
80862 return;
80863 }
80864
80865- sys_chdir("/");
80866+ sys_chdir((const char __force_user *)"/");
80867 ROOT_DEV = new_decode_dev(real_root_dev);
80868 mount_root();
80869
80870 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
80871- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
80872+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
80873 if (!error)
80874 printk("okay\n");
80875 else {
80876- int fd = sys_open("/dev/root.old", O_RDWR, 0);
80877+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
80878 if (error == -ENOENT)
80879 printk("/initrd does not exist. Ignored.\n");
80880 else
80881 printk("failed\n");
80882 printk(KERN_NOTICE "Unmounting old root\n");
80883- sys_umount("/old", MNT_DETACH);
80884+ sys_umount((char __force_user *)"/old", MNT_DETACH);
80885 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
80886 if (fd < 0) {
80887 error = fd;
80888@@ -127,11 +127,11 @@ int __init initrd_load(void)
80889 * mounted in the normal path.
80890 */
80891 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
80892- sys_unlink("/initrd.image");
80893+ sys_unlink((const char __force_user *)"/initrd.image");
80894 handle_initrd();
80895 return 1;
80896 }
80897 }
80898- sys_unlink("/initrd.image");
80899+ sys_unlink((const char __force_user *)"/initrd.image");
80900 return 0;
80901 }
80902diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
80903index 8cb6db5..d729f50 100644
80904--- a/init/do_mounts_md.c
80905+++ b/init/do_mounts_md.c
80906@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
80907 partitioned ? "_d" : "", minor,
80908 md_setup_args[ent].device_names);
80909
80910- fd = sys_open(name, 0, 0);
80911+ fd = sys_open((char __force_user *)name, 0, 0);
80912 if (fd < 0) {
80913 printk(KERN_ERR "md: open failed - cannot start "
80914 "array %s\n", name);
80915@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
80916 * array without it
80917 */
80918 sys_close(fd);
80919- fd = sys_open(name, 0, 0);
80920+ fd = sys_open((char __force_user *)name, 0, 0);
80921 sys_ioctl(fd, BLKRRPART, 0);
80922 }
80923 sys_close(fd);
80924@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
80925
80926 wait_for_device_probe();
80927
80928- fd = sys_open("/dev/md0", 0, 0);
80929+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
80930 if (fd >= 0) {
80931 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
80932 sys_close(fd);
80933diff --git a/init/init_task.c b/init/init_task.c
80934index ba0a7f36..2bcf1d5 100644
80935--- a/init/init_task.c
80936+++ b/init/init_task.c
80937@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
80938 * Initial thread structure. Alignment of this is handled by a special
80939 * linker map entry.
80940 */
80941+#ifdef CONFIG_X86
80942+union thread_union init_thread_union __init_task_data;
80943+#else
80944 union thread_union init_thread_union __init_task_data =
80945 { INIT_THREAD_INFO(init_task) };
80946+#endif
80947diff --git a/init/initramfs.c b/init/initramfs.c
80948index a67ef9d..2d17ed9 100644
80949--- a/init/initramfs.c
80950+++ b/init/initramfs.c
80951@@ -84,7 +84,7 @@ static void __init free_hash(void)
80952 }
80953 }
80954
80955-static long __init do_utime(char *filename, time_t mtime)
80956+static long __init do_utime(char __force_user *filename, time_t mtime)
80957 {
80958 struct timespec t[2];
80959
80960@@ -119,7 +119,7 @@ static void __init dir_utime(void)
80961 struct dir_entry *de, *tmp;
80962 list_for_each_entry_safe(de, tmp, &dir_list, list) {
80963 list_del(&de->list);
80964- do_utime(de->name, de->mtime);
80965+ do_utime((char __force_user *)de->name, de->mtime);
80966 kfree(de->name);
80967 kfree(de);
80968 }
80969@@ -281,7 +281,7 @@ static int __init maybe_link(void)
80970 if (nlink >= 2) {
80971 char *old = find_link(major, minor, ino, mode, collected);
80972 if (old)
80973- return (sys_link(old, collected) < 0) ? -1 : 1;
80974+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
80975 }
80976 return 0;
80977 }
80978@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
80979 {
80980 struct stat st;
80981
80982- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
80983+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
80984 if (S_ISDIR(st.st_mode))
80985- sys_rmdir(path);
80986+ sys_rmdir((char __force_user *)path);
80987 else
80988- sys_unlink(path);
80989+ sys_unlink((char __force_user *)path);
80990 }
80991 }
80992
80993@@ -315,7 +315,7 @@ static int __init do_name(void)
80994 int openflags = O_WRONLY|O_CREAT;
80995 if (ml != 1)
80996 openflags |= O_TRUNC;
80997- wfd = sys_open(collected, openflags, mode);
80998+ wfd = sys_open((char __force_user *)collected, openflags, mode);
80999
81000 if (wfd >= 0) {
81001 sys_fchown(wfd, uid, gid);
81002@@ -327,17 +327,17 @@ static int __init do_name(void)
81003 }
81004 }
81005 } else if (S_ISDIR(mode)) {
81006- sys_mkdir(collected, mode);
81007- sys_chown(collected, uid, gid);
81008- sys_chmod(collected, mode);
81009+ sys_mkdir((char __force_user *)collected, mode);
81010+ sys_chown((char __force_user *)collected, uid, gid);
81011+ sys_chmod((char __force_user *)collected, mode);
81012 dir_add(collected, mtime);
81013 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
81014 S_ISFIFO(mode) || S_ISSOCK(mode)) {
81015 if (maybe_link() == 0) {
81016- sys_mknod(collected, mode, rdev);
81017- sys_chown(collected, uid, gid);
81018- sys_chmod(collected, mode);
81019- do_utime(collected, mtime);
81020+ sys_mknod((char __force_user *)collected, mode, rdev);
81021+ sys_chown((char __force_user *)collected, uid, gid);
81022+ sys_chmod((char __force_user *)collected, mode);
81023+ do_utime((char __force_user *)collected, mtime);
81024 }
81025 }
81026 return 0;
81027@@ -346,15 +346,15 @@ static int __init do_name(void)
81028 static int __init do_copy(void)
81029 {
81030 if (count >= body_len) {
81031- sys_write(wfd, victim, body_len);
81032+ sys_write(wfd, (char __force_user *)victim, body_len);
81033 sys_close(wfd);
81034- do_utime(vcollected, mtime);
81035+ do_utime((char __force_user *)vcollected, mtime);
81036 kfree(vcollected);
81037 eat(body_len);
81038 state = SkipIt;
81039 return 0;
81040 } else {
81041- sys_write(wfd, victim, count);
81042+ sys_write(wfd, (char __force_user *)victim, count);
81043 body_len -= count;
81044 eat(count);
81045 return 1;
81046@@ -365,9 +365,9 @@ static int __init do_symlink(void)
81047 {
81048 collected[N_ALIGN(name_len) + body_len] = '\0';
81049 clean_path(collected, 0);
81050- sys_symlink(collected + N_ALIGN(name_len), collected);
81051- sys_lchown(collected, uid, gid);
81052- do_utime(collected, mtime);
81053+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
81054+ sys_lchown((char __force_user *)collected, uid, gid);
81055+ do_utime((char __force_user *)collected, mtime);
81056 state = SkipIt;
81057 next_state = Reset;
81058 return 0;
81059@@ -583,7 +583,7 @@ static int __init populate_rootfs(void)
81060 {
81061 char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
81062 if (err)
81063- panic(err); /* Failed to decompress INTERNAL initramfs */
81064+ panic("%s", err); /* Failed to decompress INTERNAL initramfs */
81065 if (initrd_start) {
81066 #ifdef CONFIG_BLK_DEV_RAM
81067 int fd;
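[annotation] The panic(err) fix above is a classic format-string repair: err is not a literal, so any '%' in the decompression error message would be interpreted as conversion specifiers and walk nonexistent varargs. Pinning the format to "%s" prints the message verbatim. The same hazard in miniature:

    #include <stdio.h>

    static void report(const char *err)
    {
        /* printf(err);       unsafe: "%s"/"%n" inside err get interpreted */
        printf("%s\n", err);  /* safe: err is data, not a format */
    }

    int main(void)
    {
        report("decompress failed: bad magic %n%s");
        return 0;
    }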
81068diff --git a/init/main.c b/init/main.c
81069index 63d3e8f..50bd5f8 100644
81070--- a/init/main.c
81071+++ b/init/main.c
81072@@ -103,6 +103,8 @@ static inline void mark_rodata_ro(void) { }
81073 extern void tc_init(void);
81074 #endif
81075
81076+extern void grsecurity_init(void);
81077+
81078 /*
81079 * Debug helper: via this flag we know that we are in 'early bootup code'
81080 * where only the boot processor is running with IRQ disabled. This means
81081@@ -156,6 +158,75 @@ static int __init set_reset_devices(char *str)
81082
81083 __setup("reset_devices", set_reset_devices);
81084
81085+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
81086+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
81087+static int __init setup_grsec_proc_gid(char *str)
81088+{
81089+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
81090+ return 1;
81091+}
81092+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
81093+#endif
81094+
81095+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
81096+unsigned long pax_user_shadow_base __read_only;
81097+EXPORT_SYMBOL(pax_user_shadow_base);
81098+extern char pax_enter_kernel_user[];
81099+extern char pax_exit_kernel_user[];
81100+#endif
81101+
81102+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
81103+static int __init setup_pax_nouderef(char *str)
81104+{
81105+#ifdef CONFIG_X86_32
81106+ unsigned int cpu;
81107+ struct desc_struct *gdt;
81108+
81109+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
81110+ gdt = get_cpu_gdt_table(cpu);
81111+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
81112+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
81113+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
81114+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
81115+ }
81116+ loadsegment(ds, __KERNEL_DS);
81117+ loadsegment(es, __KERNEL_DS);
81118+ loadsegment(ss, __KERNEL_DS);
81119+#else
81120+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
81121+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
81122+ clone_pgd_mask = ~(pgdval_t)0UL;
81123+ pax_user_shadow_base = 0UL;
81124+ setup_clear_cpu_cap(X86_FEATURE_PCID);
81125+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
81126+#endif
81127+
81128+ return 0;
81129+}
81130+early_param("pax_nouderef", setup_pax_nouderef);
81131+
81132+#ifdef CONFIG_X86_64
81133+static int __init setup_pax_weakuderef(char *str)
81134+{
81135+ if (clone_pgd_mask != ~(pgdval_t)0UL)
81136+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
81137+ return 1;
81138+}
81139+__setup("pax_weakuderef", setup_pax_weakuderef);
81140+#endif
81141+#endif
81142+
81143+#ifdef CONFIG_PAX_SOFTMODE
81144+int pax_softmode;
81145+
81146+static int __init setup_pax_softmode(char *str)
81147+{
81148+ get_option(&str, &pax_softmode);
81149+ return 1;
81150+}
81151+__setup("pax_softmode=", setup_pax_softmode);
81152+#endif
81153+
81154 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
81155 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
81156 static const char *panic_later, *panic_param;
81157@@ -682,25 +753,24 @@ int __init_or_module do_one_initcall(initcall_t fn)
81158 {
81159 int count = preempt_count();
81160 int ret;
81161- char msgbuf[64];
81162+ const char *msg1 = "", *msg2 = "";
81163
81164 if (initcall_debug)
81165 ret = do_one_initcall_debug(fn);
81166 else
81167 ret = fn();
81168
81169- msgbuf[0] = 0;
81170-
81171 if (preempt_count() != count) {
81172- sprintf(msgbuf, "preemption imbalance ");
81173+ msg1 = " preemption imbalance";
81174 preempt_count() = count;
81175 }
81176 if (irqs_disabled()) {
81177- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
81178+ msg2 = " disabled interrupts";
81179 local_irq_enable();
81180 }
81181- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
81182+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
81183
81184+ add_latent_entropy();
81185 return ret;
81186 }
81187
81188@@ -807,10 +877,14 @@ static int run_init_process(const char *init_filename)
81189 {
81190 argv_init[0] = init_filename;
81191 return do_execve(init_filename,
81192- (const char __user *const __user *)argv_init,
81193- (const char __user *const __user *)envp_init);
81194+ (const char __user *const __force_user *)argv_init,
81195+ (const char __user *const __force_user *)envp_init);
81196 }
81197
81198+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
81199+extern int gr_init_ran;
81200+#endif
81201+
81202 static noinline void __init kernel_init_freeable(void);
81203
81204 static int __ref kernel_init(void *unused)
81205@@ -831,6 +905,11 @@ static int __ref kernel_init(void *unused)
81206 pr_err("Failed to execute %s\n", ramdisk_execute_command);
81207 }
81208
81209+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
81210+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
81211+ gr_init_ran = 1;
81212+#endif
81213+
81214 /*
81215 * We try each of these until one succeeds.
81216 *
81217@@ -885,7 +964,7 @@ static noinline void __init kernel_init_freeable(void)
81218 do_basic_setup();
81219
81220 /* Open the /dev/console on the rootfs, this should never fail */
81221- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
81222+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
81223 pr_err("Warning: unable to open an initial console.\n");
81224
81225 (void) sys_dup(0);
81226@@ -898,11 +977,13 @@ static noinline void __init kernel_init_freeable(void)
81227 if (!ramdisk_execute_command)
81228 ramdisk_execute_command = "/init";
81229
81230- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
81231+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
81232 ramdisk_execute_command = NULL;
81233 prepare_namespace();
81234 }
81235
81236+ grsecurity_init();
81237+
81238 /*
81239 * Ok, we have completed the initial bootup, and
81240 * we're essentially up and running. Get rid of the
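init/main.c gains three boot-time switches here (grsec_proc_gid=, pax_nouderef, pax_softmode=), all built on the stock __setup()/early_param() machinery: an early_param() handler runs from parse_early_param(), well before normal initcalls, which is why pax_nouderef can still rewrite per-CPU GDT entries in time, while __setup() handlers return 1 to mark the option consumed. A kernel-style sketch with a hypothetical foo= parameter:

#include <linux/init.h>
#include <linux/kernel.h>

static int foo_mode;

/* Parses "foo=<int>" from the kernel command line, the same way
 * setup_pax_softmode() above uses get_option(). */
static int __init setup_foo(char *str)
{
        get_option(&str, &foo_mode);
        return 1; /* option handled, don't pass it on to init */
}
__setup("foo=", setup_foo);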
81241diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
81242index b0e99de..09f385c 100644
81243--- a/ipc/ipc_sysctl.c
81244+++ b/ipc/ipc_sysctl.c
81245@@ -30,7 +30,7 @@ static void *get_ipc(ctl_table *table)
81246 static int proc_ipc_dointvec(ctl_table *table, int write,
81247 void __user *buffer, size_t *lenp, loff_t *ppos)
81248 {
81249- struct ctl_table ipc_table;
81250+ ctl_table_no_const ipc_table;
81251
81252 memcpy(&ipc_table, table, sizeof(ipc_table));
81253 ipc_table.data = get_ipc(table);
81254@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write,
81255 static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
81256 void __user *buffer, size_t *lenp, loff_t *ppos)
81257 {
81258- struct ctl_table ipc_table;
81259+ ctl_table_no_const ipc_table;
81260
81261 memcpy(&ipc_table, table, sizeof(ipc_table));
81262 ipc_table.data = get_ipc(table);
81263@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
81264 static int proc_ipc_callback_dointvec_minmax(ctl_table *table, int write,
81265 void __user *buffer, size_t *lenp, loff_t *ppos)
81266 {
81267- struct ctl_table ipc_table;
81268+ ctl_table_no_const ipc_table;
81269 size_t lenp_bef = *lenp;
81270 int rc;
81271
81272@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec_minmax(ctl_table *table, int write,
81273 static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
81274 void __user *buffer, size_t *lenp, loff_t *ppos)
81275 {
81276- struct ctl_table ipc_table;
81277+ ctl_table_no_const ipc_table;
81278 memcpy(&ipc_table, table, sizeof(ipc_table));
81279 ipc_table.data = get_ipc(table);
81280
81281@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
81282 static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
81283 void __user *buffer, size_t *lenp, loff_t *ppos)
81284 {
81285- struct ctl_table ipc_table;
81286+ ctl_table_no_const ipc_table;
81287 size_t lenp_bef = *lenp;
81288 int oldval;
81289 int rc;
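ctl_table_no_const is defined elsewhere in the patch; judging by its use above it is assumed to be a ctl_table typedef that the grsecurity constification plugin leaves writable, so the usual idiom — copy the shared table, repoint .data at the per-namespace value — keeps compiling once the shared tables themselves become const. A runnable userspace analogue of that idiom:

#include <stdio.h>
#include <string.h>

struct ctl_entry {
        const char *procname;
        void *data;
};

/* The shared, read-only table (constified in the kernel case). */
static const struct ctl_entry shared = { "msgmax", NULL };

static void handle(const struct ctl_entry *table, int *ns_value)
{
        struct ctl_entry local; /* the writable "no_const" copy */

        memcpy(&local, table, sizeof(local));
        local.data = ns_value; /* repoint at the caller's value */
        printf("%s -> %d\n", local.procname, *(int *)local.data);
}

int main(void)
{
        int val = 8192;

        handle(&shared, &val);
        return 0;
}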
81290diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
81291index 383d638..943fdbb 100644
81292--- a/ipc/mq_sysctl.c
81293+++ b/ipc/mq_sysctl.c
81294@@ -25,7 +25,7 @@ static void *get_mq(ctl_table *table)
81295 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
81296 void __user *buffer, size_t *lenp, loff_t *ppos)
81297 {
81298- struct ctl_table mq_table;
81299+ ctl_table_no_const mq_table;
81300 memcpy(&mq_table, table, sizeof(mq_table));
81301 mq_table.data = get_mq(table);
81302
81303diff --git a/ipc/mqueue.c b/ipc/mqueue.c
81304index ae1996d..a35f2cc 100644
81305--- a/ipc/mqueue.c
81306+++ b/ipc/mqueue.c
81307@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
81308 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
81309 info->attr.mq_msgsize);
81310
81311+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
81312 spin_lock(&mq_lock);
81313 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
81314 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
81315diff --git a/ipc/msg.c b/ipc/msg.c
81316index 558aa91..359e718 100644
81317--- a/ipc/msg.c
81318+++ b/ipc/msg.c
81319@@ -297,18 +297,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
81320 return security_msg_queue_associate(msq, msgflg);
81321 }
81322
81323+static struct ipc_ops msg_ops = {
81324+ .getnew = newque,
81325+ .associate = msg_security,
81326+ .more_checks = NULL
81327+};
81328+
81329 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
81330 {
81331 struct ipc_namespace *ns;
81332- struct ipc_ops msg_ops;
81333 struct ipc_params msg_params;
81334
81335 ns = current->nsproxy->ipc_ns;
81336
81337- msg_ops.getnew = newque;
81338- msg_ops.associate = msg_security;
81339- msg_ops.more_checks = NULL;
81340-
81341 msg_params.key = key;
81342 msg_params.flg = msgflg;
81343
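Hoisting msg_ops out of msgget() (and sem_ops/shm_ops below) replaces a struct rebuilt on the stack for every syscall with one file-scope object initialized at compile time, which a constification pass can then move into read-only data. A runnable miniature of the resulting shape:

#include <stdio.h>

struct ops {
        int (*getnew)(int key);
        int (*associate)(int key);
        int (*more_checks)(int key);
};

static int newque(int key) { return key + 1; }
static int msg_security(int key) { (void)key; return 0; }

/* Designated initializers; unnamed members would default to zero,
 * so .more_checks = NULL is spelled out only for symmetry. */
static const struct ops msg_ops = {
        .getnew = newque,
        .associate = msg_security,
        .more_checks = NULL,
};

int main(void)
{
        printf("%d\n", msg_ops.getnew(41));
        return 0;
}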
81344diff --git a/ipc/sem.c b/ipc/sem.c
81345index db9d241..bc8427c 100644
81346--- a/ipc/sem.c
81347+++ b/ipc/sem.c
81348@@ -562,10 +562,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
81349 return 0;
81350 }
81351
81352+static struct ipc_ops sem_ops = {
81353+ .getnew = newary,
81354+ .associate = sem_security,
81355+ .more_checks = sem_more_checks
81356+};
81357+
81358 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
81359 {
81360 struct ipc_namespace *ns;
81361- struct ipc_ops sem_ops;
81362 struct ipc_params sem_params;
81363
81364 ns = current->nsproxy->ipc_ns;
81365@@ -573,10 +578,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
81366 if (nsems < 0 || nsems > ns->sc_semmsl)
81367 return -EINVAL;
81368
81369- sem_ops.getnew = newary;
81370- sem_ops.associate = sem_security;
81371- sem_ops.more_checks = sem_more_checks;
81372-
81373 sem_params.key = key;
81374 sem_params.flg = semflg;
81375 sem_params.u.nsems = nsems;
81376diff --git a/ipc/shm.c b/ipc/shm.c
81377index 7a51443..3a257d8 100644
81378--- a/ipc/shm.c
81379+++ b/ipc/shm.c
81380@@ -72,6 +72,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
81381 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
81382 #endif
81383
81384+#ifdef CONFIG_GRKERNSEC
81385+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
81386+ const time_t shm_createtime, const kuid_t cuid,
81387+ const int shmid);
81388+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
81389+ const time_t shm_createtime);
81390+#endif
81391+
81392 void shm_init_ns(struct ipc_namespace *ns)
81393 {
81394 ns->shm_ctlmax = SHMMAX;
81395@@ -554,6 +562,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
81396 shp->shm_lprid = 0;
81397 shp->shm_atim = shp->shm_dtim = 0;
81398 shp->shm_ctim = get_seconds();
81399+#ifdef CONFIG_GRKERNSEC
81400+ {
81401+ struct timespec timeval;
81402+ do_posix_clock_monotonic_gettime(&timeval);
81403+
81404+ shp->shm_createtime = timeval.tv_sec;
81405+ }
81406+#endif
81407 shp->shm_segsz = size;
81408 shp->shm_nattch = 0;
81409 shp->shm_file = file;
81410@@ -607,18 +623,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
81411 return 0;
81412 }
81413
81414+static struct ipc_ops shm_ops = {
81415+ .getnew = newseg,
81416+ .associate = shm_security,
81417+ .more_checks = shm_more_checks
81418+};
81419+
81420 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
81421 {
81422 struct ipc_namespace *ns;
81423- struct ipc_ops shm_ops;
81424 struct ipc_params shm_params;
81425
81426 ns = current->nsproxy->ipc_ns;
81427
81428- shm_ops.getnew = newseg;
81429- shm_ops.associate = shm_security;
81430- shm_ops.more_checks = shm_more_checks;
81431-
81432 shm_params.key = key;
81433 shm_params.flg = shmflg;
81434 shm_params.u.size = size;
81435@@ -1089,6 +1106,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
81436 f_mode = FMODE_READ | FMODE_WRITE;
81437 }
81438 if (shmflg & SHM_EXEC) {
81439+
81440+#ifdef CONFIG_PAX_MPROTECT
81441+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
81442+ goto out;
81443+#endif
81444+
81445 prot |= PROT_EXEC;
81446 acc_mode |= S_IXUGO;
81447 }
81448@@ -1113,6 +1136,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
81449 if (err)
81450 goto out_unlock;
81451
81452+#ifdef CONFIG_GRKERNSEC
81453+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
81454+ shp->shm_perm.cuid, shmid) ||
81455+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
81456+ err = -EACCES;
81457+ goto out_unlock;
81458+ }
81459+#endif
81460+
81461 ipc_lock_object(&shp->shm_perm);
81462
81463 /* check if shm_destroy() is tearing down shp */
81464@@ -1125,6 +1157,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
81465 path = shp->shm_file->f_path;
81466 path_get(&path);
81467 shp->shm_nattch++;
81468+#ifdef CONFIG_GRKERNSEC
81469+ shp->shm_lapid = current->pid;
81470+#endif
81471 size = i_size_read(path.dentry->d_inode);
81472 ipc_unlock_object(&shp->shm_perm);
81473 rcu_read_unlock();
81474diff --git a/ipc/util.c b/ipc/util.c
81475index 7684f41..f7da711 100644
81476--- a/ipc/util.c
81477+++ b/ipc/util.c
81478@@ -71,6 +71,8 @@ struct ipc_proc_iface {
81479 int (*show)(struct seq_file *, void *);
81480 };
81481
81482+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
81483+
81484 static void ipc_memory_notifier(struct work_struct *work)
81485 {
81486 ipcns_notify(IPCNS_MEMCHANGED);
81487@@ -560,6 +562,9 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
81488 granted_mode >>= 6;
81489 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
81490 granted_mode >>= 3;
81491+ else if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
81492+ return -1;
81493+
81494 /* is there some bit set in requested_mode but not in granted_mode? */
81495 if ((requested_mode & ~granted_mode & 0007) &&
81496 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
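For reference, the mode arithmetic this hunk hooks into: ipcperms() shifts granted_mode so the relevant owner/group/other triplet lands in the low three bits, then refuses a request that asks for any bit the class does not grant. A runnable check of that logic:

#include <stdio.h>

static int denied(unsigned requested, unsigned granted,
                  int is_owner, int is_group)
{
        if (is_owner)
                granted >>= 6;
        else if (is_group)
                granted >>= 3;
        /* non-zero iff some requested bit was not granted */
        return (requested & ~granted & 0007) != 0;
}

int main(void)
{
        /* mode 0640: group members may read (04) but not write (02) */
        printf("group write on 0640: %s\n",
               denied(02, 0640, 0, 1) ? "denied" : "ok");
        printf("owner write on 0640: %s\n",
               denied(02, 0640, 1, 0) ? "denied" : "ok");
        return 0;
}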
81497diff --git a/kernel/acct.c b/kernel/acct.c
81498index 8d6e145..33e0b1e 100644
81499--- a/kernel/acct.c
81500+++ b/kernel/acct.c
81501@@ -556,7 +556,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
81502 */
81503 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
81504 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
81505- file->f_op->write(file, (char *)&ac,
81506+ file->f_op->write(file, (char __force_user *)&ac,
81507 sizeof(acct_t), &file->f_pos);
81508 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
81509 set_fs(fs);
81510diff --git a/kernel/audit.c b/kernel/audit.c
81511index 7ddfd8a..49766eb 100644
81512--- a/kernel/audit.c
81513+++ b/kernel/audit.c
81514@@ -118,7 +118,7 @@ u32 audit_sig_sid = 0;
81515 3) suppressed due to audit_rate_limit
81516 4) suppressed due to audit_backlog_limit
81517 */
81518-static atomic_t audit_lost = ATOMIC_INIT(0);
81519+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
81520
81521 /* The netlink socket. */
81522 static struct sock *audit_sock;
81523@@ -240,7 +240,7 @@ void audit_log_lost(const char *message)
81524 unsigned long now;
81525 int print;
81526
81527- atomic_inc(&audit_lost);
81528+ atomic_inc_unchecked(&audit_lost);
81529
81530 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
81531
81532@@ -259,7 +259,7 @@ void audit_log_lost(const char *message)
81533 printk(KERN_WARNING
81534 "audit: audit_lost=%d audit_rate_limit=%d "
81535 "audit_backlog_limit=%d\n",
81536- atomic_read(&audit_lost),
81537+ atomic_read_unchecked(&audit_lost),
81538 audit_rate_limit,
81539 audit_backlog_limit);
81540 audit_panic(message);
81541@@ -665,7 +665,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
81542 status_set.pid = audit_pid;
81543 status_set.rate_limit = audit_rate_limit;
81544 status_set.backlog_limit = audit_backlog_limit;
81545- status_set.lost = atomic_read(&audit_lost);
81546+ status_set.lost = atomic_read_unchecked(&audit_lost);
81547 status_set.backlog = skb_queue_len(&audit_skb_queue);
81548 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
81549 &status_set, sizeof(status_set));
81550@@ -1252,7 +1252,7 @@ void audit_log_n_hex(struct audit_buffer *ab, const unsigned char *buf,
81551 int i, avail, new_len;
81552 unsigned char *ptr;
81553 struct sk_buff *skb;
81554- static const unsigned char *hex = "0123456789ABCDEF";
81555+ static const unsigned char hex[] = "0123456789ABCDEF";
81556
81557 if (!ab)
81558 return;
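atomic_unchecked_t and the *_unchecked helpers belong to PaX's overflow-checked atomics: plain atomic_t operations are assumed to trap on signed overflow so that a leaked reference can never wrap a refcount back into a live value, while pure statistics such as audit_lost move to the unchecked variants because wrapping is harmless there. A userspace sketch of the split (the real check is assumed to use the CPU overflow flag rather than a compiler builtin):

#include <stdio.h>
#include <stdlib.h>

typedef struct { int v; } atomic_t;
typedef struct { int v; } atomic_unchecked_t;

static void atomic_inc(atomic_t *a)
{
        if (__builtin_add_overflow(a->v, 1, &a->v)) {
                fprintf(stderr, "refcount overflow detected\n");
                abort(); /* the kernel would kill the offending task */
        }
}

static void atomic_inc_unchecked(atomic_unchecked_t *a)
{
        /* wraparound via unsigned arithmetic: tolerated by design */
        a->v = (int)((unsigned int)a->v + 1u);
}

int main(void)
{
        atomic_t ref = { 1 };
        atomic_unchecked_t lost = { 0x7fffffff };

        atomic_inc(&ref);            /* fine: no overflow */
        atomic_inc_unchecked(&lost); /* wraps, a statistic may do that */
        printf("ref=%d lost=%d\n", ref.v, lost.v);
        return 0;
}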
81559diff --git a/kernel/auditsc.c b/kernel/auditsc.c
81560index 9845cb3..3ec9369 100644
81561--- a/kernel/auditsc.c
81562+++ b/kernel/auditsc.c
81563@@ -1962,7 +1962,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
81564 }
81565
81566 /* global counter which is incremented every time something logs in */
81567-static atomic_t session_id = ATOMIC_INIT(0);
81568+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
81569
81570 /**
81571 * audit_set_loginuid - set current task's audit_context loginuid
81572@@ -1986,7 +1986,7 @@ int audit_set_loginuid(kuid_t loginuid)
81573 return -EPERM;
81574 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
81575
81576- sessionid = atomic_inc_return(&session_id);
81577+ sessionid = atomic_inc_return_unchecked(&session_id);
81578 if (context && context->in_syscall) {
81579 struct audit_buffer *ab;
81580
81581diff --git a/kernel/capability.c b/kernel/capability.c
81582index 4e66bf9..cdccecf 100644
81583--- a/kernel/capability.c
81584+++ b/kernel/capability.c
81585@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
81586 * before modification is attempted and the application
81587 * fails.
81588 */
81589+ if (tocopy > ARRAY_SIZE(kdata))
81590+ return -EFAULT;
81591+
81592 if (copy_to_user(dataptr, kdata, tocopy
81593 * sizeof(struct __user_cap_data_struct))) {
81594 return -EFAULT;
81595@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
81596 int ret;
81597
81598 rcu_read_lock();
81599- ret = security_capable(__task_cred(t), ns, cap);
81600+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
81601+ gr_task_is_capable(t, __task_cred(t), cap);
81602 rcu_read_unlock();
81603
81604- return (ret == 0);
81605+ return ret;
81606 }
81607
81608 /**
81609@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
81610 int ret;
81611
81612 rcu_read_lock();
81613- ret = security_capable_noaudit(__task_cred(t), ns, cap);
81614+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
81615 rcu_read_unlock();
81616
81617- return (ret == 0);
81618+ return ret;
81619 }
81620
81621 /**
81622@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
81623 BUG();
81624 }
81625
81626- if (security_capable(current_cred(), ns, cap) == 0) {
81627+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
81628 current->flags |= PF_SUPERPRIV;
81629 return true;
81630 }
81631@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
81632 }
81633 EXPORT_SYMBOL(ns_capable);
81634
81635+bool ns_capable_nolog(struct user_namespace *ns, int cap)
81636+{
81637+ if (unlikely(!cap_valid(cap))) {
81638+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
81639+ BUG();
81640+ }
81641+
81642+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
81643+ current->flags |= PF_SUPERPRIV;
81644+ return true;
81645+ }
81646+ return false;
81647+}
81648+EXPORT_SYMBOL(ns_capable_nolog);
81649+
81650 /**
81651 * file_ns_capable - Determine if the file's opener had a capability in effect
81652 * @file: The file we want to check
81653@@ -432,6 +451,12 @@ bool capable(int cap)
81654 }
81655 EXPORT_SYMBOL(capable);
81656
81657+bool capable_nolog(int cap)
81658+{
81659+ return ns_capable_nolog(&init_user_ns, cap);
81660+}
81661+EXPORT_SYMBOL(capable_nolog);
81662+
81663 /**
81664 * inode_capable - Check superior capability over inode
81665 * @inode: The inode in question
81666@@ -453,3 +478,11 @@ bool inode_capable(const struct inode *inode, int cap)
81667 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
81668 }
81669 EXPORT_SYMBOL(inode_capable);
81670+
81671+bool inode_capable_nolog(const struct inode *inode, int cap)
81672+{
81673+ struct user_namespace *ns = current_user_ns();
81674+
81675+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
81676+}
81677+EXPORT_SYMBOL(inode_capable_nolog);
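The added capget() test is a plain bounds check: tocopy derives from a caller-supplied version header while kdata is a small fixed stack array, so clamping against ARRAY_SIZE() before the copy prevents reading past the buffer. A runnable sketch — the names mirror the hunk, but the surrounding logic is invented for the demo:

#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct cap_data { unsigned effective, permitted, inheritable; };

static int capget_demo(struct cap_data *dst, unsigned tocopy)
{
        struct cap_data kdata[2] = { { 1, 1, 0 }, { 2, 2, 0 } };

        if (tocopy > ARRAY_SIZE(kdata))
                return -14; /* -EFAULT, matching the patch */
        memcpy(dst, kdata, tocopy * sizeof(kdata[0]));
        return 0;
}

int main(void)
{
        struct cap_data out[2];

        printf("tocopy=2 -> %d\n", capget_demo(out, 2));
        printf("tocopy=9 -> %d\n", capget_demo(out, 9));
        return 0;
}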
81678diff --git a/kernel/cgroup.c b/kernel/cgroup.c
81679index 5c9127d..f871169 100644
81680--- a/kernel/cgroup.c
81681+++ b/kernel/cgroup.c
81682@@ -5844,7 +5844,7 @@ static int cgroup_css_links_read(struct cgroup_subsys_state *css,
81683 struct css_set *cset = link->cset;
81684 struct task_struct *task;
81685 int count = 0;
81686- seq_printf(seq, "css_set %p\n", cset);
81687+ seq_printf(seq, "css_set %pK\n", cset);
81688 list_for_each_entry(task, &cset->tasks, cg_list) {
81689 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
81690 seq_puts(seq, " ...\n");
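%pK is the stock kernel mechanism for this: unlike %p it consults the kptr_restrict sysctl, printing all zeroes to readers lacking CAP_SYSLOG (kptr_restrict=1) or to everyone (kptr_restrict=2), so the cgroup debug file stops doubling as a kernel-address infoleak. A kernel-style sketch:

#include <linux/seq_file.h>

static void show_set(struct seq_file *seq, const void *cset)
{
        /* zeroed out for unprivileged readers under kptr_restrict */
        seq_printf(seq, "css_set %pK\n", cset);
}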
81691diff --git a/kernel/compat.c b/kernel/compat.c
81692index 0a09e48..f44f3f0 100644
81693--- a/kernel/compat.c
81694+++ b/kernel/compat.c
81695@@ -13,6 +13,7 @@
81696
81697 #include <linux/linkage.h>
81698 #include <linux/compat.h>
81699+#include <linux/module.h>
81700 #include <linux/errno.h>
81701 #include <linux/time.h>
81702 #include <linux/signal.h>
81703@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
81704 mm_segment_t oldfs;
81705 long ret;
81706
81707- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
81708+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
81709 oldfs = get_fs();
81710 set_fs(KERNEL_DS);
81711 ret = hrtimer_nanosleep_restart(restart);
81712@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
81713 oldfs = get_fs();
81714 set_fs(KERNEL_DS);
81715 ret = hrtimer_nanosleep(&tu,
81716- rmtp ? (struct timespec __user *)&rmt : NULL,
81717+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
81718 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
81719 set_fs(oldfs);
81720
81721@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
81722 mm_segment_t old_fs = get_fs();
81723
81724 set_fs(KERNEL_DS);
81725- ret = sys_sigpending((old_sigset_t __user *) &s);
81726+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
81727 set_fs(old_fs);
81728 if (ret == 0)
81729 ret = put_user(s, set);
81730@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
81731 mm_segment_t old_fs = get_fs();
81732
81733 set_fs(KERNEL_DS);
81734- ret = sys_old_getrlimit(resource, &r);
81735+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
81736 set_fs(old_fs);
81737
81738 if (!ret) {
81739@@ -533,8 +534,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
81740 set_fs (KERNEL_DS);
81741 ret = sys_wait4(pid,
81742 (stat_addr ?
81743- (unsigned int __user *) &status : NULL),
81744- options, (struct rusage __user *) &r);
81745+ (unsigned int __force_user *) &status : NULL),
81746+ options, (struct rusage __force_user *) &r);
81747 set_fs (old_fs);
81748
81749 if (ret > 0) {
81750@@ -560,8 +561,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
81751 memset(&info, 0, sizeof(info));
81752
81753 set_fs(KERNEL_DS);
81754- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
81755- uru ? (struct rusage __user *)&ru : NULL);
81756+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
81757+ uru ? (struct rusage __force_user *)&ru : NULL);
81758 set_fs(old_fs);
81759
81760 if ((ret < 0) || (info.si_signo == 0))
81761@@ -695,8 +696,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
81762 oldfs = get_fs();
81763 set_fs(KERNEL_DS);
81764 err = sys_timer_settime(timer_id, flags,
81765- (struct itimerspec __user *) &newts,
81766- (struct itimerspec __user *) &oldts);
81767+ (struct itimerspec __force_user *) &newts,
81768+ (struct itimerspec __force_user *) &oldts);
81769 set_fs(oldfs);
81770 if (!err && old && put_compat_itimerspec(old, &oldts))
81771 return -EFAULT;
81772@@ -713,7 +714,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
81773 oldfs = get_fs();
81774 set_fs(KERNEL_DS);
81775 err = sys_timer_gettime(timer_id,
81776- (struct itimerspec __user *) &ts);
81777+ (struct itimerspec __force_user *) &ts);
81778 set_fs(oldfs);
81779 if (!err && put_compat_itimerspec(setting, &ts))
81780 return -EFAULT;
81781@@ -732,7 +733,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
81782 oldfs = get_fs();
81783 set_fs(KERNEL_DS);
81784 err = sys_clock_settime(which_clock,
81785- (struct timespec __user *) &ts);
81786+ (struct timespec __force_user *) &ts);
81787 set_fs(oldfs);
81788 return err;
81789 }
81790@@ -747,7 +748,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
81791 oldfs = get_fs();
81792 set_fs(KERNEL_DS);
81793 err = sys_clock_gettime(which_clock,
81794- (struct timespec __user *) &ts);
81795+ (struct timespec __force_user *) &ts);
81796 set_fs(oldfs);
81797 if (!err && put_compat_timespec(&ts, tp))
81798 return -EFAULT;
81799@@ -767,7 +768,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
81800
81801 oldfs = get_fs();
81802 set_fs(KERNEL_DS);
81803- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
81804+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
81805 set_fs(oldfs);
81806
81807 err = compat_put_timex(utp, &txc);
81808@@ -787,7 +788,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
81809 oldfs = get_fs();
81810 set_fs(KERNEL_DS);
81811 err = sys_clock_getres(which_clock,
81812- (struct timespec __user *) &ts);
81813+ (struct timespec __force_user *) &ts);
81814 set_fs(oldfs);
81815 if (!err && tp && put_compat_timespec(&ts, tp))
81816 return -EFAULT;
81817@@ -799,9 +800,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
81818 long err;
81819 mm_segment_t oldfs;
81820 struct timespec tu;
81821- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
81822+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
81823
81824- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
81825+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
81826 oldfs = get_fs();
81827 set_fs(KERNEL_DS);
81828 err = clock_nanosleep_restart(restart);
81829@@ -833,8 +834,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
81830 oldfs = get_fs();
81831 set_fs(KERNEL_DS);
81832 err = sys_clock_nanosleep(which_clock, flags,
81833- (struct timespec __user *) &in,
81834- (struct timespec __user *) &out);
81835+ (struct timespec __force_user *) &in,
81836+ (struct timespec __force_user *) &out);
81837 set_fs(oldfs);
81838
81839 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
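Every kernel/compat.c hunk here follows one pattern: a compat wrapper calls the native syscall with a kernel stack buffer inside a set_fs(KERNEL_DS) bracket, and the __force_user cast records that the address-space crossing is deliberate. A kernel-style sketch of the bracket, using the 3.12-era API:

static long compat_wrapper_demo(void)
{
        struct timespec ts;
        mm_segment_t oldfs = get_fs();
        long err;

        set_fs(KERNEL_DS); /* widen the addressable "user" range */
        err = sys_clock_gettime(CLOCK_MONOTONIC,
                                (struct timespec __force_user *)&ts);
        set_fs(oldfs);     /* always restore the old limit */
        return err;
}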
81840diff --git a/kernel/configs.c b/kernel/configs.c
81841index c18b1f1..b9a0132 100644
81842--- a/kernel/configs.c
81843+++ b/kernel/configs.c
81844@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
81845 struct proc_dir_entry *entry;
81846
81847 /* create the current config file */
81848+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
81849+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
81850+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
81851+ &ikconfig_file_ops);
81852+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
81853+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
81854+ &ikconfig_file_ops);
81855+#endif
81856+#else
81857 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
81858 &ikconfig_file_ops);
81859+#endif
81860+
81861 if (!entry)
81862 return -ENOMEM;
81863
81864diff --git a/kernel/cred.c b/kernel/cred.c
81865index e0573a4..3874e41 100644
81866--- a/kernel/cred.c
81867+++ b/kernel/cred.c
81868@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
81869 validate_creds(cred);
81870 alter_cred_subscribers(cred, -1);
81871 put_cred(cred);
81872+
81873+#ifdef CONFIG_GRKERNSEC_SETXID
81874+ cred = (struct cred *) tsk->delayed_cred;
81875+ if (cred != NULL) {
81876+ tsk->delayed_cred = NULL;
81877+ validate_creds(cred);
81878+ alter_cred_subscribers(cred, -1);
81879+ put_cred(cred);
81880+ }
81881+#endif
81882 }
81883
81884 /**
81885@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
81886 * Always returns 0 thus allowing this function to be tail-called at the end
81887 * of, say, sys_setgid().
81888 */
81889-int commit_creds(struct cred *new)
81890+static int __commit_creds(struct cred *new)
81891 {
81892 struct task_struct *task = current;
81893 const struct cred *old = task->real_cred;
81894@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
81895
81896 get_cred(new); /* we will require a ref for the subj creds too */
81897
81898+ gr_set_role_label(task, new->uid, new->gid);
81899+
81900 /* dumpability changes */
81901 if (!uid_eq(old->euid, new->euid) ||
81902 !gid_eq(old->egid, new->egid) ||
81903@@ -479,6 +491,102 @@ int commit_creds(struct cred *new)
81904 put_cred(old);
81905 return 0;
81906 }
81907+#ifdef CONFIG_GRKERNSEC_SETXID
81908+extern int set_user(struct cred *new);
81909+
81910+void gr_delayed_cred_worker(void)
81911+{
81912+ const struct cred *new = current->delayed_cred;
81913+ struct cred *ncred;
81914+
81915+ current->delayed_cred = NULL;
81916+
81917+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
81918+ // from doing get_cred on it when queueing this
81919+ put_cred(new);
81920+ return;
81921+ } else if (new == NULL)
81922+ return;
81923+
81924+ ncred = prepare_creds();
81925+ if (!ncred)
81926+ goto die;
81927+ // uids
81928+ ncred->uid = new->uid;
81929+ ncred->euid = new->euid;
81930+ ncred->suid = new->suid;
81931+ ncred->fsuid = new->fsuid;
81932+ // gids
81933+ ncred->gid = new->gid;
81934+ ncred->egid = new->egid;
81935+ ncred->sgid = new->sgid;
81936+ ncred->fsgid = new->fsgid;
81937+ // groups
81938+ if (set_groups(ncred, new->group_info) < 0) {
81939+ abort_creds(ncred);
81940+ goto die;
81941+ }
81942+ // caps
81943+ ncred->securebits = new->securebits;
81944+ ncred->cap_inheritable = new->cap_inheritable;
81945+ ncred->cap_permitted = new->cap_permitted;
81946+ ncred->cap_effective = new->cap_effective;
81947+ ncred->cap_bset = new->cap_bset;
81948+
81949+ if (set_user(ncred)) {
81950+ abort_creds(ncred);
81951+ goto die;
81952+ }
81953+
81954+ // from doing get_cred on it when queueing this
81955+ put_cred(new);
81956+
81957+ __commit_creds(ncred);
81958+ return;
81959+die:
81960+ // from doing get_cred on it when queueing this
81961+ put_cred(new);
81962+ do_group_exit(SIGKILL);
81963+}
81964+#endif
81965+
81966+int commit_creds(struct cred *new)
81967+{
81968+#ifdef CONFIG_GRKERNSEC_SETXID
81969+ int ret;
81970+ int schedule_it = 0;
81971+ struct task_struct *t;
81972+
81973+ /* we won't get called with tasklist_lock held for writing
81974+ and interrupts disabled as the cred struct in that case is
81975+ init_cred
81976+ */
81977+ if (grsec_enable_setxid && !current_is_single_threaded() &&
81978+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
81979+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
81980+ schedule_it = 1;
81981+ }
81982+ ret = __commit_creds(new);
81983+ if (schedule_it) {
81984+ rcu_read_lock();
81985+ read_lock(&tasklist_lock);
81986+ for (t = next_thread(current); t != current;
81987+ t = next_thread(t)) {
81988+ if (t->delayed_cred == NULL) {
81989+ t->delayed_cred = get_cred(new);
81990+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
81991+ set_tsk_need_resched(t);
81992+ }
81993+ }
81994+ read_unlock(&tasklist_lock);
81995+ rcu_read_unlock();
81996+ }
81997+ return ret;
81998+#else
81999+ return __commit_creds(new);
82000+#endif
82001+}
82002+
82003 EXPORT_SYMBOL(commit_creds);
82004
82005 /**
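The recurring "from doing get_cred on it when queueing this" comments mark a reference-counting contract: commit_creds() takes one get_cred() per thread it queues the new cred to, and gr_delayed_cred_worker() must drop exactly one reference on every path out, early exits included. A runnable userspace analogue of that contract, with plain C stand-ins:

#include <stdio.h>

struct obj { int refs; };

static struct obj *get_ref(struct obj *o) { o->refs++; return o; }

static void put_ref(struct obj *o)
{
        if (--o->refs == 0)
                printf("last reference dropped\n");
}

static void consumer(struct obj *queued)
{
        if (!queued)
                return;
        /* ... apply the queued update ... */
        put_ref(queued); /* balance the publisher's get_ref() */
}

int main(void)
{
        struct obj o = { 1 };

        consumer(get_ref(&o)); /* publisher queues one reference */
        put_ref(&o);           /* publisher drops its own */
        return 0;
}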
82006diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
82007index 0506d44..2c20034 100644
82008--- a/kernel/debug/debug_core.c
82009+++ b/kernel/debug/debug_core.c
82010@@ -123,7 +123,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
82011 */
82012 static atomic_t masters_in_kgdb;
82013 static atomic_t slaves_in_kgdb;
82014-static atomic_t kgdb_break_tasklet_var;
82015+static atomic_unchecked_t kgdb_break_tasklet_var;
82016 atomic_t kgdb_setting_breakpoint;
82017
82018 struct task_struct *kgdb_usethread;
82019@@ -133,7 +133,7 @@ int kgdb_single_step;
82020 static pid_t kgdb_sstep_pid;
82021
82022 /* to keep track of the CPU which is doing the single stepping*/
82023-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
82024+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
82025
82026 /*
82027 * If you are debugging a problem where roundup (the collection of
82028@@ -541,7 +541,7 @@ return_normal:
82029 * kernel will only try for the value of sstep_tries before
82030 * giving up and continuing on.
82031 */
82032- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
82033+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
82034 (kgdb_info[cpu].task &&
82035 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
82036 atomic_set(&kgdb_active, -1);
82037@@ -635,8 +635,8 @@ cpu_master_loop:
82038 }
82039
82040 kgdb_restore:
82041- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
82042- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
82043+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
82044+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
82045 if (kgdb_info[sstep_cpu].task)
82046 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
82047 else
82048@@ -888,18 +888,18 @@ static void kgdb_unregister_callbacks(void)
82049 static void kgdb_tasklet_bpt(unsigned long ing)
82050 {
82051 kgdb_breakpoint();
82052- atomic_set(&kgdb_break_tasklet_var, 0);
82053+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
82054 }
82055
82056 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
82057
82058 void kgdb_schedule_breakpoint(void)
82059 {
82060- if (atomic_read(&kgdb_break_tasklet_var) ||
82061+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
82062 atomic_read(&kgdb_active) != -1 ||
82063 atomic_read(&kgdb_setting_breakpoint))
82064 return;
82065- atomic_inc(&kgdb_break_tasklet_var);
82066+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
82067 tasklet_schedule(&kgdb_tasklet_breakpoint);
82068 }
82069 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
82070diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
82071index 00eb8f7..d7e3244 100644
82072--- a/kernel/debug/kdb/kdb_main.c
82073+++ b/kernel/debug/kdb/kdb_main.c
82074@@ -1974,7 +1974,7 @@ static int kdb_lsmod(int argc, const char **argv)
82075 continue;
82076
82077 kdb_printf("%-20s%8u 0x%p ", mod->name,
82078- mod->core_size, (void *)mod);
82079+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
82080 #ifdef CONFIG_MODULE_UNLOAD
82081 kdb_printf("%4ld ", module_refcount(mod));
82082 #endif
82083@@ -1984,7 +1984,7 @@ static int kdb_lsmod(int argc, const char **argv)
82084 kdb_printf(" (Loading)");
82085 else
82086 kdb_printf(" (Live)");
82087- kdb_printf(" 0x%p", mod->module_core);
82088+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
82089
82090 #ifdef CONFIG_MODULE_UNLOAD
82091 {
82092diff --git a/kernel/events/core.c b/kernel/events/core.c
82093index 953c143..5646bb1 100644
82094--- a/kernel/events/core.c
82095+++ b/kernel/events/core.c
82096@@ -157,8 +157,15 @@ static struct srcu_struct pmus_srcu;
82097 * 0 - disallow raw tracepoint access for unpriv
82098 * 1 - disallow cpu events for unpriv
82099 * 2 - disallow kernel profiling for unpriv
82100+ * 3 - disallow all unpriv perf event use
82101 */
82102-int sysctl_perf_event_paranoid __read_mostly = 1;
82103+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
82104+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
82105+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
82106+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
82107+#else
82108+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
82109+#endif
82110
82111 /* Minimum for 512 kiB + 1 user control page */
82112 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
82113@@ -271,7 +278,7 @@ void perf_sample_event_took(u64 sample_len_ns)
82114 update_perf_cpu_limits();
82115 }
82116
82117-static atomic64_t perf_event_id;
82118+static atomic64_unchecked_t perf_event_id;
82119
82120 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
82121 enum event_type_t event_type);
82122@@ -2940,7 +2947,7 @@ static void __perf_event_read(void *info)
82123
82124 static inline u64 perf_event_count(struct perf_event *event)
82125 {
82126- return local64_read(&event->count) + atomic64_read(&event->child_count);
82127+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
82128 }
82129
82130 static u64 perf_event_read(struct perf_event *event)
82131@@ -3308,9 +3315,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
82132 mutex_lock(&event->child_mutex);
82133 total += perf_event_read(event);
82134 *enabled += event->total_time_enabled +
82135- atomic64_read(&event->child_total_time_enabled);
82136+ atomic64_read_unchecked(&event->child_total_time_enabled);
82137 *running += event->total_time_running +
82138- atomic64_read(&event->child_total_time_running);
82139+ atomic64_read_unchecked(&event->child_total_time_running);
82140
82141 list_for_each_entry(child, &event->child_list, child_list) {
82142 total += perf_event_read(child);
82143@@ -3725,10 +3732,10 @@ void perf_event_update_userpage(struct perf_event *event)
82144 userpg->offset -= local64_read(&event->hw.prev_count);
82145
82146 userpg->time_enabled = enabled +
82147- atomic64_read(&event->child_total_time_enabled);
82148+ atomic64_read_unchecked(&event->child_total_time_enabled);
82149
82150 userpg->time_running = running +
82151- atomic64_read(&event->child_total_time_running);
82152+ atomic64_read_unchecked(&event->child_total_time_running);
82153
82154 arch_perf_update_userpage(userpg, now);
82155
82156@@ -4279,7 +4286,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
82157
82158 /* Data. */
82159 sp = perf_user_stack_pointer(regs);
82160- rem = __output_copy_user(handle, (void *) sp, dump_size);
82161+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
82162 dyn_size = dump_size - rem;
82163
82164 perf_output_skip(handle, rem);
82165@@ -4370,11 +4377,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
82166 values[n++] = perf_event_count(event);
82167 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
82168 values[n++] = enabled +
82169- atomic64_read(&event->child_total_time_enabled);
82170+ atomic64_read_unchecked(&event->child_total_time_enabled);
82171 }
82172 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
82173 values[n++] = running +
82174- atomic64_read(&event->child_total_time_running);
82175+ atomic64_read_unchecked(&event->child_total_time_running);
82176 }
82177 if (read_format & PERF_FORMAT_ID)
82178 values[n++] = primary_event_id(event);
82179@@ -5112,12 +5119,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
82180 * need to add enough zero bytes after the string to handle
82181 * the 64bit alignment we do later.
82182 */
82183- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
82184+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
82185 if (!buf) {
82186 name = strncpy(tmp, "//enomem", sizeof(tmp));
82187 goto got_name;
82188 }
82189- name = d_path(&file->f_path, buf, PATH_MAX);
82190+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
82191 if (IS_ERR(name)) {
82192 name = strncpy(tmp, "//toolong", sizeof(tmp));
82193 goto got_name;
82194@@ -6639,7 +6646,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
82195 event->parent = parent_event;
82196
82197 event->ns = get_pid_ns(task_active_pid_ns(current));
82198- event->id = atomic64_inc_return(&perf_event_id);
82199+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
82200
82201 event->state = PERF_EVENT_STATE_INACTIVE;
82202
82203@@ -6938,6 +6945,11 @@ SYSCALL_DEFINE5(perf_event_open,
82204 if (flags & ~PERF_FLAG_ALL)
82205 return -EINVAL;
82206
82207+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
82208+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
82209+ return -EACCES;
82210+#endif
82211+
82212 err = perf_copy_attr(attr_uptr, &attr);
82213 if (err)
82214 return err;
82215@@ -7271,10 +7283,10 @@ static void sync_child_event(struct perf_event *child_event,
82216 /*
82217 * Add back the child's count to the parent's count:
82218 */
82219- atomic64_add(child_val, &parent_event->child_count);
82220- atomic64_add(child_event->total_time_enabled,
82221+ atomic64_add_unchecked(child_val, &parent_event->child_count);
82222+ atomic64_add_unchecked(child_event->total_time_enabled,
82223 &parent_event->child_total_time_enabled);
82224- atomic64_add(child_event->total_time_running,
82225+ atomic64_add_unchecked(child_event->total_time_running,
82226 &parent_event->child_total_time_running);
82227
82228 /*
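The mmap-event buffer change keeps the same invariant as before — sizeof(u64) spare bytes past the longest name d_path() may return, since the record pads the path to a u64 boundary — while shrinking the allocation from PATH_MAX + sizeof(u64) to PATH_MAX. A runnable sketch of the alignment arithmetic:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
        const char *name = "/lib/libc.so.6";
        size_t used = strlen(name) + 1; /* include the NUL */
        size_t padded = ALIGN_UP(used, sizeof(uint64_t));

        /* the padding bytes are why the tail reserve must exist */
        printf("name uses %zu bytes, record stores %zu\n", used, padded);
        return 0;
}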
82229diff --git a/kernel/events/internal.h b/kernel/events/internal.h
82230index ca65997..60df03d 100644
82231--- a/kernel/events/internal.h
82232+++ b/kernel/events/internal.h
82233@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
82234 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
82235 }
82236
82237-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
82238-static inline unsigned int \
82239+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
82240+static inline unsigned long \
82241 func_name(struct perf_output_handle *handle, \
82242- const void *buf, unsigned int len) \
82243+ const void user *buf, unsigned long len) \
82244 { \
82245 unsigned long size, written; \
82246 \
82247@@ -116,17 +116,17 @@ static inline int memcpy_common(void *dst, const void *src, size_t n)
82248 return n;
82249 }
82250
82251-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
82252+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
82253
82254 #define MEMCPY_SKIP(dst, src, n) (n)
82255
82256-DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP)
82257+DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP, )
82258
82259 #ifndef arch_perf_out_copy_user
82260 #define arch_perf_out_copy_user __copy_from_user_inatomic
82261 #endif
82262
82263-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
82264+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
82265
82266 /* Callchain handling */
82267 extern struct perf_callchain_entry *
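DEFINE_OUTPUT_COPY now takes the address-space annotation as a third macro argument, so one template stamps out both the kernel-to-kernel copier (empty argument, which is valid C99) and the user-to-kernel one (__user). A runnable miniature of the trick:

#include <string.h>

#define __user /* expands to nothing without sparse */

static unsigned long plain_copy(void *d, const void *s, unsigned long n)
{
        memcpy(d, s, n);
        return 0; /* bytes left uncopied */
}

#define DEFINE_COPY(name, copy_fn, space)                      \
static unsigned long name(void *dst, const void space *src,   \
                          unsigned long len)                   \
{                                                              \
        return copy_fn(dst, src, len);                         \
}

DEFINE_COPY(copy_kernel, plain_copy, ) /* empty annotation */
DEFINE_COPY(copy_user, plain_copy, __user)

int main(void)
{
        char dst[4], src[4] = "abc";

        copy_user(dst, src, sizeof(src));
        return (int)copy_kernel(dst, src, sizeof(src));
}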
82268diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
82269index ad8e1bd..fed7ba9 100644
82270--- a/kernel/events/uprobes.c
82271+++ b/kernel/events/uprobes.c
82272@@ -1556,7 +1556,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
82273 {
82274 struct page *page;
82275 uprobe_opcode_t opcode;
82276- int result;
82277+ long result;
82278
82279 pagefault_disable();
82280 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
82281diff --git a/kernel/exit.c b/kernel/exit.c
82282index a949819..a5f127d 100644
82283--- a/kernel/exit.c
82284+++ b/kernel/exit.c
82285@@ -172,6 +172,10 @@ void release_task(struct task_struct * p)
82286 struct task_struct *leader;
82287 int zap_leader;
82288 repeat:
82289+#ifdef CONFIG_NET
82290+ gr_del_task_from_ip_table(p);
82291+#endif
82292+
82293 /* don't need to get the RCU readlock here - the process is dead and
82294 * can't be modifying its own credentials. But shut RCU-lockdep up */
82295 rcu_read_lock();
82296@@ -329,7 +333,7 @@ int allow_signal(int sig)
82297 * know it'll be handled, so that they don't get converted to
82298 * SIGKILL or just silently dropped.
82299 */
82300- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
82301+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
82302 recalc_sigpending();
82303 spin_unlock_irq(&current->sighand->siglock);
82304 return 0;
82305@@ -698,6 +702,8 @@ void do_exit(long code)
82306 struct task_struct *tsk = current;
82307 int group_dead;
82308
82309+ set_fs(USER_DS);
82310+
82311 profile_task_exit(tsk);
82312
82313 WARN_ON(blk_needs_flush_plug(tsk));
82314@@ -714,7 +720,6 @@ void do_exit(long code)
82315 * mm_release()->clear_child_tid() from writing to a user-controlled
82316 * kernel address.
82317 */
82318- set_fs(USER_DS);
82319
82320 ptrace_event(PTRACE_EVENT_EXIT, code);
82321
82322@@ -773,6 +778,9 @@ void do_exit(long code)
82323 tsk->exit_code = code;
82324 taskstats_exit(tsk, group_dead);
82325
82326+ gr_acl_handle_psacct(tsk, code);
82327+ gr_acl_handle_exit();
82328+
82329 exit_mm(tsk);
82330
82331 if (group_dead)
82332@@ -894,7 +902,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
82333 * Take down every thread in the group. This is called by fatal signals
82334 * as well as by sys_exit_group (below).
82335 */
82336-void
82337+__noreturn void
82338 do_group_exit(int exit_code)
82339 {
82340 struct signal_struct *sig = current->signal;
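do_group_exit() picks up a __noreturn annotation, which documents the contract and lets the compiler prune impossible fallthrough paths (and warn if the function could in fact return). A small standalone demo:

#include <stdlib.h>

static void die(int code) __attribute__((noreturn));

static void die(int code)
{
        exit(code); /* never returns */
}

int main(void)
{
        die(0);
        /* no return needed: the compiler knows control can't get here */
}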
82341diff --git a/kernel/fork.c b/kernel/fork.c
82342index 086fe73..72c1122 100644
82343--- a/kernel/fork.c
82344+++ b/kernel/fork.c
82345@@ -319,7 +319,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
82346 *stackend = STACK_END_MAGIC; /* for overflow detection */
82347
82348 #ifdef CONFIG_CC_STACKPROTECTOR
82349- tsk->stack_canary = get_random_int();
82350+ tsk->stack_canary = pax_get_random_long();
82351 #endif
82352
82353 /*
82354@@ -345,12 +345,80 @@ free_tsk:
82355 }
82356
82357 #ifdef CONFIG_MMU
82358-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
82359+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
82360+{
82361+ struct vm_area_struct *tmp;
82362+ unsigned long charge;
82363+ struct file *file;
82364+ int retval;
82365+
82366+ charge = 0;
82367+ if (mpnt->vm_flags & VM_ACCOUNT) {
82368+ unsigned long len = vma_pages(mpnt);
82369+
82370+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
82371+ goto fail_nomem;
82372+ charge = len;
82373+ }
82374+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
82375+ if (!tmp)
82376+ goto fail_nomem;
82377+ *tmp = *mpnt;
82378+ tmp->vm_mm = mm;
82379+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
82380+ retval = vma_dup_policy(mpnt, tmp);
82381+ if (retval)
82382+ goto fail_nomem_policy;
82383+ if (anon_vma_fork(tmp, mpnt))
82384+ goto fail_nomem_anon_vma_fork;
82385+ tmp->vm_flags &= ~VM_LOCKED;
82386+ tmp->vm_next = tmp->vm_prev = NULL;
82387+ tmp->vm_mirror = NULL;
82388+ file = tmp->vm_file;
82389+ if (file) {
82390+ struct inode *inode = file_inode(file);
82391+ struct address_space *mapping = file->f_mapping;
82392+
82393+ get_file(file);
82394+ if (tmp->vm_flags & VM_DENYWRITE)
82395+ atomic_dec(&inode->i_writecount);
82396+ mutex_lock(&mapping->i_mmap_mutex);
82397+ if (tmp->vm_flags & VM_SHARED)
82398+ mapping->i_mmap_writable++;
82399+ flush_dcache_mmap_lock(mapping);
82400+ /* insert tmp into the share list, just after mpnt */
82401+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
82402+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
82403+ else
82404+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
82405+ flush_dcache_mmap_unlock(mapping);
82406+ mutex_unlock(&mapping->i_mmap_mutex);
82407+ }
82408+
82409+ /*
82410+ * Clear hugetlb-related page reserves for children. This only
82411+ * affects MAP_PRIVATE mappings. Faults generated by the child
82412+ * are not guaranteed to succeed, even if read-only
82413+ */
82414+ if (is_vm_hugetlb_page(tmp))
82415+ reset_vma_resv_huge_pages(tmp);
82416+
82417+ return tmp;
82418+
82419+fail_nomem_anon_vma_fork:
82420+ mpol_put(vma_policy(tmp));
82421+fail_nomem_policy:
82422+ kmem_cache_free(vm_area_cachep, tmp);
82423+fail_nomem:
82424+ vm_unacct_memory(charge);
82425+ return NULL;
82426+}
82427+
82428+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
82429 {
82430 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
82431 struct rb_node **rb_link, *rb_parent;
82432 int retval;
82433- unsigned long charge;
82434
82435 uprobe_start_dup_mmap();
82436 down_write(&oldmm->mmap_sem);
82437@@ -379,55 +447,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
82438
82439 prev = NULL;
82440 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
82441- struct file *file;
82442-
82443 if (mpnt->vm_flags & VM_DONTCOPY) {
82444 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
82445 -vma_pages(mpnt));
82446 continue;
82447 }
82448- charge = 0;
82449- if (mpnt->vm_flags & VM_ACCOUNT) {
82450- unsigned long len = vma_pages(mpnt);
82451-
82452- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
82453- goto fail_nomem;
82454- charge = len;
82455- }
82456- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
82457- if (!tmp)
82458- goto fail_nomem;
82459- *tmp = *mpnt;
82460- INIT_LIST_HEAD(&tmp->anon_vma_chain);
82461- retval = vma_dup_policy(mpnt, tmp);
82462- if (retval)
82463- goto fail_nomem_policy;
82464- tmp->vm_mm = mm;
82465- if (anon_vma_fork(tmp, mpnt))
82466- goto fail_nomem_anon_vma_fork;
82467- tmp->vm_flags &= ~VM_LOCKED;
82468- tmp->vm_next = tmp->vm_prev = NULL;
82469- file = tmp->vm_file;
82470- if (file) {
82471- struct inode *inode = file_inode(file);
82472- struct address_space *mapping = file->f_mapping;
82473-
82474- get_file(file);
82475- if (tmp->vm_flags & VM_DENYWRITE)
82476- atomic_dec(&inode->i_writecount);
82477- mutex_lock(&mapping->i_mmap_mutex);
82478- if (tmp->vm_flags & VM_SHARED)
82479- mapping->i_mmap_writable++;
82480- flush_dcache_mmap_lock(mapping);
82481- /* insert tmp into the share list, just after mpnt */
82482- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
82483- vma_nonlinear_insert(tmp,
82484- &mapping->i_mmap_nonlinear);
82485- else
82486- vma_interval_tree_insert_after(tmp, mpnt,
82487- &mapping->i_mmap);
82488- flush_dcache_mmap_unlock(mapping);
82489- mutex_unlock(&mapping->i_mmap_mutex);
82490+ tmp = dup_vma(mm, oldmm, mpnt);
82491+ if (!tmp) {
82492+ retval = -ENOMEM;
82493+ goto out;
82494 }
82495
82496 /*
82497@@ -459,6 +487,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
82498 if (retval)
82499 goto out;
82500 }
82501+
82502+#ifdef CONFIG_PAX_SEGMEXEC
82503+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
82504+ struct vm_area_struct *mpnt_m;
82505+
82506+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
82507+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
82508+
82509+ if (!mpnt->vm_mirror)
82510+ continue;
82511+
82512+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
82513+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
82514+ mpnt->vm_mirror = mpnt_m;
82515+ } else {
82516+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
82517+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
82518+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
82519+ mpnt->vm_mirror->vm_mirror = mpnt;
82520+ }
82521+ }
82522+ BUG_ON(mpnt_m);
82523+ }
82524+#endif
82525+
82526 /* a new mm has just been created */
82527 arch_dup_mmap(oldmm, mm);
82528 retval = 0;
82529@@ -468,14 +521,6 @@ out:
82530 up_write(&oldmm->mmap_sem);
82531 uprobe_end_dup_mmap();
82532 return retval;
82533-fail_nomem_anon_vma_fork:
82534- mpol_put(vma_policy(tmp));
82535-fail_nomem_policy:
82536- kmem_cache_free(vm_area_cachep, tmp);
82537-fail_nomem:
82538- retval = -ENOMEM;
82539- vm_unacct_memory(charge);
82540- goto out;
82541 }
82542
82543 static inline int mm_alloc_pgd(struct mm_struct *mm)
82544@@ -688,8 +733,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
82545 return ERR_PTR(err);
82546
82547 mm = get_task_mm(task);
82548- if (mm && mm != current->mm &&
82549- !ptrace_may_access(task, mode)) {
82550+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
82551+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
82552 mmput(mm);
82553 mm = ERR_PTR(-EACCES);
82554 }
82555@@ -911,13 +956,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
82556 spin_unlock(&fs->lock);
82557 return -EAGAIN;
82558 }
82559- fs->users++;
82560+ atomic_inc(&fs->users);
82561 spin_unlock(&fs->lock);
82562 return 0;
82563 }
82564 tsk->fs = copy_fs_struct(fs);
82565 if (!tsk->fs)
82566 return -ENOMEM;
82567+ /* Carry through gr_chroot_dentry and is_chrooted instead
82568+ of recomputing it here. Already copied when the task struct
82569+ is duplicated. This allows pivot_root to not be treated as
82570+ a chroot
82571+ */
82572+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
82573+
82574 return 0;
82575 }
82576
82577@@ -1128,7 +1180,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
82578 * parts of the process environment (as per the clone
82579 * flags). The actual kick-off is left to the caller.
82580 */
82581-static struct task_struct *copy_process(unsigned long clone_flags,
82582+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
82583 unsigned long stack_start,
82584 unsigned long stack_size,
82585 int __user *child_tidptr,
82586@@ -1200,6 +1252,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
82587 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
82588 #endif
82589 retval = -EAGAIN;
82590+
82591+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
82592+
82593 if (atomic_read(&p->real_cred->user->processes) >=
82594 task_rlimit(p, RLIMIT_NPROC)) {
82595 if (p->real_cred->user != INIT_USER &&
82596@@ -1449,6 +1504,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
82597 goto bad_fork_free_pid;
82598 }
82599
82600+ /* synchronizes with gr_set_acls()
82601+ we need to call this past the point of no return for fork()
82602+ */
82603+ gr_copy_label(p);
82604+
82605 if (likely(p->pid)) {
82606 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
82607
82608@@ -1534,6 +1594,8 @@ bad_fork_cleanup_count:
82609 bad_fork_free:
82610 free_task(p);
82611 fork_out:
82612+ gr_log_forkfail(retval);
82613+
82614 return ERR_PTR(retval);
82615 }
82616
82617@@ -1595,6 +1657,7 @@ long do_fork(unsigned long clone_flags,
82618
82619 p = copy_process(clone_flags, stack_start, stack_size,
82620 child_tidptr, NULL, trace);
82621+ add_latent_entropy();
82622 /*
82623 * Do this prior waking up the new thread - the thread pointer
82624 * might get invalid after that point, if the thread exits quickly.
82625@@ -1609,6 +1672,8 @@ long do_fork(unsigned long clone_flags,
82626 if (clone_flags & CLONE_PARENT_SETTID)
82627 put_user(nr, parent_tidptr);
82628
82629+ gr_handle_brute_check();
82630+
82631 if (clone_flags & CLONE_VFORK) {
82632 p->vfork_done = &vfork;
82633 init_completion(&vfork);
82634@@ -1725,7 +1790,7 @@ void __init proc_caches_init(void)
82635 mm_cachep = kmem_cache_create("mm_struct",
82636 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
82637 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
82638- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
82639+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
82640 mmap_init();
82641 nsproxy_cache_init();
82642 }
82643@@ -1765,7 +1830,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
82644 return 0;
82645
82646 /* don't need lock here; in the worst case we'll do useless copy */
82647- if (fs->users == 1)
82648+ if (atomic_read(&fs->users) == 1)
82649 return 0;
82650
82651 *new_fsp = copy_fs_struct(fs);
82652@@ -1872,7 +1937,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
82653 fs = current->fs;
82654 spin_lock(&fs->lock);
82655 current->fs = new_fs;
82656- if (--fs->users)
82657+ gr_set_chroot_entries(current, &current->fs->root);
82658+ if (atomic_dec_return(&fs->users))
82659 new_fs = NULL;
82660 else
82661 new_fs = fs;
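The dup_mmap() hunk is mostly a mechanical extraction: the per-VMA copy moves into dup_vma(), which owns its own unwinding and reports failure as NULL, leaving the caller's loop a single error branch — and giving the SEGMEXEC mirror pass added below a reusable building block. A runnable miniature of that shape, toy types only:

#include <stdlib.h>
#include <string.h>

struct vma { char tag[16]; struct vma *next; };

static struct vma *dup_vma(const struct vma *src)
{
        struct vma *tmp = malloc(sizeof(*tmp));

        if (!tmp)
                return NULL; /* all per-item unwinding happens here */
        memcpy(tmp, src, sizeof(*tmp));
        tmp->next = NULL;
        return tmp;
}

static int dup_list(const struct vma *head, struct vma **out)
{
        struct vma **pprev = out;

        for (; head; head = head->next) {
                struct vma *tmp = dup_vma(head);

                /* single error path; the real caller (dup_mmap) frees
                 * any partial copy on its own out: path */
                if (!tmp)
                        return -12; /* -ENOMEM */
                *pprev = tmp;
                pprev = &tmp->next;
        }
        return 0;
}

int main(void)
{
        struct vma a = { "stack", NULL }, *copy = NULL;
        int err = dup_list(&a, &copy);

        free(copy); /* one node in this demo */
        return err;
}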
82662diff --git a/kernel/futex.c b/kernel/futex.c
82663index 221a58f..1b8cfce 100644
82664--- a/kernel/futex.c
82665+++ b/kernel/futex.c
82666@@ -54,6 +54,7 @@
82667 #include <linux/mount.h>
82668 #include <linux/pagemap.h>
82669 #include <linux/syscalls.h>
82670+#include <linux/ptrace.h>
82671 #include <linux/signal.h>
82672 #include <linux/export.h>
82673 #include <linux/magic.h>
82674@@ -243,6 +244,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
82675 struct page *page, *page_head;
82676 int err, ro = 0;
82677
82678+#ifdef CONFIG_PAX_SEGMEXEC
82679+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
82680+ return -EFAULT;
82681+#endif
82682+
82683 /*
82684 * The futex address must be "naturally" aligned.
82685 */
82686@@ -441,7 +447,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
82687
82688 static int get_futex_value_locked(u32 *dest, u32 __user *from)
82689 {
82690- int ret;
82691+ unsigned long ret;
82692
82693 pagefault_disable();
82694 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
82695@@ -2734,6 +2740,7 @@ static int __init futex_init(void)
82696 {
82697 u32 curval;
82698 int i;
82699+ mm_segment_t oldfs;
82700
82701 /*
82702 * This will fail and we want it. Some arch implementations do
82703@@ -2745,8 +2752,11 @@ static int __init futex_init(void)
82704 * implementation, the non-functional ones will return
82705 * -ENOSYS.
82706 */
82707+ oldfs = get_fs();
82708+ set_fs(USER_DS);
82709 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
82710 futex_cmpxchg_enabled = 1;
82711+ set_fs(oldfs);
82712
82713 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
82714 plist_head_init(&futex_queues[i].chain);
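
Under PAX_SEGMEXEC the usable userland address space is halved, so the
get_futex_key() hunk above rejects futex addresses beyond SEGMEXEC_TASK_SIZE
before touching them. A hedged userspace sketch of that validate-before-use
shape (TASK_LIMIT is a made-up stand-in for SEGMEXEC_TASK_SIZE):

#include <stdint.h>
#include <stdio.h>

#define TASK_LIMIT 0x60000000UL	/* placeholder, not the real constant */

static int futex_addr_ok(uintptr_t addr)
{
	if (addr >= TASK_LIMIT)			/* beyond the usable range */
		return 0;
	if (addr & (sizeof(uint32_t) - 1))	/* not naturally aligned */
		return 0;
	return 1;
}

int main(void)
{
	printf("%d %d %d\n",
	       futex_addr_ok(0x1000),		/* 1: in range, aligned */
	       futex_addr_ok(0x1002),		/* 0: misaligned */
	       futex_addr_ok(0x70000000));	/* 0: beyond the limit */
	return 0;
}
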
82715diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
82716index f9f44fd..29885e4 100644
82717--- a/kernel/futex_compat.c
82718+++ b/kernel/futex_compat.c
82719@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
82720 return 0;
82721 }
82722
82723-static void __user *futex_uaddr(struct robust_list __user *entry,
82724+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
82725 compat_long_t futex_offset)
82726 {
82727 compat_uptr_t base = ptr_to_compat(entry);
82728diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
82729index 9b22d03..6295b62 100644
82730--- a/kernel/gcov/base.c
82731+++ b/kernel/gcov/base.c
82732@@ -102,11 +102,6 @@ void gcov_enable_events(void)
82733 }
82734
82735 #ifdef CONFIG_MODULES
82736-static inline int within(void *addr, void *start, unsigned long size)
82737-{
82738- return ((addr >= start) && (addr < start + size));
82739-}
82740-
82741 /* Update list and generate events when modules are unloaded. */
82742 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
82743 void *data)
82744@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
82745 prev = NULL;
82746 /* Remove entries located in module from linked list. */
82747 for (info = gcov_info_head; info; info = info->next) {
82748- if (within(info, mod->module_core, mod->core_size)) {
82749+ if (within_module_core_rw((unsigned long)info, mod)) {
82750 if (prev)
82751 prev->next = info->next;
82752 else
82753diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
82754index 383319b..56ebb13 100644
82755--- a/kernel/hrtimer.c
82756+++ b/kernel/hrtimer.c
82757@@ -1438,7 +1438,7 @@ void hrtimer_peek_ahead_timers(void)
82758 local_irq_restore(flags);
82759 }
82760
82761-static void run_hrtimer_softirq(struct softirq_action *h)
82762+static __latent_entropy void run_hrtimer_softirq(void)
82763 {
82764 hrtimer_peek_ahead_timers();
82765 }
82766diff --git a/kernel/irq_work.c b/kernel/irq_work.c
82767index 55fcce6..0e4cf34 100644
82768--- a/kernel/irq_work.c
82769+++ b/kernel/irq_work.c
82770@@ -189,12 +189,13 @@ static int irq_work_cpu_notify(struct notifier_block *self,
82771 return NOTIFY_OK;
82772 }
82773
82774-static struct notifier_block cpu_notify;
82775+static struct notifier_block cpu_notify = {
82776+ .notifier_call = irq_work_cpu_notify,
82777+ .priority = 0,
82778+};
82779
82780 static __init int irq_work_init_cpu_notifier(void)
82781 {
82782- cpu_notify.notifier_call = irq_work_cpu_notify;
82783- cpu_notify.priority = 0;
82784 register_cpu_notifier(&cpu_notify);
82785 return 0;
82786 }
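
The irq_work hunk above moves the notifier_block setup from run time to a
static initializer, which is what allows such objects to live in
write-protected data. A small sketch of the difference, with simplified
stand-in types (not the kernel's struct notifier_block):

#include <stdio.h>

struct notifier_block {
	int (*notifier_call)(void *data);
	int priority;
};

static int demo_notify(void *data)
{
	printf("notified: %s\n", (const char *)data);
	return 0;
}

/* const + compile-time initializer: the linker can place this in
 * .rodata; fields assigned at runtime would force it to stay writable. */
static const struct notifier_block cpu_notify = {
	.notifier_call = demo_notify,
	.priority      = 0,
};

int main(void)
{
	return cpu_notify.notifier_call("cpu online");
}
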
82787diff --git a/kernel/jump_label.c b/kernel/jump_label.c
82788index 297a924..7290070 100644
82789--- a/kernel/jump_label.c
82790+++ b/kernel/jump_label.c
82791@@ -14,6 +14,7 @@
82792 #include <linux/err.h>
82793 #include <linux/static_key.h>
82794 #include <linux/jump_label_ratelimit.h>
82795+#include <linux/mm.h>
82796
82797 #ifdef HAVE_JUMP_LABEL
82798
82799@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
82800
82801 size = (((unsigned long)stop - (unsigned long)start)
82802 / sizeof(struct jump_entry));
82803+ pax_open_kernel();
82804 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
82805+ pax_close_kernel();
82806 }
82807
82808 static void jump_label_update(struct static_key *key, int enable);
82809@@ -358,10 +361,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
82810 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
82811 struct jump_entry *iter;
82812
82813+ pax_open_kernel();
82814 for (iter = iter_start; iter < iter_stop; iter++) {
82815 if (within_module_init(iter->code, mod))
82816 iter->code = 0;
82817 }
82818+ pax_close_kernel();
82819 }
82820
82821 static int
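
pax_open_kernel()/pax_close_kernel() in the jump_label hunks bracket writes to
data that is otherwise mapped read-only. Roughly the same effect, approximated
in userspace with mprotect() (a sketch of the idea, not the PaX mechanism):

#include <sys/mman.h>
#include <stdlib.h>
#include <stdio.h>

static int cmp_int(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;
}

int main(void)
{
	size_t sz = 4096;
	int *table = mmap(NULL, sz, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (table == MAP_FAILED)
		return 1;
	table[0] = 3; table[1] = 1; table[2] = 2;
	mprotect(table, sz, PROT_READ);		/* normally read-only */

	/* "open": writable just long enough to sort, then sealed again */
	if (mprotect(table, sz, PROT_READ | PROT_WRITE) != 0)
		return 1;
	qsort(table, 3, sizeof(int), cmp_int);
	mprotect(table, sz, PROT_READ);		/* "close" */

	printf("%d %d %d\n", table[0], table[1], table[2]);
	return 0;
}
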
82822diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
82823index 3127ad5..159d880 100644
82824--- a/kernel/kallsyms.c
82825+++ b/kernel/kallsyms.c
82826@@ -11,6 +11,9 @@
82827 * Changed the compression method from stem compression to "table lookup"
82828 * compression (see scripts/kallsyms.c for a more complete description)
82829 */
82830+#ifdef CONFIG_GRKERNSEC_HIDESYM
82831+#define __INCLUDED_BY_HIDESYM 1
82832+#endif
82833 #include <linux/kallsyms.h>
82834 #include <linux/module.h>
82835 #include <linux/init.h>
82836@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
82837
82838 static inline int is_kernel_inittext(unsigned long addr)
82839 {
82840+ if (system_state != SYSTEM_BOOTING)
82841+ return 0;
82842+
82843 if (addr >= (unsigned long)_sinittext
82844 && addr <= (unsigned long)_einittext)
82845 return 1;
82846 return 0;
82847 }
82848
82849+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
82850+#ifdef CONFIG_MODULES
82851+static inline int is_module_text(unsigned long addr)
82852+{
82853+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
82854+ return 1;
82855+
82856+ addr = ktla_ktva(addr);
82857+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
82858+}
82859+#else
82860+static inline int is_module_text(unsigned long addr)
82861+{
82862+ return 0;
82863+}
82864+#endif
82865+#endif
82866+
82867 static inline int is_kernel_text(unsigned long addr)
82868 {
82869 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
82870@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
82871
82872 static inline int is_kernel(unsigned long addr)
82873 {
82874+
82875+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
82876+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
82877+ return 1;
82878+
82879+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
82880+#else
82881 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
82882+#endif
82883+
82884 return 1;
82885 return in_gate_area_no_mm(addr);
82886 }
82887
82888 static int is_ksym_addr(unsigned long addr)
82889 {
82890+
82891+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
82892+ if (is_module_text(addr))
82893+ return 0;
82894+#endif
82895+
82896 if (all_var)
82897 return is_kernel(addr);
82898
82899@@ -480,7 +519,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
82900
82901 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
82902 {
82903- iter->name[0] = '\0';
82904 iter->nameoff = get_symbol_offset(new_pos);
82905 iter->pos = new_pos;
82906 }
82907@@ -528,6 +566,11 @@ static int s_show(struct seq_file *m, void *p)
82908 {
82909 struct kallsym_iter *iter = m->private;
82910
82911+#ifdef CONFIG_GRKERNSEC_HIDESYM
82912+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
82913+ return 0;
82914+#endif
82915+
82916 /* Some debugging symbols have no name. Ignore them. */
82917 if (!iter->name[0])
82918 return 0;
82919@@ -541,6 +584,7 @@ static int s_show(struct seq_file *m, void *p)
82920 */
82921 type = iter->exported ? toupper(iter->type) :
82922 tolower(iter->type);
82923+
82924 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
82925 type, iter->name, iter->module_name);
82926 } else
82927@@ -566,7 +610,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
82928 struct kallsym_iter *iter;
82929 int ret;
82930
82931- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
82932+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
82933 if (!iter)
82934 return -ENOMEM;
82935 reset_iter(iter, 0);
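
The kmalloc-to-kzalloc change in kallsyms_open() (together with dropping the
name[0] reset from reset_iter()) ensures no field of the iterator reaches
userspace uninitialized. The userspace analogue is malloc() versus calloc():

#include <stdlib.h>
#include <stdio.h>

struct iter {
	char name[16];
	unsigned long value;
};

int main(void)
{
	/* malloc(): name[] starts as whatever was left in the heap */
	struct iter *a = malloc(sizeof(*a));
	/* calloc() (~ kzalloc): every byte starts at zero, so a field
	 * the code forgets to set cannot leak stale memory */
	struct iter *b = calloc(1, sizeof(*b));

	if (!a || !b)
		return 1;
	printf("zeroed name is empty: %d\n", b->name[0] == '\0');
	free(a);
	free(b);
	return 0;
}
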
82936diff --git a/kernel/kcmp.c b/kernel/kcmp.c
82937index e30ac0f..3528cac 100644
82938--- a/kernel/kcmp.c
82939+++ b/kernel/kcmp.c
82940@@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
82941 struct task_struct *task1, *task2;
82942 int ret;
82943
82944+#ifdef CONFIG_GRKERNSEC
82945+ return -ENOSYS;
82946+#endif
82947+
82948 rcu_read_lock();
82949
82950 /*
82951diff --git a/kernel/kexec.c b/kernel/kexec.c
82952index ecd783d..9aa270c 100644
82953--- a/kernel/kexec.c
82954+++ b/kernel/kexec.c
82955@@ -1044,7 +1044,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
82956 unsigned long flags)
82957 {
82958 struct compat_kexec_segment in;
82959- struct kexec_segment out, __user *ksegments;
82960+ struct kexec_segment out;
82961+ struct kexec_segment __user *ksegments;
82962 unsigned long i, result;
82963
82964 /* Don't allow clients that don't understand the native
82965diff --git a/kernel/kmod.c b/kernel/kmod.c
82966index b086006..6d2e579 100644
82967--- a/kernel/kmod.c
82968+++ b/kernel/kmod.c
82969@@ -75,7 +75,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
82970 kfree(info->argv);
82971 }
82972
82973-static int call_modprobe(char *module_name, int wait)
82974+static int call_modprobe(char *module_name, char *module_param, int wait)
82975 {
82976 struct subprocess_info *info;
82977 static char *envp[] = {
82978@@ -85,7 +85,7 @@ static int call_modprobe(char *module_name, int wait)
82979 NULL
82980 };
82981
82982- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
82983+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
82984 if (!argv)
82985 goto out;
82986
82987@@ -97,7 +97,8 @@ static int call_modprobe(char *module_name, int wait)
82988 argv[1] = "-q";
82989 argv[2] = "--";
82990 argv[3] = module_name; /* check free_modprobe_argv() */
82991- argv[4] = NULL;
82992+ argv[4] = module_param;
82993+ argv[5] = NULL;
82994
82995 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
82996 NULL, free_modprobe_argv, NULL);
82997@@ -129,9 +130,8 @@ out:
82998 * If module auto-loading support is disabled then this function
82999 * becomes a no-operation.
83000 */
83001-int __request_module(bool wait, const char *fmt, ...)
83002+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
83003 {
83004- va_list args;
83005 char module_name[MODULE_NAME_LEN];
83006 unsigned int max_modprobes;
83007 int ret;
83008@@ -150,9 +150,7 @@ int __request_module(bool wait, const char *fmt, ...)
83009 if (!modprobe_path[0])
83010 return 0;
83011
83012- va_start(args, fmt);
83013- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
83014- va_end(args);
83015+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
83016 if (ret >= MODULE_NAME_LEN)
83017 return -ENAMETOOLONG;
83018
83019@@ -160,6 +158,20 @@ int __request_module(bool wait, const char *fmt, ...)
83020 if (ret)
83021 return ret;
83022
83023+#ifdef CONFIG_GRKERNSEC_MODHARDEN
83024+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
83025+	/* hack to work around consolekit/udisks stupidity */
83026+ read_lock(&tasklist_lock);
83027+ if (!strcmp(current->comm, "mount") &&
83028+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
83029+ read_unlock(&tasklist_lock);
83030+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
83031+ return -EPERM;
83032+ }
83033+ read_unlock(&tasklist_lock);
83034+ }
83035+#endif
83036+
83037 /* If modprobe needs a service that is in a module, we get a recursive
83038 * loop. Limit the number of running kmod threads to max_threads/2 or
83039 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
83040@@ -188,11 +200,52 @@ int __request_module(bool wait, const char *fmt, ...)
83041
83042 trace_module_request(module_name, wait, _RET_IP_);
83043
83044- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
83045+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
83046
83047 atomic_dec(&kmod_concurrent);
83048 return ret;
83049 }
83050+
83051+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
83052+{
83053+ va_list args;
83054+ int ret;
83055+
83056+ va_start(args, fmt);
83057+ ret = ____request_module(wait, module_param, fmt, args);
83058+ va_end(args);
83059+
83060+ return ret;
83061+}
83062+
83063+int __request_module(bool wait, const char *fmt, ...)
83064+{
83065+ va_list args;
83066+ int ret;
83067+
83068+#ifdef CONFIG_GRKERNSEC_MODHARDEN
83069+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
83070+ char module_param[MODULE_NAME_LEN];
83071+
83072+ memset(module_param, 0, sizeof(module_param));
83073+
83074+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
83075+
83076+ va_start(args, fmt);
83077+ ret = ____request_module(wait, module_param, fmt, args);
83078+ va_end(args);
83079+
83080+ return ret;
83081+ }
83082+#endif
83083+
83084+ va_start(args, fmt);
83085+ ret = ____request_module(wait, NULL, fmt, args);
83086+ va_end(args);
83087+
83088+ return ret;
83089+}
83090+
83091 EXPORT_SYMBOL(__request_module);
83092 #endif /* CONFIG_MODULES */
83093
83094@@ -218,6 +271,19 @@ static int ____call_usermodehelper(void *data)
83095 */
83096 set_user_nice(current, 0);
83097
83098+#ifdef CONFIG_GRKERNSEC
83099+	/* This is race-free as far as userland is concerned: we copied
83100+	   out the path to be used before this point and are now operating
83101+	   on that copy.
83102+	*/
83103+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
83104+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7)) || strstr(sub_info->path, "..")) {
83105+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of /sbin and system library paths\n", sub_info->path);
83106+ retval = -EPERM;
83107+ goto fail;
83108+ }
83109+#endif
83110+
83111 retval = -ENOMEM;
83112 new = prepare_kernel_cred(current);
83113 if (!new)
83114@@ -260,6 +326,10 @@ static int call_helper(void *data)
83115
83116 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
83117 {
83118+#ifdef CONFIG_GRKERNSEC
83119+ kfree(info->path);
83120+ info->path = info->origpath;
83121+#endif
83122 if (info->cleanup)
83123 (*info->cleanup)(info);
83124 kfree(info);
83125@@ -303,7 +373,7 @@ static int wait_for_helper(void *data)
83126 *
83127 * Thus the __user pointer cast is valid here.
83128 */
83129- sys_wait4(pid, (int __user *)&ret, 0, NULL);
83130+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
83131
83132 /*
83133 * If ret is 0, either ____call_usermodehelper failed and the
83134@@ -542,7 +612,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
83135 goto out;
83136
83137 INIT_WORK(&sub_info->work, __call_usermodehelper);
83138+#ifdef CONFIG_GRKERNSEC
83139+ sub_info->origpath = path;
83140+ sub_info->path = kstrdup(path, gfp_mask);
83141+#else
83142 sub_info->path = path;
83143+#endif
83144 sub_info->argv = argv;
83145 sub_info->envp = envp;
83146
83147@@ -650,7 +725,7 @@ EXPORT_SYMBOL(call_usermodehelper);
83148 static int proc_cap_handler(struct ctl_table *table, int write,
83149 void __user *buffer, size_t *lenp, loff_t *ppos)
83150 {
83151- struct ctl_table t;
83152+ ctl_table_no_const t;
83153 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
83154 kernel_cap_t new_cap;
83155 int err, i;
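
The __request_module() rework above is the classic variadic-wrapper
refactoring: the formatting logic moves into one va_list worker
(____request_module) so additional entry points can thread an extra argument
(module_param) through without duplicating it. A self-contained sketch of the
pattern, with illustrative names:

#include <stdarg.h>
#include <stdio.h>

static int do_request(const char *prefix, const char *fmt, va_list ap)
{
	char name[64];

	/* one place formats the name, whoever the caller was */
	vsnprintf(name, sizeof(name), fmt, ap);
	printf("request: %s%s\n", prefix ? prefix : "", name);
	return 0;
}

static int request_tagged(const char *prefix, const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = do_request(prefix, fmt, ap);
	va_end(ap);
	return ret;
}

static int request(const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = do_request(NULL, fmt, ap);
	va_end(ap);
	return ret;
}

int main(void)
{
	request("fs-%s", "ext4");
	request_tagged("tag_", "net-pf-%d", 10);
	return 0;
}
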
83156diff --git a/kernel/kprobes.c b/kernel/kprobes.c
83157index a0d367a..11c18b6 100644
83158--- a/kernel/kprobes.c
83159+++ b/kernel/kprobes.c
83160@@ -31,6 +31,9 @@
83161 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
83162 * <prasanna@in.ibm.com> added function-return probes.
83163 */
83164+#ifdef CONFIG_GRKERNSEC_HIDESYM
83165+#define __INCLUDED_BY_HIDESYM 1
83166+#endif
83167 #include <linux/kprobes.h>
83168 #include <linux/hash.h>
83169 #include <linux/init.h>
83170@@ -135,12 +138,12 @@ enum kprobe_slot_state {
83171
83172 static void *alloc_insn_page(void)
83173 {
83174- return module_alloc(PAGE_SIZE);
83175+ return module_alloc_exec(PAGE_SIZE);
83176 }
83177
83178 static void free_insn_page(void *page)
83179 {
83180- module_free(NULL, page);
83181+ module_free_exec(NULL, page);
83182 }
83183
83184 struct kprobe_insn_cache kprobe_insn_slots = {
83185@@ -2066,7 +2069,7 @@ static int __init init_kprobes(void)
83186 {
83187 int i, err = 0;
83188 unsigned long offset = 0, size = 0;
83189- char *modname, namebuf[128];
83190+ char *modname, namebuf[KSYM_NAME_LEN];
83191 const char *symbol_name;
83192 void *addr;
83193 struct kprobe_blackpoint *kb;
83194@@ -2151,11 +2154,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
83195 kprobe_type = "k";
83196
83197 if (sym)
83198- seq_printf(pi, "%p %s %s+0x%x %s ",
83199+ seq_printf(pi, "%pK %s %s+0x%x %s ",
83200 p->addr, kprobe_type, sym, offset,
83201 (modname ? modname : " "));
83202 else
83203- seq_printf(pi, "%p %s %p ",
83204+ seq_printf(pi, "%pK %s %pK ",
83205 p->addr, kprobe_type, p->addr);
83206
83207 if (!pp)
83208@@ -2192,7 +2195,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
83209 const char *sym = NULL;
83210 unsigned int i = *(loff_t *) v;
83211 unsigned long offset = 0;
83212- char *modname, namebuf[128];
83213+ char *modname, namebuf[KSYM_NAME_LEN];
83214
83215 head = &kprobe_table[i];
83216 preempt_disable();
83217diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
83218index 9659d38..bffd520 100644
83219--- a/kernel/ksysfs.c
83220+++ b/kernel/ksysfs.c
83221@@ -46,6 +46,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
83222 {
83223 if (count+1 > UEVENT_HELPER_PATH_LEN)
83224 return -ENOENT;
83225+ if (!capable(CAP_SYS_ADMIN))
83226+ return -EPERM;
83227 memcpy(uevent_helper, buf, count);
83228 uevent_helper[count] = '\0';
83229 if (count && uevent_helper[count-1] == '\n')
83230@@ -172,7 +174,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
83231 return count;
83232 }
83233
83234-static struct bin_attribute notes_attr = {
83235+static bin_attribute_no_const notes_attr __read_only = {
83236 .attr = {
83237 .name = "notes",
83238 .mode = S_IRUGO,
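
uevent_helper_store() above gains a capability check before the copy, so only
privileged writers can change the helper path. A userspace approximation of
the same validate-privilege-first shape, with geteuid() standing in for
capable(CAP_SYS_ADMIN) (the kernel check is finer-grained than uid 0):

#include <unistd.h>
#include <errno.h>
#include <stdio.h>

static int uevent_helper_store(const char *buf, size_t count)
{
	if (count + 1 > 256)
		return -ENOENT;
	if (geteuid() != 0)	/* stand-in for !capable(CAP_SYS_ADMIN) */
		return -EPERM;
	/* ... copy buf into the helper path ... */
	return (int)count;
}

int main(void)
{
	int ret = uevent_helper_store("/sbin/hotplug", 13);

	printf("store returned %d\n", ret);
	return 0;
}
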
83239diff --git a/kernel/lockdep.c b/kernel/lockdep.c
83240index e16c45b..6f49c48 100644
83241--- a/kernel/lockdep.c
83242+++ b/kernel/lockdep.c
83243@@ -596,6 +596,10 @@ static int static_obj(void *obj)
83244 end = (unsigned long) &_end,
83245 addr = (unsigned long) obj;
83246
83247+#ifdef CONFIG_PAX_KERNEXEC
83248+ start = ktla_ktva(start);
83249+#endif
83250+
83251 /*
83252 * static variable?
83253 */
83254@@ -736,6 +740,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
83255 if (!static_obj(lock->key)) {
83256 debug_locks_off();
83257 printk("INFO: trying to register non-static key.\n");
83258+ printk("lock:%pS key:%pS.\n", lock, lock->key);
83259 printk("the code is fine but needs lockdep annotation.\n");
83260 printk("turning off the locking correctness validator.\n");
83261 dump_stack();
83262@@ -3080,7 +3085,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
83263 if (!class)
83264 return 0;
83265 }
83266- atomic_inc((atomic_t *)&class->ops);
83267+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
83268 if (very_verbose(class)) {
83269 printk("\nacquire class [%p] %s", class->key, class->name);
83270 if (class->name_version > 1)
83271diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
83272index b2c71c5..7b88d63 100644
83273--- a/kernel/lockdep_proc.c
83274+++ b/kernel/lockdep_proc.c
83275@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
83276 return 0;
83277 }
83278
83279- seq_printf(m, "%p", class->key);
83280+ seq_printf(m, "%pK", class->key);
83281 #ifdef CONFIG_DEBUG_LOCKDEP
83282 seq_printf(m, " OPS:%8ld", class->ops);
83283 #endif
83284@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
83285
83286 list_for_each_entry(entry, &class->locks_after, entry) {
83287 if (entry->distance == 1) {
83288- seq_printf(m, " -> [%p] ", entry->class->key);
83289+ seq_printf(m, " -> [%pK] ", entry->class->key);
83290 print_name(m, entry->class);
83291 seq_puts(m, "\n");
83292 }
83293@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
83294 if (!class->key)
83295 continue;
83296
83297- seq_printf(m, "[%p] ", class->key);
83298+ seq_printf(m, "[%pK] ", class->key);
83299 print_name(m, class);
83300 seq_puts(m, "\n");
83301 }
83302@@ -495,7 +495,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
83303 if (!i)
83304 seq_line(m, '-', 40-namelen, namelen);
83305
83306- snprintf(ip, sizeof(ip), "[<%p>]",
83307+ snprintf(ip, sizeof(ip), "[<%pK>]",
83308 (void *)class->contention_point[i]);
83309 seq_printf(m, "%40s %14lu %29s %pS\n",
83310 name, stats->contention_point[i],
83311@@ -510,7 +510,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
83312 if (!i)
83313 seq_line(m, '-', 40-namelen, namelen);
83314
83315- snprintf(ip, sizeof(ip), "[<%p>]",
83316+ snprintf(ip, sizeof(ip), "[<%pK>]",
83317 (void *)class->contending_point[i]);
83318 seq_printf(m, "%40s %14lu %29s %pS\n",
83319 name, stats->contending_point[i],
83320diff --git a/kernel/module.c b/kernel/module.c
83321index dc58274..3ddfa55 100644
83322--- a/kernel/module.c
83323+++ b/kernel/module.c
83324@@ -61,6 +61,7 @@
83325 #include <linux/pfn.h>
83326 #include <linux/bsearch.h>
83327 #include <linux/fips.h>
83328+#include <linux/grsecurity.h>
83329 #include <uapi/linux/module.h>
83330 #include "module-internal.h"
83331
83332@@ -157,7 +158,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
83333
83334 /* Bounds of module allocation, for speeding __module_address.
83335 * Protected by module_mutex. */
83336-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
83337+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
83338+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
83339
83340 int register_module_notifier(struct notifier_block * nb)
83341 {
83342@@ -324,7 +326,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
83343 return true;
83344
83345 list_for_each_entry_rcu(mod, &modules, list) {
83346- struct symsearch arr[] = {
83347+ struct symsearch modarr[] = {
83348 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
83349 NOT_GPL_ONLY, false },
83350 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
83351@@ -349,7 +351,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
83352 if (mod->state == MODULE_STATE_UNFORMED)
83353 continue;
83354
83355- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
83356+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
83357 return true;
83358 }
83359 return false;
83360@@ -491,7 +493,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
83361 if (!pcpusec->sh_size)
83362 return 0;
83363
83364- if (align > PAGE_SIZE) {
83365+ if (align-1 >= PAGE_SIZE) {
83366 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
83367 mod->name, align, PAGE_SIZE);
83368 align = PAGE_SIZE;
83369@@ -1097,7 +1099,7 @@ struct module_attribute module_uevent =
83370 static ssize_t show_coresize(struct module_attribute *mattr,
83371 struct module_kobject *mk, char *buffer)
83372 {
83373- return sprintf(buffer, "%u\n", mk->mod->core_size);
83374+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
83375 }
83376
83377 static struct module_attribute modinfo_coresize =
83378@@ -1106,7 +1108,7 @@ static struct module_attribute modinfo_coresize =
83379 static ssize_t show_initsize(struct module_attribute *mattr,
83380 struct module_kobject *mk, char *buffer)
83381 {
83382- return sprintf(buffer, "%u\n", mk->mod->init_size);
83383+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
83384 }
83385
83386 static struct module_attribute modinfo_initsize =
83387@@ -1321,7 +1323,7 @@ resolve_symbol_wait(struct module *mod,
83388 */
83389 #ifdef CONFIG_SYSFS
83390
83391-#ifdef CONFIG_KALLSYMS
83392+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
83393 static inline bool sect_empty(const Elf_Shdr *sect)
83394 {
83395 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
83396@@ -1461,7 +1463,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
83397 {
83398 unsigned int notes, loaded, i;
83399 struct module_notes_attrs *notes_attrs;
83400- struct bin_attribute *nattr;
83401+ bin_attribute_no_const *nattr;
83402
83403 /* failed to create section attributes, so can't create notes */
83404 if (!mod->sect_attrs)
83405@@ -1573,7 +1575,7 @@ static void del_usage_links(struct module *mod)
83406 static int module_add_modinfo_attrs(struct module *mod)
83407 {
83408 struct module_attribute *attr;
83409- struct module_attribute *temp_attr;
83410+ module_attribute_no_const *temp_attr;
83411 int error = 0;
83412 int i;
83413
83414@@ -1795,21 +1797,21 @@ static void set_section_ro_nx(void *base,
83415
83416 static void unset_module_core_ro_nx(struct module *mod)
83417 {
83418- set_page_attributes(mod->module_core + mod->core_text_size,
83419- mod->module_core + mod->core_size,
83420+ set_page_attributes(mod->module_core_rw,
83421+ mod->module_core_rw + mod->core_size_rw,
83422 set_memory_x);
83423- set_page_attributes(mod->module_core,
83424- mod->module_core + mod->core_ro_size,
83425+ set_page_attributes(mod->module_core_rx,
83426+ mod->module_core_rx + mod->core_size_rx,
83427 set_memory_rw);
83428 }
83429
83430 static void unset_module_init_ro_nx(struct module *mod)
83431 {
83432- set_page_attributes(mod->module_init + mod->init_text_size,
83433- mod->module_init + mod->init_size,
83434+ set_page_attributes(mod->module_init_rw,
83435+ mod->module_init_rw + mod->init_size_rw,
83436 set_memory_x);
83437- set_page_attributes(mod->module_init,
83438- mod->module_init + mod->init_ro_size,
83439+ set_page_attributes(mod->module_init_rx,
83440+ mod->module_init_rx + mod->init_size_rx,
83441 set_memory_rw);
83442 }
83443
83444@@ -1822,14 +1824,14 @@ void set_all_modules_text_rw(void)
83445 list_for_each_entry_rcu(mod, &modules, list) {
83446 if (mod->state == MODULE_STATE_UNFORMED)
83447 continue;
83448- if ((mod->module_core) && (mod->core_text_size)) {
83449- set_page_attributes(mod->module_core,
83450- mod->module_core + mod->core_text_size,
83451+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
83452+ set_page_attributes(mod->module_core_rx,
83453+ mod->module_core_rx + mod->core_size_rx,
83454 set_memory_rw);
83455 }
83456- if ((mod->module_init) && (mod->init_text_size)) {
83457- set_page_attributes(mod->module_init,
83458- mod->module_init + mod->init_text_size,
83459+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
83460+ set_page_attributes(mod->module_init_rx,
83461+ mod->module_init_rx + mod->init_size_rx,
83462 set_memory_rw);
83463 }
83464 }
83465@@ -1845,14 +1847,14 @@ void set_all_modules_text_ro(void)
83466 list_for_each_entry_rcu(mod, &modules, list) {
83467 if (mod->state == MODULE_STATE_UNFORMED)
83468 continue;
83469- if ((mod->module_core) && (mod->core_text_size)) {
83470- set_page_attributes(mod->module_core,
83471- mod->module_core + mod->core_text_size,
83472+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
83473+ set_page_attributes(mod->module_core_rx,
83474+ mod->module_core_rx + mod->core_size_rx,
83475 set_memory_ro);
83476 }
83477- if ((mod->module_init) && (mod->init_text_size)) {
83478- set_page_attributes(mod->module_init,
83479- mod->module_init + mod->init_text_size,
83480+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
83481+ set_page_attributes(mod->module_init_rx,
83482+ mod->module_init_rx + mod->init_size_rx,
83483 set_memory_ro);
83484 }
83485 }
83486@@ -1903,16 +1905,19 @@ static void free_module(struct module *mod)
83487
83488 /* This may be NULL, but that's OK */
83489 unset_module_init_ro_nx(mod);
83490- module_free(mod, mod->module_init);
83491+ module_free(mod, mod->module_init_rw);
83492+ module_free_exec(mod, mod->module_init_rx);
83493 kfree(mod->args);
83494 percpu_modfree(mod);
83495
83496 /* Free lock-classes: */
83497- lockdep_free_key_range(mod->module_core, mod->core_size);
83498+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
83499+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
83500
83501 /* Finally, free the core (containing the module structure) */
83502 unset_module_core_ro_nx(mod);
83503- module_free(mod, mod->module_core);
83504+ module_free_exec(mod, mod->module_core_rx);
83505+ module_free(mod, mod->module_core_rw);
83506
83507 #ifdef CONFIG_MPU
83508 update_protections(current->mm);
83509@@ -1982,9 +1987,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
83510 int ret = 0;
83511 const struct kernel_symbol *ksym;
83512
83513+#ifdef CONFIG_GRKERNSEC_MODHARDEN
83514+ int is_fs_load = 0;
83515+ int register_filesystem_found = 0;
83516+ char *p;
83517+
83518+ p = strstr(mod->args, "grsec_modharden_fs");
83519+ if (p) {
83520+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
83521+ /* copy \0 as well */
83522+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
83523+ is_fs_load = 1;
83524+ }
83525+#endif
83526+
83527 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
83528 const char *name = info->strtab + sym[i].st_name;
83529
83530+#ifdef CONFIG_GRKERNSEC_MODHARDEN
83531+ /* it's a real shame this will never get ripped and copied
83532+ upstream! ;(
83533+ */
83534+ if (is_fs_load && !strcmp(name, "register_filesystem"))
83535+ register_filesystem_found = 1;
83536+#endif
83537+
83538 switch (sym[i].st_shndx) {
83539 case SHN_COMMON:
83540 /* We compiled with -fno-common. These are not
83541@@ -2005,7 +2032,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
83542 ksym = resolve_symbol_wait(mod, info, name);
83543 /* Ok if resolved. */
83544 if (ksym && !IS_ERR(ksym)) {
83545+ pax_open_kernel();
83546 sym[i].st_value = ksym->value;
83547+ pax_close_kernel();
83548 break;
83549 }
83550
83551@@ -2024,11 +2053,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
83552 secbase = (unsigned long)mod_percpu(mod);
83553 else
83554 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
83555+ pax_open_kernel();
83556 sym[i].st_value += secbase;
83557+ pax_close_kernel();
83558 break;
83559 }
83560 }
83561
83562+#ifdef CONFIG_GRKERNSEC_MODHARDEN
83563+ if (is_fs_load && !register_filesystem_found) {
83564+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
83565+ ret = -EPERM;
83566+ }
83567+#endif
83568+
83569 return ret;
83570 }
83571
83572@@ -2112,22 +2150,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
83573 || s->sh_entsize != ~0UL
83574 || strstarts(sname, ".init"))
83575 continue;
83576- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
83577+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
83578+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
83579+ else
83580+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
83581 pr_debug("\t%s\n", sname);
83582 }
83583- switch (m) {
83584- case 0: /* executable */
83585- mod->core_size = debug_align(mod->core_size);
83586- mod->core_text_size = mod->core_size;
83587- break;
83588- case 1: /* RO: text and ro-data */
83589- mod->core_size = debug_align(mod->core_size);
83590- mod->core_ro_size = mod->core_size;
83591- break;
83592- case 3: /* whole core */
83593- mod->core_size = debug_align(mod->core_size);
83594- break;
83595- }
83596 }
83597
83598 pr_debug("Init section allocation order:\n");
83599@@ -2141,23 +2169,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
83600 || s->sh_entsize != ~0UL
83601 || !strstarts(sname, ".init"))
83602 continue;
83603- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
83604- | INIT_OFFSET_MASK);
83605+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
83606+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
83607+ else
83608+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
83609+ s->sh_entsize |= INIT_OFFSET_MASK;
83610 pr_debug("\t%s\n", sname);
83611 }
83612- switch (m) {
83613- case 0: /* executable */
83614- mod->init_size = debug_align(mod->init_size);
83615- mod->init_text_size = mod->init_size;
83616- break;
83617- case 1: /* RO: text and ro-data */
83618- mod->init_size = debug_align(mod->init_size);
83619- mod->init_ro_size = mod->init_size;
83620- break;
83621- case 3: /* whole init */
83622- mod->init_size = debug_align(mod->init_size);
83623- break;
83624- }
83625 }
83626 }
83627
83628@@ -2330,7 +2348,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
83629
83630 /* Put symbol section at end of init part of module. */
83631 symsect->sh_flags |= SHF_ALLOC;
83632- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
83633+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
83634 info->index.sym) | INIT_OFFSET_MASK;
83635 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
83636
83637@@ -2347,13 +2365,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
83638 }
83639
83640 /* Append room for core symbols at end of core part. */
83641- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
83642- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
83643- mod->core_size += strtab_size;
83644+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
83645+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
83646+ mod->core_size_rx += strtab_size;
83647
83648 /* Put string table section at end of init part of module. */
83649 strsect->sh_flags |= SHF_ALLOC;
83650- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
83651+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
83652 info->index.str) | INIT_OFFSET_MASK;
83653 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
83654 }
83655@@ -2371,12 +2389,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
83656 /* Make sure we get permanent strtab: don't use info->strtab. */
83657 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
83658
83659+ pax_open_kernel();
83660+
83661 /* Set types up while we still have access to sections. */
83662 for (i = 0; i < mod->num_symtab; i++)
83663 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
83664
83665- mod->core_symtab = dst = mod->module_core + info->symoffs;
83666- mod->core_strtab = s = mod->module_core + info->stroffs;
83667+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
83668+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
83669 src = mod->symtab;
83670 for (ndst = i = 0; i < mod->num_symtab; i++) {
83671 if (i == 0 ||
83672@@ -2388,6 +2408,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
83673 }
83674 }
83675 mod->core_num_syms = ndst;
83676+
83677+ pax_close_kernel();
83678 }
83679 #else
83680 static inline void layout_symtab(struct module *mod, struct load_info *info)
83681@@ -2421,17 +2443,33 @@ void * __weak module_alloc(unsigned long size)
83682 return vmalloc_exec(size);
83683 }
83684
83685-static void *module_alloc_update_bounds(unsigned long size)
83686+static void *module_alloc_update_bounds_rw(unsigned long size)
83687 {
83688 void *ret = module_alloc(size);
83689
83690 if (ret) {
83691 mutex_lock(&module_mutex);
83692 /* Update module bounds. */
83693- if ((unsigned long)ret < module_addr_min)
83694- module_addr_min = (unsigned long)ret;
83695- if ((unsigned long)ret + size > module_addr_max)
83696- module_addr_max = (unsigned long)ret + size;
83697+ if ((unsigned long)ret < module_addr_min_rw)
83698+ module_addr_min_rw = (unsigned long)ret;
83699+ if ((unsigned long)ret + size > module_addr_max_rw)
83700+ module_addr_max_rw = (unsigned long)ret + size;
83701+ mutex_unlock(&module_mutex);
83702+ }
83703+ return ret;
83704+}
83705+
83706+static void *module_alloc_update_bounds_rx(unsigned long size)
83707+{
83708+ void *ret = module_alloc_exec(size);
83709+
83710+ if (ret) {
83711+ mutex_lock(&module_mutex);
83712+ /* Update module bounds. */
83713+ if ((unsigned long)ret < module_addr_min_rx)
83714+ module_addr_min_rx = (unsigned long)ret;
83715+ if ((unsigned long)ret + size > module_addr_max_rx)
83716+ module_addr_max_rx = (unsigned long)ret + size;
83717 mutex_unlock(&module_mutex);
83718 }
83719 return ret;
83720@@ -2706,8 +2744,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
83721 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
83722 {
83723 const char *modmagic = get_modinfo(info, "vermagic");
83724+ const char *license = get_modinfo(info, "license");
83725 int err;
83726
83727+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
83728+ if (!license || !license_is_gpl_compatible(license))
83729+ return -ENOEXEC;
83730+#endif
83731+
83732 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
83733 modmagic = NULL;
83734
83735@@ -2733,7 +2777,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
83736 }
83737
83738 /* Set up license info based on the info section */
83739- set_license(mod, get_modinfo(info, "license"));
83740+ set_license(mod, license);
83741
83742 return 0;
83743 }
83744@@ -2814,7 +2858,7 @@ static int move_module(struct module *mod, struct load_info *info)
83745 void *ptr;
83746
83747 /* Do the allocs. */
83748- ptr = module_alloc_update_bounds(mod->core_size);
83749+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
83750 /*
83751 * The pointer to this block is stored in the module structure
83752 * which is inside the block. Just mark it as not being a
83753@@ -2824,11 +2868,11 @@ static int move_module(struct module *mod, struct load_info *info)
83754 if (!ptr)
83755 return -ENOMEM;
83756
83757- memset(ptr, 0, mod->core_size);
83758- mod->module_core = ptr;
83759+ memset(ptr, 0, mod->core_size_rw);
83760+ mod->module_core_rw = ptr;
83761
83762- if (mod->init_size) {
83763- ptr = module_alloc_update_bounds(mod->init_size);
83764+ if (mod->init_size_rw) {
83765+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
83766 /*
83767 * The pointer to this block is stored in the module structure
83768 * which is inside the block. This block doesn't need to be
83769@@ -2837,13 +2881,45 @@ static int move_module(struct module *mod, struct load_info *info)
83770 */
83771 kmemleak_ignore(ptr);
83772 if (!ptr) {
83773- module_free(mod, mod->module_core);
83774+ module_free(mod, mod->module_core_rw);
83775 return -ENOMEM;
83776 }
83777- memset(ptr, 0, mod->init_size);
83778- mod->module_init = ptr;
83779+ memset(ptr, 0, mod->init_size_rw);
83780+ mod->module_init_rw = ptr;
83781 } else
83782- mod->module_init = NULL;
83783+ mod->module_init_rw = NULL;
83784+
83785+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
83786+ kmemleak_not_leak(ptr);
83787+ if (!ptr) {
83788+ if (mod->module_init_rw)
83789+ module_free(mod, mod->module_init_rw);
83790+ module_free(mod, mod->module_core_rw);
83791+ return -ENOMEM;
83792+ }
83793+
83794+ pax_open_kernel();
83795+ memset(ptr, 0, mod->core_size_rx);
83796+ pax_close_kernel();
83797+ mod->module_core_rx = ptr;
83798+
83799+ if (mod->init_size_rx) {
83800+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
83801+ kmemleak_ignore(ptr);
83802+ if (!ptr && mod->init_size_rx) {
83803+ module_free_exec(mod, mod->module_core_rx);
83804+ if (mod->module_init_rw)
83805+ module_free(mod, mod->module_init_rw);
83806+ module_free(mod, mod->module_core_rw);
83807+ return -ENOMEM;
83808+ }
83809+
83810+ pax_open_kernel();
83811+ memset(ptr, 0, mod->init_size_rx);
83812+ pax_close_kernel();
83813+ mod->module_init_rx = ptr;
83814+ } else
83815+ mod->module_init_rx = NULL;
83816
83817 /* Transfer each section which specifies SHF_ALLOC */
83818 pr_debug("final section addresses:\n");
83819@@ -2854,16 +2930,45 @@ static int move_module(struct module *mod, struct load_info *info)
83820 if (!(shdr->sh_flags & SHF_ALLOC))
83821 continue;
83822
83823- if (shdr->sh_entsize & INIT_OFFSET_MASK)
83824- dest = mod->module_init
83825- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
83826- else
83827- dest = mod->module_core + shdr->sh_entsize;
83828+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
83829+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
83830+ dest = mod->module_init_rw
83831+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
83832+ else
83833+ dest = mod->module_init_rx
83834+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
83835+ } else {
83836+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
83837+ dest = mod->module_core_rw + shdr->sh_entsize;
83838+ else
83839+ dest = mod->module_core_rx + shdr->sh_entsize;
83840+ }
83841+
83842+ if (shdr->sh_type != SHT_NOBITS) {
83843+
83844+#ifdef CONFIG_PAX_KERNEXEC
83845+#ifdef CONFIG_X86_64
83846+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
83847+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
83848+#endif
83849+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
83850+ pax_open_kernel();
83851+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
83852+ pax_close_kernel();
83853+ } else
83854+#endif
83855
83856- if (shdr->sh_type != SHT_NOBITS)
83857 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
83858+ }
83859 /* Update sh_addr to point to copy in image. */
83860- shdr->sh_addr = (unsigned long)dest;
83861+
83862+#ifdef CONFIG_PAX_KERNEXEC
83863+ if (shdr->sh_flags & SHF_EXECINSTR)
83864+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
83865+ else
83866+#endif
83867+
83868+ shdr->sh_addr = (unsigned long)dest;
83869 pr_debug("\t0x%lx %s\n",
83870 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
83871 }
83872@@ -2920,12 +3025,12 @@ static void flush_module_icache(const struct module *mod)
83873 * Do it before processing of module parameters, so the module
83874 * can provide parameter accessor functions of its own.
83875 */
83876- if (mod->module_init)
83877- flush_icache_range((unsigned long)mod->module_init,
83878- (unsigned long)mod->module_init
83879- + mod->init_size);
83880- flush_icache_range((unsigned long)mod->module_core,
83881- (unsigned long)mod->module_core + mod->core_size);
83882+ if (mod->module_init_rx)
83883+ flush_icache_range((unsigned long)mod->module_init_rx,
83884+ (unsigned long)mod->module_init_rx
83885+ + mod->init_size_rx);
83886+ flush_icache_range((unsigned long)mod->module_core_rx,
83887+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
83888
83889 set_fs(old_fs);
83890 }
83891@@ -2982,8 +3087,10 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
83892 static void module_deallocate(struct module *mod, struct load_info *info)
83893 {
83894 percpu_modfree(mod);
83895- module_free(mod, mod->module_init);
83896- module_free(mod, mod->module_core);
83897+ module_free_exec(mod, mod->module_init_rx);
83898+ module_free_exec(mod, mod->module_core_rx);
83899+ module_free(mod, mod->module_init_rw);
83900+ module_free(mod, mod->module_core_rw);
83901 }
83902
83903 int __weak module_finalize(const Elf_Ehdr *hdr,
83904@@ -2996,7 +3103,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
83905 static int post_relocation(struct module *mod, const struct load_info *info)
83906 {
83907 /* Sort exception table now relocations are done. */
83908+ pax_open_kernel();
83909 sort_extable(mod->extable, mod->extable + mod->num_exentries);
83910+ pax_close_kernel();
83911
83912 /* Copy relocated percpu area over. */
83913 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
83914@@ -3050,16 +3159,16 @@ static int do_init_module(struct module *mod)
83915 MODULE_STATE_COMING, mod);
83916
83917 /* Set RO and NX regions for core */
83918- set_section_ro_nx(mod->module_core,
83919- mod->core_text_size,
83920- mod->core_ro_size,
83921- mod->core_size);
83922+ set_section_ro_nx(mod->module_core_rx,
83923+ mod->core_size_rx,
83924+ mod->core_size_rx,
83925+ mod->core_size_rx);
83926
83927 /* Set RO and NX regions for init */
83928- set_section_ro_nx(mod->module_init,
83929- mod->init_text_size,
83930- mod->init_ro_size,
83931- mod->init_size);
83932+ set_section_ro_nx(mod->module_init_rx,
83933+ mod->init_size_rx,
83934+ mod->init_size_rx,
83935+ mod->init_size_rx);
83936
83937 do_mod_ctors(mod);
83938 /* Start the module */
83939@@ -3121,11 +3230,12 @@ static int do_init_module(struct module *mod)
83940 mod->strtab = mod->core_strtab;
83941 #endif
83942 unset_module_init_ro_nx(mod);
83943- module_free(mod, mod->module_init);
83944- mod->module_init = NULL;
83945- mod->init_size = 0;
83946- mod->init_ro_size = 0;
83947- mod->init_text_size = 0;
83948+ module_free(mod, mod->module_init_rw);
83949+ module_free_exec(mod, mod->module_init_rx);
83950+ mod->module_init_rw = NULL;
83951+ mod->module_init_rx = NULL;
83952+ mod->init_size_rw = 0;
83953+ mod->init_size_rx = 0;
83954 mutex_unlock(&module_mutex);
83955 wake_up_all(&module_wq);
83956
83957@@ -3269,9 +3379,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
83958 if (err)
83959 goto free_unload;
83960
83961+ /* Now copy in args */
83962+ mod->args = strndup_user(uargs, ~0UL >> 1);
83963+ if (IS_ERR(mod->args)) {
83964+ err = PTR_ERR(mod->args);
83965+ goto free_unload;
83966+ }
83967+
83968 /* Set up MODINFO_ATTR fields */
83969 setup_modinfo(mod, info);
83970
83971+#ifdef CONFIG_GRKERNSEC_MODHARDEN
83972+ {
83973+ char *p, *p2;
83974+
83975+ if (strstr(mod->args, "grsec_modharden_netdev")) {
83976+			printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.\n", mod->name);
83977+ err = -EPERM;
83978+ goto free_modinfo;
83979+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
83980+ p += sizeof("grsec_modharden_normal") - 1;
83981+ p2 = strstr(p, "_");
83982+ if (p2) {
83983+ *p2 = '\0';
83984+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
83985+ *p2 = '_';
83986+ }
83987+ err = -EPERM;
83988+ goto free_modinfo;
83989+ }
83990+ }
83991+#endif
83992+
83993 /* Fix up syms, so that st_value is a pointer to location. */
83994 err = simplify_symbols(mod, info);
83995 if (err < 0)
83996@@ -3287,13 +3426,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
83997
83998 flush_module_icache(mod);
83999
84000- /* Now copy in args */
84001- mod->args = strndup_user(uargs, ~0UL >> 1);
84002- if (IS_ERR(mod->args)) {
84003- err = PTR_ERR(mod->args);
84004- goto free_arch_cleanup;
84005- }
84006-
84007 dynamic_debug_setup(info->debug, info->num_debug);
84008
84009 /* Finally it's fully formed, ready to start executing. */
84010@@ -3328,11 +3460,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
84011 ddebug_cleanup:
84012 dynamic_debug_remove(info->debug);
84013 synchronize_sched();
84014- kfree(mod->args);
84015- free_arch_cleanup:
84016 module_arch_cleanup(mod);
84017 free_modinfo:
84018 free_modinfo(mod);
84019+ kfree(mod->args);
84020 free_unload:
84021 module_unload_free(mod);
84022 unlink_mod:
84023@@ -3415,10 +3546,16 @@ static const char *get_ksymbol(struct module *mod,
84024 unsigned long nextval;
84025
84026 /* At worse, next value is at end of module */
84027- if (within_module_init(addr, mod))
84028- nextval = (unsigned long)mod->module_init+mod->init_text_size;
84029+ if (within_module_init_rx(addr, mod))
84030+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
84031+ else if (within_module_init_rw(addr, mod))
84032+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
84033+ else if (within_module_core_rx(addr, mod))
84034+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
84035+ else if (within_module_core_rw(addr, mod))
84036+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
84037 else
84038- nextval = (unsigned long)mod->module_core+mod->core_text_size;
84039+ return NULL;
84040
84041 /* Scan for closest preceding symbol, and next symbol. (ELF
84042 starts real symbols at 1). */
84043@@ -3669,7 +3806,7 @@ static int m_show(struct seq_file *m, void *p)
84044 return 0;
84045
84046 seq_printf(m, "%s %u",
84047- mod->name, mod->init_size + mod->core_size);
84048+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
84049 print_unload_info(m, mod);
84050
84051 /* Informative for users. */
84052@@ -3678,7 +3815,7 @@ static int m_show(struct seq_file *m, void *p)
84053 mod->state == MODULE_STATE_COMING ? "Loading":
84054 "Live");
84055 /* Used by oprofile and other similar tools. */
84056- seq_printf(m, " 0x%pK", mod->module_core);
84057+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
84058
84059 /* Taints info */
84060 if (mod->taints)
84061@@ -3714,7 +3851,17 @@ static const struct file_operations proc_modules_operations = {
84062
84063 static int __init proc_modules_init(void)
84064 {
84065+#ifndef CONFIG_GRKERNSEC_HIDESYM
84066+#ifdef CONFIG_GRKERNSEC_PROC_USER
84067+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
84068+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
84069+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
84070+#else
84071 proc_create("modules", 0, NULL, &proc_modules_operations);
84072+#endif
84073+#else
84074+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
84075+#endif
84076 return 0;
84077 }
84078 module_init(proc_modules_init);
84079@@ -3775,14 +3922,14 @@ struct module *__module_address(unsigned long addr)
84080 {
84081 struct module *mod;
84082
84083- if (addr < module_addr_min || addr > module_addr_max)
84084+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
84085+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
84086 return NULL;
84087
84088 list_for_each_entry_rcu(mod, &modules, list) {
84089 if (mod->state == MODULE_STATE_UNFORMED)
84090 continue;
84091- if (within_module_core(addr, mod)
84092- || within_module_init(addr, mod))
84093+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
84094 return mod;
84095 }
84096 return NULL;
84097@@ -3817,11 +3964,20 @@ bool is_module_text_address(unsigned long addr)
84098 */
84099 struct module *__module_text_address(unsigned long addr)
84100 {
84101- struct module *mod = __module_address(addr);
84102+ struct module *mod;
84103+
84104+#ifdef CONFIG_X86_32
84105+ addr = ktla_ktva(addr);
84106+#endif
84107+
84108+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
84109+ return NULL;
84110+
84111+ mod = __module_address(addr);
84112+
84113 if (mod) {
84114 /* Make sure it's within the text section. */
84115- if (!within(addr, mod->module_init, mod->init_text_size)
84116- && !within(addr, mod->module_core, mod->core_text_size))
84117+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
84118 mod = NULL;
84119 }
84120 return mod;
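
The bulk of the kernel/module.c changes split each module image into an RX
region (code and rodata) and an RW region (writable data), so no mapping is
ever writable and executable at once. The core idea, sketched with plain
mmap()/mprotect() in userspace (illustrative only):

#include <sys/mman.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	size_t sz = 4096;
	/* rw region: stays writable, never executable */
	void *rw = mmap(NULL, sz, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	/* rx region: filled in while still writable... */
	void *rx = mmap(NULL, sz, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (rw == MAP_FAILED || rx == MAP_FAILED)
		return 1;
	memset(rx, 0xc3, 64);	/* pretend-code: x86 'ret' opcodes */
	/* ...then flipped to read+exec, dropping write for good */
	if (mprotect(rx, sz, PROT_READ | PROT_EXEC) != 0)
		return 1;
	printf("rw data at %p, rx code at %p\n", rw, rx);
	return 0;
}
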
84121diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
84122index 7e3443f..b2a1e6b 100644
84123--- a/kernel/mutex-debug.c
84124+++ b/kernel/mutex-debug.c
84125@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
84126 }
84127
84128 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
84129- struct thread_info *ti)
84130+ struct task_struct *task)
84131 {
84132 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
84133
84134 /* Mark the current thread as blocked on the lock: */
84135- ti->task->blocked_on = waiter;
84136+ task->blocked_on = waiter;
84137 }
84138
84139 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
84140- struct thread_info *ti)
84141+ struct task_struct *task)
84142 {
84143 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
84144- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
84145- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
84146- ti->task->blocked_on = NULL;
84147+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
84148+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
84149+ task->blocked_on = NULL;
84150
84151 list_del_init(&waiter->list);
84152 waiter->task = NULL;
84153diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
84154index 0799fd3..d06ae3b 100644
84155--- a/kernel/mutex-debug.h
84156+++ b/kernel/mutex-debug.h
84157@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
84158 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
84159 extern void debug_mutex_add_waiter(struct mutex *lock,
84160 struct mutex_waiter *waiter,
84161- struct thread_info *ti);
84162+ struct task_struct *task);
84163 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
84164- struct thread_info *ti);
84165+ struct task_struct *task);
84166 extern void debug_mutex_unlock(struct mutex *lock);
84167 extern void debug_mutex_init(struct mutex *lock, const char *name,
84168 struct lock_class_key *key);
84169diff --git a/kernel/mutex.c b/kernel/mutex.c
84170index d24105b..15648eb 100644
84171--- a/kernel/mutex.c
84172+++ b/kernel/mutex.c
84173@@ -135,7 +135,7 @@ void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
84174 node->locked = 1;
84175 return;
84176 }
84177- ACCESS_ONCE(prev->next) = node;
84178+ ACCESS_ONCE_RW(prev->next) = node;
84179 smp_wmb();
84180 /* Wait until the lock holder passes the lock down */
84181 while (!ACCESS_ONCE(node->locked))
84182@@ -156,7 +156,7 @@ static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
84183 while (!(next = ACCESS_ONCE(node->next)))
84184 arch_mutex_cpu_relax();
84185 }
84186- ACCESS_ONCE(next->locked) = 1;
84187+ ACCESS_ONCE_RW(next->locked) = 1;
84188 smp_wmb();
84189 }
84190
84191@@ -520,7 +520,7 @@ slowpath:
84192 goto skip_wait;
84193
84194 debug_mutex_lock_common(lock, &waiter);
84195- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
84196+ debug_mutex_add_waiter(lock, &waiter, task);
84197
84198 /* add waiting tasks to the end of the waitqueue (FIFO): */
84199 list_add_tail(&waiter.list, &lock->wait_list);
84200@@ -564,7 +564,7 @@ slowpath:
84201 schedule_preempt_disabled();
84202 spin_lock_mutex(&lock->wait_lock, flags);
84203 }
84204- mutex_remove_waiter(lock, &waiter, current_thread_info());
84205+ mutex_remove_waiter(lock, &waiter, task);
84206 /* set it to 0 if there are no waiters left: */
84207 if (likely(list_empty(&lock->wait_list)))
84208 atomic_set(&lock->count, 0);
84209@@ -601,7 +601,7 @@ skip_wait:
84210 return 0;
84211
84212 err:
84213- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
84214+ mutex_remove_waiter(lock, &waiter, task);
84215 spin_unlock_mutex(&lock->wait_lock, flags);
84216 debug_mutex_free_waiter(&waiter);
84217 mutex_release(&lock->dep_map, 1, ip);
84218diff --git a/kernel/notifier.c b/kernel/notifier.c
84219index 2d5cc4c..d9ea600 100644
84220--- a/kernel/notifier.c
84221+++ b/kernel/notifier.c
84222@@ -5,6 +5,7 @@
84223 #include <linux/rcupdate.h>
84224 #include <linux/vmalloc.h>
84225 #include <linux/reboot.h>
84226+#include <linux/mm.h>
84227
84228 /*
84229 * Notifier list for kernel code which wants to be called
84230@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
84231 while ((*nl) != NULL) {
84232 if (n->priority > (*nl)->priority)
84233 break;
84234- nl = &((*nl)->next);
84235+ nl = (struct notifier_block **)&((*nl)->next);
84236 }
84237- n->next = *nl;
84238+ pax_open_kernel();
84239+ *(const void **)&n->next = *nl;
84240 rcu_assign_pointer(*nl, n);
84241+ pax_close_kernel();
84242 return 0;
84243 }
84244
84245@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
84246 return 0;
84247 if (n->priority > (*nl)->priority)
84248 break;
84249- nl = &((*nl)->next);
84250+ nl = (struct notifier_block **)&((*nl)->next);
84251 }
84252- n->next = *nl;
84253+ pax_open_kernel();
84254+ *(const void **)&n->next = *nl;
84255 rcu_assign_pointer(*nl, n);
84256+ pax_close_kernel();
84257 return 0;
84258 }
84259
84260@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
84261 {
84262 while ((*nl) != NULL) {
84263 if ((*nl) == n) {
84264+ pax_open_kernel();
84265 rcu_assign_pointer(*nl, n->next);
84266+ pax_close_kernel();
84267 return 0;
84268 }
84269- nl = &((*nl)->next);
84270+ nl = (struct notifier_block **)&((*nl)->next);
84271 }
84272 return -ENOENT;
84273 }
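
In the notifier hunks, pax_open_kernel()/pax_close_kernel() bracket stores to data that is normally read-only (the notifier chain's next pointers are constified elsewhere in the patch). A rough userspace analogue using mprotect() to model the short write window — this is an illustration of the pattern only, not the kernel mechanism, which on x86 toggles the CR0.WP bit instead of page tables:

#include <stdio.h>
#include <stdint.h>
#include <sys/mman.h>

#define PAGE 4096UL

/* a variable meant to stay read-only almost all of the time */
static int chain_head __attribute__((aligned(4096)));

static void *page_of(void *p) { return (void *)((uintptr_t)p & ~(PAGE - 1)); }
static void open_kernel(void *p)  { mprotect(page_of(p), PAGE, PROT_READ | PROT_WRITE); }
static void close_kernel(void *p) { mprotect(page_of(p), PAGE, PROT_READ); }

int main(void)
{
    close_kernel(&chain_head);     /* steady state: no writes allowed */
    open_kernel(&chain_head);      /* pax_open_kernel() analogue */
    chain_head = 1;                /* the one sanctioned store */
    close_kernel(&chain_head);     /* pax_close_kernel() analogue */
    printf("%d\n", chain_head);
    return 0;
}
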
84274diff --git a/kernel/panic.c b/kernel/panic.c
84275index b6c482c..5578061 100644
84276--- a/kernel/panic.c
84277+++ b/kernel/panic.c
84278@@ -407,7 +407,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
84279 disable_trace_on_warning();
84280
84281 pr_warn("------------[ cut here ]------------\n");
84282- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
84283+ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
84284 raw_smp_processor_id(), current->pid, file, line, caller);
84285
84286 if (args)
84287@@ -461,7 +461,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
84288 */
84289 void __stack_chk_fail(void)
84290 {
84291- panic("stack-protector: Kernel stack is corrupted in: %p\n",
84292+ dump_stack();
84293+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
84294 __builtin_return_address(0));
84295 }
84296 EXPORT_SYMBOL(__stack_chk_fail);
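
The __stack_chk_fail() change adds a stack dump before the panic so the corrupted call chain is visible in the log, and switches the format to %pA, a pointer-printing extension added elsewhere in this patch. For readers unfamiliar with the path being hardened: any function compiled with a stack canary reaches this handler when the canary is clobbered. A deliberately broken userspace program hits the equivalent libc handler; compile with -fstack-protector-all (illustrative only):

#include <string.h>

static void clobber(const char *src)
{
    char buf[8];
    strcpy(buf, src);   /* overruns buf and smashes the canary */
}

int main(void)
{
    clobber("definitely more than eight bytes");
    return 0;           /* never reached: __stack_chk_fail() aborts first */
}
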
84297diff --git a/kernel/pid.c b/kernel/pid.c
84298index 9b9a266..c20ef80 100644
84299--- a/kernel/pid.c
84300+++ b/kernel/pid.c
84301@@ -33,6 +33,7 @@
84302 #include <linux/rculist.h>
84303 #include <linux/bootmem.h>
84304 #include <linux/hash.h>
84305+#include <linux/security.h>
84306 #include <linux/pid_namespace.h>
84307 #include <linux/init_task.h>
84308 #include <linux/syscalls.h>
84309@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
84310
84311 int pid_max = PID_MAX_DEFAULT;
84312
84313-#define RESERVED_PIDS 300
84314+#define RESERVED_PIDS 500
84315
84316 int pid_max_min = RESERVED_PIDS + 1;
84317 int pid_max_max = PID_MAX_LIMIT;
84318@@ -445,10 +446,18 @@ EXPORT_SYMBOL(pid_task);
84319 */
84320 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
84321 {
84322+ struct task_struct *task;
84323+
84324 rcu_lockdep_assert(rcu_read_lock_held(),
84325 "find_task_by_pid_ns() needs rcu_read_lock()"
84326 " protection");
84327- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
84328+
84329+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
84330+
84331+ if (gr_pid_is_chrooted(task))
84332+ return NULL;
84333+
84334+ return task;
84335 }
84336
84337 struct task_struct *find_task_by_vpid(pid_t vnr)
84338@@ -456,6 +465,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
84339 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
84340 }
84341
84342+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
84343+{
84344+ rcu_lockdep_assert(rcu_read_lock_held(),
84345+ "find_task_by_vpid_unrestricted() needs rcu_read_lock()"
84346+ " protection");
84347+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
84348+}
84349+
84350 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
84351 {
84352 struct pid *pid;
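
find_task_by_pid_ns() now filters its result through gr_pid_is_chrooted(), hiding tasks outside the caller's chroot, while the new _unrestricted variant keeps the raw behaviour for kernel-internal callers that must see every task. The shape of that lookup-then-filter pattern, with stand-in names — find_raw() and is_hidden() below are hypothetical substitutes for pid_task() and gr_pid_is_chrooted():

#include <stdio.h>
#include <stddef.h>

struct task { int pid; int chroot_id; };

static struct task table[] = { { 1, 0 }, { 300, 1 }, { 301, 2 } };
static int current_chroot = 1;          /* pretend the caller is in chroot 1 */

static struct task *find_raw(int pid)   /* pid_task() stand-in */
{
    for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
        if (table[i].pid == pid)
            return &table[i];
    return NULL;
}

static int is_hidden(const struct task *t)   /* gr_pid_is_chrooted() stand-in */
{
    return t && current_chroot && t->chroot_id != current_chroot;
}

static struct task *find_filtered(int pid)   /* find_task_by_pid_ns() shape */
{
    struct task *t = find_raw(pid);
    return is_hidden(t) ? NULL : t;
}

int main(void)
{
    printf("raw:      %p\n", (void *)find_raw(301));
    printf("filtered: %p\n", (void *)find_filtered(301));   /* NULL: hidden */
    return 0;
}
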
84353diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
84354index 4208655..19f36a5 100644
84355--- a/kernel/pid_namespace.c
84356+++ b/kernel/pid_namespace.c
84357@@ -247,7 +247,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
84358 void __user *buffer, size_t *lenp, loff_t *ppos)
84359 {
84360 struct pid_namespace *pid_ns = task_active_pid_ns(current);
84361- struct ctl_table tmp = *table;
84362+ ctl_table_no_const tmp = *table;
84363
84364 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
84365 return -EPERM;
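
ctl_table_no_const, first seen here, recurs throughout the patch wherever a sysctl table is built or copied at run time: the real tables are constified, and scratch copies use a layout-identical type that stays writable. A toy model of the split — the typedef below is an assumption about the shape, not the patch's actual definition:

#include <stdio.h>

struct ctl_table {
    const char *procname;
    int mode;
};

/* assumed shape: same layout, but instances are not constified */
typedef struct ctl_table ctl_table_no_const;

static const struct ctl_table global_table = { "pid_max", 0644 };

int main(void)
{
    ctl_table_no_const tmp = global_table;   /* writable scratch copy */
    tmp.mode = 0444;                         /* safe: only the copy changes */
    printf("%s %o -> %o\n", global_table.procname, global_table.mode, tmp.mode);
    return 0;
}
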
84366diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
84367index c7f31aa..2b44977 100644
84368--- a/kernel/posix-cpu-timers.c
84369+++ b/kernel/posix-cpu-timers.c
84370@@ -1521,14 +1521,14 @@ struct k_clock clock_posix_cpu = {
84371
84372 static __init int init_posix_cpu_timers(void)
84373 {
84374- struct k_clock process = {
84375+ static struct k_clock process = {
84376 .clock_getres = process_cpu_clock_getres,
84377 .clock_get = process_cpu_clock_get,
84378 .timer_create = process_cpu_timer_create,
84379 .nsleep = process_cpu_nsleep,
84380 .nsleep_restart = process_cpu_nsleep_restart,
84381 };
84382- struct k_clock thread = {
84383+ static struct k_clock thread = {
84384 .clock_getres = thread_cpu_clock_getres,
84385 .clock_get = thread_cpu_clock_get,
84386 .timer_create = thread_cpu_timer_create,
84387diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
84388index 424c2d4..679242f 100644
84389--- a/kernel/posix-timers.c
84390+++ b/kernel/posix-timers.c
84391@@ -43,6 +43,7 @@
84392 #include <linux/hash.h>
84393 #include <linux/posix-clock.h>
84394 #include <linux/posix-timers.h>
84395+#include <linux/grsecurity.h>
84396 #include <linux/syscalls.h>
84397 #include <linux/wait.h>
84398 #include <linux/workqueue.h>
84399@@ -122,7 +123,7 @@ static DEFINE_SPINLOCK(hash_lock);
84400 * which we beg off on and pass to do_sys_settimeofday().
84401 */
84402
84403-static struct k_clock posix_clocks[MAX_CLOCKS];
84404+static struct k_clock *posix_clocks[MAX_CLOCKS];
84405
84406 /*
84407 * These ones are defined below.
84408@@ -275,7 +276,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
84409 */
84410 static __init int init_posix_timers(void)
84411 {
84412- struct k_clock clock_realtime = {
84413+ static struct k_clock clock_realtime = {
84414 .clock_getres = hrtimer_get_res,
84415 .clock_get = posix_clock_realtime_get,
84416 .clock_set = posix_clock_realtime_set,
84417@@ -287,7 +288,7 @@ static __init int init_posix_timers(void)
84418 .timer_get = common_timer_get,
84419 .timer_del = common_timer_del,
84420 };
84421- struct k_clock clock_monotonic = {
84422+ static struct k_clock clock_monotonic = {
84423 .clock_getres = hrtimer_get_res,
84424 .clock_get = posix_ktime_get_ts,
84425 .nsleep = common_nsleep,
84426@@ -297,19 +298,19 @@ static __init int init_posix_timers(void)
84427 .timer_get = common_timer_get,
84428 .timer_del = common_timer_del,
84429 };
84430- struct k_clock clock_monotonic_raw = {
84431+ static struct k_clock clock_monotonic_raw = {
84432 .clock_getres = hrtimer_get_res,
84433 .clock_get = posix_get_monotonic_raw,
84434 };
84435- struct k_clock clock_realtime_coarse = {
84436+ static struct k_clock clock_realtime_coarse = {
84437 .clock_getres = posix_get_coarse_res,
84438 .clock_get = posix_get_realtime_coarse,
84439 };
84440- struct k_clock clock_monotonic_coarse = {
84441+ static struct k_clock clock_monotonic_coarse = {
84442 .clock_getres = posix_get_coarse_res,
84443 .clock_get = posix_get_monotonic_coarse,
84444 };
84445- struct k_clock clock_tai = {
84446+ static struct k_clock clock_tai = {
84447 .clock_getres = hrtimer_get_res,
84448 .clock_get = posix_get_tai,
84449 .nsleep = common_nsleep,
84450@@ -319,7 +320,7 @@ static __init int init_posix_timers(void)
84451 .timer_get = common_timer_get,
84452 .timer_del = common_timer_del,
84453 };
84454- struct k_clock clock_boottime = {
84455+ static struct k_clock clock_boottime = {
84456 .clock_getres = hrtimer_get_res,
84457 .clock_get = posix_get_boottime,
84458 .nsleep = common_nsleep,
84459@@ -531,7 +532,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
84460 return;
84461 }
84462
84463- posix_clocks[clock_id] = *new_clock;
84464+ posix_clocks[clock_id] = new_clock;
84465 }
84466 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
84467
84468@@ -577,9 +578,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
84469 return (id & CLOCKFD_MASK) == CLOCKFD ?
84470 &clock_posix_dynamic : &clock_posix_cpu;
84471
84472- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
84473+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
84474 return NULL;
84475- return &posix_clocks[id];
84476+ return posix_clocks[id];
84477 }
84478
84479 static int common_timer_create(struct k_itimer *new_timer)
84480@@ -597,7 +598,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
84481 struct k_clock *kc = clockid_to_kclock(which_clock);
84482 struct k_itimer *new_timer;
84483 int error, new_timer_id;
84484- sigevent_t event;
84485+ sigevent_t event = { };
84486 int it_id_set = IT_ID_NOT_SET;
84487
84488 if (!kc)
84489@@ -1011,6 +1012,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
84490 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
84491 return -EFAULT;
84492
84493+ /* only the CLOCK_REALTIME clock can be set; all other clocks
84494+ have their clock_set fptr set to a nosettime dummy function.
84495+ CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
84496+ call common_clock_set, which calls do_sys_settimeofday, which
84497+ we hook.
84498+ */
84499+
84500 return kc->clock_set(which_clock, &new_tp);
84501 }
84502
84503diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
84504index d444c4e..bc3de51 100644
84505--- a/kernel/power/Kconfig
84506+++ b/kernel/power/Kconfig
84507@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
84508 config HIBERNATION
84509 bool "Hibernation (aka 'suspend to disk')"
84510 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
84511+ depends on !GRKERNSEC_KMEM
84512+ depends on !PAX_MEMORY_SANITIZE
84513 select HIBERNATE_CALLBACKS
84514 select LZO_COMPRESS
84515 select LZO_DECOMPRESS
84516diff --git a/kernel/power/process.c b/kernel/power/process.c
84517index 06ec886..9dba35e 100644
84518--- a/kernel/power/process.c
84519+++ b/kernel/power/process.c
84520@@ -34,6 +34,7 @@ static int try_to_freeze_tasks(bool user_only)
84521 unsigned int elapsed_msecs;
84522 bool wakeup = false;
84523 int sleep_usecs = USEC_PER_MSEC;
84524+ bool timedout = false;
84525
84526 do_gettimeofday(&start);
84527
84528@@ -44,13 +45,20 @@ static int try_to_freeze_tasks(bool user_only)
84529
84530 while (true) {
84531 todo = 0;
84532+ if (time_after(jiffies, end_time))
84533+ timedout = true;
84534 read_lock(&tasklist_lock);
84535 do_each_thread(g, p) {
84536 if (p == current || !freeze_task(p))
84537 continue;
84538
84539- if (!freezer_should_skip(p))
84540+ if (!freezer_should_skip(p)) {
84541 todo++;
84542+ if (timedout) {
84543+ printk(KERN_ERR "Task refusing to freeze:\n");
84544+ sched_show_task(p);
84545+ }
84546+ }
84547 } while_each_thread(g, p);
84548 read_unlock(&tasklist_lock);
84549
84550@@ -59,7 +67,7 @@ static int try_to_freeze_tasks(bool user_only)
84551 todo += wq_busy;
84552 }
84553
84554- if (!todo || time_after(jiffies, end_time))
84555+ if (!todo || timedout)
84556 break;
84557
84558 if (pm_wakeup_pending()) {
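
The freezer change latches the timeout once per scan and then lets the scan complete, so every task still refusing to freeze is reported via sched_show_task() before the loop gives up, instead of the loop bailing out silently at the first deadline check. The control-flow skeleton, abstracted away from the kernel specifics:

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
    int pending[] = { 3, 2, 5 };    /* fake "scans needed to freeze" per task */
    int scans = 0, deadline = 4;

    while (true) {
        int todo = 0;
        bool timedout = (scans >= deadline);    /* latched once per scan */

        for (int i = 0; i < 3; i++) {
            if (pending[i] > scans) {
                todo++;
                if (timedout)                   /* the scan still finishes, */
                    printf("straggler %d\n", i);/* so all offenders get logged */
            }
        }
        if (!todo || timedout)      /* single exit check, after the scan */
            break;
        scans++;
    }
    return 0;
}
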
84559diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
84560index b4e8500..b457a6c 100644
84561--- a/kernel/printk/printk.c
84562+++ b/kernel/printk/printk.c
84563@@ -385,6 +385,11 @@ static int check_syslog_permissions(int type, bool from_file)
84564 if (from_file && type != SYSLOG_ACTION_OPEN)
84565 return 0;
84566
84567+#ifdef CONFIG_GRKERNSEC_DMESG
84568+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
84569+ return -EPERM;
84570+#endif
84571+
84572 if (syslog_action_restricted(type)) {
84573 if (capable(CAP_SYSLOG))
84574 return 0;
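
The dmesg hunk inserts an early gate: with GRKERNSEC_DMESG enabled, a caller needs CAP_SYSLOG (or CAP_SYS_ADMIN, checked without audit logging) before the stock syslog policy even runs. As a pattern it is simply "deny first, then fall through", sketched below with a hypothetical has_cap() helper:

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

static bool restrict_dmesg = true;      /* grsec_enable_dmesg stand-in */

static bool has_cap(const char *cap)    /* hypothetical capability check */
{
    (void)cap;
    return false;                       /* pretend an unprivileged caller */
}

static int check_syslog_permissions(void)
{
    /* hardened gate runs before the stock policy */
    if (restrict_dmesg && !has_cap("CAP_SYSLOG") && !has_cap("CAP_SYS_ADMIN"))
        return -EPERM;

    return 0;                           /* stock checks would continue here */
}

int main(void)
{
    printf("%d\n", check_syslog_permissions());   /* -EPERM for this caller */
    return 0;
}
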
84575diff --git a/kernel/profile.c b/kernel/profile.c
84576index 6631e1e..310c266 100644
84577--- a/kernel/profile.c
84578+++ b/kernel/profile.c
84579@@ -37,7 +37,7 @@ struct profile_hit {
84580 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
84581 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
84582
84583-static atomic_t *prof_buffer;
84584+static atomic_unchecked_t *prof_buffer;
84585 static unsigned long prof_len, prof_shift;
84586
84587 int prof_on __read_mostly;
84588@@ -260,7 +260,7 @@ static void profile_flip_buffers(void)
84589 hits[i].pc = 0;
84590 continue;
84591 }
84592- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
84593+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
84594 hits[i].hits = hits[i].pc = 0;
84595 }
84596 }
84597@@ -321,9 +321,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
84598 * Add the current hit(s) and flush the write-queue out
84599 * to the global buffer:
84600 */
84601- atomic_add(nr_hits, &prof_buffer[pc]);
84602+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
84603 for (i = 0; i < NR_PROFILE_HIT; ++i) {
84604- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
84605+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
84606 hits[i].pc = hits[i].hits = 0;
84607 }
84608 out:
84609@@ -398,7 +398,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
84610 {
84611 unsigned long pc;
84612 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
84613- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
84614+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
84615 }
84616 #endif /* !CONFIG_SMP */
84617
84618@@ -494,7 +494,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
84619 return -EFAULT;
84620 buf++; p++; count--; read++;
84621 }
84622- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
84623+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
84624 if (copy_to_user(buf, (void *)pnt, count))
84625 return -EFAULT;
84626 read += count;
84627@@ -525,7 +525,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
84628 }
84629 #endif
84630 profile_discard_flip_buffers();
84631- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
84632+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
84633 return count;
84634 }
84635
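
atomic_unchecked_t, used heavily from here on, is the opt-out type for PAX_REFCOUNT: plain atomic_t operations gain an overflow trap so reference counts cannot wrap, and counters that may legitimately wrap — pure statistics, like these profiler buckets — are moved to the unchecked variants. A userspace approximation of the two behaviours; the detection below is a software check standing in for the plugin's overflow-trapping arithmetic:

#include <limits.h>
#include <stdio.h>

typedef struct { int counter; } atomic_t;
typedef struct { int counter; } atomic_unchecked_t;

/* checked form: refuse to wrap, as PAX_REFCOUNT would (it kills the task) */
static void atomic_add(int i, atomic_t *v)
{
    if (i > 0 && v->counter > INT_MAX - i) {
        fprintf(stderr, "refcount overflow blocked\n");
        return;
    }
    v->counter += i;
}

/* unchecked form: statistics are allowed to wrap silently */
static void atomic_add_unchecked(int i, atomic_unchecked_t *v)
{
    v->counter = (int)((unsigned)v->counter + (unsigned)i);
}

int main(void)
{
    atomic_t ref = { INT_MAX };
    atomic_unchecked_t stat = { INT_MAX };

    atomic_add(1, &ref);                /* blocked: would have wrapped */
    atomic_add_unchecked(1, &stat);     /* wraps, by design */
    printf("ref=%d stat=%d\n", ref.counter, stat.counter);
    return 0;
}

The rcutorture, rcutree, rtmutex-tester, and autogroup hunks below apply the same conversion to their own wrap-tolerant counters.
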
84636diff --git a/kernel/ptrace.c b/kernel/ptrace.c
84637index 1f4bcb3..99cf7ab 100644
84638--- a/kernel/ptrace.c
84639+++ b/kernel/ptrace.c
84640@@ -327,7 +327,7 @@ static int ptrace_attach(struct task_struct *task, long request,
84641 if (seize)
84642 flags |= PT_SEIZED;
84643 rcu_read_lock();
84644- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
84645+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
84646 flags |= PT_PTRACE_CAP;
84647 rcu_read_unlock();
84648 task->ptrace = flags;
84649@@ -538,7 +538,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
84650 break;
84651 return -EIO;
84652 }
84653- if (copy_to_user(dst, buf, retval))
84654+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
84655 return -EFAULT;
84656 copied += retval;
84657 src += retval;
84658@@ -806,7 +806,7 @@ int ptrace_request(struct task_struct *child, long request,
84659 bool seized = child->ptrace & PT_SEIZED;
84660 int ret = -EIO;
84661 siginfo_t siginfo, *si;
84662- void __user *datavp = (void __user *) data;
84663+ void __user *datavp = (__force void __user *) data;
84664 unsigned long __user *datalp = datavp;
84665 unsigned long flags;
84666
84667@@ -1052,14 +1052,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
84668 goto out;
84669 }
84670
84671+ if (gr_handle_ptrace(child, request)) {
84672+ ret = -EPERM;
84673+ goto out_put_task_struct;
84674+ }
84675+
84676 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
84677 ret = ptrace_attach(child, request, addr, data);
84678 /*
84679 * Some architectures need to do book-keeping after
84680 * a ptrace attach.
84681 */
84682- if (!ret)
84683+ if (!ret) {
84684 arch_ptrace_attach(child);
84685+ gr_audit_ptrace(child);
84686+ }
84687 goto out_put_task_struct;
84688 }
84689
84690@@ -1087,7 +1094,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
84691 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
84692 if (copied != sizeof(tmp))
84693 return -EIO;
84694- return put_user(tmp, (unsigned long __user *)data);
84695+ return put_user(tmp, (__force unsigned long __user *)data);
84696 }
84697
84698 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
84699@@ -1181,7 +1188,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
84700 }
84701
84702 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
84703- compat_long_t addr, compat_long_t data)
84704+ compat_ulong_t addr, compat_ulong_t data)
84705 {
84706 struct task_struct *child;
84707 long ret;
84708@@ -1197,14 +1204,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
84709 goto out;
84710 }
84711
84712+ if (gr_handle_ptrace(child, request)) {
84713+ ret = -EPERM;
84714+ goto out_put_task_struct;
84715+ }
84716+
84717 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
84718 ret = ptrace_attach(child, request, addr, data);
84719 /*
84720 * Some architectures need to do book-keeping after
84721 * a ptrace attach.
84722 */
84723- if (!ret)
84724+ if (!ret) {
84725 arch_ptrace_attach(child);
84726+ gr_audit_ptrace(child);
84727+ }
84728 goto out_put_task_struct;
84729 }
84730
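
Besides the gr_handle_ptrace()/gr_audit_ptrace() policy hooks, the ptrace_readdata change distrusts the length coming back from the underlying read: even though access_process_vm() should never return more than requested, the copy-out now re-checks retval against the real buffer size, so a miscounting helper cannot become a stack overread. The pattern in isolation — read_some() is a stand-in for the helper:

#include <stdio.h>
#include <string.h>

/* stand-in for access_process_vm(): pretend it misbehaves and over-reports */
static long read_some(char *buf, long want)
{
    memset(buf, 'A', (size_t)want);
    return want + 100;          /* buggy: claims more than it wrote */
}

int main(void)
{
    char buf[128], out[4096];
    long retval = read_some(buf, sizeof(buf));

    /* hardened copy-out: re-validate the length before trusting it */
    if (retval < 0 || retval > (long)sizeof(buf)) {
        fprintf(stderr, "bogus length %ld rejected\n", retval);
        return 1;
    }
    memcpy(out, buf, (size_t)retval);
    return 0;
}
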
84731diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
84732index b02a339..ce2d20c 100644
84733--- a/kernel/rcupdate.c
84734+++ b/kernel/rcupdate.c
84735@@ -312,10 +312,10 @@ int rcu_jiffies_till_stall_check(void)
84736 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
84737 */
84738 if (till_stall_check < 3) {
84739- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
84740+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
84741 till_stall_check = 3;
84742 } else if (till_stall_check > 300) {
84743- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
84744+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
84745 till_stall_check = 300;
84746 }
84747 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
84748diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
84749index 9ed6075..c9e8a63 100644
84750--- a/kernel/rcutiny.c
84751+++ b/kernel/rcutiny.c
84752@@ -45,7 +45,7 @@
84753 /* Forward declarations for rcutiny_plugin.h. */
84754 struct rcu_ctrlblk;
84755 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
84756-static void rcu_process_callbacks(struct softirq_action *unused);
84757+static void rcu_process_callbacks(void);
84758 static void __call_rcu(struct rcu_head *head,
84759 void (*func)(struct rcu_head *rcu),
84760 struct rcu_ctrlblk *rcp);
84761@@ -309,7 +309,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
84762 false));
84763 }
84764
84765-static void rcu_process_callbacks(struct softirq_action *unused)
84766+static __latent_entropy void rcu_process_callbacks(void)
84767 {
84768 __rcu_process_callbacks(&rcu_sched_ctrlblk);
84769 __rcu_process_callbacks(&rcu_bh_ctrlblk);
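
__latent_entropy marks the softirq handler for the latent_entropy GCC plugin, which seeds each marked function with compile-time randomness and mixes it into a pool as the function executes, harvesting cheap entropy from hot paths; the dropped softirq_action argument pairs with a matching change to the softirq dispatch elsewhere in the patch. The mechanism, approximated by hand — the real plugin injects this automatically, and the pool name and constant below are invented:

#include <stdio.h>

static unsigned long latent_entropy;    /* invented pool name */

/* what the plugin conceptually emits into a __latent_entropy function */
#define MIX_LATENT_ENTROPY(seed) (latent_entropy ^= (seed))

static void rcu_process_callbacks(void)
{
    MIX_LATENT_ENTROPY(0x9e3779b9UL);   /* per-function random constant */
    /* ... real callback processing would run here ... */
}

int main(void)
{
    rcu_process_callbacks();
    printf("pool=%lx\n", latent_entropy);
    return 0;
}
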
84770diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
84771index be63101..05cf721 100644
84772--- a/kernel/rcutorture.c
84773+++ b/kernel/rcutorture.c
84774@@ -170,12 +170,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
84775 { 0 };
84776 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
84777 { 0 };
84778-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
84779-static atomic_t n_rcu_torture_alloc;
84780-static atomic_t n_rcu_torture_alloc_fail;
84781-static atomic_t n_rcu_torture_free;
84782-static atomic_t n_rcu_torture_mberror;
84783-static atomic_t n_rcu_torture_error;
84784+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
84785+static atomic_unchecked_t n_rcu_torture_alloc;
84786+static atomic_unchecked_t n_rcu_torture_alloc_fail;
84787+static atomic_unchecked_t n_rcu_torture_free;
84788+static atomic_unchecked_t n_rcu_torture_mberror;
84789+static atomic_unchecked_t n_rcu_torture_error;
84790 static long n_rcu_torture_barrier_error;
84791 static long n_rcu_torture_boost_ktrerror;
84792 static long n_rcu_torture_boost_rterror;
84793@@ -293,11 +293,11 @@ rcu_torture_alloc(void)
84794
84795 spin_lock_bh(&rcu_torture_lock);
84796 if (list_empty(&rcu_torture_freelist)) {
84797- atomic_inc(&n_rcu_torture_alloc_fail);
84798+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
84799 spin_unlock_bh(&rcu_torture_lock);
84800 return NULL;
84801 }
84802- atomic_inc(&n_rcu_torture_alloc);
84803+ atomic_inc_unchecked(&n_rcu_torture_alloc);
84804 p = rcu_torture_freelist.next;
84805 list_del_init(p);
84806 spin_unlock_bh(&rcu_torture_lock);
84807@@ -310,7 +310,7 @@ rcu_torture_alloc(void)
84808 static void
84809 rcu_torture_free(struct rcu_torture *p)
84810 {
84811- atomic_inc(&n_rcu_torture_free);
84812+ atomic_inc_unchecked(&n_rcu_torture_free);
84813 spin_lock_bh(&rcu_torture_lock);
84814 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
84815 spin_unlock_bh(&rcu_torture_lock);
84816@@ -431,7 +431,7 @@ rcu_torture_cb(struct rcu_head *p)
84817 i = rp->rtort_pipe_count;
84818 if (i > RCU_TORTURE_PIPE_LEN)
84819 i = RCU_TORTURE_PIPE_LEN;
84820- atomic_inc(&rcu_torture_wcount[i]);
84821+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
84822 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
84823 rp->rtort_mbtest = 0;
84824 rcu_torture_free(rp);
84825@@ -821,7 +821,7 @@ rcu_torture_writer(void *arg)
84826 i = old_rp->rtort_pipe_count;
84827 if (i > RCU_TORTURE_PIPE_LEN)
84828 i = RCU_TORTURE_PIPE_LEN;
84829- atomic_inc(&rcu_torture_wcount[i]);
84830+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
84831 old_rp->rtort_pipe_count++;
84832 if (gp_normal == gp_exp)
84833 exp = !!(rcu_random(&rand) & 0x80);
84834@@ -839,7 +839,7 @@ rcu_torture_writer(void *arg)
84835 i = rp->rtort_pipe_count;
84836 if (i > RCU_TORTURE_PIPE_LEN)
84837 i = RCU_TORTURE_PIPE_LEN;
84838- atomic_inc(&rcu_torture_wcount[i]);
84839+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
84840 if (++rp->rtort_pipe_count >=
84841 RCU_TORTURE_PIPE_LEN) {
84842 rp->rtort_mbtest = 0;
84843@@ -938,7 +938,7 @@ static void rcu_torture_timer(unsigned long unused)
84844 return;
84845 }
84846 if (p->rtort_mbtest == 0)
84847- atomic_inc(&n_rcu_torture_mberror);
84848+ atomic_inc_unchecked(&n_rcu_torture_mberror);
84849 spin_lock(&rand_lock);
84850 cur_ops->read_delay(&rand);
84851 n_rcu_torture_timers++;
84852@@ -1008,7 +1008,7 @@ rcu_torture_reader(void *arg)
84853 continue;
84854 }
84855 if (p->rtort_mbtest == 0)
84856- atomic_inc(&n_rcu_torture_mberror);
84857+ atomic_inc_unchecked(&n_rcu_torture_mberror);
84858 cur_ops->read_delay(&rand);
84859 preempt_disable();
84860 pipe_count = p->rtort_pipe_count;
84861@@ -1071,11 +1071,11 @@ rcu_torture_printk(char *page)
84862 rcu_torture_current,
84863 rcu_torture_current_version,
84864 list_empty(&rcu_torture_freelist),
84865- atomic_read(&n_rcu_torture_alloc),
84866- atomic_read(&n_rcu_torture_alloc_fail),
84867- atomic_read(&n_rcu_torture_free));
84868+ atomic_read_unchecked(&n_rcu_torture_alloc),
84869+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
84870+ atomic_read_unchecked(&n_rcu_torture_free));
84871 cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
84872- atomic_read(&n_rcu_torture_mberror),
84873+ atomic_read_unchecked(&n_rcu_torture_mberror),
84874 n_rcu_torture_boost_ktrerror,
84875 n_rcu_torture_boost_rterror);
84876 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
84877@@ -1094,14 +1094,14 @@ rcu_torture_printk(char *page)
84878 n_barrier_attempts,
84879 n_rcu_torture_barrier_error);
84880 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
84881- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
84882+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
84883 n_rcu_torture_barrier_error != 0 ||
84884 n_rcu_torture_boost_ktrerror != 0 ||
84885 n_rcu_torture_boost_rterror != 0 ||
84886 n_rcu_torture_boost_failure != 0 ||
84887 i > 1) {
84888 cnt += sprintf(&page[cnt], "!!! ");
84889- atomic_inc(&n_rcu_torture_error);
84890+ atomic_inc_unchecked(&n_rcu_torture_error);
84891 WARN_ON_ONCE(1);
84892 }
84893 cnt += sprintf(&page[cnt], "Reader Pipe: ");
84894@@ -1115,7 +1115,7 @@ rcu_torture_printk(char *page)
84895 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
84896 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
84897 cnt += sprintf(&page[cnt], " %d",
84898- atomic_read(&rcu_torture_wcount[i]));
84899+ atomic_read_unchecked(&rcu_torture_wcount[i]));
84900 }
84901 cnt += sprintf(&page[cnt], "\n");
84902 if (cur_ops->stats)
84903@@ -1830,7 +1830,7 @@ rcu_torture_cleanup(void)
84904
84905 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
84906
84907- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
84908+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
84909 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
84910 else if (n_online_successes != n_online_attempts ||
84911 n_offline_successes != n_offline_attempts)
84912@@ -1952,18 +1952,18 @@ rcu_torture_init(void)
84913
84914 rcu_torture_current = NULL;
84915 rcu_torture_current_version = 0;
84916- atomic_set(&n_rcu_torture_alloc, 0);
84917- atomic_set(&n_rcu_torture_alloc_fail, 0);
84918- atomic_set(&n_rcu_torture_free, 0);
84919- atomic_set(&n_rcu_torture_mberror, 0);
84920- atomic_set(&n_rcu_torture_error, 0);
84921+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
84922+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
84923+ atomic_set_unchecked(&n_rcu_torture_free, 0);
84924+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
84925+ atomic_set_unchecked(&n_rcu_torture_error, 0);
84926 n_rcu_torture_barrier_error = 0;
84927 n_rcu_torture_boost_ktrerror = 0;
84928 n_rcu_torture_boost_rterror = 0;
84929 n_rcu_torture_boost_failure = 0;
84930 n_rcu_torture_boosts = 0;
84931 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
84932- atomic_set(&rcu_torture_wcount[i], 0);
84933+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
84934 for_each_possible_cpu(cpu) {
84935 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
84936 per_cpu(rcu_torture_count, cpu)[i] = 0;
84937diff --git a/kernel/rcutree.c b/kernel/rcutree.c
84938index 32618b3..c1fb822 100644
84939--- a/kernel/rcutree.c
84940+++ b/kernel/rcutree.c
84941@@ -382,9 +382,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
84942 rcu_prepare_for_idle(smp_processor_id());
84943 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
84944 smp_mb__before_atomic_inc(); /* See above. */
84945- atomic_inc(&rdtp->dynticks);
84946+ atomic_inc_unchecked(&rdtp->dynticks);
84947 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
84948- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
84949+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
84950
84951 /*
84952 * It is illegal to enter an extended quiescent state while
84953@@ -501,10 +501,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
84954 int user)
84955 {
84956 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
84957- atomic_inc(&rdtp->dynticks);
84958+ atomic_inc_unchecked(&rdtp->dynticks);
84959 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
84960 smp_mb__after_atomic_inc(); /* See above. */
84961- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
84962+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
84963 rcu_cleanup_after_idle(smp_processor_id());
84964 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
84965 if (!user && !is_idle_task(current)) {
84966@@ -623,14 +623,14 @@ void rcu_nmi_enter(void)
84967 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
84968
84969 if (rdtp->dynticks_nmi_nesting == 0 &&
84970- (atomic_read(&rdtp->dynticks) & 0x1))
84971+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
84972 return;
84973 rdtp->dynticks_nmi_nesting++;
84974 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
84975- atomic_inc(&rdtp->dynticks);
84976+ atomic_inc_unchecked(&rdtp->dynticks);
84977 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
84978 smp_mb__after_atomic_inc(); /* See above. */
84979- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
84980+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
84981 }
84982
84983 /**
84984@@ -649,9 +649,9 @@ void rcu_nmi_exit(void)
84985 return;
84986 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
84987 smp_mb__before_atomic_inc(); /* See above. */
84988- atomic_inc(&rdtp->dynticks);
84989+ atomic_inc_unchecked(&rdtp->dynticks);
84990 smp_mb__after_atomic_inc(); /* Force delay to next write. */
84991- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
84992+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
84993 }
84994
84995 /**
84996@@ -665,7 +665,7 @@ int rcu_is_cpu_idle(void)
84997 int ret;
84998
84999 preempt_disable();
85000- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
85001+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
85002 preempt_enable();
85003 return ret;
85004 }
85005@@ -734,7 +734,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
85006 static int dyntick_save_progress_counter(struct rcu_data *rdp,
85007 bool *isidle, unsigned long *maxj)
85008 {
85009- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
85010+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
85011 rcu_sysidle_check_cpu(rdp, isidle, maxj);
85012 return (rdp->dynticks_snap & 0x1) == 0;
85013 }
85014@@ -751,7 +751,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
85015 unsigned int curr;
85016 unsigned int snap;
85017
85018- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
85019+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
85020 snap = (unsigned int)rdp->dynticks_snap;
85021
85022 /*
85023@@ -1341,9 +1341,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
85024 rdp = this_cpu_ptr(rsp->rda);
85025 rcu_preempt_check_blocked_tasks(rnp);
85026 rnp->qsmask = rnp->qsmaskinit;
85027- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
85028+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
85029 WARN_ON_ONCE(rnp->completed != rsp->completed);
85030- ACCESS_ONCE(rnp->completed) = rsp->completed;
85031+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
85032 if (rnp == rdp->mynode)
85033 __note_gp_changes(rsp, rnp, rdp);
85034 rcu_preempt_boost_start_gp(rnp);
85035@@ -1434,7 +1434,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
85036 */
85037 rcu_for_each_node_breadth_first(rsp, rnp) {
85038 raw_spin_lock_irq(&rnp->lock);
85039- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
85040+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
85041 rdp = this_cpu_ptr(rsp->rda);
85042 if (rnp == rdp->mynode)
85043 __note_gp_changes(rsp, rnp, rdp);
85044@@ -1766,7 +1766,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
85045 rsp->qlen += rdp->qlen;
85046 rdp->n_cbs_orphaned += rdp->qlen;
85047 rdp->qlen_lazy = 0;
85048- ACCESS_ONCE(rdp->qlen) = 0;
85049+ ACCESS_ONCE_RW(rdp->qlen) = 0;
85050 }
85051
85052 /*
85053@@ -2012,7 +2012,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
85054 }
85055 smp_mb(); /* List handling before counting for rcu_barrier(). */
85056 rdp->qlen_lazy -= count_lazy;
85057- ACCESS_ONCE(rdp->qlen) -= count;
85058+ ACCESS_ONCE_RW(rdp->qlen) -= count;
85059 rdp->n_cbs_invoked += count;
85060
85061 /* Reinstate batch limit if we have worked down the excess. */
85062@@ -2209,7 +2209,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
85063 /*
85064 * Do RCU core processing for the current CPU.
85065 */
85066-static void rcu_process_callbacks(struct softirq_action *unused)
85067+static __latent_entropy void rcu_process_callbacks(void)
85068 {
85069 struct rcu_state *rsp;
85070
85071@@ -2316,7 +2316,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
85072 WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
85073 if (debug_rcu_head_queue(head)) {
85074 /* Probable double call_rcu(), so leak the callback. */
85075- ACCESS_ONCE(head->func) = rcu_leak_callback;
85076+ ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
85077 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
85078 return;
85079 }
85080@@ -2344,7 +2344,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
85081 local_irq_restore(flags);
85082 return;
85083 }
85084- ACCESS_ONCE(rdp->qlen)++;
85085+ ACCESS_ONCE_RW(rdp->qlen)++;
85086 if (lazy)
85087 rdp->qlen_lazy++;
85088 else
85089@@ -2553,11 +2553,11 @@ void synchronize_sched_expedited(void)
85090 * counter wrap on a 32-bit system. Quite a few more CPUs would of
85091 * course be required on a 64-bit system.
85092 */
85093- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
85094+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
85095 (ulong)atomic_long_read(&rsp->expedited_done) +
85096 ULONG_MAX / 8)) {
85097 synchronize_sched();
85098- atomic_long_inc(&rsp->expedited_wrap);
85099+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
85100 return;
85101 }
85102
85103@@ -2565,7 +2565,7 @@ void synchronize_sched_expedited(void)
85104 * Take a ticket. Note that atomic_inc_return() implies a
85105 * full memory barrier.
85106 */
85107- snap = atomic_long_inc_return(&rsp->expedited_start);
85108+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
85109 firstsnap = snap;
85110 get_online_cpus();
85111 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
85112@@ -2578,14 +2578,14 @@ void synchronize_sched_expedited(void)
85113 synchronize_sched_expedited_cpu_stop,
85114 NULL) == -EAGAIN) {
85115 put_online_cpus();
85116- atomic_long_inc(&rsp->expedited_tryfail);
85117+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
85118
85119 /* Check to see if someone else did our work for us. */
85120 s = atomic_long_read(&rsp->expedited_done);
85121 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
85122 /* ensure test happens before caller kfree */
85123 smp_mb__before_atomic_inc(); /* ^^^ */
85124- atomic_long_inc(&rsp->expedited_workdone1);
85125+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
85126 return;
85127 }
85128
85129@@ -2594,7 +2594,7 @@ void synchronize_sched_expedited(void)
85130 udelay(trycount * num_online_cpus());
85131 } else {
85132 wait_rcu_gp(call_rcu_sched);
85133- atomic_long_inc(&rsp->expedited_normal);
85134+ atomic_long_inc_unchecked(&rsp->expedited_normal);
85135 return;
85136 }
85137
85138@@ -2603,7 +2603,7 @@ void synchronize_sched_expedited(void)
85139 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
85140 /* ensure test happens before caller kfree */
85141 smp_mb__before_atomic_inc(); /* ^^^ */
85142- atomic_long_inc(&rsp->expedited_workdone2);
85143+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
85144 return;
85145 }
85146
85147@@ -2615,10 +2615,10 @@ void synchronize_sched_expedited(void)
85148 * period works for us.
85149 */
85150 get_online_cpus();
85151- snap = atomic_long_read(&rsp->expedited_start);
85152+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
85153 smp_mb(); /* ensure read is before try_stop_cpus(). */
85154 }
85155- atomic_long_inc(&rsp->expedited_stoppedcpus);
85156+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
85157
85158 /*
85159 * Everyone up to our most recent fetch is covered by our grace
85160@@ -2627,16 +2627,16 @@ void synchronize_sched_expedited(void)
85161 * than we did already did their update.
85162 */
85163 do {
85164- atomic_long_inc(&rsp->expedited_done_tries);
85165+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
85166 s = atomic_long_read(&rsp->expedited_done);
85167 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
85168 /* ensure test happens before caller kfree */
85169 smp_mb__before_atomic_inc(); /* ^^^ */
85170- atomic_long_inc(&rsp->expedited_done_lost);
85171+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
85172 break;
85173 }
85174 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
85175- atomic_long_inc(&rsp->expedited_done_exit);
85176+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
85177
85178 put_online_cpus();
85179 }
85180@@ -2829,7 +2829,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
85181 * ACCESS_ONCE() to prevent the compiler from speculating
85182 * the increment to precede the early-exit check.
85183 */
85184- ACCESS_ONCE(rsp->n_barrier_done)++;
85185+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
85186 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
85187 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
85188 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
85189@@ -2879,7 +2879,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
85190
85191 /* Increment ->n_barrier_done to prevent duplicate work. */
85192 smp_mb(); /* Keep increment after above mechanism. */
85193- ACCESS_ONCE(rsp->n_barrier_done)++;
85194+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
85195 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
85196 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
85197 smp_mb(); /* Keep increment before caller's subsequent code. */
85198@@ -2924,10 +2924,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
85199 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
85200 init_callback_list(rdp);
85201 rdp->qlen_lazy = 0;
85202- ACCESS_ONCE(rdp->qlen) = 0;
85203+ ACCESS_ONCE_RW(rdp->qlen) = 0;
85204 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
85205 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
85206- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
85207+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
85208 rdp->cpu = cpu;
85209 rdp->rsp = rsp;
85210 rcu_boot_init_nocb_percpu_data(rdp);
85211@@ -2961,8 +2961,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
85212 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
85213 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
85214 rcu_sysidle_init_percpu_data(rdp->dynticks);
85215- atomic_set(&rdp->dynticks->dynticks,
85216- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
85217+ atomic_set_unchecked(&rdp->dynticks->dynticks,
85218+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
85219 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
85220
85221 /* Add CPU to rcu_node bitmasks. */
85222diff --git a/kernel/rcutree.h b/kernel/rcutree.h
85223index 5f97eab..db8f687 100644
85224--- a/kernel/rcutree.h
85225+++ b/kernel/rcutree.h
85226@@ -87,11 +87,11 @@ struct rcu_dynticks {
85227 long long dynticks_nesting; /* Track irq/process nesting level. */
85228 /* Process level is worth LLONG_MAX/2. */
85229 int dynticks_nmi_nesting; /* Track NMI nesting level. */
85230- atomic_t dynticks; /* Even value for idle, else odd. */
85231+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
85232 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
85233 long long dynticks_idle_nesting;
85234 /* irq/process nesting level from idle. */
85235- atomic_t dynticks_idle; /* Even value for idle, else odd. */
85236+ atomic_unchecked_t dynticks_idle; /* Even value for idle, else odd. */
85237 /* "Idle" excludes userspace execution. */
85238 unsigned long dynticks_idle_jiffies;
85239 /* End of last non-NMI non-idle period. */
85240@@ -427,17 +427,17 @@ struct rcu_state {
85241 /* _rcu_barrier(). */
85242 /* End of fields guarded by barrier_mutex. */
85243
85244- atomic_long_t expedited_start; /* Starting ticket. */
85245- atomic_long_t expedited_done; /* Done ticket. */
85246- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
85247- atomic_long_t expedited_tryfail; /* # acquisition failures. */
85248- atomic_long_t expedited_workdone1; /* # done by others #1. */
85249- atomic_long_t expedited_workdone2; /* # done by others #2. */
85250- atomic_long_t expedited_normal; /* # fallbacks to normal. */
85251- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
85252- atomic_long_t expedited_done_tries; /* # tries to update _done. */
85253- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
85254- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
85255+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
85256+ atomic_long_t expedited_done; /* Done ticket. */
85257+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
85258+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
85259+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
85260+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
85261+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
85262+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
85263+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
85264+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
85265+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
85266
85267 unsigned long jiffies_force_qs; /* Time at which to invoke */
85268 /* force_quiescent_state(). */
85269diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
85270index 130c97b..bcbe7f7 100644
85271--- a/kernel/rcutree_plugin.h
85272+++ b/kernel/rcutree_plugin.h
85273@@ -744,7 +744,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
85274 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
85275 {
85276 return !rcu_preempted_readers_exp(rnp) &&
85277- ACCESS_ONCE(rnp->expmask) == 0;
85278+ ACCESS_ONCE_RW(rnp->expmask) == 0;
85279 }
85280
85281 /*
85282@@ -900,7 +900,7 @@ void synchronize_rcu_expedited(void)
85283
85284 /* Clean up and exit. */
85285 smp_mb(); /* ensure expedited GP seen before counter increment. */
85286- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
85287+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
85288 unlock_mb_ret:
85289 mutex_unlock(&sync_rcu_preempt_exp_mutex);
85290 mb_ret:
85291@@ -1474,7 +1474,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
85292 free_cpumask_var(cm);
85293 }
85294
85295-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
85296+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
85297 .store = &rcu_cpu_kthread_task,
85298 .thread_should_run = rcu_cpu_kthread_should_run,
85299 .thread_fn = rcu_cpu_kthread,
85300@@ -1939,7 +1939,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
85301 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
85302 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
85303 cpu, ticks_value, ticks_title,
85304- atomic_read(&rdtp->dynticks) & 0xfff,
85305+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
85306 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
85307 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
85308 fast_no_hz);
85309@@ -2102,7 +2102,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
85310
85311 /* Enqueue the callback on the nocb list and update counts. */
85312 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
85313- ACCESS_ONCE(*old_rhpp) = rhp;
85314+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
85315 atomic_long_add(rhcount, &rdp->nocb_q_count);
85316 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
85317
85318@@ -2242,12 +2242,12 @@ static int rcu_nocb_kthread(void *arg)
85319 * Extract queued callbacks, update counts, and wait
85320 * for a grace period to elapse.
85321 */
85322- ACCESS_ONCE(rdp->nocb_head) = NULL;
85323+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
85324 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
85325 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
85326 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
85327- ACCESS_ONCE(rdp->nocb_p_count) += c;
85328- ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
85329+ ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
85330+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
85331 rcu_nocb_wait_gp(rdp);
85332
85333 /* Each pass through the following loop invokes a callback. */
85334@@ -2269,8 +2269,8 @@ static int rcu_nocb_kthread(void *arg)
85335 list = next;
85336 }
85337 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
85338- ACCESS_ONCE(rdp->nocb_p_count) -= c;
85339- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
85340+ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
85341+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
85342 rdp->n_nocbs_invoked += c;
85343 }
85344 return 0;
85345@@ -2297,7 +2297,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
85346 t = kthread_run(rcu_nocb_kthread, rdp,
85347 "rcuo%c/%d", rsp->abbr, cpu);
85348 BUG_ON(IS_ERR(t));
85349- ACCESS_ONCE(rdp->nocb_kthread) = t;
85350+ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
85351 }
85352 }
85353
85354@@ -2423,11 +2423,11 @@ static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
85355
85356 /* Record start of fully idle period. */
85357 j = jiffies;
85358- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
85359+ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
85360 smp_mb__before_atomic_inc();
85361- atomic_inc(&rdtp->dynticks_idle);
85362+ atomic_inc_unchecked(&rdtp->dynticks_idle);
85363 smp_mb__after_atomic_inc();
85364- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
85365+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
85366 }
85367
85368 /*
85369@@ -2492,9 +2492,9 @@ static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
85370
85371 /* Record end of idle period. */
85372 smp_mb__before_atomic_inc();
85373- atomic_inc(&rdtp->dynticks_idle);
85374+ atomic_inc_unchecked(&rdtp->dynticks_idle);
85375 smp_mb__after_atomic_inc();
85376- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
85377+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
85378
85379 /*
85380 * If we are the timekeeping CPU, we are permitted to be non-idle
85381@@ -2535,7 +2535,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
85382 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
85383
85384 /* Pick up current idle and NMI-nesting counter and check. */
85385- cur = atomic_read(&rdtp->dynticks_idle);
85386+ cur = atomic_read_unchecked(&rdtp->dynticks_idle);
85387 if (cur & 0x1) {
85388 *isidle = false; /* We are not idle! */
85389 return;
85390@@ -2598,7 +2598,7 @@ static void rcu_sysidle(unsigned long j)
85391 case RCU_SYSIDLE_NOT:
85392
85393 /* First time all are idle, so note a short idle period. */
85394- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
85395+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
85396 break;
85397
85398 case RCU_SYSIDLE_SHORT:
85399@@ -2635,7 +2635,7 @@ static void rcu_sysidle(unsigned long j)
85400 static void rcu_sysidle_cancel(void)
85401 {
85402 smp_mb();
85403- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
85404+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
85405 }
85406
85407 /*
85408@@ -2683,7 +2683,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
85409 smp_mb(); /* grace period precedes setting inuse. */
85410
85411 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
85412- ACCESS_ONCE(rshp->inuse) = 0;
85413+ ACCESS_ONCE_RW(rshp->inuse) = 0;
85414 }
85415
85416 /*
85417diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
85418index cf6c174..a8f4b50 100644
85419--- a/kernel/rcutree_trace.c
85420+++ b/kernel/rcutree_trace.c
85421@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
85422 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
85423 rdp->passed_quiesce, rdp->qs_pending);
85424 seq_printf(m, " dt=%d/%llx/%d df=%lu",
85425- atomic_read(&rdp->dynticks->dynticks),
85426+ atomic_read_unchecked(&rdp->dynticks->dynticks),
85427 rdp->dynticks->dynticks_nesting,
85428 rdp->dynticks->dynticks_nmi_nesting,
85429 rdp->dynticks_fqs);
85430@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
85431 struct rcu_state *rsp = (struct rcu_state *)m->private;
85432
85433 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
85434- atomic_long_read(&rsp->expedited_start),
85435+ atomic_long_read_unchecked(&rsp->expedited_start),
85436 atomic_long_read(&rsp->expedited_done),
85437- atomic_long_read(&rsp->expedited_wrap),
85438- atomic_long_read(&rsp->expedited_tryfail),
85439- atomic_long_read(&rsp->expedited_workdone1),
85440- atomic_long_read(&rsp->expedited_workdone2),
85441- atomic_long_read(&rsp->expedited_normal),
85442- atomic_long_read(&rsp->expedited_stoppedcpus),
85443- atomic_long_read(&rsp->expedited_done_tries),
85444- atomic_long_read(&rsp->expedited_done_lost),
85445- atomic_long_read(&rsp->expedited_done_exit));
85446+ atomic_long_read_unchecked(&rsp->expedited_wrap),
85447+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
85448+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
85449+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
85450+ atomic_long_read_unchecked(&rsp->expedited_normal),
85451+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
85452+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
85453+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
85454+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
85455 return 0;
85456 }
85457
85458diff --git a/kernel/resource.c b/kernel/resource.c
85459index 3f285dc..5755f62 100644
85460--- a/kernel/resource.c
85461+++ b/kernel/resource.c
85462@@ -152,8 +152,18 @@ static const struct file_operations proc_iomem_operations = {
85463
85464 static int __init ioresources_init(void)
85465 {
85466+#ifdef CONFIG_GRKERNSEC_PROC_ADD
85467+#ifdef CONFIG_GRKERNSEC_PROC_USER
85468+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
85469+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
85470+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
85471+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
85472+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
85473+#endif
85474+#else
85475 proc_create("ioports", 0, NULL, &proc_ioports_operations);
85476 proc_create("iomem", 0, NULL, &proc_iomem_operations);
85477+#endif
85478 return 0;
85479 }
85480 __initcall(ioresources_init);
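
The resource.c hunk narrows /proc/iomem and /proc/ioports from world-readable to 0400 or 0440 depending on the GRKERNSEC_PROC_* flavour, since the physical memory layout is useful reconnaissance for an attacker. The #ifdef ladder reads naturally as a mode-selection helper; a compressed restatement, where gr_proc_mode() is a hypothetical helper and one flavour is picked for the demo:

#include <stdio.h>

#define GRKERNSEC_PROC_USERGROUP 1      /* pick one flavour for the demo */

static unsigned int gr_proc_mode(void)  /* hypothetical helper */
{
#if defined(GRKERNSEC_PROC_USER)
    return 0400;                        /* root only */
#elif defined(GRKERNSEC_PROC_USERGROUP)
    return 0440;                        /* root + trusted group */
#else
    return 0444;                        /* stock: proc_create() mode 0
                                           defaults to world-readable */
#endif
}

int main(void)
{
    printf("/proc/iomem mode: %o\n", gr_proc_mode());
    return 0;
}
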
85481diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
85482index 1d96dd0..994ff19 100644
85483--- a/kernel/rtmutex-tester.c
85484+++ b/kernel/rtmutex-tester.c
85485@@ -22,7 +22,7 @@
85486 #define MAX_RT_TEST_MUTEXES 8
85487
85488 static spinlock_t rttest_lock;
85489-static atomic_t rttest_event;
85490+static atomic_unchecked_t rttest_event;
85491
85492 struct test_thread_data {
85493 int opcode;
85494@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
85495
85496 case RTTEST_LOCKCONT:
85497 td->mutexes[td->opdata] = 1;
85498- td->event = atomic_add_return(1, &rttest_event);
85499+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85500 return 0;
85501
85502 case RTTEST_RESET:
85503@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
85504 return 0;
85505
85506 case RTTEST_RESETEVENT:
85507- atomic_set(&rttest_event, 0);
85508+ atomic_set_unchecked(&rttest_event, 0);
85509 return 0;
85510
85511 default:
85512@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
85513 return ret;
85514
85515 td->mutexes[id] = 1;
85516- td->event = atomic_add_return(1, &rttest_event);
85517+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85518 rt_mutex_lock(&mutexes[id]);
85519- td->event = atomic_add_return(1, &rttest_event);
85520+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85521 td->mutexes[id] = 4;
85522 return 0;
85523
85524@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
85525 return ret;
85526
85527 td->mutexes[id] = 1;
85528- td->event = atomic_add_return(1, &rttest_event);
85529+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85530 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
85531- td->event = atomic_add_return(1, &rttest_event);
85532+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85533 td->mutexes[id] = ret ? 0 : 4;
85534 return ret ? -EINTR : 0;
85535
85536@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
85537 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
85538 return ret;
85539
85540- td->event = atomic_add_return(1, &rttest_event);
85541+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85542 rt_mutex_unlock(&mutexes[id]);
85543- td->event = atomic_add_return(1, &rttest_event);
85544+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85545 td->mutexes[id] = 0;
85546 return 0;
85547
85548@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
85549 break;
85550
85551 td->mutexes[dat] = 2;
85552- td->event = atomic_add_return(1, &rttest_event);
85553+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85554 break;
85555
85556 default:
85557@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
85558 return;
85559
85560 td->mutexes[dat] = 3;
85561- td->event = atomic_add_return(1, &rttest_event);
85562+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85563 break;
85564
85565 case RTTEST_LOCKNOWAIT:
85566@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
85567 return;
85568
85569 td->mutexes[dat] = 1;
85570- td->event = atomic_add_return(1, &rttest_event);
85571+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85572 return;
85573
85574 default:
85575diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
85576index 4a07353..66b5291 100644
85577--- a/kernel/sched/auto_group.c
85578+++ b/kernel/sched/auto_group.c
85579@@ -11,7 +11,7 @@
85580
85581 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
85582 static struct autogroup autogroup_default;
85583-static atomic_t autogroup_seq_nr;
85584+static atomic_unchecked_t autogroup_seq_nr;
85585
85586 void __init autogroup_init(struct task_struct *init_task)
85587 {
85588@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
85589
85590 kref_init(&ag->kref);
85591 init_rwsem(&ag->lock);
85592- ag->id = atomic_inc_return(&autogroup_seq_nr);
85593+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
85594 ag->tg = tg;
85595 #ifdef CONFIG_RT_GROUP_SCHED
85596 /*
85597diff --git a/kernel/sched/core.c b/kernel/sched/core.c
85598index 5ac63c9..d912786 100644
85599--- a/kernel/sched/core.c
85600+++ b/kernel/sched/core.c
85601@@ -2868,7 +2868,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
85602 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
85603 * or number of jiffies left till timeout) if completed.
85604 */
85605-long __sched
85606+long __sched __intentional_overflow(-1)
85607 wait_for_completion_interruptible_timeout(struct completion *x,
85608 unsigned long timeout)
85609 {
85610@@ -2885,7 +2885,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
85611 *
85612 * Return: -ERESTARTSYS if interrupted, 0 if completed.
85613 */
85614-int __sched wait_for_completion_killable(struct completion *x)
85615+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
85616 {
85617 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
85618 if (t == -ERESTARTSYS)
85619@@ -2906,7 +2906,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
85620 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
85621 * or number of jiffies left till timeout) if completed.
85622 */
85623-long __sched
85624+long __sched __intentional_overflow(-1)
85625 wait_for_completion_killable_timeout(struct completion *x,
85626 unsigned long timeout)
85627 {
85628@@ -3132,6 +3132,8 @@ int can_nice(const struct task_struct *p, const int nice)
85629 /* convert nice value [19,-20] to rlimit style value [1,40] */
85630 int nice_rlim = 20 - nice;
85631
85632+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
85633+
85634 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
85635 capable(CAP_SYS_NICE));
85636 }
85637@@ -3165,7 +3167,8 @@ SYSCALL_DEFINE1(nice, int, increment)
85638 if (nice > 19)
85639 nice = 19;
85640
85641- if (increment < 0 && !can_nice(current, nice))
85642+ if (increment < 0 && (!can_nice(current, nice) ||
85643+ gr_handle_chroot_nice()))
85644 return -EPERM;
85645
85646 retval = security_task_setnice(current, nice);
85647@@ -3327,6 +3330,7 @@ recheck:
85648 unsigned long rlim_rtprio =
85649 task_rlimit(p, RLIMIT_RTPRIO);
85650
85651+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
85652 /* can't set/change the rt policy */
85653 if (policy != p->policy && !rlim_rtprio)
85654 return -EPERM;
85655@@ -4456,7 +4460,7 @@ static void migrate_tasks(unsigned int dead_cpu)
85656
85657 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
85658
85659-static struct ctl_table sd_ctl_dir[] = {
85660+static ctl_table_no_const sd_ctl_dir[] __read_only = {
85661 {
85662 .procname = "sched_domain",
85663 .mode = 0555,
85664@@ -4473,17 +4477,17 @@ static struct ctl_table sd_ctl_root[] = {
85665 {}
85666 };
85667
85668-static struct ctl_table *sd_alloc_ctl_entry(int n)
85669+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
85670 {
85671- struct ctl_table *entry =
85672+ ctl_table_no_const *entry =
85673 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
85674
85675 return entry;
85676 }
85677
85678-static void sd_free_ctl_entry(struct ctl_table **tablep)
85679+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
85680 {
85681- struct ctl_table *entry;
85682+ ctl_table_no_const *entry;
85683
85684 /*
85685 * In the intermediate directories, both the child directory and
85686@@ -4491,22 +4495,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
85687 * will always be set. In the lowest directory the names are
85688 * static strings and all have proc handlers.
85689 */
85690- for (entry = *tablep; entry->mode; entry++) {
85691- if (entry->child)
85692- sd_free_ctl_entry(&entry->child);
85693+ for (entry = tablep; entry->mode; entry++) {
85694+ if (entry->child) {
85695+ sd_free_ctl_entry(entry->child);
85696+ pax_open_kernel();
85697+ entry->child = NULL;
85698+ pax_close_kernel();
85699+ }
85700 if (entry->proc_handler == NULL)
85701 kfree(entry->procname);
85702 }
85703
85704- kfree(*tablep);
85705- *tablep = NULL;
85706+ kfree(tablep);
85707 }
85708
85709 static int min_load_idx = 0;
85710 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
85711
85712 static void
85713-set_table_entry(struct ctl_table *entry,
85714+set_table_entry(ctl_table_no_const *entry,
85715 const char *procname, void *data, int maxlen,
85716 umode_t mode, proc_handler *proc_handler,
85717 bool load_idx)
85718@@ -4526,7 +4533,7 @@ set_table_entry(struct ctl_table *entry,
85719 static struct ctl_table *
85720 sd_alloc_ctl_domain_table(struct sched_domain *sd)
85721 {
85722- struct ctl_table *table = sd_alloc_ctl_entry(13);
85723+ ctl_table_no_const *table = sd_alloc_ctl_entry(13);
85724
85725 if (table == NULL)
85726 return NULL;
85727@@ -4561,9 +4568,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
85728 return table;
85729 }
85730
85731-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
85732+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
85733 {
85734- struct ctl_table *entry, *table;
85735+ ctl_table_no_const *entry, *table;
85736 struct sched_domain *sd;
85737 int domain_num = 0, i;
85738 char buf[32];
85739@@ -4590,11 +4597,13 @@ static struct ctl_table_header *sd_sysctl_header;
85740 static void register_sched_domain_sysctl(void)
85741 {
85742 int i, cpu_num = num_possible_cpus();
85743- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
85744+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
85745 char buf[32];
85746
85747 WARN_ON(sd_ctl_dir[0].child);
85748+ pax_open_kernel();
85749 sd_ctl_dir[0].child = entry;
85750+ pax_close_kernel();
85751
85752 if (entry == NULL)
85753 return;
85754@@ -4617,8 +4626,12 @@ static void unregister_sched_domain_sysctl(void)
85755 if (sd_sysctl_header)
85756 unregister_sysctl_table(sd_sysctl_header);
85757 sd_sysctl_header = NULL;
85758- if (sd_ctl_dir[0].child)
85759- sd_free_ctl_entry(&sd_ctl_dir[0].child);
85760+ if (sd_ctl_dir[0].child) {
85761+ sd_free_ctl_entry(sd_ctl_dir[0].child);
85762+ pax_open_kernel();
85763+ sd_ctl_dir[0].child = NULL;
85764+ pax_close_kernel();
85765+ }
85766 }
85767 #else
85768 static void register_sched_domain_sysctl(void)
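
The constified sysctl code above leaves only two legitimate writes to the now read-only sd_ctl_dir[0].child slot, each bracketed by pax_open_kernel()/pax_close_kernel(). Roughly, those helpers lift and restore kernel write protection around a sanctioned store; a loose userspace analogue with mprotect(2), purely illustrative:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);

        /* stand-in for a __read_only table; starts writable for initialization */
        char *table = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (table == MAP_FAILED)
            return 1;
        strcpy(table, "child=NULL");
        mprotect(table, pagesz, PROT_READ);               /* sealed, like __read_only */

        mprotect(table, pagesz, PROT_READ | PROT_WRITE);  /* pax_open_kernel() */
        strcpy(table, "child=entry");                     /* the one sanctioned write */
        mprotect(table, pagesz, PROT_READ);               /* pax_close_kernel() */

        puts(table);                                      /* reads stay cheap and safe */
        return 0;
    }
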
85769diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
85770index 513fc2f..906a851 100644
85771--- a/kernel/sched/fair.c
85772+++ b/kernel/sched/fair.c
85773@@ -869,7 +869,7 @@ void task_numa_fault(int node, int pages, bool migrated)
85774
85775 static void reset_ptenuma_scan(struct task_struct *p)
85776 {
85777- ACCESS_ONCE(p->mm->numa_scan_seq)++;
85778+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
85779 p->mm->numa_scan_offset = 0;
85780 }
85781
85782@@ -5840,7 +5840,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
85783 * run_rebalance_domains is triggered when needed from the scheduler tick.
85784 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
85785 */
85786-static void run_rebalance_domains(struct softirq_action *h)
85787+static __latent_entropy void run_rebalance_domains(void)
85788 {
85789 int this_cpu = smp_processor_id();
85790 struct rq *this_rq = cpu_rq(this_cpu);
85791diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
85792index b3c5653..a4d192a 100644
85793--- a/kernel/sched/sched.h
85794+++ b/kernel/sched/sched.h
85795@@ -1004,7 +1004,7 @@ struct sched_class {
85796 #ifdef CONFIG_FAIR_GROUP_SCHED
85797 void (*task_move_group) (struct task_struct *p, int on_rq);
85798 #endif
85799-};
85800+} __do_const;
85801
85802 #define sched_class_highest (&stop_sched_class)
85803 #define for_each_class(class) \
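
Marking struct sched_class __do_const is the constify-plugin pattern: a table of function pointers becomes immutable after build so it cannot be repointed at runtime. In plain C the same effect comes from declaring the instance const, as in this sketch:

    #include <stdio.h>

    struct sched_class_like {
        void (*enqueue)(void);
        void (*dequeue)(void);
    };

    static void do_enqueue(void) { puts("enqueue"); }
    static void do_dequeue(void) { puts("dequeue"); }

    /* 'const' puts the table in .rodata; __do_const makes the compiler treat
     * the type this way even where the source declaration isn't const */
    static const struct sched_class_like fair_like = {
        .enqueue = do_enqueue,
        .dequeue = do_dequeue,
    };

    int main(void)
    {
        fair_like.enqueue();
        /* fair_like.enqueue = NULL;  -- rejected at compile time, and the
         * backing pages are mapped read-only at run time */
        fair_like.dequeue();
        return 0;
    }
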
85804diff --git a/kernel/signal.c b/kernel/signal.c
85805index ded28b9..6886c08 100644
85806--- a/kernel/signal.c
85807+++ b/kernel/signal.c
85808@@ -51,12 +51,12 @@ static struct kmem_cache *sigqueue_cachep;
85809
85810 int print_fatal_signals __read_mostly;
85811
85812-static void __user *sig_handler(struct task_struct *t, int sig)
85813+static __sighandler_t sig_handler(struct task_struct *t, int sig)
85814 {
85815 return t->sighand->action[sig - 1].sa.sa_handler;
85816 }
85817
85818-static int sig_handler_ignored(void __user *handler, int sig)
85819+static int sig_handler_ignored(__sighandler_t handler, int sig)
85820 {
85821 /* Is it explicitly or implicitly ignored? */
85822 return handler == SIG_IGN ||
85823@@ -65,7 +65,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
85824
85825 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
85826 {
85827- void __user *handler;
85828+ __sighandler_t handler;
85829
85830 handler = sig_handler(t, sig);
85831
85832@@ -369,6 +369,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
85833 atomic_inc(&user->sigpending);
85834 rcu_read_unlock();
85835
85836+ if (!override_rlimit)
85837+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
85838+
85839 if (override_rlimit ||
85840 atomic_read(&user->sigpending) <=
85841 task_rlimit(t, RLIMIT_SIGPENDING)) {
85842@@ -496,7 +499,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
85843
85844 int unhandled_signal(struct task_struct *tsk, int sig)
85845 {
85846- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
85847+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
85848 if (is_global_init(tsk))
85849 return 1;
85850 if (handler != SIG_IGN && handler != SIG_DFL)
85851@@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
85852 }
85853 }
85854
85855+ /* allow glibc communication via tgkill to other threads in our
85856+ thread group */
85857+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
85858+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
85859+ && gr_handle_signal(t, sig))
85860+ return -EPERM;
85861+
85862 return security_task_kill(t, info, sig, 0);
85863 }
85864
85865@@ -1199,7 +1209,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
85866 return send_signal(sig, info, p, 1);
85867 }
85868
85869-static int
85870+int
85871 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
85872 {
85873 return send_signal(sig, info, t, 0);
85874@@ -1236,6 +1246,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
85875 unsigned long int flags;
85876 int ret, blocked, ignored;
85877 struct k_sigaction *action;
85878+ int is_unhandled = 0;
85879
85880 spin_lock_irqsave(&t->sighand->siglock, flags);
85881 action = &t->sighand->action[sig-1];
85882@@ -1250,9 +1261,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
85883 }
85884 if (action->sa.sa_handler == SIG_DFL)
85885 t->signal->flags &= ~SIGNAL_UNKILLABLE;
85886+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
85887+ is_unhandled = 1;
85888 ret = specific_send_sig_info(sig, info, t);
85889 spin_unlock_irqrestore(&t->sighand->siglock, flags);
85890
85891+	/* only deal with unhandled signals; Java etc. trigger SIGSEGV during
85892+	   normal operation */
85893+ if (is_unhandled) {
85894+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
85895+ gr_handle_crash(t, sig);
85896+ }
85897+
85898 return ret;
85899 }
85900
85901@@ -1319,8 +1339,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
85902 ret = check_kill_permission(sig, info, p);
85903 rcu_read_unlock();
85904
85905- if (!ret && sig)
85906+ if (!ret && sig) {
85907 ret = do_send_sig_info(sig, info, p, true);
85908+ if (!ret)
85909+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
85910+ }
85911
85912 return ret;
85913 }
85914@@ -2926,7 +2949,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
85915 int error = -ESRCH;
85916
85917 rcu_read_lock();
85918- p = find_task_by_vpid(pid);
85919+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
85920+ /* allow glibc communication via tgkill to other threads in our
85921+ thread group */
85922+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
85923+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
85924+ p = find_task_by_vpid_unrestricted(pid);
85925+ else
85926+#endif
85927+ p = find_task_by_vpid(pid);
85928 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
85929 error = check_kill_permission(sig, info, p);
85930 /*
85931@@ -3240,8 +3271,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
85932 }
85933 seg = get_fs();
85934 set_fs(KERNEL_DS);
85935- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
85936- (stack_t __force __user *) &uoss,
85937+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
85938+ (stack_t __force_user *) &uoss,
85939 compat_user_stack_pointer());
85940 set_fs(seg);
85941 if (ret >= 0 && uoss_ptr) {
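
The SI_TKILL / SIGRTMIN+1 exemptions above exist because glibc's NPTL uses exactly that tgkill() signature to broadcast setxid changes to sibling threads; without the carve-out, grsecurity's signal restrictions could break setuid() in threaded programs inside a chroot. A runnable illustration of the whitelisted pattern (SIGUSR1 standing in for glibc's internal real-time signal):

    #define _GNU_SOURCE
    #include <signal.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static volatile sig_atomic_t got;

    static void handler(int sig) { (void)sig; got = 1; }

    int main(void)
    {
        signal(SIGUSR1, handler);

        pid_t tgid = getpid();
        pid_t tid  = (pid_t)syscall(SYS_gettid);

        /* sender and target share a thread group, si_code is SI_TKILL and
         * si_pid equals the tgid -- the exact shape the hunks above exempt */
        syscall(SYS_tgkill, tgid, tid, SIGUSR1);

        printf("delivered within thread group: %d\n", (int)got);
        return 0;
    }
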
85942diff --git a/kernel/smpboot.c b/kernel/smpboot.c
85943index eb89e18..a4e6792 100644
85944--- a/kernel/smpboot.c
85945+++ b/kernel/smpboot.c
85946@@ -288,7 +288,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
85947 }
85948 smpboot_unpark_thread(plug_thread, cpu);
85949 }
85950- list_add(&plug_thread->list, &hotplug_threads);
85951+ pax_list_add(&plug_thread->list, &hotplug_threads);
85952 out:
85953 mutex_unlock(&smpboot_threads_lock);
85954 return ret;
85955@@ -305,7 +305,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
85956 {
85957 get_online_cpus();
85958 mutex_lock(&smpboot_threads_lock);
85959- list_del(&plug_thread->list);
85960+ pax_list_del(&plug_thread->list);
85961 smpboot_destroy_threads(plug_thread);
85962 mutex_unlock(&smpboot_threads_lock);
85963 put_online_cpus();
85964diff --git a/kernel/softirq.c b/kernel/softirq.c
85965index d7d498d..94fe0f7 100644
85966--- a/kernel/softirq.c
85967+++ b/kernel/softirq.c
85968@@ -53,11 +53,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
85969 EXPORT_SYMBOL(irq_stat);
85970 #endif
85971
85972-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
85973+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
85974
85975 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
85976
85977-char *softirq_to_name[NR_SOFTIRQS] = {
85978+const char * const softirq_to_name[NR_SOFTIRQS] = {
85979 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
85980 "TASKLET", "SCHED", "HRTIMER", "RCU"
85981 };
85982@@ -248,7 +248,7 @@ restart:
85983 kstat_incr_softirqs_this_cpu(vec_nr);
85984
85985 trace_softirq_entry(vec_nr);
85986- h->action(h);
85987+ h->action();
85988 trace_softirq_exit(vec_nr);
85989 if (unlikely(prev_count != preempt_count())) {
85990 printk(KERN_ERR "huh, entered softirq %u %s %p"
85991@@ -412,7 +412,7 @@ void __raise_softirq_irqoff(unsigned int nr)
85992 or_softirq_pending(1UL << nr);
85993 }
85994
85995-void open_softirq(int nr, void (*action)(struct softirq_action *))
85996+void __init open_softirq(int nr, void (*action)(void))
85997 {
85998 softirq_vec[nr].action = action;
85999 }
86000@@ -468,7 +468,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
86001
86002 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
86003
86004-static void tasklet_action(struct softirq_action *a)
86005+static __latent_entropy void tasklet_action(void)
86006 {
86007 struct tasklet_struct *list;
86008
86009@@ -503,7 +503,7 @@ static void tasklet_action(struct softirq_action *a)
86010 }
86011 }
86012
86013-static void tasklet_hi_action(struct softirq_action *a)
86014+static __latent_entropy void tasklet_hi_action(void)
86015 {
86016 struct tasklet_struct *list;
86017
86018@@ -858,7 +858,7 @@ static struct notifier_block cpu_nfb = {
86019 .notifier_call = cpu_callback
86020 };
86021
86022-static struct smp_hotplug_thread softirq_threads = {
86023+static struct smp_hotplug_thread softirq_threads __read_only = {
86024 .store = &ksoftirqd,
86025 .thread_should_run = ksoftirqd_should_run,
86026 .thread_fn = run_ksoftirqd,
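
open_softirq() callbacks never used their struct softirq_action * argument, so the patch narrows them to plain void thunks, moves the vector to read-only memory, and restricts registration to __init. A standalone sketch of the narrowed shape (NR_SOFTIRQS shrunk to 2 for brevity):

    #include <stdio.h>

    #define NR_SOFTIRQS 2

    /* old shape: void (*action)(struct softirq_action *); the argument was
     * never used, so the hardened kernel narrows it to a plain thunk */
    typedef void (*softirq_fn)(void);

    static softirq_fn softirq_vec[NR_SOFTIRQS];  /* __read_only after boot in the patch */

    static void open_softirq(int nr, softirq_fn action)  /* __init-only in the patch */
    {
        softirq_vec[nr] = action;
    }

    static void timer_action(void)   { puts("TIMER"); }
    static void tasklet_action(void) { puts("TASKLET"); }

    int main(void)
    {
        open_softirq(0, timer_action);
        open_softirq(1, tasklet_action);
        for (int nr = 0; nr < NR_SOFTIRQS; nr++)
            softirq_vec[nr]();       /* h->action() with no dead parameter */
        return 0;
    }
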
86027diff --git a/kernel/srcu.c b/kernel/srcu.c
86028index 01d5ccb..cdcbee6 100644
86029--- a/kernel/srcu.c
86030+++ b/kernel/srcu.c
86031@@ -300,9 +300,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
86032
86033 idx = ACCESS_ONCE(sp->completed) & 0x1;
86034 preempt_disable();
86035- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
86036+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
86037 smp_mb(); /* B */ /* Avoid leaking the critical section. */
86038- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
86039+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
86040 preempt_enable();
86041 return idx;
86042 }
86043diff --git a/kernel/sys.c b/kernel/sys.c
86044index c18ecca..b3c2dd2 100644
86045--- a/kernel/sys.c
86046+++ b/kernel/sys.c
86047@@ -149,6 +149,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
86048 error = -EACCES;
86049 goto out;
86050 }
86051+
86052+ if (gr_handle_chroot_setpriority(p, niceval)) {
86053+ error = -EACCES;
86054+ goto out;
86055+ }
86056+
86057 no_nice = security_task_setnice(p, niceval);
86058 if (no_nice) {
86059 error = no_nice;
86060@@ -352,6 +358,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
86061 goto error;
86062 }
86063
86064+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
86065+ goto error;
86066+
86067 if (rgid != (gid_t) -1 ||
86068 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
86069 new->sgid = new->egid;
86070@@ -387,6 +396,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
86071 old = current_cred();
86072
86073 retval = -EPERM;
86074+
86075+ if (gr_check_group_change(kgid, kgid, kgid))
86076+ goto error;
86077+
86078 if (ns_capable(old->user_ns, CAP_SETGID))
86079 new->gid = new->egid = new->sgid = new->fsgid = kgid;
86080 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
86081@@ -404,7 +417,7 @@ error:
86082 /*
86083 * change the user struct in a credentials set to match the new UID
86084 */
86085-static int set_user(struct cred *new)
86086+int set_user(struct cred *new)
86087 {
86088 struct user_struct *new_user;
86089
86090@@ -484,6 +497,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
86091 goto error;
86092 }
86093
86094+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
86095+ goto error;
86096+
86097 if (!uid_eq(new->uid, old->uid)) {
86098 retval = set_user(new);
86099 if (retval < 0)
86100@@ -534,6 +550,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
86101 old = current_cred();
86102
86103 retval = -EPERM;
86104+
86105+ if (gr_check_crash_uid(kuid))
86106+ goto error;
86107+ if (gr_check_user_change(kuid, kuid, kuid))
86108+ goto error;
86109+
86110 if (ns_capable(old->user_ns, CAP_SETUID)) {
86111 new->suid = new->uid = kuid;
86112 if (!uid_eq(kuid, old->uid)) {
86113@@ -603,6 +625,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
86114 goto error;
86115 }
86116
86117+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
86118+ goto error;
86119+
86120 if (ruid != (uid_t) -1) {
86121 new->uid = kruid;
86122 if (!uid_eq(kruid, old->uid)) {
86123@@ -685,6 +710,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
86124 goto error;
86125 }
86126
86127+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
86128+ goto error;
86129+
86130 if (rgid != (gid_t) -1)
86131 new->gid = krgid;
86132 if (egid != (gid_t) -1)
86133@@ -746,12 +774,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
86134 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
86135 ns_capable(old->user_ns, CAP_SETUID)) {
86136 if (!uid_eq(kuid, old->fsuid)) {
86137+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
86138+ goto error;
86139+
86140 new->fsuid = kuid;
86141 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
86142 goto change_okay;
86143 }
86144 }
86145
86146+error:
86147 abort_creds(new);
86148 return old_fsuid;
86149
86150@@ -784,12 +816,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
86151 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
86152 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
86153 ns_capable(old->user_ns, CAP_SETGID)) {
86154+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
86155+ goto error;
86156+
86157 if (!gid_eq(kgid, old->fsgid)) {
86158 new->fsgid = kgid;
86159 goto change_okay;
86160 }
86161 }
86162
86163+error:
86164 abort_creds(new);
86165 return old_fsgid;
86166
86167@@ -1169,19 +1205,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
86168 return -EFAULT;
86169
86170 down_read(&uts_sem);
86171- error = __copy_to_user(&name->sysname, &utsname()->sysname,
86172+ error = __copy_to_user(name->sysname, &utsname()->sysname,
86173 __OLD_UTS_LEN);
86174 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
86175- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
86176+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
86177 __OLD_UTS_LEN);
86178 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
86179- error |= __copy_to_user(&name->release, &utsname()->release,
86180+ error |= __copy_to_user(name->release, &utsname()->release,
86181 __OLD_UTS_LEN);
86182 error |= __put_user(0, name->release + __OLD_UTS_LEN);
86183- error |= __copy_to_user(&name->version, &utsname()->version,
86184+ error |= __copy_to_user(name->version, &utsname()->version,
86185 __OLD_UTS_LEN);
86186 error |= __put_user(0, name->version + __OLD_UTS_LEN);
86187- error |= __copy_to_user(&name->machine, &utsname()->machine,
86188+ error |= __copy_to_user(name->machine, &utsname()->machine,
86189 __OLD_UTS_LEN);
86190 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
86191 up_read(&uts_sem);
86192@@ -1383,6 +1419,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
86193 */
86194 new_rlim->rlim_cur = 1;
86195 }
86196+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
86197+ is changed to a lower value. Since tasks can be created by the same
86198+ user in between this limit change and an execve by this task, force
86199+ a recheck only for this task by setting PF_NPROC_EXCEEDED
86200+ */
86201+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
86202+ tsk->flags |= PF_NPROC_EXCEEDED;
86203 }
86204 if (!retval) {
86205 if (old_rlim)
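
The do_prlimit() hunk closes a race: if RLIMIT_NPROC is lowered after a user has already forked up to the old limit, nothing re-validates the process count until the task's next execve, so the task is flagged with PF_NPROC_EXCEEDED to force that recheck. A simplified model of the deferred check (the flag name mirrors the kernel's; the bookkeeping is invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    #define PF_NPROC_EXCEEDED 0x1000   /* same idea as the kernel flag */

    struct task { unsigned flags; unsigned nproc; unsigned rlimit_nproc; };

    static void set_nproc_limit(struct task *t, unsigned new_limit)
    {
        t->rlimit_nproc = new_limit;
        /* can't know how many tasks the user creates before the next execve,
         * so just mark this task for a recheck there */
        t->flags |= PF_NPROC_EXCEEDED;
    }

    static bool execve_allowed(struct task *t)
    {
        if ((t->flags & PF_NPROC_EXCEEDED) && t->nproc > t->rlimit_nproc)
            return false;              /* the kernel returns -EAGAIN here */
        t->flags &= ~PF_NPROC_EXCEEDED;
        return true;
    }

    int main(void)
    {
        struct task t = { .nproc = 50, .rlimit_nproc = 100 };
        set_nproc_limit(&t, 10);       /* limit lowered below current usage */
        printf("execve allowed: %d\n", execve_allowed(&t));  /* 0 */
        return 0;
    }
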
86206diff --git a/kernel/sysctl.c b/kernel/sysctl.c
86207index b2f06f3..e6354ab 100644
86208--- a/kernel/sysctl.c
86209+++ b/kernel/sysctl.c
86210@@ -93,7 +93,6 @@
86211
86212
86213 #if defined(CONFIG_SYSCTL)
86214-
86215 /* External variables not in a header file. */
86216 extern int sysctl_overcommit_memory;
86217 extern int sysctl_overcommit_ratio;
86218@@ -119,17 +118,18 @@ extern int blk_iopoll_enabled;
86219
86220 /* Constants used for minimum and maximum */
86221 #ifdef CONFIG_LOCKUP_DETECTOR
86222-static int sixty = 60;
86223+static int sixty __read_only = 60;
86224 #endif
86225
86226-static int zero;
86227-static int __maybe_unused one = 1;
86228-static int __maybe_unused two = 2;
86229-static int __maybe_unused three = 3;
86230-static unsigned long one_ul = 1;
86231-static int one_hundred = 100;
86232+static int neg_one __read_only = -1;
86233+static int zero __read_only = 0;
86234+static int __maybe_unused one __read_only = 1;
86235+static int __maybe_unused two __read_only = 2;
86236+static int __maybe_unused three __read_only = 3;
86237+static unsigned long one_ul __read_only = 1;
86238+static int one_hundred __read_only = 100;
86239 #ifdef CONFIG_PRINTK
86240-static int ten_thousand = 10000;
86241+static int ten_thousand __read_only = 10000;
86242 #endif
86243
86244 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
86245@@ -176,10 +176,8 @@ static int proc_taint(struct ctl_table *table, int write,
86246 void __user *buffer, size_t *lenp, loff_t *ppos);
86247 #endif
86248
86249-#ifdef CONFIG_PRINTK
86250 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
86251 void __user *buffer, size_t *lenp, loff_t *ppos);
86252-#endif
86253
86254 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
86255 void __user *buffer, size_t *lenp, loff_t *ppos);
86256@@ -210,6 +208,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
86257
86258 #endif
86259
86260+extern struct ctl_table grsecurity_table[];
86261+
86262 static struct ctl_table kern_table[];
86263 static struct ctl_table vm_table[];
86264 static struct ctl_table fs_table[];
86265@@ -224,6 +224,20 @@ extern struct ctl_table epoll_table[];
86266 int sysctl_legacy_va_layout;
86267 #endif
86268
86269+#ifdef CONFIG_PAX_SOFTMODE
86270+static ctl_table pax_table[] = {
86271+ {
86272+ .procname = "softmode",
86273+ .data = &pax_softmode,
86274+ .maxlen = sizeof(unsigned int),
86275+ .mode = 0600,
86276+ .proc_handler = &proc_dointvec,
86277+ },
86278+
86279+ { }
86280+};
86281+#endif
86282+
86283 /* The default sysctl tables: */
86284
86285 static struct ctl_table sysctl_base_table[] = {
86286@@ -272,6 +286,22 @@ static int max_extfrag_threshold = 1000;
86287 #endif
86288
86289 static struct ctl_table kern_table[] = {
86290+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
86291+ {
86292+ .procname = "grsecurity",
86293+ .mode = 0500,
86294+ .child = grsecurity_table,
86295+ },
86296+#endif
86297+
86298+#ifdef CONFIG_PAX_SOFTMODE
86299+ {
86300+ .procname = "pax",
86301+ .mode = 0500,
86302+ .child = pax_table,
86303+ },
86304+#endif
86305+
86306 {
86307 .procname = "sched_child_runs_first",
86308 .data = &sysctl_sched_child_runs_first,
86309@@ -613,7 +643,7 @@ static struct ctl_table kern_table[] = {
86310 .data = &modprobe_path,
86311 .maxlen = KMOD_PATH_LEN,
86312 .mode = 0644,
86313- .proc_handler = proc_dostring,
86314+ .proc_handler = proc_dostring_modpriv,
86315 },
86316 {
86317 .procname = "modules_disabled",
86318@@ -780,16 +810,20 @@ static struct ctl_table kern_table[] = {
86319 .extra1 = &zero,
86320 .extra2 = &one,
86321 },
86322+#endif
86323 {
86324 .procname = "kptr_restrict",
86325 .data = &kptr_restrict,
86326 .maxlen = sizeof(int),
86327 .mode = 0644,
86328 .proc_handler = proc_dointvec_minmax_sysadmin,
86329+#ifdef CONFIG_GRKERNSEC_HIDESYM
86330+ .extra1 = &two,
86331+#else
86332 .extra1 = &zero,
86333+#endif
86334 .extra2 = &two,
86335 },
86336-#endif
86337 {
86338 .procname = "ngroups_max",
86339 .data = &ngroups_max,
86340@@ -1031,10 +1065,17 @@ static struct ctl_table kern_table[] = {
86341 */
86342 {
86343 .procname = "perf_event_paranoid",
86344- .data = &sysctl_perf_event_paranoid,
86345- .maxlen = sizeof(sysctl_perf_event_paranoid),
86346+ .data = &sysctl_perf_event_legitimately_concerned,
86347+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
86348 .mode = 0644,
86349- .proc_handler = proc_dointvec,
86350+ /* go ahead, be a hero */
86351+ .proc_handler = proc_dointvec_minmax_sysadmin,
86352+ .extra1 = &neg_one,
86353+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
86354+ .extra2 = &three,
86355+#else
86356+ .extra2 = &two,
86357+#endif
86358 },
86359 {
86360 .procname = "perf_event_mlock_kb",
86361@@ -1297,6 +1338,13 @@ static struct ctl_table vm_table[] = {
86362 .proc_handler = proc_dointvec_minmax,
86363 .extra1 = &zero,
86364 },
86365+ {
86366+ .procname = "heap_stack_gap",
86367+ .data = &sysctl_heap_stack_gap,
86368+ .maxlen = sizeof(sysctl_heap_stack_gap),
86369+ .mode = 0644,
86370+ .proc_handler = proc_doulongvec_minmax,
86371+ },
86372 #else
86373 {
86374 .procname = "nr_trim_pages",
86375@@ -1761,6 +1809,16 @@ int proc_dostring(struct ctl_table *table, int write,
86376 buffer, lenp, ppos);
86377 }
86378
86379+int proc_dostring_modpriv(struct ctl_table *table, int write,
86380+ void __user *buffer, size_t *lenp, loff_t *ppos)
86381+{
86382+ if (write && !capable(CAP_SYS_MODULE))
86383+ return -EPERM;
86384+
86385+ return _proc_do_string(table->data, table->maxlen, write,
86386+ buffer, lenp, ppos);
86387+}
86388+
86389 static size_t proc_skip_spaces(char **buf)
86390 {
86391 size_t ret;
86392@@ -1866,6 +1924,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
86393 len = strlen(tmp);
86394 if (len > *size)
86395 len = *size;
86396+ if (len > sizeof(tmp))
86397+ len = sizeof(tmp);
86398 if (copy_to_user(*buf, tmp, len))
86399 return -EFAULT;
86400 *size -= len;
86401@@ -2030,7 +2090,7 @@ int proc_dointvec(struct ctl_table *table, int write,
86402 static int proc_taint(struct ctl_table *table, int write,
86403 void __user *buffer, size_t *lenp, loff_t *ppos)
86404 {
86405- struct ctl_table t;
86406+ ctl_table_no_const t;
86407 unsigned long tmptaint = get_taint();
86408 int err;
86409
86410@@ -2058,7 +2118,6 @@ static int proc_taint(struct ctl_table *table, int write,
86411 return err;
86412 }
86413
86414-#ifdef CONFIG_PRINTK
86415 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
86416 void __user *buffer, size_t *lenp, loff_t *ppos)
86417 {
86418@@ -2067,7 +2126,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
86419
86420 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
86421 }
86422-#endif
86423
86424 struct do_proc_dointvec_minmax_conv_param {
86425 int *min;
86426@@ -2214,8 +2272,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
86427 *i = val;
86428 } else {
86429 val = convdiv * (*i) / convmul;
86430- if (!first)
86431+ if (!first) {
86432 err = proc_put_char(&buffer, &left, '\t');
86433+ if (err)
86434+ break;
86435+ }
86436 err = proc_put_long(&buffer, &left, val, false);
86437 if (err)
86438 break;
86439@@ -2611,6 +2672,12 @@ int proc_dostring(struct ctl_table *table, int write,
86440 return -ENOSYS;
86441 }
86442
86443+int proc_dostring_modpriv(struct ctl_table *table, int write,
86444+ void __user *buffer, size_t *lenp, loff_t *ppos)
86445+{
86446+ return -ENOSYS;
86447+}
86448+
86449 int proc_dointvec(struct ctl_table *table, int write,
86450 void __user *buffer, size_t *lenp, loff_t *ppos)
86451 {
86452@@ -2667,5 +2734,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
86453 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
86454 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
86455 EXPORT_SYMBOL(proc_dostring);
86456+EXPORT_SYMBOL(proc_dostring_modpriv);
86457 EXPORT_SYMBOL(proc_doulongvec_minmax);
86458 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
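
Beyond adding the grsecurity and pax directories, the sysctl changes raise the writable floor of kptr_restrict to 2 when HIDESYM is set, via the .extra1/.extra2 bounds that proc_dointvec_minmax enforces. A userspace model of that bounded setter:

    #include <stdio.h>

    #define GRKERNSEC_HIDESYM 1   /* flip to 0 to model the non-hardened floor */

    static int kptr_restrict = 2;

    /* models proc_dointvec_minmax with extra1/extra2: reject out-of-range
     * writes instead of clamping them */
    static int set_kptr_restrict(int val)
    {
        int min = GRKERNSEC_HIDESYM ? 2 : 0;  /* the .extra1 swap in the hunk */
        int max = 2;                          /* .extra2 */
        if (val < min || val > max)
            return -1;                        /* the kernel returns -EINVAL */
        kptr_restrict = val;
        return 0;
    }

    int main(void)
    {
        printf("write 0 -> %d (blocked under HIDESYM)\n", set_kptr_restrict(0));
        printf("write 2 -> %d\n", set_kptr_restrict(2));
        printf("kptr_restrict = %d\n", kptr_restrict);
        return 0;
    }
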
86459diff --git a/kernel/taskstats.c b/kernel/taskstats.c
86460index 145bb4d..b2aa969 100644
86461--- a/kernel/taskstats.c
86462+++ b/kernel/taskstats.c
86463@@ -28,9 +28,12 @@
86464 #include <linux/fs.h>
86465 #include <linux/file.h>
86466 #include <linux/pid_namespace.h>
86467+#include <linux/grsecurity.h>
86468 #include <net/genetlink.h>
86469 #include <linux/atomic.h>
86470
86471+extern int gr_is_taskstats_denied(int pid);
86472+
86473 /*
86474 * Maximum length of a cpumask that can be specified in
86475 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
86476@@ -570,6 +573,9 @@ err:
86477
86478 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
86479 {
86480+ if (gr_is_taskstats_denied(current->pid))
86481+ return -EACCES;
86482+
86483 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
86484 return cmd_attr_register_cpumask(info);
86485 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
86486diff --git a/kernel/time.c b/kernel/time.c
86487index 7c7964c..2a0d412 100644
86488--- a/kernel/time.c
86489+++ b/kernel/time.c
86490@@ -172,6 +172,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
86491 return error;
86492
86493 if (tz) {
86494+	/* we log in do_settimeofday(), called below, so don't log
86495+	   twice */
86496+ if (!tv)
86497+ gr_log_timechange();
86498+
86499 sys_tz = *tz;
86500 update_vsyscall_tz();
86501 if (firsttime) {
86502diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
86503index 88c9c65..7497ebc 100644
86504--- a/kernel/time/alarmtimer.c
86505+++ b/kernel/time/alarmtimer.c
86506@@ -795,7 +795,7 @@ static int __init alarmtimer_init(void)
86507 struct platform_device *pdev;
86508 int error = 0;
86509 int i;
86510- struct k_clock alarm_clock = {
86511+ static struct k_clock alarm_clock = {
86512 .clock_getres = alarm_clock_getres,
86513 .clock_get = alarm_clock_get,
86514 .timer_create = alarm_timer_create,
86515diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
86516index 5cf6c70..ac341b0 100644
86517--- a/kernel/time/timekeeping.c
86518+++ b/kernel/time/timekeeping.c
86519@@ -15,6 +15,7 @@
86520 #include <linux/init.h>
86521 #include <linux/mm.h>
86522 #include <linux/sched.h>
86523+#include <linux/grsecurity.h>
86524 #include <linux/syscore_ops.h>
86525 #include <linux/clocksource.h>
86526 #include <linux/jiffies.h>
86527@@ -500,6 +501,8 @@ int do_settimeofday(const struct timespec *tv)
86528 if (!timespec_valid_strict(tv))
86529 return -EINVAL;
86530
86531+ gr_log_timechange();
86532+
86533 raw_spin_lock_irqsave(&timekeeper_lock, flags);
86534 write_seqcount_begin(&timekeeper_seq);
86535
86536diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
86537index 61ed862..3b52c65 100644
86538--- a/kernel/time/timer_list.c
86539+++ b/kernel/time/timer_list.c
86540@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
86541
86542 static void print_name_offset(struct seq_file *m, void *sym)
86543 {
86544+#ifdef CONFIG_GRKERNSEC_HIDESYM
86545+ SEQ_printf(m, "<%p>", NULL);
86546+#else
86547 char symname[KSYM_NAME_LEN];
86548
86549 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
86550 SEQ_printf(m, "<%pK>", sym);
86551 else
86552 SEQ_printf(m, "%s", symname);
86553+#endif
86554 }
86555
86556 static void
86557@@ -119,7 +123,11 @@ next_one:
86558 static void
86559 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
86560 {
86561+#ifdef CONFIG_GRKERNSEC_HIDESYM
86562+ SEQ_printf(m, " .base: %p\n", NULL);
86563+#else
86564 SEQ_printf(m, " .base: %pK\n", base);
86565+#endif
86566 SEQ_printf(m, " .index: %d\n",
86567 base->index);
86568 SEQ_printf(m, " .resolution: %Lu nsecs\n",
86569@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
86570 {
86571 struct proc_dir_entry *pe;
86572
86573+#ifdef CONFIG_GRKERNSEC_PROC_ADD
86574+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
86575+#else
86576 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
86577+#endif
86578 if (!pe)
86579 return -ENOMEM;
86580 return 0;
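
The timer_list output above gets the HIDESYM treatment: with CONFIG_GRKERNSEC_HIDESYM the symbol and base-pointer columns print a NULL placeholder instead of even a %pK-censored address. The shape of that fallback, as a runnable sketch:

    #include <stdio.h>

    static int hidesym = 1;   /* stands in for CONFIG_GRKERNSEC_HIDESYM */

    static void print_name_offset(const void *sym)
    {
        if (hidesym)
            printf("<%p>\n", (void *)NULL);  /* reveal nothing, not even %pK */
        else
            printf("<%p>\n", sym);           /* debugging-friendly but leaky */
    }

    int main(void)
    {
        int object;
        print_name_offset(&object);
        hidesym = 0;
        print_name_offset(&object);
        return 0;
    }
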
86581diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
86582index 0b537f2..40d6c20 100644
86583--- a/kernel/time/timer_stats.c
86584+++ b/kernel/time/timer_stats.c
86585@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
86586 static unsigned long nr_entries;
86587 static struct entry entries[MAX_ENTRIES];
86588
86589-static atomic_t overflow_count;
86590+static atomic_unchecked_t overflow_count;
86591
86592 /*
86593 * The entries are in a hash-table, for fast lookup:
86594@@ -140,7 +140,7 @@ static void reset_entries(void)
86595 nr_entries = 0;
86596 memset(entries, 0, sizeof(entries));
86597 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
86598- atomic_set(&overflow_count, 0);
86599+ atomic_set_unchecked(&overflow_count, 0);
86600 }
86601
86602 static struct entry *alloc_entry(void)
86603@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
86604 if (likely(entry))
86605 entry->count++;
86606 else
86607- atomic_inc(&overflow_count);
86608+ atomic_inc_unchecked(&overflow_count);
86609
86610 out_unlock:
86611 raw_spin_unlock_irqrestore(lock, flags);
86612@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
86613
86614 static void print_name_offset(struct seq_file *m, unsigned long addr)
86615 {
86616+#ifdef CONFIG_GRKERNSEC_HIDESYM
86617+ seq_printf(m, "<%p>", NULL);
86618+#else
86619 char symname[KSYM_NAME_LEN];
86620
86621 if (lookup_symbol_name(addr, symname) < 0)
86622- seq_printf(m, "<%p>", (void *)addr);
86623+ seq_printf(m, "<%pK>", (void *)addr);
86624 else
86625 seq_printf(m, "%s", symname);
86626+#endif
86627 }
86628
86629 static int tstats_show(struct seq_file *m, void *v)
86630@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
86631
86632 seq_puts(m, "Timer Stats Version: v0.2\n");
86633 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
86634- if (atomic_read(&overflow_count))
86635+ if (atomic_read_unchecked(&overflow_count))
86636 seq_printf(m, "Overflow: %d entries\n",
86637- atomic_read(&overflow_count));
86638+ atomic_read_unchecked(&overflow_count));
86639
86640 for (i = 0; i < nr_entries; i++) {
86641 entry = entries + i;
86642@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
86643 {
86644 struct proc_dir_entry *pe;
86645
86646+#ifdef CONFIG_GRKERNSEC_PROC_ADD
86647+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
86648+#else
86649 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
86650+#endif
86651 if (!pe)
86652 return -ENOMEM;
86653 return 0;
86654diff --git a/kernel/timer.c b/kernel/timer.c
86655index 4296d13..0164b04 100644
86656--- a/kernel/timer.c
86657+++ b/kernel/timer.c
86658@@ -1366,7 +1366,7 @@ void update_process_times(int user_tick)
86659 /*
86660 * This function runs timers and the timer-tq in bottom half context.
86661 */
86662-static void run_timer_softirq(struct softirq_action *h)
86663+static __latent_entropy void run_timer_softirq(void)
86664 {
86665 struct tvec_base *base = __this_cpu_read(tvec_bases);
86666
86667@@ -1429,7 +1429,7 @@ static void process_timeout(unsigned long __data)
86668 *
86669 * In all cases the return value is guaranteed to be non-negative.
86670 */
86671-signed long __sched schedule_timeout(signed long timeout)
86672+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
86673 {
86674 struct timer_list timer;
86675 unsigned long expire;
86676diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
86677index b8b8560..75b1a09 100644
86678--- a/kernel/trace/blktrace.c
86679+++ b/kernel/trace/blktrace.c
86680@@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
86681 struct blk_trace *bt = filp->private_data;
86682 char buf[16];
86683
86684- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
86685+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
86686
86687 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
86688 }
86689@@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
86690 return 1;
86691
86692 bt = buf->chan->private_data;
86693- atomic_inc(&bt->dropped);
86694+ atomic_inc_unchecked(&bt->dropped);
86695 return 0;
86696 }
86697
86698@@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
86699
86700 bt->dir = dir;
86701 bt->dev = dev;
86702- atomic_set(&bt->dropped, 0);
86703+ atomic_set_unchecked(&bt->dropped, 0);
86704
86705 ret = -EIO;
86706 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
86707diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
86708index f3bd09ee..9bb9586 100644
86709--- a/kernel/trace/ftrace.c
86710+++ b/kernel/trace/ftrace.c
86711@@ -1944,12 +1944,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
86712 if (unlikely(ftrace_disabled))
86713 return 0;
86714
86715+ ret = ftrace_arch_code_modify_prepare();
86716+ FTRACE_WARN_ON(ret);
86717+ if (ret)
86718+ return 0;
86719+
86720 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
86721+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
86722 if (ret) {
86723 ftrace_bug(ret, ip);
86724- return 0;
86725 }
86726- return 1;
86727+ return ret ? 0 : 1;
86728 }
86729
86730 /*
86731@@ -4043,8 +4048,10 @@ static int ftrace_process_locs(struct module *mod,
86732 if (!count)
86733 return 0;
86734
86735+ pax_open_kernel();
86736 sort(start, count, sizeof(*start),
86737 ftrace_cmp_ips, ftrace_swap_ips);
86738+ pax_close_kernel();
86739
86740 start_pg = ftrace_allocate_pages(count);
86741 if (!start_pg)
86742@@ -4766,8 +4773,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
86743 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
86744
86745 static int ftrace_graph_active;
86746-static struct notifier_block ftrace_suspend_notifier;
86747-
86748 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
86749 {
86750 return 0;
86751@@ -4918,6 +4923,10 @@ static struct ftrace_ops fgraph_ops __read_mostly = {
86752 FTRACE_OPS_FL_RECURSION_SAFE,
86753 };
86754
86755+static struct notifier_block ftrace_suspend_notifier = {
86756+ .notifier_call = ftrace_suspend_notifier_call
86757+};
86758+
86759 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
86760 trace_func_graph_ent_t entryfunc)
86761 {
86762@@ -4931,7 +4940,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
86763 goto out;
86764 }
86765
86766- ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
86767 register_pm_notifier(&ftrace_suspend_notifier);
86768
86769 ftrace_graph_active++;
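
The ftrace_code_disable() hunk wraps the nop-out path in the same prepare/post-process bracket used elsewhere for text patching: make the instruction pages writable, patch, then always restore, and refuse to patch at all if prepare fails. A standalone model of that bracket's control flow (the function names echo the kernel hooks; the bodies are stand-ins):

    #include <stdio.h>

    static int prepare(void)      { puts("text -> RW"); return 0; }
    static int post_process(void) { puts("text -> RX"); return 0; }
    static int make_nop(void)     { puts("patch site -> nop"); return 0; }

    /* returns 1 on success, 0 on failure -- the same convention as the
     * patched ftrace_code_disable() in the hunk above */
    static int code_disable(void)
    {
        int ret = prepare();
        if (ret)
            return 0;            /* never touch text we couldn't unprotect */
        ret = make_nop();
        post_process();          /* always undo the unprotect, even on error */
        return ret ? 0 : 1;
    }

    int main(void) { printf("ok=%d\n", code_disable()); return 0; }
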
86770diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
86771index cc2f66f..05edd54 100644
86772--- a/kernel/trace/ring_buffer.c
86773+++ b/kernel/trace/ring_buffer.c
86774@@ -352,9 +352,9 @@ struct buffer_data_page {
86775 */
86776 struct buffer_page {
86777 struct list_head list; /* list of buffer pages */
86778- local_t write; /* index for next write */
86779+ local_unchecked_t write; /* index for next write */
86780 unsigned read; /* index for next read */
86781- local_t entries; /* entries on this page */
86782+ local_unchecked_t entries; /* entries on this page */
86783 unsigned long real_end; /* real end of data */
86784 struct buffer_data_page *page; /* Actual data page */
86785 };
86786@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
86787 unsigned long last_overrun;
86788 local_t entries_bytes;
86789 local_t entries;
86790- local_t overrun;
86791- local_t commit_overrun;
86792+ local_unchecked_t overrun;
86793+ local_unchecked_t commit_overrun;
86794 local_t dropped_events;
86795 local_t committing;
86796 local_t commits;
86797@@ -992,8 +992,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
86798 *
86799 * We add a counter to the write field to denote this.
86800 */
86801- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
86802- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
86803+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
86804+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
86805
86806 /*
86807 * Just make sure we have seen our old_write and synchronize
86808@@ -1021,8 +1021,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
86809 * cmpxchg to only update if an interrupt did not already
86810 * do it for us. If the cmpxchg fails, we don't care.
86811 */
86812- (void)local_cmpxchg(&next_page->write, old_write, val);
86813- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
86814+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
86815+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
86816
86817 /*
86818 * No need to worry about races with clearing out the commit.
86819@@ -1386,12 +1386,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
86820
86821 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
86822 {
86823- return local_read(&bpage->entries) & RB_WRITE_MASK;
86824+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
86825 }
86826
86827 static inline unsigned long rb_page_write(struct buffer_page *bpage)
86828 {
86829- return local_read(&bpage->write) & RB_WRITE_MASK;
86830+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
86831 }
86832
86833 static int
86834@@ -1486,7 +1486,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
86835 * bytes consumed in ring buffer from here.
86836 * Increment overrun to account for the lost events.
86837 */
86838- local_add(page_entries, &cpu_buffer->overrun);
86839+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
86840 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
86841 }
86842
86843@@ -2064,7 +2064,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
86844 * it is our responsibility to update
86845 * the counters.
86846 */
86847- local_add(entries, &cpu_buffer->overrun);
86848+ local_add_unchecked(entries, &cpu_buffer->overrun);
86849 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
86850
86851 /*
86852@@ -2214,7 +2214,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
86853 if (tail == BUF_PAGE_SIZE)
86854 tail_page->real_end = 0;
86855
86856- local_sub(length, &tail_page->write);
86857+ local_sub_unchecked(length, &tail_page->write);
86858 return;
86859 }
86860
86861@@ -2249,7 +2249,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
86862 rb_event_set_padding(event);
86863
86864 /* Set the write back to the previous setting */
86865- local_sub(length, &tail_page->write);
86866+ local_sub_unchecked(length, &tail_page->write);
86867 return;
86868 }
86869
86870@@ -2261,7 +2261,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
86871
86872 /* Set write to end of buffer */
86873 length = (tail + length) - BUF_PAGE_SIZE;
86874- local_sub(length, &tail_page->write);
86875+ local_sub_unchecked(length, &tail_page->write);
86876 }
86877
86878 /*
86879@@ -2287,7 +2287,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
86880 * about it.
86881 */
86882 if (unlikely(next_page == commit_page)) {
86883- local_inc(&cpu_buffer->commit_overrun);
86884+ local_inc_unchecked(&cpu_buffer->commit_overrun);
86885 goto out_reset;
86886 }
86887
86888@@ -2343,7 +2343,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
86889 cpu_buffer->tail_page) &&
86890 (cpu_buffer->commit_page ==
86891 cpu_buffer->reader_page))) {
86892- local_inc(&cpu_buffer->commit_overrun);
86893+ local_inc_unchecked(&cpu_buffer->commit_overrun);
86894 goto out_reset;
86895 }
86896 }
86897@@ -2391,7 +2391,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
86898 length += RB_LEN_TIME_EXTEND;
86899
86900 tail_page = cpu_buffer->tail_page;
86901- write = local_add_return(length, &tail_page->write);
86902+ write = local_add_return_unchecked(length, &tail_page->write);
86903
86904 /* set write to only the index of the write */
86905 write &= RB_WRITE_MASK;
86906@@ -2408,7 +2408,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
86907 kmemcheck_annotate_bitfield(event, bitfield);
86908 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
86909
86910- local_inc(&tail_page->entries);
86911+ local_inc_unchecked(&tail_page->entries);
86912
86913 /*
86914 * If this is the first commit on the page, then update
86915@@ -2441,7 +2441,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
86916
86917 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
86918 unsigned long write_mask =
86919- local_read(&bpage->write) & ~RB_WRITE_MASK;
86920+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
86921 unsigned long event_length = rb_event_length(event);
86922 /*
86923 * This is on the tail page. It is possible that
86924@@ -2451,7 +2451,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
86925 */
86926 old_index += write_mask;
86927 new_index += write_mask;
86928- index = local_cmpxchg(&bpage->write, old_index, new_index);
86929+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
86930 if (index == old_index) {
86931 /* update counters */
86932 local_sub(event_length, &cpu_buffer->entries_bytes);
86933@@ -2843,7 +2843,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
86934
86935 /* Do the likely case first */
86936 if (likely(bpage->page == (void *)addr)) {
86937- local_dec(&bpage->entries);
86938+ local_dec_unchecked(&bpage->entries);
86939 return;
86940 }
86941
86942@@ -2855,7 +2855,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
86943 start = bpage;
86944 do {
86945 if (bpage->page == (void *)addr) {
86946- local_dec(&bpage->entries);
86947+ local_dec_unchecked(&bpage->entries);
86948 return;
86949 }
86950 rb_inc_page(cpu_buffer, &bpage);
86951@@ -3139,7 +3139,7 @@ static inline unsigned long
86952 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
86953 {
86954 return local_read(&cpu_buffer->entries) -
86955- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
86956+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
86957 }
86958
86959 /**
86960@@ -3228,7 +3228,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
86961 return 0;
86962
86963 cpu_buffer = buffer->buffers[cpu];
86964- ret = local_read(&cpu_buffer->overrun);
86965+ ret = local_read_unchecked(&cpu_buffer->overrun);
86966
86967 return ret;
86968 }
86969@@ -3251,7 +3251,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
86970 return 0;
86971
86972 cpu_buffer = buffer->buffers[cpu];
86973- ret = local_read(&cpu_buffer->commit_overrun);
86974+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
86975
86976 return ret;
86977 }
86978@@ -3336,7 +3336,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
86979 /* if you care about this being correct, lock the buffer */
86980 for_each_buffer_cpu(buffer, cpu) {
86981 cpu_buffer = buffer->buffers[cpu];
86982- overruns += local_read(&cpu_buffer->overrun);
86983+ overruns += local_read_unchecked(&cpu_buffer->overrun);
86984 }
86985
86986 return overruns;
86987@@ -3512,8 +3512,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
86988 /*
86989 * Reset the reader page to size zero.
86990 */
86991- local_set(&cpu_buffer->reader_page->write, 0);
86992- local_set(&cpu_buffer->reader_page->entries, 0);
86993+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
86994+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
86995 local_set(&cpu_buffer->reader_page->page->commit, 0);
86996 cpu_buffer->reader_page->real_end = 0;
86997
86998@@ -3547,7 +3547,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
86999 * want to compare with the last_overrun.
87000 */
87001 smp_mb();
87002- overwrite = local_read(&(cpu_buffer->overrun));
87003+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
87004
87005 /*
87006 * Here's the tricky part.
87007@@ -4117,8 +4117,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
87008
87009 cpu_buffer->head_page
87010 = list_entry(cpu_buffer->pages, struct buffer_page, list);
87011- local_set(&cpu_buffer->head_page->write, 0);
87012- local_set(&cpu_buffer->head_page->entries, 0);
87013+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
87014+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
87015 local_set(&cpu_buffer->head_page->page->commit, 0);
87016
87017 cpu_buffer->head_page->read = 0;
87018@@ -4128,14 +4128,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
87019
87020 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
87021 INIT_LIST_HEAD(&cpu_buffer->new_pages);
87022- local_set(&cpu_buffer->reader_page->write, 0);
87023- local_set(&cpu_buffer->reader_page->entries, 0);
87024+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
87025+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
87026 local_set(&cpu_buffer->reader_page->page->commit, 0);
87027 cpu_buffer->reader_page->read = 0;
87028
87029 local_set(&cpu_buffer->entries_bytes, 0);
87030- local_set(&cpu_buffer->overrun, 0);
87031- local_set(&cpu_buffer->commit_overrun, 0);
87032+ local_set_unchecked(&cpu_buffer->overrun, 0);
87033+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
87034 local_set(&cpu_buffer->dropped_events, 0);
87035 local_set(&cpu_buffer->entries, 0);
87036 local_set(&cpu_buffer->committing, 0);
87037@@ -4540,8 +4540,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
87038 rb_init_page(bpage);
87039 bpage = reader->page;
87040 reader->page = *data_page;
87041- local_set(&reader->write, 0);
87042- local_set(&reader->entries, 0);
87043+ local_set_unchecked(&reader->write, 0);
87044+ local_set_unchecked(&reader->entries, 0);
87045 reader->read = 0;
87046 *data_page = bpage;
87047
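
Most of the ring-buffer counters switched to local_unchecked_t wrap by design: the write field packs a 20-bit page index in its low bits and an interrupted-writer count above them (RB_WRITE_MASK / RB_WRITE_INTCNT), so its arithmetic must be free to overflow without tripping REFCOUNT. A sketch of that packing, mirroring the kernel's 20-bit split:

    #include <stdio.h>

    #define RB_WRITE_MASK   0xfffffUL        /* low 20 bits: byte index in the page */
    #define RB_WRITE_INTCNT (1UL << 20)      /* increments land in the high bits */

    int main(void)
    {
        unsigned long write = 0;

        write += 100;              /* a writer reserves 100 bytes */
        write += RB_WRITE_INTCNT;  /* an interrupting writer bumps the counter */
        write += 28;               /* ...and reserves its own space */

        printf("index = %lu, interrupted writers = %lu\n",
               write & RB_WRITE_MASK, write >> 20);
        return 0;
    }
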
87048diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
87049index b778e96..4e84621 100644
87050--- a/kernel/trace/trace.c
87051+++ b/kernel/trace/trace.c
87052@@ -3335,7 +3335,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
87053 return 0;
87054 }
87055
87056-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
87057+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
87058 {
87059 /* do nothing if flag is already set */
87060 if (!!(trace_flags & mask) == !!enabled)
87061diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
87062index 10c86fb..645ab0a 100644
87063--- a/kernel/trace/trace.h
87064+++ b/kernel/trace/trace.h
87065@@ -1029,7 +1029,7 @@ extern const char *__stop___tracepoint_str[];
87066 void trace_printk_init_buffers(void);
87067 void trace_printk_start_comm(void);
87068 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
87069-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
87070+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
87071
87072 /*
87073 * Normal trace_printk() and friends allocates special buffers
87074diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
87075index 26dc348..8708ca7 100644
87076--- a/kernel/trace/trace_clock.c
87077+++ b/kernel/trace/trace_clock.c
87078@@ -123,7 +123,7 @@ u64 notrace trace_clock_global(void)
87079 return now;
87080 }
87081
87082-static atomic64_t trace_counter;
87083+static atomic64_unchecked_t trace_counter;
87084
87085 /*
87086 * trace_clock_counter(): simply an atomic counter.
87087@@ -132,5 +132,5 @@ static atomic64_t trace_counter;
87088 */
87089 u64 notrace trace_clock_counter(void)
87090 {
87091- return atomic64_add_return(1, &trace_counter);
87092+ return atomic64_inc_return_unchecked(&trace_counter);
87093 }
87094diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
87095index 368a4d5..7b6d1d5 100644
87096--- a/kernel/trace/trace_events.c
87097+++ b/kernel/trace/trace_events.c
87098@@ -1673,7 +1673,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
87099 return 0;
87100 }
87101
87102-struct ftrace_module_file_ops;
87103 static void __add_event_to_tracers(struct ftrace_event_call *call);
87104
87105 /* Add an additional event_call dynamically */
87106diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
87107index b3dcfb2..ebee344 100644
87108--- a/kernel/trace/trace_mmiotrace.c
87109+++ b/kernel/trace/trace_mmiotrace.c
87110@@ -24,7 +24,7 @@ struct header_iter {
87111 static struct trace_array *mmio_trace_array;
87112 static bool overrun_detected;
87113 static unsigned long prev_overruns;
87114-static atomic_t dropped_count;
87115+static atomic_unchecked_t dropped_count;
87116
87117 static void mmio_reset_data(struct trace_array *tr)
87118 {
87119@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
87120
87121 static unsigned long count_overruns(struct trace_iterator *iter)
87122 {
87123- unsigned long cnt = atomic_xchg(&dropped_count, 0);
87124+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
87125 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
87126
87127 if (over > prev_overruns)
87128@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
87129 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
87130 sizeof(*entry), 0, pc);
87131 if (!event) {
87132- atomic_inc(&dropped_count);
87133+ atomic_inc_unchecked(&dropped_count);
87134 return;
87135 }
87136 entry = ring_buffer_event_data(event);
87137@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
87138 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
87139 sizeof(*entry), 0, pc);
87140 if (!event) {
87141- atomic_inc(&dropped_count);
87142+ atomic_inc_unchecked(&dropped_count);
87143 return;
87144 }
87145 entry = ring_buffer_event_data(event);
87146diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
87147index 34e7cba..6f9a729 100644
87148--- a/kernel/trace/trace_output.c
87149+++ b/kernel/trace/trace_output.c
87150@@ -294,7 +294,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
87151
87152 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
87153 if (!IS_ERR(p)) {
87154- p = mangle_path(s->buffer + s->len, p, "\n");
87155+ p = mangle_path(s->buffer + s->len, p, "\n\\");
87156 if (p) {
87157 s->len = p - s->buffer;
87158 return 1;
87159@@ -893,14 +893,16 @@ int register_ftrace_event(struct trace_event *event)
87160 goto out;
87161 }
87162
87163+ pax_open_kernel();
87164 if (event->funcs->trace == NULL)
87165- event->funcs->trace = trace_nop_print;
87166+ *(void **)&event->funcs->trace = trace_nop_print;
87167 if (event->funcs->raw == NULL)
87168- event->funcs->raw = trace_nop_print;
87169+ *(void **)&event->funcs->raw = trace_nop_print;
87170 if (event->funcs->hex == NULL)
87171- event->funcs->hex = trace_nop_print;
87172+ *(void **)&event->funcs->hex = trace_nop_print;
87173 if (event->funcs->binary == NULL)
87174- event->funcs->binary = trace_nop_print;
87175+ *(void **)&event->funcs->binary = trace_nop_print;
87176+ pax_close_kernel();
87177
87178 key = event->type & (EVENT_HASHSIZE - 1);
87179
87180diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
87181index b20428c..4845a10 100644
87182--- a/kernel/trace/trace_stack.c
87183+++ b/kernel/trace/trace_stack.c
87184@@ -68,7 +68,7 @@ check_stack(unsigned long ip, unsigned long *stack)
87185 return;
87186
87187 /* we do not handle interrupt stacks yet */
87188- if (!object_is_on_stack(stack))
87189+ if (!object_starts_on_stack(stack))
87190 return;
87191
87192 local_irq_save(flags);
87193diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
87194index 13fb113..a6ced3f 100644
87195--- a/kernel/user_namespace.c
87196+++ b/kernel/user_namespace.c
87197@@ -82,6 +82,21 @@ int create_user_ns(struct cred *new)
87198 !kgid_has_mapping(parent_ns, group))
87199 return -EPERM;
87200
87201+#ifdef CONFIG_GRKERNSEC
87202+ /*
87203+ * This doesn't really inspire confidence:
87204+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
87205+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
87206+ * Increases kernel attack surface in areas developers
87207+ * previously cared little about ("low importance due
87208+ * to requiring "root" capability")
87209+ * To be removed when this code receives *proper* review
87210+ */
87211+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
87212+ !capable(CAP_SETGID))
87213+ return -EPERM;
87214+#endif
87215+
87216 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
87217 if (!ns)
87218 return -ENOMEM;
87219@@ -860,7 +875,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
87220 if (atomic_read(&current->mm->mm_users) > 1)
87221 return -EINVAL;
87222
87223- if (current->fs->users != 1)
87224+ if (atomic_read(&current->fs->users) != 1)
87225 return -EINVAL;
87226
87227 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
87228diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
87229index 4f69f9a..7c6f8f8 100644
87230--- a/kernel/utsname_sysctl.c
87231+++ b/kernel/utsname_sysctl.c
87232@@ -47,7 +47,7 @@ static void put_uts(ctl_table *table, int write, void *which)
87233 static int proc_do_uts_string(ctl_table *table, int write,
87234 void __user *buffer, size_t *lenp, loff_t *ppos)
87235 {
87236- struct ctl_table uts_table;
87237+ ctl_table_no_const uts_table;
87238 int r;
87239 memcpy(&uts_table, table, sizeof(uts_table));
87240 uts_table.data = get_uts(table, write);
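ctl_table_no_const recurs wherever a sysctl handler builds a scratch copy of its table on the stack (the hugetlb handlers below do the same). Assuming the constify plugin const-qualifies struct ctl_table instances, the typedef is presumably a layout-identical type exempted from that treatment, along these lines:

    /* Assumed form; the real definition lives in the patched sysctl.h.
     * __no_const exempts the type from the constify plugin. */
    typedef struct ctl_table __no_const ctl_table_no_const;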
87241diff --git a/kernel/watchdog.c b/kernel/watchdog.c
87242index 4431610..4265616 100644
87243--- a/kernel/watchdog.c
87244+++ b/kernel/watchdog.c
87245@@ -475,7 +475,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
87246 static void watchdog_nmi_disable(unsigned int cpu) { return; }
87247 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
87248
87249-static struct smp_hotplug_thread watchdog_threads = {
87250+static struct smp_hotplug_thread watchdog_threads __read_only = {
87251 .store = &softlockup_watchdog,
87252 .thread_should_run = watchdog_should_run,
87253 .thread_fn = watchdog,
87254diff --git a/kernel/workqueue.c b/kernel/workqueue.c
87255index 93c2652..66a1cfd 100644
87256--- a/kernel/workqueue.c
87257+++ b/kernel/workqueue.c
87258@@ -4668,7 +4668,7 @@ static void rebind_workers(struct worker_pool *pool)
87259 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
87260 worker_flags |= WORKER_REBOUND;
87261 worker_flags &= ~WORKER_UNBOUND;
87262- ACCESS_ONCE(worker->flags) = worker_flags;
87263+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
87264 }
87265
87266 spin_unlock_irq(&pool->lock);
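ACCESS_ONCE_RW marks the handful of sites that legitimately write through an ACCESS_ONCE-style lvalue once constification is in play. Assuming PaX const-qualifies the plain macro so that accidental writes fail to compile, the pair would read:

    /* Assumed definitions: the const qualifier on ACCESS_ONCE makes a
     * write through it a compile error; the _RW variant permits it. */
    #define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))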
87267diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
87268index 094f315..244a824 100644
87269--- a/lib/Kconfig.debug
87270+++ b/lib/Kconfig.debug
87271@@ -836,7 +836,7 @@ config DEBUG_MUTEXES
87272
87273 config DEBUG_WW_MUTEX_SLOWPATH
87274 bool "Wait/wound mutex debugging: Slowpath testing"
87275- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
87276+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
87277 select DEBUG_LOCK_ALLOC
87278 select DEBUG_SPINLOCK
87279 select DEBUG_MUTEXES
87280@@ -849,7 +849,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
87281
87282 config DEBUG_LOCK_ALLOC
87283 bool "Lock debugging: detect incorrect freeing of live locks"
87284- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
87285+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
87286 select DEBUG_SPINLOCK
87287 select DEBUG_MUTEXES
87288 select LOCKDEP
87289@@ -863,7 +863,7 @@ config DEBUG_LOCK_ALLOC
87290
87291 config PROVE_LOCKING
87292 bool "Lock debugging: prove locking correctness"
87293- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
87294+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
87295 select LOCKDEP
87296 select DEBUG_SPINLOCK
87297 select DEBUG_MUTEXES
87298@@ -914,7 +914,7 @@ config LOCKDEP
87299
87300 config LOCK_STAT
87301 bool "Lock usage statistics"
87302- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
87303+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
87304 select LOCKDEP
87305 select DEBUG_SPINLOCK
87306 select DEBUG_MUTEXES
87307@@ -1376,6 +1376,7 @@ config LATENCYTOP
87308 depends on DEBUG_KERNEL
87309 depends on STACKTRACE_SUPPORT
87310 depends on PROC_FS
87311+ depends on !GRKERNSEC_HIDESYM
87312 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
87313 select KALLSYMS
87314 select KALLSYMS_ALL
87315@@ -1392,7 +1393,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
87316 config DEBUG_STRICT_USER_COPY_CHECKS
87317 bool "Strict user copy size checks"
87318 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
87319- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
87320+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
87321 help
87322 Enabling this option turns a certain set of sanity checks for user
87323 copy operations into compile time failures.
87324@@ -1502,7 +1503,7 @@ endmenu # runtime tests
87325
87326 config PROVIDE_OHCI1394_DMA_INIT
87327 bool "Remote debugging over FireWire early on boot"
87328- depends on PCI && X86
87329+ depends on PCI && X86 && !GRKERNSEC
87330 help
87331 If you want to debug problems which hang or crash the kernel early
87332 on boot and the crashing machine has a FireWire port, you can use
87333@@ -1531,7 +1532,7 @@ config PROVIDE_OHCI1394_DMA_INIT
87334
87335 config FIREWIRE_OHCI_REMOTE_DMA
87336 bool "Remote debugging over FireWire with firewire-ohci"
87337- depends on FIREWIRE_OHCI
87338+ depends on FIREWIRE_OHCI && !GRKERNSEC
87339 help
87340 This option lets you use the FireWire bus for remote debugging
87341 with help of the firewire-ohci driver. It enables unfiltered
87342diff --git a/lib/Makefile b/lib/Makefile
87343index f3bb2cb..b358ace 100644
87344--- a/lib/Makefile
87345+++ b/lib/Makefile
87346@@ -52,7 +52,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
87347
87348 obj-$(CONFIG_BTREE) += btree.o
87349 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
87350-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
87351+obj-y += list_debug.o
87352 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
87353
87354 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
87355diff --git a/lib/bitmap.c b/lib/bitmap.c
87356index 06f7e4f..f3cf2b0 100644
87357--- a/lib/bitmap.c
87358+++ b/lib/bitmap.c
87359@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
87360 {
87361 int c, old_c, totaldigits, ndigits, nchunks, nbits;
87362 u32 chunk;
87363- const char __user __force *ubuf = (const char __user __force *)buf;
87364+ const char __user *ubuf = (const char __force_user *)buf;
87365
87366 bitmap_zero(maskp, nmaskbits);
87367
87368@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
87369 {
87370 if (!access_ok(VERIFY_READ, ubuf, ulen))
87371 return -EFAULT;
87372- return __bitmap_parse((const char __force *)ubuf,
87373+ return __bitmap_parse((const char __force_kernel *)ubuf,
87374 ulen, 1, maskp, nmaskbits);
87375
87376 }
87377@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
87378 {
87379 unsigned a, b;
87380 int c, old_c, totaldigits;
87381- const char __user __force *ubuf = (const char __user __force *)buf;
87382+ const char __user *ubuf = (const char __force_user *)buf;
87383 int exp_digit, in_range;
87384
87385 totaldigits = c = 0;
87386@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
87387 {
87388 if (!access_ok(VERIFY_READ, ubuf, ulen))
87389 return -EFAULT;
87390- return __bitmap_parselist((const char __force *)ubuf,
87391+ return __bitmap_parselist((const char __force_kernel *)ubuf,
87392 ulen, 1, maskp, nmaskbits);
87393 }
87394 EXPORT_SYMBOL(bitmap_parselist_user);
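__force_user and __force_kernel, used in all four bitmap hunks and again in mm/maccess.c below, fold sparse's __force together with the target address space, so each deliberate cross-space cast is one greppable token instead of two stacked attributes. Assumed definitions, visible only to the checker:

    #ifdef __CHECKER__
    # define __force_user   __force __user
    # define __force_kernel __force __kernel
    #else
    # define __force_user
    # define __force_kernel
    #endif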
87395diff --git a/lib/bug.c b/lib/bug.c
87396index 1686034..a9c00c8 100644
87397--- a/lib/bug.c
87398+++ b/lib/bug.c
87399@@ -134,6 +134,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
87400 return BUG_TRAP_TYPE_NONE;
87401
87402 bug = find_bug(bugaddr);
87403+ if (!bug)
87404+ return BUG_TRAP_TYPE_NONE;
87405
87406 file = NULL;
87407 line = 0;
87408diff --git a/lib/debugobjects.c b/lib/debugobjects.c
87409index bf2c8b1..1d00ccf 100644
87410--- a/lib/debugobjects.c
87411+++ b/lib/debugobjects.c
87412@@ -286,7 +286,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
87413 if (limit > 4)
87414 return;
87415
87416- is_on_stack = object_is_on_stack(addr);
87417+ is_on_stack = object_starts_on_stack(addr);
87418 if (is_on_stack == onstack)
87419 return;
87420
87421diff --git a/lib/devres.c b/lib/devres.c
87422index 8235331..5881053 100644
87423--- a/lib/devres.c
87424+++ b/lib/devres.c
87425@@ -81,7 +81,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
87426 void devm_iounmap(struct device *dev, void __iomem *addr)
87427 {
87428 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
87429- (void *)addr));
87430+ (void __force *)addr));
87431 iounmap(addr);
87432 }
87433 EXPORT_SYMBOL(devm_iounmap);
87434@@ -224,7 +224,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
87435 {
87436 ioport_unmap(addr);
87437 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
87438- devm_ioport_map_match, (void *)addr));
87439+ devm_ioport_map_match, (void __force *)addr));
87440 }
87441 EXPORT_SYMBOL(devm_ioport_unmap);
87442 #endif /* CONFIG_HAS_IOPORT */
87443diff --git a/lib/div64.c b/lib/div64.c
87444index 4382ad7..08aa558 100644
87445--- a/lib/div64.c
87446+++ b/lib/div64.c
87447@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
87448 EXPORT_SYMBOL(__div64_32);
87449
87450 #ifndef div_s64_rem
87451-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
87452+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
87453 {
87454 u64 quotient;
87455
87456@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
87457 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
87458 */
87459 #ifndef div64_u64
87460-u64 div64_u64(u64 dividend, u64 divisor)
87461+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
87462 {
87463 u32 high = divisor >> 32;
87464 u64 quot;
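__intentional_overflow(-1) flags both division helpers for the size_overflow plugin; the -1 argument is read here as exempting the whole function, return value included, from overflow instrumentation, since reciprocal-based 64-bit division legitimately wraps its intermediate products. Illustrative declaration only:

    /* Marker consumed by the size_overflow gcc plugin (assumed semantics). */
    u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);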
87465diff --git a/lib/dma-debug.c b/lib/dma-debug.c
87466index d87a17a..ac0d79a 100644
87467--- a/lib/dma-debug.c
87468+++ b/lib/dma-debug.c
87469@@ -768,7 +768,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
87470
87471 void dma_debug_add_bus(struct bus_type *bus)
87472 {
87473- struct notifier_block *nb;
87474+ notifier_block_no_const *nb;
87475
87476 if (global_disable)
87477 return;
87478@@ -945,7 +945,7 @@ static void check_unmap(struct dma_debug_entry *ref)
87479
87480 static void check_for_stack(struct device *dev, void *addr)
87481 {
87482- if (object_is_on_stack(addr))
87483+ if (object_starts_on_stack(addr))
87484 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
87485 "stack [addr=%p]\n", addr);
87486 }
87487diff --git a/lib/inflate.c b/lib/inflate.c
87488index 013a761..c28f3fc 100644
87489--- a/lib/inflate.c
87490+++ b/lib/inflate.c
87491@@ -269,7 +269,7 @@ static void free(void *where)
87492 malloc_ptr = free_mem_ptr;
87493 }
87494 #else
87495-#define malloc(a) kmalloc(a, GFP_KERNEL)
87496+#define malloc(a) kmalloc((a), GFP_KERNEL)
87497 #define free(a) kfree(a)
87498 #endif
87499
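Parenthesising the kmalloc() argument changes nothing in a function-call position, which already delimits the expansion; it is defensive macro hygiene for the day the expansion grows operators around the parameter. A minimal demonstration of the failure mode it guards against:

    #include <stdio.h>

    #define DOUBLE_BAD(a)  a * 2        /* binds only to the trailing term */
    #define DOUBLE_GOOD(a) ((a) * 2)    /* parenthesised, as intended      */

    int main(void)
    {
        printf("%d %d\n", DOUBLE_BAD(1 + 2), DOUBLE_GOOD(1 + 2)); /* 5 6 */
        return 0;
    }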
87500diff --git a/lib/ioremap.c b/lib/ioremap.c
87501index 0c9216c..863bd89 100644
87502--- a/lib/ioremap.c
87503+++ b/lib/ioremap.c
87504@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
87505 unsigned long next;
87506
87507 phys_addr -= addr;
87508- pmd = pmd_alloc(&init_mm, pud, addr);
87509+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
87510 if (!pmd)
87511 return -ENOMEM;
87512 do {
87513@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
87514 unsigned long next;
87515
87516 phys_addr -= addr;
87517- pud = pud_alloc(&init_mm, pgd, addr);
87518+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
87519 if (!pud)
87520 return -ENOMEM;
87521 do {
87522diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
87523index bd2bea9..6b3c95e 100644
87524--- a/lib/is_single_threaded.c
87525+++ b/lib/is_single_threaded.c
87526@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
87527 struct task_struct *p, *t;
87528 bool ret;
87529
87530+ if (!mm)
87531+ return true;
87532+
87533 if (atomic_read(&task->signal->live) != 1)
87534 return false;
87535
87536diff --git a/lib/kobject.c b/lib/kobject.c
87537index 084f7b1..d265b8a 100644
87538--- a/lib/kobject.c
87539+++ b/lib/kobject.c
87540@@ -875,9 +875,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
87541
87542
87543 static DEFINE_SPINLOCK(kobj_ns_type_lock);
87544-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
87545+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
87546
87547-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
87548+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
87549 {
87550 enum kobj_ns_type type = ops->type;
87551 int error;
87552diff --git a/lib/list_debug.c b/lib/list_debug.c
87553index c24c2f7..f0296f4 100644
87554--- a/lib/list_debug.c
87555+++ b/lib/list_debug.c
87556@@ -11,7 +11,9 @@
87557 #include <linux/bug.h>
87558 #include <linux/kernel.h>
87559 #include <linux/rculist.h>
87560+#include <linux/mm.h>
87561
87562+#ifdef CONFIG_DEBUG_LIST
87563 /*
87564 * Insert a new entry between two known consecutive entries.
87565 *
87566@@ -19,21 +21,40 @@
87567 * the prev/next entries already!
87568 */
87569
87570+static bool __list_add_debug(struct list_head *new,
87571+ struct list_head *prev,
87572+ struct list_head *next)
87573+{
87574+ if (unlikely(next->prev != prev)) {
87575+ printk(KERN_ERR "list_add corruption. next->prev should be "
87576+ "prev (%p), but was %p. (next=%p).\n",
87577+ prev, next->prev, next);
87578+ BUG();
87579+ return false;
87580+ }
87581+ if (unlikely(prev->next != next)) {
87582+ printk(KERN_ERR "list_add corruption. prev->next should be "
87583+ "next (%p), but was %p. (prev=%p).\n",
87584+ next, prev->next, prev);
87585+ BUG();
87586+ return false;
87587+ }
87588+ if (unlikely(new == prev || new == next)) {
87589+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
87590+ new, prev, next);
87591+ BUG();
87592+ return false;
87593+ }
87594+ return true;
87595+}
87596+
87597 void __list_add(struct list_head *new,
87598- struct list_head *prev,
87599- struct list_head *next)
87600+ struct list_head *prev,
87601+ struct list_head *next)
87602 {
87603- WARN(next->prev != prev,
87604- "list_add corruption. next->prev should be "
87605- "prev (%p), but was %p. (next=%p).\n",
87606- prev, next->prev, next);
87607- WARN(prev->next != next,
87608- "list_add corruption. prev->next should be "
87609- "next (%p), but was %p. (prev=%p).\n",
87610- next, prev->next, prev);
87611- WARN(new == prev || new == next,
87612- "list_add double add: new=%p, prev=%p, next=%p.\n",
87613- new, prev, next);
87614+ if (!__list_add_debug(new, prev, next))
87615+ return;
87616+
87617 next->prev = new;
87618 new->next = next;
87619 new->prev = prev;
87620@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
87621 }
87622 EXPORT_SYMBOL(__list_add);
87623
87624-void __list_del_entry(struct list_head *entry)
87625+static bool __list_del_entry_debug(struct list_head *entry)
87626 {
87627 struct list_head *prev, *next;
87628
87629 prev = entry->prev;
87630 next = entry->next;
87631
87632- if (WARN(next == LIST_POISON1,
87633- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
87634- entry, LIST_POISON1) ||
87635- WARN(prev == LIST_POISON2,
87636- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
87637- entry, LIST_POISON2) ||
87638- WARN(prev->next != entry,
87639- "list_del corruption. prev->next should be %p, "
87640- "but was %p\n", entry, prev->next) ||
87641- WARN(next->prev != entry,
87642- "list_del corruption. next->prev should be %p, "
87643- "but was %p\n", entry, next->prev))
87644+ if (unlikely(next == LIST_POISON1)) {
87645+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
87646+ entry, LIST_POISON1);
87647+ BUG();
87648+ return false;
87649+ }
87650+ if (unlikely(prev == LIST_POISON2)) {
87651+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
87652+ entry, LIST_POISON2);
87653+ BUG();
87654+ return false;
87655+ }
87656+ if (unlikely(entry->prev->next != entry)) {
87657+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
87658+ "but was %p\n", entry, prev->next);
87659+ BUG();
87660+ return false;
87661+ }
87662+ if (unlikely(entry->next->prev != entry)) {
87663+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
87664+ "but was %p\n", entry, next->prev);
87665+ BUG();
87666+ return false;
87667+ }
87668+ return true;
87669+}
87670+
87671+void __list_del_entry(struct list_head *entry)
87672+{
87673+ if (!__list_del_entry_debug(entry))
87674 return;
87675
87676- __list_del(prev, next);
87677+ __list_del(entry->prev, entry->next);
87678 }
87679 EXPORT_SYMBOL(__list_del_entry);
87680
87681@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
87682 void __list_add_rcu(struct list_head *new,
87683 struct list_head *prev, struct list_head *next)
87684 {
87685- WARN(next->prev != prev,
87686- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
87687- prev, next->prev, next);
87688- WARN(prev->next != next,
87689- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
87690- next, prev->next, prev);
87691+ if (!__list_add_debug(new, prev, next))
87692+ return;
87693+
87694 new->next = next;
87695 new->prev = prev;
87696 rcu_assign_pointer(list_next_rcu(prev), new);
87697 next->prev = new;
87698 }
87699 EXPORT_SYMBOL(__list_add_rcu);
87700+#endif
87701+
87702+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
87703+{
87704+#ifdef CONFIG_DEBUG_LIST
87705+ if (!__list_add_debug(new, prev, next))
87706+ return;
87707+#endif
87708+
87709+ pax_open_kernel();
87710+ next->prev = new;
87711+ new->next = next;
87712+ new->prev = prev;
87713+ prev->next = new;
87714+ pax_close_kernel();
87715+}
87716+EXPORT_SYMBOL(__pax_list_add);
87717+
87718+void pax_list_del(struct list_head *entry)
87719+{
87720+#ifdef CONFIG_DEBUG_LIST
87721+ if (!__list_del_entry_debug(entry))
87722+ return;
87723+#endif
87724+
87725+ pax_open_kernel();
87726+ __list_del(entry->prev, entry->next);
87727+ entry->next = LIST_POISON1;
87728+ entry->prev = LIST_POISON2;
87729+ pax_close_kernel();
87730+}
87731+EXPORT_SYMBOL(pax_list_del);
87732+
87733+void pax_list_del_init(struct list_head *entry)
87734+{
87735+ pax_open_kernel();
87736+ __list_del(entry->prev, entry->next);
87737+ INIT_LIST_HEAD(entry);
87738+ pax_close_kernel();
87739+}
87740+EXPORT_SYMBOL(pax_list_del_init);
87741+
87742+void __pax_list_add_rcu(struct list_head *new,
87743+ struct list_head *prev, struct list_head *next)
87744+{
87745+#ifdef CONFIG_DEBUG_LIST
87746+ if (!__list_add_debug(new, prev, next))
87747+ return;
87748+#endif
87749+
87750+ pax_open_kernel();
87751+ new->next = next;
87752+ new->prev = prev;
87753+ rcu_assign_pointer(list_next_rcu(prev), new);
87754+ next->prev = new;
87755+ pax_close_kernel();
87756+}
87757+EXPORT_SYMBOL(__pax_list_add_rcu);
87758+
87759+void pax_list_del_rcu(struct list_head *entry)
87760+{
87761+#ifdef CONFIG_DEBUG_LIST
87762+ if (!__list_del_entry_debug(entry))
87763+ return;
87764+#endif
87765+
87766+ pax_open_kernel();
87767+ __list_del(entry->prev, entry->next);
87768+ entry->next = LIST_POISON1;
87769+ entry->prev = LIST_POISON2;
87770+ pax_close_kernel();
87771+}
87772+EXPORT_SYMBOL(pax_list_del_rcu);
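The pax_list_* family added above gives callers list_add()/list_del()-equivalent operations for nodes that sit in pages the constify plugin keeps read-only: the same corruption checks when CONFIG_DEBUG_LIST is set, with every pointer store bracketed by pax_open_kernel()/pax_close_kernel(). A usage sketch for a hypothetical caller, mirroring the list_add() expansion:

    /* Hypothetical helper: head insertion into a write-protected list.
     * list_add(new, head) expands the same way, via
     * __list_add(new, head, head->next). */
    static void attach_readonly(struct list_head *new, struct list_head *head)
    {
        __pax_list_add(new, head, head->next);
    }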
87773diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
87774index 1a53d49..ace934c 100644
87775--- a/lib/percpu-refcount.c
87776+++ b/lib/percpu-refcount.c
87777@@ -29,7 +29,7 @@
87778 * can't hit 0 before we've added up all the percpu refs.
87779 */
87780
87781-#define PCPU_COUNT_BIAS (1U << 31)
87782+#define PCPU_COUNT_BIAS (1U << 30)
87783
87784 /**
87785 * percpu_ref_init - initialize a percpu refcount
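Halving PCPU_COUNT_BIAS looks like a REFCOUNT accommodation: 1U << 31 reinterpreted as a signed 32-bit count is already negative, i.e. it reads as overflowed before a single real reference exists, while 1U << 30 keeps the bias safely positive. On a two's-complement target:

    #include <stdio.h>

    int main(void)
    {
        printf("%d\n", (int)(1U << 31));   /* -2147483648: looks overflowed */
        printf("%d\n", (int)(1U << 30));   /*  1073741824: safely positive  */
        return 0;
    }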
87786diff --git a/lib/radix-tree.c b/lib/radix-tree.c
87787index 7811ed3..f80ca19 100644
87788--- a/lib/radix-tree.c
87789+++ b/lib/radix-tree.c
87790@@ -93,7 +93,7 @@ struct radix_tree_preload {
87791 int nr;
87792 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
87793 };
87794-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
87795+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
87796
87797 static inline void *ptr_to_indirect(void *ptr)
87798 {
87799diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
87800index bb2b201..46abaf9 100644
87801--- a/lib/strncpy_from_user.c
87802+++ b/lib/strncpy_from_user.c
87803@@ -21,7 +21,7 @@
87804 */
87805 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
87806 {
87807- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
87808+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
87809 long res = 0;
87810
87811 /*
87812diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
87813index a28df52..3d55877 100644
87814--- a/lib/strnlen_user.c
87815+++ b/lib/strnlen_user.c
87816@@ -26,7 +26,7 @@
87817 */
87818 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
87819 {
87820- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
87821+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
87822 long align, res = 0;
87823 unsigned long c;
87824
87825diff --git a/lib/swiotlb.c b/lib/swiotlb.c
87826index 4e8686c..3e8c92f 100644
87827--- a/lib/swiotlb.c
87828+++ b/lib/swiotlb.c
87829@@ -664,7 +664,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
87830
87831 void
87832 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
87833- dma_addr_t dev_addr)
87834+ dma_addr_t dev_addr, struct dma_attrs *attrs)
87835 {
87836 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
87837
87838diff --git a/lib/usercopy.c b/lib/usercopy.c
87839index 4f5b1dd..7cab418 100644
87840--- a/lib/usercopy.c
87841+++ b/lib/usercopy.c
87842@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
87843 WARN(1, "Buffer overflow detected!\n");
87844 }
87845 EXPORT_SYMBOL(copy_from_user_overflow);
87846+
87847+void copy_to_user_overflow(void)
87848+{
87849+ WARN(1, "Buffer overflow detected!\n");
87850+}
87851+EXPORT_SYMBOL(copy_to_user_overflow);
87852diff --git a/lib/vsprintf.c b/lib/vsprintf.c
87853index d76555c..62d4bfe 100644
87854--- a/lib/vsprintf.c
87855+++ b/lib/vsprintf.c
87856@@ -16,6 +16,9 @@
87857 * - scnprintf and vscnprintf
87858 */
87859
87860+#ifdef CONFIG_GRKERNSEC_HIDESYM
87861+#define __INCLUDED_BY_HIDESYM 1
87862+#endif
87863 #include <stdarg.h>
87864 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
87865 #include <linux/types.h>
87866@@ -1155,7 +1158,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
87867 return number(buf, end, *(const netdev_features_t *)addr, spec);
87868 }
87869
87870+#ifdef CONFIG_GRKERNSEC_HIDESYM
87871+int kptr_restrict __read_mostly = 2;
87872+#else
87873 int kptr_restrict __read_mostly;
87874+#endif
87875
87876 /*
87877 * Show a '%p' thing. A kernel extension is that the '%p' is followed
87878@@ -1168,6 +1175,7 @@ int kptr_restrict __read_mostly;
87879 * - 'f' For simple symbolic function names without offset
87880 * - 'S' For symbolic direct pointers with offset
87881 * - 's' For symbolic direct pointers without offset
87882+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
87883 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
87884 * - 'B' For backtraced symbolic direct pointers with offset
87885 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
87886@@ -1232,12 +1240,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
87887
87888 if (!ptr && *fmt != 'K') {
87889 /*
87890- * Print (null) with the same width as a pointer so it makes
87891+ * Print (nil) with the same width as a pointer so it makes
87892 * tabular output look nice.
87893 */
87894 if (spec.field_width == -1)
87895 spec.field_width = default_width;
87896- return string(buf, end, "(null)", spec);
87897+ return string(buf, end, "(nil)", spec);
87898 }
87899
87900 switch (*fmt) {
87901@@ -1247,6 +1255,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
87902 /* Fallthrough */
87903 case 'S':
87904 case 's':
87905+#ifdef CONFIG_GRKERNSEC_HIDESYM
87906+ break;
87907+#else
87908+ return symbol_string(buf, end, ptr, spec, fmt);
87909+#endif
87910+ case 'A':
87911 case 'B':
87912 return symbol_string(buf, end, ptr, spec, fmt);
87913 case 'R':
87914@@ -1302,6 +1316,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
87915 va_end(va);
87916 return buf;
87917 }
87918+ case 'P':
87919+ break;
87920 case 'K':
87921 /*
87922 * %pK cannot be used in IRQ context because its test
87923@@ -1363,6 +1379,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
87924 ((const struct file *)ptr)->f_path.dentry,
87925 spec, fmt);
87926 }
87927+
87928+#ifdef CONFIG_GRKERNSEC_HIDESYM
87929+ /* 'P' = approved pointers to copy to userland,
87930+ as in the /proc/kallsyms case, where we display nothing
87931+ for non-root users and the real contents for root users.
87932+ Also ignore 'K' pointers, since we already force their NULLing
87933+ for non-root users above.
87934+ */
87935+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
87936+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
87937+ dump_stack();
87938+ ptr = NULL;
87939+ }
87940+#endif
87941+
87942 spec.flags |= SMALL;
87943 if (spec.field_width == -1) {
87944 spec.field_width = default_width;
87945@@ -2086,11 +2117,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
87946 typeof(type) value; \
87947 if (sizeof(type) == 8) { \
87948 args = PTR_ALIGN(args, sizeof(u32)); \
87949- *(u32 *)&value = *(u32 *)args; \
87950- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
87951+ *(u32 *)&value = *(const u32 *)args; \
87952+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
87953 } else { \
87954 args = PTR_ALIGN(args, sizeof(type)); \
87955- value = *(typeof(type) *)args; \
87956+ value = *(const typeof(type) *)args; \
87957 } \
87958 args += sizeof(type); \
87959 value; \
87960@@ -2153,7 +2184,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
87961 case FORMAT_TYPE_STR: {
87962 const char *str_arg = args;
87963 args += strlen(str_arg) + 1;
87964- str = string(str, end, (char *)str_arg, spec);
87965+ str = string(str, end, str_arg, spec);
87966 break;
87967 }
87968
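Under GRKERNSEC_HIDESYM the %p table above gains 'A' (symbol printing still allowed at approved sites) and 'P' (raw pointer values approved for userland), while plain %pS/%ps now fall through to the infoleak check at the bottom of pointer(). The kmemleak hunk below is the canonical caller; a kernel-style one-liner, not standalone:

    pr_info("  [<%pP>] %pA\n", ptr, ptr);   /* as print_unreferenced() now does */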
87969diff --git a/localversion-grsec b/localversion-grsec
87970new file mode 100644
87971index 0000000..7cd6065
87972--- /dev/null
87973+++ b/localversion-grsec
87974@@ -0,0 +1 @@
87975+-grsec
87976diff --git a/mm/Kconfig b/mm/Kconfig
87977index 394838f..0e5f816 100644
87978--- a/mm/Kconfig
87979+++ b/mm/Kconfig
87980@@ -317,10 +317,11 @@ config KSM
87981 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
87982
87983 config DEFAULT_MMAP_MIN_ADDR
87984- int "Low address space to protect from user allocation"
87985+ int "Low address space to protect from user allocation"
87986 depends on MMU
87987- default 4096
87988- help
87989+ default 32768 if ALPHA || ARM || PARISC || SPARC32
87990+ default 65536
87991+ help
87992 This is the portion of low virtual memory which should be protected
87993 from userspace allocation. Keeping a user from writing to low pages
87994 can help reduce the impact of kernel NULL pointer bugs.
87995@@ -351,7 +352,7 @@ config MEMORY_FAILURE
87996
87997 config HWPOISON_INJECT
87998 tristate "HWPoison pages injector"
87999- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
88000+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
88001 select PROC_PAGE_MONITOR
88002
88003 config NOMMU_INITIAL_TRIM_EXCESS
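Raising DEFAULT_MMAP_MIN_ADDR from one page to 64KiB (32KiB on the listed architectures) widens the guard zone that turns kernel NULL-pointer dereferences into clean faults instead of reads from attacker-mapped memory. The setting is observable from userland; a fixed low mapping should be refused (errno depends on the LSM, commonly EPERM or EACCES):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        void *p = mmap((void *)4096, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
        if (p == MAP_FAILED)
            perror("mmap below mmap_min_addr");   /* expected on this kernel */
        else
            munmap(p, 4096);
        return 0;
    }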
88004diff --git a/mm/backing-dev.c b/mm/backing-dev.c
88005index ce682f7..1fb54f9 100644
88006--- a/mm/backing-dev.c
88007+++ b/mm/backing-dev.c
88008@@ -12,7 +12,7 @@
88009 #include <linux/device.h>
88010 #include <trace/events/writeback.h>
88011
88012-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
88013+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
88014
88015 struct backing_dev_info default_backing_dev_info = {
88016 .name = "default",
88017@@ -525,7 +525,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
88018 return err;
88019
88020 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
88021- atomic_long_inc_return(&bdi_seq));
88022+ atomic_long_inc_return_unchecked(&bdi_seq));
88023 if (err) {
88024 bdi_destroy(bdi);
88025 return err;
88026diff --git a/mm/filemap.c b/mm/filemap.c
88027index ae4846f..b0acebe 100644
88028--- a/mm/filemap.c
88029+++ b/mm/filemap.c
88030@@ -1768,7 +1768,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
88031 struct address_space *mapping = file->f_mapping;
88032
88033 if (!mapping->a_ops->readpage)
88034- return -ENOEXEC;
88035+ return -ENODEV;
88036 file_accessed(file);
88037 vma->vm_ops = &generic_file_vm_ops;
88038 return 0;
88039@@ -1950,7 +1950,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
88040
88041 while (bytes) {
88042 char __user *buf = iov->iov_base + base;
88043- int copy = min(bytes, iov->iov_len - base);
88044+ size_t copy = min(bytes, iov->iov_len - base);
88045
88046 base = 0;
88047 left = __copy_from_user_inatomic(vaddr, buf, copy);
88048@@ -1979,7 +1979,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
88049 BUG_ON(!in_atomic());
88050 kaddr = kmap_atomic(page);
88051 if (likely(i->nr_segs == 1)) {
88052- int left;
88053+ size_t left;
88054 char __user *buf = i->iov->iov_base + i->iov_offset;
88055 left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
88056 copied = bytes - left;
88057@@ -2007,7 +2007,7 @@ size_t iov_iter_copy_from_user(struct page *page,
88058
88059 kaddr = kmap(page);
88060 if (likely(i->nr_segs == 1)) {
88061- int left;
88062+ size_t left;
88063 char __user *buf = i->iov->iov_base + i->iov_offset;
88064 left = __copy_from_user(kaddr + offset, buf, bytes);
88065 copied = bytes - left;
88066@@ -2037,7 +2037,7 @@ void iov_iter_advance(struct iov_iter *i, size_t bytes)
88067 * zero-length segments (without overruning the iovec).
88068 */
88069 while (bytes || unlikely(i->count && !iov->iov_len)) {
88070- int copy;
88071+ size_t copy;
88072
88073 copy = min(bytes, iov->iov_len - base);
88074 BUG_ON(!i->count || i->count < copy);
88075@@ -2108,6 +2108,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
88076 *pos = i_size_read(inode);
88077
88078 if (limit != RLIM_INFINITY) {
88079+ gr_learn_resource(current, RLIMIT_FSIZE, *pos, 0);
88080 if (*pos >= limit) {
88081 send_sig(SIGXFSZ, current, 0);
88082 return -EFBIG;
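The int-to-size_t widenings in the iovec copy paths above close a truncation hazard: a min() of two size_t values stored into an int goes negative once a segment exceeds INT_MAX, and feeding that back into an unsigned length parameter turns it enormous. Demonstrated on an LP64 target:

    #include <stdio.h>

    int main(void)
    {
        size_t len = 3UL << 30;            /* a 3 GiB iovec segment     */
        int copy = (int)len;               /* the old, narrow variable  */
        printf("%d -> %lu\n", copy,
               (unsigned long)copy);       /* negative, then enormous   */
        return 0;
    }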
88083diff --git a/mm/fremap.c b/mm/fremap.c
88084index 5bff081..bfa6e93 100644
88085--- a/mm/fremap.c
88086+++ b/mm/fremap.c
88087@@ -163,6 +163,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
88088 retry:
88089 vma = find_vma(mm, start);
88090
88091+#ifdef CONFIG_PAX_SEGMEXEC
88092+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
88093+ goto out;
88094+#endif
88095+
88096 /*
88097 * Make sure the vma is shared, that it supports prefaulting,
88098 * and that the remapped range is valid and fully within
88099@@ -208,9 +213,10 @@ get_write_lock:
88100 if (mapping_cap_account_dirty(mapping)) {
88101 unsigned long addr;
88102 struct file *file = get_file(vma->vm_file);
88103+ /* mmap_region may free vma; grab the info now */
88104+ vm_flags = ACCESS_ONCE(vma->vm_flags);
88105
88106- addr = mmap_region(file, start, size,
88107- vma->vm_flags, pgoff);
88108+ addr = mmap_region(file, start, size, vm_flags, pgoff);
88109 fput(file);
88110 if (IS_ERR_VALUE(addr)) {
88111 err = addr;
88112@@ -218,7 +224,7 @@ get_write_lock:
88113 BUG_ON(addr != start);
88114 err = 0;
88115 }
88116- goto out;
88117+ goto out_freed;
88118 }
88119 mutex_lock(&mapping->i_mmap_mutex);
88120 flush_dcache_mmap_lock(mapping);
88121@@ -253,6 +259,7 @@ get_write_lock:
88122 out:
88123 if (vma)
88124 vm_flags = vma->vm_flags;
88125+out_freed:
88126 if (likely(!has_write_lock))
88127 up_read(&mm->mmap_sem);
88128 else
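The fremap change is a plain use-after-free repair, independent of PaX: mmap_region() can merge and free the vma, so its flags are copied out with ACCESS_ONCE() before the call and the success path leaves through the new out_freed label rather than re-reading vma->vm_flags from freed memory. The pattern in miniature (kernel-style, as in the hunk above):

    vm_flags = ACCESS_ONCE(vma->vm_flags);                   /* snapshot first */
    addr = mmap_region(file, start, size, vm_flags, pgoff);  /* may free vma  */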
88129diff --git a/mm/highmem.c b/mm/highmem.c
88130index b32b70c..e512eb0 100644
88131--- a/mm/highmem.c
88132+++ b/mm/highmem.c
88133@@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
88134 * So no dangers, even with speculative execution.
88135 */
88136 page = pte_page(pkmap_page_table[i]);
88137+ pax_open_kernel();
88138 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
88139-
88140+ pax_close_kernel();
88141 set_page_address(page, NULL);
88142 need_flush = 1;
88143 }
88144@@ -198,9 +199,11 @@ start:
88145 }
88146 }
88147 vaddr = PKMAP_ADDR(last_pkmap_nr);
88148+
88149+ pax_open_kernel();
88150 set_pte_at(&init_mm, vaddr,
88151 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
88152-
88153+ pax_close_kernel();
88154 pkmap_count[last_pkmap_nr] = 1;
88155 set_page_address(page, (void *)vaddr);
88156
88157diff --git a/mm/hugetlb.c b/mm/hugetlb.c
88158index 0b7656e..d21cefc 100644
88159--- a/mm/hugetlb.c
88160+++ b/mm/hugetlb.c
88161@@ -2094,15 +2094,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
88162 struct hstate *h = &default_hstate;
88163 unsigned long tmp;
88164 int ret;
88165+ ctl_table_no_const hugetlb_table;
88166
88167 tmp = h->max_huge_pages;
88168
88169 if (write && h->order >= MAX_ORDER)
88170 return -EINVAL;
88171
88172- table->data = &tmp;
88173- table->maxlen = sizeof(unsigned long);
88174- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
88175+ hugetlb_table = *table;
88176+ hugetlb_table.data = &tmp;
88177+ hugetlb_table.maxlen = sizeof(unsigned long);
88178+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
88179 if (ret)
88180 goto out;
88181
88182@@ -2147,15 +2149,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
88183 struct hstate *h = &default_hstate;
88184 unsigned long tmp;
88185 int ret;
88186+ ctl_table_no_const hugetlb_table;
88187
88188 tmp = h->nr_overcommit_huge_pages;
88189
88190 if (write && h->order >= MAX_ORDER)
88191 return -EINVAL;
88192
88193- table->data = &tmp;
88194- table->maxlen = sizeof(unsigned long);
88195- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
88196+ hugetlb_table = *table;
88197+ hugetlb_table.data = &tmp;
88198+ hugetlb_table.maxlen = sizeof(unsigned long);
88199+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
88200 if (ret)
88201 goto out;
88202
88203@@ -2605,6 +2609,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
88204 return 1;
88205 }
88206
88207+#ifdef CONFIG_PAX_SEGMEXEC
88208+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
88209+{
88210+ struct mm_struct *mm = vma->vm_mm;
88211+ struct vm_area_struct *vma_m;
88212+ unsigned long address_m;
88213+ pte_t *ptep_m;
88214+
88215+ vma_m = pax_find_mirror_vma(vma);
88216+ if (!vma_m)
88217+ return;
88218+
88219+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
88220+ address_m = address + SEGMEXEC_TASK_SIZE;
88221+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
88222+ get_page(page_m);
88223+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
88224+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
88225+}
88226+#endif
88227+
88228 /*
88229 * Hugetlb_cow() should be called with page lock of the original hugepage held.
88230 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
88231@@ -2721,6 +2746,11 @@ retry_avoidcopy:
88232 make_huge_pte(vma, new_page, 1));
88233 page_remove_rmap(old_page);
88234 hugepage_add_new_anon_rmap(new_page, vma, address);
88235+
88236+#ifdef CONFIG_PAX_SEGMEXEC
88237+ pax_mirror_huge_pte(vma, address, new_page);
88238+#endif
88239+
88240 /* Make the old page be freed below */
88241 new_page = old_page;
88242 }
88243@@ -2883,6 +2913,10 @@ retry:
88244 && (vma->vm_flags & VM_SHARED)));
88245 set_huge_pte_at(mm, address, ptep, new_pte);
88246
88247+#ifdef CONFIG_PAX_SEGMEXEC
88248+ pax_mirror_huge_pte(vma, address, page);
88249+#endif
88250+
88251 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
88252 /* Optimization, do the COW without a second fault */
88253 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
88254@@ -2912,6 +2946,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
88255 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
88256 struct hstate *h = hstate_vma(vma);
88257
88258+#ifdef CONFIG_PAX_SEGMEXEC
88259+ struct vm_area_struct *vma_m;
88260+#endif
88261+
88262 address &= huge_page_mask(h);
88263
88264 ptep = huge_pte_offset(mm, address);
88265@@ -2925,6 +2963,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
88266 VM_FAULT_SET_HINDEX(hstate_index(h));
88267 }
88268
88269+#ifdef CONFIG_PAX_SEGMEXEC
88270+ vma_m = pax_find_mirror_vma(vma);
88271+ if (vma_m) {
88272+ unsigned long address_m;
88273+
88274+ if (vma->vm_start > vma_m->vm_start) {
88275+ address_m = address;
88276+ address -= SEGMEXEC_TASK_SIZE;
88277+ vma = vma_m;
88278+ h = hstate_vma(vma);
88279+ } else
88280+ address_m = address + SEGMEXEC_TASK_SIZE;
88281+
88282+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
88283+ return VM_FAULT_OOM;
88284+ address_m &= HPAGE_MASK;
88285+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
88286+ }
88287+#endif
88288+
88289 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
88290 if (!ptep)
88291 return VM_FAULT_OOM;
88292diff --git a/mm/internal.h b/mm/internal.h
88293index 684f7aa..9eb9edc 100644
88294--- a/mm/internal.h
88295+++ b/mm/internal.h
88296@@ -97,6 +97,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
88297 * in mm/page_alloc.c
88298 */
88299 extern void __free_pages_bootmem(struct page *page, unsigned int order);
88300+extern void free_compound_page(struct page *page);
88301 extern void prep_compound_page(struct page *page, unsigned long order);
88302 #ifdef CONFIG_MEMORY_FAILURE
88303 extern bool is_free_buddy_page(struct page *page);
88304@@ -352,7 +353,7 @@ extern u32 hwpoison_filter_enable;
88305
88306 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
88307 unsigned long, unsigned long,
88308- unsigned long, unsigned long);
88309+ unsigned long, unsigned long) __intentional_overflow(-1);
88310
88311 extern void set_pageblock_order(void);
88312 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
88313diff --git a/mm/kmemleak.c b/mm/kmemleak.c
88314index e126b0e..e986018 100644
88315--- a/mm/kmemleak.c
88316+++ b/mm/kmemleak.c
88317@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
88318
88319 for (i = 0; i < object->trace_len; i++) {
88320 void *ptr = (void *)object->trace[i];
88321- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
88322+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
88323 }
88324 }
88325
88326@@ -1851,7 +1851,7 @@ static int __init kmemleak_late_init(void)
88327 return -ENOMEM;
88328 }
88329
88330- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
88331+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
88332 &kmemleak_fops);
88333 if (!dentry)
88334 pr_warning("Failed to create the debugfs kmemleak file\n");
88335diff --git a/mm/maccess.c b/mm/maccess.c
88336index d53adf9..03a24bf 100644
88337--- a/mm/maccess.c
88338+++ b/mm/maccess.c
88339@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
88340 set_fs(KERNEL_DS);
88341 pagefault_disable();
88342 ret = __copy_from_user_inatomic(dst,
88343- (__force const void __user *)src, size);
88344+ (const void __force_user *)src, size);
88345 pagefault_enable();
88346 set_fs(old_fs);
88347
88348@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
88349
88350 set_fs(KERNEL_DS);
88351 pagefault_disable();
88352- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
88353+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
88354 pagefault_enable();
88355 set_fs(old_fs);
88356
88357diff --git a/mm/madvise.c b/mm/madvise.c
88358index 539eeb9..e24a987 100644
88359--- a/mm/madvise.c
88360+++ b/mm/madvise.c
88361@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
88362 pgoff_t pgoff;
88363 unsigned long new_flags = vma->vm_flags;
88364
88365+#ifdef CONFIG_PAX_SEGMEXEC
88366+ struct vm_area_struct *vma_m;
88367+#endif
88368+
88369 switch (behavior) {
88370 case MADV_NORMAL:
88371 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
88372@@ -126,6 +130,13 @@ success:
88373 /*
88374 * vm_flags is protected by the mmap_sem held in write mode.
88375 */
88376+
88377+#ifdef CONFIG_PAX_SEGMEXEC
88378+ vma_m = pax_find_mirror_vma(vma);
88379+ if (vma_m)
88380+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
88381+#endif
88382+
88383 vma->vm_flags = new_flags;
88384
88385 out:
88386@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct *vma,
88387 struct vm_area_struct **prev,
88388 unsigned long start, unsigned long end)
88389 {
88390+
88391+#ifdef CONFIG_PAX_SEGMEXEC
88392+ struct vm_area_struct *vma_m;
88393+#endif
88394+
88395 *prev = vma;
88396 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
88397 return -EINVAL;
88398@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct *vma,
88399 zap_page_range(vma, start, end - start, &details);
88400 } else
88401 zap_page_range(vma, start, end - start, NULL);
88402+
88403+#ifdef CONFIG_PAX_SEGMEXEC
88404+ vma_m = pax_find_mirror_vma(vma);
88405+ if (vma_m) {
88406+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
88407+ struct zap_details details = {
88408+ .nonlinear_vma = vma_m,
88409+ .last_index = ULONG_MAX,
88410+ };
88411+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
88412+ } else
88413+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
88414+ }
88415+#endif
88416+
88417 return 0;
88418 }
88419
88420@@ -491,6 +522,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
88421 if (end < start)
88422 return error;
88423
88424+#ifdef CONFIG_PAX_SEGMEXEC
88425+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
88426+ if (end > SEGMEXEC_TASK_SIZE)
88427+ return error;
88428+ } else
88429+#endif
88430+
88431+ if (end > TASK_SIZE)
88432+ return error;
88433+
88434 error = 0;
88435 if (end == start)
88436 return error;
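The added range check rejects requests ending beyond TASK_SIZE (or SEGMEXEC_TASK_SIZE under a mirrored mm) with the -EINVAL already held in error, instead of walking VMAs that cannot exist there. A probe for x86_64, where 1UL << 47 lies above TASK_SIZE (expects EINVAL on this kernel; mainline typically reports ENOMEM):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        void *high = (void *)(1UL << 47);   /* above x86_64 TASK_SIZE */

        if (madvise(high, 4096, MADV_DONTNEED) == -1)
            perror("madvise beyond TASK_SIZE");
        return 0;
    }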
88437diff --git a/mm/memory-failure.c b/mm/memory-failure.c
88438index bf3351b..aea800d 100644
88439--- a/mm/memory-failure.c
88440+++ b/mm/memory-failure.c
88441@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
88442
88443 int sysctl_memory_failure_recovery __read_mostly = 1;
88444
88445-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
88446+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
88447
88448 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
88449
88450@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
88451 pfn, t->comm, t->pid);
88452 si.si_signo = SIGBUS;
88453 si.si_errno = 0;
88454- si.si_addr = (void *)addr;
88455+ si.si_addr = (void __user *)addr;
88456 #ifdef __ARCH_SI_TRAPNO
88457 si.si_trapno = trapno;
88458 #endif
88459@@ -762,7 +762,7 @@ static struct page_state {
88460 unsigned long res;
88461 char *msg;
88462 int (*action)(struct page *p, unsigned long pfn);
88463-} error_states[] = {
88464+} __do_const error_states[] = {
88465 { reserved, reserved, "reserved kernel", me_kernel },
88466 /*
88467 * free pages are specially detected outside this table:
88468@@ -1053,7 +1053,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
88469 nr_pages = 1 << compound_order(hpage);
88470 else /* normal page or thp */
88471 nr_pages = 1;
88472- atomic_long_add(nr_pages, &num_poisoned_pages);
88473+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
88474
88475 /*
88476 * We need/can do nothing about count=0 pages.
88477@@ -1083,7 +1083,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
88478 if (!PageHWPoison(hpage)
88479 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
88480 || (p != hpage && TestSetPageHWPoison(hpage))) {
88481- atomic_long_sub(nr_pages, &num_poisoned_pages);
88482+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
88483 return 0;
88484 }
88485 set_page_hwpoison_huge_page(hpage);
88486@@ -1152,7 +1152,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
88487 }
88488 if (hwpoison_filter(p)) {
88489 if (TestClearPageHWPoison(p))
88490- atomic_long_sub(nr_pages, &num_poisoned_pages);
88491+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
88492 unlock_page(hpage);
88493 put_page(hpage);
88494 return 0;
88495@@ -1370,7 +1370,7 @@ int unpoison_memory(unsigned long pfn)
88496 return 0;
88497 }
88498 if (TestClearPageHWPoison(p))
88499- atomic_long_dec(&num_poisoned_pages);
88500+ atomic_long_dec_unchecked(&num_poisoned_pages);
88501 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
88502 return 0;
88503 }
88504@@ -1384,7 +1384,7 @@ int unpoison_memory(unsigned long pfn)
88505 */
88506 if (TestClearPageHWPoison(page)) {
88507 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
88508- atomic_long_sub(nr_pages, &num_poisoned_pages);
88509+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
88510 freeit = 1;
88511 if (PageHuge(page))
88512 clear_page_hwpoison_huge_page(page);
88513@@ -1521,7 +1521,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
88514 } else {
88515 set_page_hwpoison_huge_page(hpage);
88516 dequeue_hwpoisoned_huge_page(hpage);
88517- atomic_long_add(1 << compound_order(hpage),
88518+ atomic_long_add_unchecked(1 << compound_order(hpage),
88519 &num_poisoned_pages);
88520 }
88521 return ret;
88522@@ -1560,7 +1560,7 @@ static int __soft_offline_page(struct page *page, int flags)
88523 put_page(page);
88524 pr_info("soft_offline: %#lx: invalidated\n", pfn);
88525 SetPageHWPoison(page);
88526- atomic_long_inc(&num_poisoned_pages);
88527+ atomic_long_inc_unchecked(&num_poisoned_pages);
88528 return 0;
88529 }
88530
88531@@ -1605,7 +1605,7 @@ static int __soft_offline_page(struct page *page, int flags)
88532 if (!is_free_buddy_page(page))
88533 pr_info("soft offline: %#lx: page leaked\n",
88534 pfn);
88535- atomic_long_inc(&num_poisoned_pages);
88536+ atomic_long_inc_unchecked(&num_poisoned_pages);
88537 }
88538 } else {
88539 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
88540@@ -1666,11 +1666,11 @@ int soft_offline_page(struct page *page, int flags)
88541 if (PageHuge(page)) {
88542 set_page_hwpoison_huge_page(hpage);
88543 dequeue_hwpoisoned_huge_page(hpage);
88544- atomic_long_add(1 << compound_order(hpage),
88545+ atomic_long_add_unchecked(1 << compound_order(hpage),
88546 &num_poisoned_pages);
88547 } else {
88548 SetPageHWPoison(page);
88549- atomic_long_inc(&num_poisoned_pages);
88550+ atomic_long_inc_unchecked(&num_poisoned_pages);
88551 }
88552 }
88553 unset:
88554diff --git a/mm/memory.c b/mm/memory.c
88555index d176154..cd1b387 100644
88556--- a/mm/memory.c
88557+++ b/mm/memory.c
88558@@ -402,6 +402,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
88559 free_pte_range(tlb, pmd, addr);
88560 } while (pmd++, addr = next, addr != end);
88561
88562+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
88563 start &= PUD_MASK;
88564 if (start < floor)
88565 return;
88566@@ -416,6 +417,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
88567 pmd = pmd_offset(pud, start);
88568 pud_clear(pud);
88569 pmd_free_tlb(tlb, pmd, start);
88570+#endif
88571+
88572 }
88573
88574 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
88575@@ -435,6 +438,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
88576 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
88577 } while (pud++, addr = next, addr != end);
88578
88579+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
88580 start &= PGDIR_MASK;
88581 if (start < floor)
88582 return;
88583@@ -449,6 +453,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
88584 pud = pud_offset(pgd, start);
88585 pgd_clear(pgd);
88586 pud_free_tlb(tlb, pud, start);
88587+#endif
88588+
88589 }
88590
88591 /*
88592@@ -1636,12 +1642,6 @@ no_page_table:
88593 return page;
88594 }
88595
88596-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
88597-{
88598- return stack_guard_page_start(vma, addr) ||
88599- stack_guard_page_end(vma, addr+PAGE_SIZE);
88600-}
88601-
88602 /**
88603 * __get_user_pages() - pin user pages in memory
88604 * @tsk: task_struct of target task
88605@@ -1728,10 +1728,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
88606
88607 i = 0;
88608
88609- do {
88610+ while (nr_pages) {
88611 struct vm_area_struct *vma;
88612
88613- vma = find_extend_vma(mm, start);
88614+ vma = find_vma(mm, start);
88615 if (!vma && in_gate_area(mm, start)) {
88616 unsigned long pg = start & PAGE_MASK;
88617 pgd_t *pgd;
88618@@ -1780,7 +1780,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
88619 goto next_page;
88620 }
88621
88622- if (!vma ||
88623+ if (!vma || start < vma->vm_start ||
88624 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
88625 !(vm_flags & vma->vm_flags))
88626 return i ? : -EFAULT;
88627@@ -1809,11 +1809,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
88628 int ret;
88629 unsigned int fault_flags = 0;
88630
88631- /* For mlock, just skip the stack guard page. */
88632- if (foll_flags & FOLL_MLOCK) {
88633- if (stack_guard_page(vma, start))
88634- goto next_page;
88635- }
88636 if (foll_flags & FOLL_WRITE)
88637 fault_flags |= FAULT_FLAG_WRITE;
88638 if (nonblocking)
88639@@ -1893,7 +1888,7 @@ next_page:
88640 start += page_increm * PAGE_SIZE;
88641 nr_pages -= page_increm;
88642 } while (nr_pages && start < vma->vm_end);
88643- } while (nr_pages);
88644+ }
88645 return i;
88646 }
88647 EXPORT_SYMBOL(__get_user_pages);
88648@@ -2100,6 +2095,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
88649 page_add_file_rmap(page);
88650 set_pte_at(mm, addr, pte, mk_pte(page, prot));
88651
88652+#ifdef CONFIG_PAX_SEGMEXEC
88653+ pax_mirror_file_pte(vma, addr, page, ptl);
88654+#endif
88655+
88656 retval = 0;
88657 pte_unmap_unlock(pte, ptl);
88658 return retval;
88659@@ -2144,9 +2143,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
88660 if (!page_count(page))
88661 return -EINVAL;
88662 if (!(vma->vm_flags & VM_MIXEDMAP)) {
88663+
88664+#ifdef CONFIG_PAX_SEGMEXEC
88665+ struct vm_area_struct *vma_m;
88666+#endif
88667+
88668 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
88669 BUG_ON(vma->vm_flags & VM_PFNMAP);
88670 vma->vm_flags |= VM_MIXEDMAP;
88671+
88672+#ifdef CONFIG_PAX_SEGMEXEC
88673+ vma_m = pax_find_mirror_vma(vma);
88674+ if (vma_m)
88675+ vma_m->vm_flags |= VM_MIXEDMAP;
88676+#endif
88677+
88678 }
88679 return insert_page(vma, addr, page, vma->vm_page_prot);
88680 }
88681@@ -2229,6 +2240,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
88682 unsigned long pfn)
88683 {
88684 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
88685+ BUG_ON(vma->vm_mirror);
88686
88687 if (addr < vma->vm_start || addr >= vma->vm_end)
88688 return -EFAULT;
88689@@ -2476,7 +2488,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
88690
88691 BUG_ON(pud_huge(*pud));
88692
88693- pmd = pmd_alloc(mm, pud, addr);
88694+ pmd = (mm == &init_mm) ?
88695+ pmd_alloc_kernel(mm, pud, addr) :
88696+ pmd_alloc(mm, pud, addr);
88697 if (!pmd)
88698 return -ENOMEM;
88699 do {
88700@@ -2496,7 +2510,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
88701 unsigned long next;
88702 int err;
88703
88704- pud = pud_alloc(mm, pgd, addr);
88705+ pud = (mm == &init_mm) ?
88706+ pud_alloc_kernel(mm, pgd, addr) :
88707+ pud_alloc(mm, pgd, addr);
88708 if (!pud)
88709 return -ENOMEM;
88710 do {
88711@@ -2584,6 +2600,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
88712 copy_user_highpage(dst, src, va, vma);
88713 }
88714
88715+#ifdef CONFIG_PAX_SEGMEXEC
88716+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
88717+{
88718+ struct mm_struct *mm = vma->vm_mm;
88719+ spinlock_t *ptl;
88720+ pte_t *pte, entry;
88721+
88722+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
88723+ entry = *pte;
88724+ if (!pte_present(entry)) {
88725+ if (!pte_none(entry)) {
88726+ BUG_ON(pte_file(entry));
88727+ free_swap_and_cache(pte_to_swp_entry(entry));
88728+ pte_clear_not_present_full(mm, address, pte, 0);
88729+ }
88730+ } else {
88731+ struct page *page;
88732+
88733+ flush_cache_page(vma, address, pte_pfn(entry));
88734+ entry = ptep_clear_flush(vma, address, pte);
88735+ BUG_ON(pte_dirty(entry));
88736+ page = vm_normal_page(vma, address, entry);
88737+ if (page) {
88738+ update_hiwater_rss(mm);
88739+ if (PageAnon(page))
88740+ dec_mm_counter_fast(mm, MM_ANONPAGES);
88741+ else
88742+ dec_mm_counter_fast(mm, MM_FILEPAGES);
88743+ page_remove_rmap(page);
88744+ page_cache_release(page);
88745+ }
88746+ }
88747+ pte_unmap_unlock(pte, ptl);
88748+}
88749+
88750+/* PaX: if vma is mirrored, synchronize the mirror's PTE
88751+ *
88752+ * the ptl of the lower mapped page is held on entry and is not released on exit
88753+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
88754+ */
88755+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
88756+{
88757+ struct mm_struct *mm = vma->vm_mm;
88758+ unsigned long address_m;
88759+ spinlock_t *ptl_m;
88760+ struct vm_area_struct *vma_m;
88761+ pmd_t *pmd_m;
88762+ pte_t *pte_m, entry_m;
88763+
88764+ BUG_ON(!page_m || !PageAnon(page_m));
88765+
88766+ vma_m = pax_find_mirror_vma(vma);
88767+ if (!vma_m)
88768+ return;
88769+
88770+ BUG_ON(!PageLocked(page_m));
88771+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
88772+ address_m = address + SEGMEXEC_TASK_SIZE;
88773+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
88774+ pte_m = pte_offset_map(pmd_m, address_m);
88775+ ptl_m = pte_lockptr(mm, pmd_m);
88776+ if (ptl != ptl_m) {
88777+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
88778+ if (!pte_none(*pte_m))
88779+ goto out;
88780+ }
88781+
88782+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
88783+ page_cache_get(page_m);
88784+ page_add_anon_rmap(page_m, vma_m, address_m);
88785+ inc_mm_counter_fast(mm, MM_ANONPAGES);
88786+ set_pte_at(mm, address_m, pte_m, entry_m);
88787+ update_mmu_cache(vma_m, address_m, pte_m);
88788+out:
88789+ if (ptl != ptl_m)
88790+ spin_unlock(ptl_m);
88791+ pte_unmap(pte_m);
88792+ unlock_page(page_m);
88793+}
88794+
88795+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
88796+{
88797+ struct mm_struct *mm = vma->vm_mm;
88798+ unsigned long address_m;
88799+ spinlock_t *ptl_m;
88800+ struct vm_area_struct *vma_m;
88801+ pmd_t *pmd_m;
88802+ pte_t *pte_m, entry_m;
88803+
88804+ BUG_ON(!page_m || PageAnon(page_m));
88805+
88806+ vma_m = pax_find_mirror_vma(vma);
88807+ if (!vma_m)
88808+ return;
88809+
88810+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
88811+ address_m = address + SEGMEXEC_TASK_SIZE;
88812+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
88813+ pte_m = pte_offset_map(pmd_m, address_m);
88814+ ptl_m = pte_lockptr(mm, pmd_m);
88815+ if (ptl != ptl_m) {
88816+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
88817+ if (!pte_none(*pte_m))
88818+ goto out;
88819+ }
88820+
88821+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
88822+ page_cache_get(page_m);
88823+ page_add_file_rmap(page_m);
88824+ inc_mm_counter_fast(mm, MM_FILEPAGES);
88825+ set_pte_at(mm, address_m, pte_m, entry_m);
88826+ update_mmu_cache(vma_m, address_m, pte_m);
88827+out:
88828+ if (ptl != ptl_m)
88829+ spin_unlock(ptl_m);
88830+ pte_unmap(pte_m);
88831+}
88832+
88833+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
88834+{
88835+ struct mm_struct *mm = vma->vm_mm;
88836+ unsigned long address_m;
88837+ spinlock_t *ptl_m;
88838+ struct vm_area_struct *vma_m;
88839+ pmd_t *pmd_m;
88840+ pte_t *pte_m, entry_m;
88841+
88842+ vma_m = pax_find_mirror_vma(vma);
88843+ if (!vma_m)
88844+ return;
88845+
88846+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
88847+ address_m = address + SEGMEXEC_TASK_SIZE;
88848+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
88849+ pte_m = pte_offset_map(pmd_m, address_m);
88850+ ptl_m = pte_lockptr(mm, pmd_m);
88851+ if (ptl != ptl_m) {
88852+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
88853+ if (!pte_none(*pte_m))
88854+ goto out;
88855+ }
88856+
88857+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
88858+ set_pte_at(mm, address_m, pte_m, entry_m);
88859+out:
88860+ if (ptl != ptl_m)
88861+ spin_unlock(ptl_m);
88862+ pte_unmap(pte_m);
88863+}
88864+
88865+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
88866+{
88867+ struct page *page_m;
88868+ pte_t entry;
88869+
88870+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
88871+ goto out;
88872+
88873+ entry = *pte;
88874+ page_m = vm_normal_page(vma, address, entry);
88875+ if (!page_m)
88876+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
88877+ else if (PageAnon(page_m)) {
88878+ if (pax_find_mirror_vma(vma)) {
88879+ pte_unmap_unlock(pte, ptl);
88880+ lock_page(page_m);
88881+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
88882+ if (pte_same(entry, *pte))
88883+ pax_mirror_anon_pte(vma, address, page_m, ptl);
88884+ else
88885+ unlock_page(page_m);
88886+ }
88887+ } else
88888+ pax_mirror_file_pte(vma, address, page_m, ptl);
88889+
88890+out:
88891+ pte_unmap_unlock(pte, ptl);
88892+}
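
pax_mirror_pte() cannot take a sleeping page lock while holding the pte spinlock, hence the dance above for anonymous pages: drop the pte lock, lock_page(), re-take the pte lock, and proceed only if pte_same() confirms nobody changed the entry in the window. A generic userspace sketch of that optimistic revalidation pattern, with pthread mutexes standing in for the two kernel locks (names are illustrative):

/* Release the fine-grained lock, take the heavier lock, re-acquire,
 * and bail out if the guarded state changed in between. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;      /* ~ pte lock */
static pthread_mutex_t pagelock = PTHREAD_MUTEX_INITIALIZER; /* ~ lock_page() */
static unsigned long pte;                                    /* ~ *pte */

static bool mirror_anon(void)
{
    pthread_mutex_lock(&ptl);
    unsigned long entry = pte;        /* entry = *pte */
    pthread_mutex_unlock(&ptl);       /* pte_unmap_unlock(pte, ptl) */

    pthread_mutex_lock(&pagelock);    /* lock_page(page_m) - may sleep */
    pthread_mutex_lock(&ptl);         /* pte_offset_map_lock(...) */
    bool same = (entry == pte);       /* pte_same(entry, *pte) recheck */
    pthread_mutex_unlock(&ptl);
    if (!same)
        pthread_mutex_unlock(&pagelock);  /* lost the race: back off */
    return same;
}

int main(void)
{
    bool ok = mirror_anon();
    printf("revalidated: %d\n", ok);
    if (ok)  /* in the kernel code the mirror op itself unlocks the page */
        pthread_mutex_unlock(&pagelock);
    return 0;
}
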
88893+#endif
88894+
88895 /*
88896 * This routine handles present pages, when users try to write
88897 * to a shared page. It is done by copying the page to a new address
88898@@ -2800,6 +2996,12 @@ gotten:
88899 */
88900 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
88901 if (likely(pte_same(*page_table, orig_pte))) {
88902+
88903+#ifdef CONFIG_PAX_SEGMEXEC
88904+ if (pax_find_mirror_vma(vma))
88905+ BUG_ON(!trylock_page(new_page));
88906+#endif
88907+
88908 if (old_page) {
88909 if (!PageAnon(old_page)) {
88910 dec_mm_counter_fast(mm, MM_FILEPAGES);
88911@@ -2851,6 +3053,10 @@ gotten:
88912 page_remove_rmap(old_page);
88913 }
88914
88915+#ifdef CONFIG_PAX_SEGMEXEC
88916+ pax_mirror_anon_pte(vma, address, new_page, ptl);
88917+#endif
88918+
88919 /* Free the old page.. */
88920 new_page = old_page;
88921 ret |= VM_FAULT_WRITE;
88922@@ -3128,6 +3334,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
88923 swap_free(entry);
88924 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
88925 try_to_free_swap(page);
88926+
88927+#ifdef CONFIG_PAX_SEGMEXEC
88928+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
88929+#endif
88930+
88931 unlock_page(page);
88932 if (page != swapcache) {
88933 /*
88934@@ -3151,6 +3362,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
88935
88936 /* No need to invalidate - it was non-present before */
88937 update_mmu_cache(vma, address, page_table);
88938+
88939+#ifdef CONFIG_PAX_SEGMEXEC
88940+ pax_mirror_anon_pte(vma, address, page, ptl);
88941+#endif
88942+
88943 unlock:
88944 pte_unmap_unlock(page_table, ptl);
88945 out:
88946@@ -3170,40 +3386,6 @@ out_release:
88947 }
88948
88949 /*
88950- * This is like a special single-page "expand_{down|up}wards()",
88951- * except we must first make sure that 'address{-|+}PAGE_SIZE'
88952- * doesn't hit another vma.
88953- */
88954-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
88955-{
88956- address &= PAGE_MASK;
88957- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
88958- struct vm_area_struct *prev = vma->vm_prev;
88959-
88960- /*
88961- * Is there a mapping abutting this one below?
88962- *
88963- * That's only ok if it's the same stack mapping
88964- * that has gotten split..
88965- */
88966- if (prev && prev->vm_end == address)
88967- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
88968-
88969- expand_downwards(vma, address - PAGE_SIZE);
88970- }
88971- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
88972- struct vm_area_struct *next = vma->vm_next;
88973-
88974- /* As VM_GROWSDOWN but s/below/above/ */
88975- if (next && next->vm_start == address + PAGE_SIZE)
88976- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
88977-
88978- expand_upwards(vma, address + PAGE_SIZE);
88979- }
88980- return 0;
88981-}
88982-
88983-/*
88984 * We enter with non-exclusive mmap_sem (to exclude vma changes,
88985 * but allow concurrent faults), and pte mapped but not yet locked.
88986 * We return with mmap_sem still held, but pte unmapped and unlocked.
88987@@ -3212,27 +3394,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
88988 unsigned long address, pte_t *page_table, pmd_t *pmd,
88989 unsigned int flags)
88990 {
88991- struct page *page;
88992+ struct page *page = NULL;
88993 spinlock_t *ptl;
88994 pte_t entry;
88995
88996- pte_unmap(page_table);
88997-
88998- /* Check if we need to add a guard page to the stack */
88999- if (check_stack_guard_page(vma, address) < 0)
89000- return VM_FAULT_SIGBUS;
89001-
89002- /* Use the zero-page for reads */
89003 if (!(flags & FAULT_FLAG_WRITE)) {
89004 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
89005 vma->vm_page_prot));
89006- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
89007+ ptl = pte_lockptr(mm, pmd);
89008+ spin_lock(ptl);
89009 if (!pte_none(*page_table))
89010 goto unlock;
89011 goto setpte;
89012 }
89013
89014 /* Allocate our own private page. */
89015+ pte_unmap(page_table);
89016+
89017 if (unlikely(anon_vma_prepare(vma)))
89018 goto oom;
89019 page = alloc_zeroed_user_highpage_movable(vma, address);
89020@@ -3256,6 +3434,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
89021 if (!pte_none(*page_table))
89022 goto release;
89023
89024+#ifdef CONFIG_PAX_SEGMEXEC
89025+ if (pax_find_mirror_vma(vma))
89026+ BUG_ON(!trylock_page(page));
89027+#endif
89028+
89029 inc_mm_counter_fast(mm, MM_ANONPAGES);
89030 page_add_new_anon_rmap(page, vma, address);
89031 setpte:
89032@@ -3263,6 +3446,12 @@ setpte:
89033
89034 /* No need to invalidate - it was non-present before */
89035 update_mmu_cache(vma, address, page_table);
89036+
89037+#ifdef CONFIG_PAX_SEGMEXEC
89038+ if (page)
89039+ pax_mirror_anon_pte(vma, address, page, ptl);
89040+#endif
89041+
89042 unlock:
89043 pte_unmap_unlock(page_table, ptl);
89044 return 0;
89045@@ -3406,6 +3595,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
89046 */
89047 /* Only go through if we didn't race with anybody else... */
89048 if (likely(pte_same(*page_table, orig_pte))) {
89049+
89050+#ifdef CONFIG_PAX_SEGMEXEC
89051+ if (anon && pax_find_mirror_vma(vma))
89052+ BUG_ON(!trylock_page(page));
89053+#endif
89054+
89055 flush_icache_page(vma, page);
89056 entry = mk_pte(page, vma->vm_page_prot);
89057 if (flags & FAULT_FLAG_WRITE)
89058@@ -3427,6 +3622,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
89059
89060 /* no need to invalidate: a not-present page won't be cached */
89061 update_mmu_cache(vma, address, page_table);
89062+
89063+#ifdef CONFIG_PAX_SEGMEXEC
89064+ if (anon)
89065+ pax_mirror_anon_pte(vma, address, page, ptl);
89066+ else
89067+ pax_mirror_file_pte(vma, address, page, ptl);
89068+#endif
89069+
89070 } else {
89071 if (cow_page)
89072 mem_cgroup_uncharge_page(cow_page);
89073@@ -3737,6 +3940,12 @@ static int handle_pte_fault(struct mm_struct *mm,
89074 if (flags & FAULT_FLAG_WRITE)
89075 flush_tlb_fix_spurious_fault(vma, address);
89076 }
89077+
89078+#ifdef CONFIG_PAX_SEGMEXEC
89079+ pax_mirror_pte(vma, address, pte, pmd, ptl);
89080+ return 0;
89081+#endif
89082+
89083 unlock:
89084 pte_unmap_unlock(pte, ptl);
89085 return 0;
89086@@ -3753,9 +3962,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
89087 pmd_t *pmd;
89088 pte_t *pte;
89089
89090+#ifdef CONFIG_PAX_SEGMEXEC
89091+ struct vm_area_struct *vma_m;
89092+#endif
89093+
89094 if (unlikely(is_vm_hugetlb_page(vma)))
89095 return hugetlb_fault(mm, vma, address, flags);
89096
89097+#ifdef CONFIG_PAX_SEGMEXEC
89098+ vma_m = pax_find_mirror_vma(vma);
89099+ if (vma_m) {
89100+ unsigned long address_m;
89101+ pgd_t *pgd_m;
89102+ pud_t *pud_m;
89103+ pmd_t *pmd_m;
89104+
89105+ if (vma->vm_start > vma_m->vm_start) {
89106+ address_m = address;
89107+ address -= SEGMEXEC_TASK_SIZE;
89108+ vma = vma_m;
89109+ } else
89110+ address_m = address + SEGMEXEC_TASK_SIZE;
89111+
89112+ pgd_m = pgd_offset(mm, address_m);
89113+ pud_m = pud_alloc(mm, pgd_m, address_m);
89114+ if (!pud_m)
89115+ return VM_FAULT_OOM;
89116+ pmd_m = pmd_alloc(mm, pud_m, address_m);
89117+ if (!pmd_m)
89118+ return VM_FAULT_OOM;
89119+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
89120+ return VM_FAULT_OOM;
89121+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
89122+ }
89123+#endif
89124+
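
The block above normalises the fault before the regular paths run: whichever half of the address space faulted, the handler retargets the lower vma (the kernel compares vma->vm_start against vma_m->vm_start; since a mirror lives wholly above the split, that is equivalent to testing the address against SEGMEXEC_TASK_SIZE) and pre-allocates the mirror's page-table levels so the later pax_mirror_*_pte() calls cannot fail on allocation. A sketch of the address normalisation (split value assumed as in the earlier sketch):

#include <stdio.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL

int main(void)
{
    unsigned long address = 0x68048000UL;   /* fault lands in the upper half */
    unsigned long address_m;

    if (address >= SEGMEXEC_TASK_SIZE) {    /* faulting vma is the mirror */
        address_m = address;
        address -= SEGMEXEC_TASK_SIZE;      /* handle the fault on the lower vma */
    } else {
        address_m = address + SEGMEXEC_TASK_SIZE;
    }
    printf("handle at %#lx, mirror at %#lx\n", address, address_m);
    return 0;
}
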
89125 retry:
89126 pgd = pgd_offset(mm, address);
89127 pud = pud_alloc(mm, pgd, address);
89128@@ -3894,6 +4135,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
89129 spin_unlock(&mm->page_table_lock);
89130 return 0;
89131 }
89132+
89133+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
89134+{
89135+ pud_t *new = pud_alloc_one(mm, address);
89136+ if (!new)
89137+ return -ENOMEM;
89138+
89139+ smp_wmb(); /* See comment in __pte_alloc */
89140+
89141+ spin_lock(&mm->page_table_lock);
89142+ if (pgd_present(*pgd)) /* Another has populated it */
89143+ pud_free(mm, new);
89144+ else
89145+ pgd_populate_kernel(mm, pgd, new);
89146+ spin_unlock(&mm->page_table_lock);
89147+ return 0;
89148+}
89149 #endif /* __PAGETABLE_PUD_FOLDED */
89150
89151 #ifndef __PAGETABLE_PMD_FOLDED
89152@@ -3924,6 +4182,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
89153 spin_unlock(&mm->page_table_lock);
89154 return 0;
89155 }
89156+
89157+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
89158+{
89159+ pmd_t *new = pmd_alloc_one(mm, address);
89160+ if (!new)
89161+ return -ENOMEM;
89162+
89163+ smp_wmb(); /* See comment in __pte_alloc */
89164+
89165+ spin_lock(&mm->page_table_lock);
89166+#ifndef __ARCH_HAS_4LEVEL_HACK
89167+ if (pud_present(*pud)) /* Another has populated it */
89168+ pmd_free(mm, new);
89169+ else
89170+ pud_populate_kernel(mm, pud, new);
89171+#else
89172+ if (pgd_present(*pud)) /* Another has populated it */
89173+ pmd_free(mm, new);
89174+ else
89175+ pgd_populate_kernel(mm, pud, new);
89176+#endif /* __ARCH_HAS_4LEVEL_HACK */
89177+ spin_unlock(&mm->page_table_lock);
89178+ return 0;
89179+}
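
Both _kernel variants above follow the same lock-free-allocation pattern as their user counterparts: allocate the new table outside the lock, publish it under mm->page_table_lock, and free it if another thread won the race. A userspace sketch of that shape (malloc/free stand in for the pud/pmd allocators; the barrier is noted in a comment):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t page_table_lock = PTHREAD_MUTEX_INITIALIZER;
static void *slot;                   /* ~ the pgd/pud entry being populated */

static int alloc_slot(void)
{
    void *new = malloc(64);          /* ~ pud_alloc_one(), outside the lock */
    if (!new)
        return -1;                   /* ~ -ENOMEM */
    /* smp_wmb() goes here in the kernel, so the initialised table is
     * visible before the pointer that publishes it */
    pthread_mutex_lock(&page_table_lock);
    if (slot)                        /* another has populated it */
        free(new);
    else
        slot = new;                  /* ~ pgd_populate_kernel() */
    pthread_mutex_unlock(&page_table_lock);
    return 0;
}

int main(void)
{
    alloc_slot();
    alloc_slot();                    /* second call loses the race, frees its copy */
    printf("slot=%p\n", slot);
    return 0;
}
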
89180 #endif /* __PAGETABLE_PMD_FOLDED */
89181
89182 #if !defined(__HAVE_ARCH_GATE_AREA)
89183@@ -3937,7 +4219,7 @@ static int __init gate_vma_init(void)
89184 gate_vma.vm_start = FIXADDR_USER_START;
89185 gate_vma.vm_end = FIXADDR_USER_END;
89186 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
89187- gate_vma.vm_page_prot = __P101;
89188+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
89189
89190 return 0;
89191 }
89192@@ -4071,8 +4353,8 @@ out:
89193 return ret;
89194 }
89195
89196-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
89197- void *buf, int len, int write)
89198+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
89199+ void *buf, size_t len, int write)
89200 {
89201 resource_size_t phys_addr;
89202 unsigned long prot = 0;
89203@@ -4098,8 +4380,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
89204 * Access another process' address space as given in mm. If non-NULL, use the
89205 * given task for page fault accounting.
89206 */
89207-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
89208- unsigned long addr, void *buf, int len, int write)
89209+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
89210+ unsigned long addr, void *buf, size_t len, int write)
89211 {
89212 struct vm_area_struct *vma;
89213 void *old_buf = buf;
89214@@ -4107,7 +4389,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
89215 down_read(&mm->mmap_sem);
89216 /* ignore errors, just check how much was successfully transferred */
89217 while (len) {
89218- int bytes, ret, offset;
89219+ ssize_t bytes, ret, offset;
89220 void *maddr;
89221 struct page *page = NULL;
89222
89223@@ -4166,8 +4448,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
89224 *
89225 * The caller must hold a reference on @mm.
89226 */
89227-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
89228- void *buf, int len, int write)
89229+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
89230+ void *buf, size_t len, int write)
89231 {
89232 return __access_remote_vm(NULL, mm, addr, buf, len, write);
89233 }
89234@@ -4177,11 +4459,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
89235 * Source/target buffer must be kernel space,
89236 * Do not walk the page table directly, use get_user_pages
89237 */
89238-int access_process_vm(struct task_struct *tsk, unsigned long addr,
89239- void *buf, int len, int write)
89240+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
89241+ void *buf, size_t len, int write)
89242 {
89243 struct mm_struct *mm;
89244- int ret;
89245+ ssize_t ret;
89246
89247 mm = get_task_mm(tsk);
89248 if (!mm)
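
The int-to-ssize_t/size_t conversions in this hunk and the ones above are overflow hardening: with the old prototypes a length between 2 GiB and 4 GiB is truncated into a negative int before the copy loop ever runs. A two-line demonstration (the value shown is the usual LP64 outcome; the conversion is formally implementation-defined):

#include <stdio.h>

int main(void)
{
    size_t len = 3UL * 1024 * 1024 * 1024;  /* 3 GiB */
    int truncated = (int)len;               /* typically -1073741824 */
    printf("size_t %zu -> int %d\n", len, truncated);
    return 0;
}
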
89249diff --git a/mm/mempolicy.c b/mm/mempolicy.c
89250index 0472964..7d5a0ea 100644
89251--- a/mm/mempolicy.c
89252+++ b/mm/mempolicy.c
89253@@ -746,6 +746,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
89254 unsigned long vmstart;
89255 unsigned long vmend;
89256
89257+#ifdef CONFIG_PAX_SEGMEXEC
89258+ struct vm_area_struct *vma_m;
89259+#endif
89260+
89261 vma = find_vma(mm, start);
89262 if (!vma || vma->vm_start > start)
89263 return -EFAULT;
89264@@ -789,6 +793,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
89265 err = vma_replace_policy(vma, new_pol);
89266 if (err)
89267 goto out;
89268+
89269+#ifdef CONFIG_PAX_SEGMEXEC
89270+ vma_m = pax_find_mirror_vma(vma);
89271+ if (vma_m) {
89272+ err = vma_replace_policy(vma_m, new_pol);
89273+ if (err)
89274+ goto out;
89275+ }
89276+#endif
89277+
89278 }
89279
89280 out:
89281@@ -1252,6 +1266,17 @@ static long do_mbind(unsigned long start, unsigned long len,
89282
89283 if (end < start)
89284 return -EINVAL;
89285+
89286+#ifdef CONFIG_PAX_SEGMEXEC
89287+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
89288+ if (end > SEGMEXEC_TASK_SIZE)
89289+ return -EINVAL;
89290+ } else
89291+#endif
89292+
89293+ if (end > TASK_SIZE)
89294+ return -EINVAL;
89295+
89296 if (end == start)
89297 return 0;
89298
89299@@ -1480,8 +1505,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
89300 */
89301 tcred = __task_cred(task);
89302 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
89303- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
89304- !capable(CAP_SYS_NICE)) {
89305+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
89306 rcu_read_unlock();
89307 err = -EPERM;
89308 goto out_put;
89309@@ -1512,6 +1536,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
89310 goto out;
89311 }
89312
89313+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
89314+ if (mm != current->mm &&
89315+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
89316+ mmput(mm);
89317+ err = -EPERM;
89318+ goto out;
89319+ }
89320+#endif
89321+
89322 err = do_migrate_pages(mm, old, new,
89323 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
89324
89325diff --git a/mm/migrate.c b/mm/migrate.c
89326index c046927..6996b40 100644
89327--- a/mm/migrate.c
89328+++ b/mm/migrate.c
89329@@ -1404,8 +1404,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
89330 */
89331 tcred = __task_cred(task);
89332 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
89333- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
89334- !capable(CAP_SYS_NICE)) {
89335+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
89336 rcu_read_unlock();
89337 err = -EPERM;
89338 goto out;
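
This hunk and the identical one in mm/mempolicy.c tighten the migrate_pages/move_pages permission predicate by dropping the uid == task-uid allowance: matching the target's saved or real uid with the caller's euid, or the target's saved uid with the caller's uid, still passes, but a mere same-real-uid relationship no longer does. A side-by-side sketch of the two predicates (struct layout is illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

struct caller { unsigned euid, uid; };
struct target { unsigned suid, uid; };

static bool allowed_before(struct caller c, struct target t, bool nice_cap)
{
    return c.euid == t.suid || c.euid == t.uid ||
           c.uid == t.suid || c.uid == t.uid || nice_cap;
}

static bool allowed_after(struct caller c, struct target t, bool nice_cap)
{
    return c.euid == t.suid || c.euid == t.uid ||
           c.uid == t.suid || nice_cap;
}

int main(void)
{
    /* setuid-root caller whose real uid matches the target's real uid */
    struct caller c = { .euid = 500, .uid = 1000 };
    struct target t = { .suid = 0, .uid = 1000 };
    printf("before=%d after=%d\n",
           allowed_before(c, t, false), allowed_after(c, t, false));
    return 0;
}
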
89339diff --git a/mm/mlock.c b/mm/mlock.c
89340index d480cd6..0f98458 100644
89341--- a/mm/mlock.c
89342+++ b/mm/mlock.c
89343@@ -14,6 +14,7 @@
89344 #include <linux/pagevec.h>
89345 #include <linux/mempolicy.h>
89346 #include <linux/syscalls.h>
89347+#include <linux/security.h>
89348 #include <linux/sched.h>
89349 #include <linux/export.h>
89350 #include <linux/rmap.h>
89351@@ -568,7 +569,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
89352 {
89353 unsigned long nstart, end, tmp;
89354 struct vm_area_struct * vma, * prev;
89355- int error;
89356+ int error = 0;
89357
89358 VM_BUG_ON(start & ~PAGE_MASK);
89359 VM_BUG_ON(len != PAGE_ALIGN(len));
89360@@ -577,6 +578,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
89361 return -EINVAL;
89362 if (end == start)
89363 return 0;
89364+ if (end > TASK_SIZE)
89365+ return -EINVAL;
89366+
89367 vma = find_vma(current->mm, start);
89368 if (!vma || vma->vm_start > start)
89369 return -ENOMEM;
89370@@ -588,6 +592,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
89371 for (nstart = start ; ; ) {
89372 vm_flags_t newflags;
89373
89374+#ifdef CONFIG_PAX_SEGMEXEC
89375+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
89376+ break;
89377+#endif
89378+
89379 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
89380
89381 newflags = vma->vm_flags & ~VM_LOCKED;
89382@@ -700,6 +709,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
89383 lock_limit >>= PAGE_SHIFT;
89384
89385 /* check against resource limits */
89386+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
89387 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
89388 error = do_mlock(start, len, 1);
89389 up_write(&current->mm->mmap_sem);
89390@@ -734,6 +744,11 @@ static int do_mlockall(int flags)
89391 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
89392 vm_flags_t newflags;
89393
89394+#ifdef CONFIG_PAX_SEGMEXEC
89395+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
89396+ break;
89397+#endif
89398+
89399 newflags = vma->vm_flags & ~VM_LOCKED;
89400 if (flags & MCL_CURRENT)
89401 newflags |= VM_LOCKED;
89402@@ -767,6 +782,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
89403 lock_limit >>= PAGE_SHIFT;
89404
89405 ret = -ENOMEM;
89406+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
89407 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
89408 capable(CAP_IPC_LOCK))
89409 ret = do_mlockall(flags);
89410diff --git a/mm/mmap.c b/mm/mmap.c
89411index 362e5f1..8968e02 100644
89412--- a/mm/mmap.c
89413+++ b/mm/mmap.c
89414@@ -36,6 +36,7 @@
89415 #include <linux/sched/sysctl.h>
89416 #include <linux/notifier.h>
89417 #include <linux/memory.h>
89418+#include <linux/random.h>
89419
89420 #include <asm/uaccess.h>
89421 #include <asm/cacheflush.h>
89422@@ -52,6 +53,16 @@
89423 #define arch_rebalance_pgtables(addr, len) (addr)
89424 #endif
89425
89426+static inline void verify_mm_writelocked(struct mm_struct *mm)
89427+{
89428+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
89429+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
89430+ up_read(&mm->mmap_sem);
89431+ BUG();
89432+ }
89433+#endif
89434+}
89435+
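
verify_mm_writelocked(), moved up here and armed under CONFIG_PAX as well as CONFIG_DEBUG_VM, checks write ownership by negation: if a read trylock on mmap_sem succeeds, no writer can be holding it, so the invariant is violated. A userspace analogue with POSIX rwlocks (on glibc, tryrdlock under a self-held write lock fails with EBUSY; treat this as a sketch of the pattern, not the kernel primitive):

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static void verify_writelocked(pthread_rwlock_t *l)
{
    if (pthread_rwlock_tryrdlock(l) == 0) {  /* got a read lock => no writer */
        pthread_rwlock_unlock(l);
        assert(0 && "caller must hold the write lock");
    }
}

int main(void)
{
    pthread_rwlock_t l = PTHREAD_RWLOCK_INITIALIZER;
    pthread_rwlock_wrlock(&l);
    verify_writelocked(&l);                  /* passes: tryrdlock fails */
    pthread_rwlock_unlock(&l);
    printf("ok\n");
    return 0;
}
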
89436 static void unmap_region(struct mm_struct *mm,
89437 struct vm_area_struct *vma, struct vm_area_struct *prev,
89438 unsigned long start, unsigned long end);
89439@@ -71,16 +82,25 @@ static void unmap_region(struct mm_struct *mm,
89440 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
89441 *
89442 */
89443-pgprot_t protection_map[16] = {
89444+pgprot_t protection_map[16] __read_only = {
89445 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
89446 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
89447 };
89448
89449-pgprot_t vm_get_page_prot(unsigned long vm_flags)
89450+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
89451 {
89452- return __pgprot(pgprot_val(protection_map[vm_flags &
89453+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
89454 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
89455 pgprot_val(arch_vm_get_page_prot(vm_flags)));
89456+
89457+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
89458+ if (!(__supported_pte_mask & _PAGE_NX) &&
89459+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
89460+ (vm_flags & (VM_READ | VM_WRITE)))
89461+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
89462+#endif
89463+
89464+ return prot;
89465 }
89466 EXPORT_SYMBOL(vm_get_page_prot);
89467
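
vm_get_page_prot() above is still a 16-entry table lookup; the PaX addition only strips execute rights afterwards for the PAGEEXEC-without-NX case on x86-32. The index is simply the low four protection bits of vm_flags, as this sketch shows (flag values as in include/linux/mm.h):

#include <stdio.h>

#define VM_READ   0x1UL
#define VM_WRITE  0x2UL
#define VM_EXEC   0x4UL
#define VM_SHARED 0x8UL

int main(void)
{
    unsigned long vm_flags = VM_READ | VM_EXEC;   /* private r-x mapping */
    unsigned idx = vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED);
    printf("protection_map index: %u (-> __P101)\n", idx);  /* prints 5 */
    return 0;
}
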
89468@@ -89,6 +109,7 @@ int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
89469 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
89470 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
89471 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
89472+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
89473 /*
89474 * Make sure vm_committed_as in one cacheline and not cacheline shared with
89475 * other variables. It can be updated by several CPUs frequently.
89476@@ -247,6 +268,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
89477 struct vm_area_struct *next = vma->vm_next;
89478
89479 might_sleep();
89480+ BUG_ON(vma->vm_mirror);
89481 if (vma->vm_ops && vma->vm_ops->close)
89482 vma->vm_ops->close(vma);
89483 if (vma->vm_file)
89484@@ -291,6 +313,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
89485 * not page aligned -Ram Gupta
89486 */
89487 rlim = rlimit(RLIMIT_DATA);
89488+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
89489 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
89490 (mm->end_data - mm->start_data) > rlim)
89491 goto out;
89492@@ -933,6 +956,12 @@ static int
89493 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
89494 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
89495 {
89496+
89497+#ifdef CONFIG_PAX_SEGMEXEC
89498+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
89499+ return 0;
89500+#endif
89501+
89502 if (is_mergeable_vma(vma, file, vm_flags) &&
89503 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
89504 if (vma->vm_pgoff == vm_pgoff)
89505@@ -952,6 +981,12 @@ static int
89506 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
89507 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
89508 {
89509+
89510+#ifdef CONFIG_PAX_SEGMEXEC
89511+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
89512+ return 0;
89513+#endif
89514+
89515 if (is_mergeable_vma(vma, file, vm_flags) &&
89516 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
89517 pgoff_t vm_pglen;
89518@@ -994,13 +1029,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
89519 struct vm_area_struct *vma_merge(struct mm_struct *mm,
89520 struct vm_area_struct *prev, unsigned long addr,
89521 unsigned long end, unsigned long vm_flags,
89522- struct anon_vma *anon_vma, struct file *file,
89523+ struct anon_vma *anon_vma, struct file *file,
89524 pgoff_t pgoff, struct mempolicy *policy)
89525 {
89526 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
89527 struct vm_area_struct *area, *next;
89528 int err;
89529
89530+#ifdef CONFIG_PAX_SEGMEXEC
89531+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
89532+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
89533+
89534+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
89535+#endif
89536+
89537 /*
89538 * We later require that vma->vm_flags == vm_flags,
89539 * so this tests vma->vm_flags & VM_SPECIAL, too.
89540@@ -1016,6 +1058,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
89541 if (next && next->vm_end == end) /* cases 6, 7, 8 */
89542 next = next->vm_next;
89543
89544+#ifdef CONFIG_PAX_SEGMEXEC
89545+ if (prev)
89546+ prev_m = pax_find_mirror_vma(prev);
89547+ if (area)
89548+ area_m = pax_find_mirror_vma(area);
89549+ if (next)
89550+ next_m = pax_find_mirror_vma(next);
89551+#endif
89552+
89553 /*
89554 * Can it merge with the predecessor?
89555 */
89556@@ -1035,9 +1086,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
89557 /* cases 1, 6 */
89558 err = vma_adjust(prev, prev->vm_start,
89559 next->vm_end, prev->vm_pgoff, NULL);
89560- } else /* cases 2, 5, 7 */
89561+
89562+#ifdef CONFIG_PAX_SEGMEXEC
89563+ if (!err && prev_m)
89564+ err = vma_adjust(prev_m, prev_m->vm_start,
89565+ next_m->vm_end, prev_m->vm_pgoff, NULL);
89566+#endif
89567+
89568+ } else { /* cases 2, 5, 7 */
89569 err = vma_adjust(prev, prev->vm_start,
89570 end, prev->vm_pgoff, NULL);
89571+
89572+#ifdef CONFIG_PAX_SEGMEXEC
89573+ if (!err && prev_m)
89574+ err = vma_adjust(prev_m, prev_m->vm_start,
89575+ end_m, prev_m->vm_pgoff, NULL);
89576+#endif
89577+
89578+ }
89579 if (err)
89580 return NULL;
89581 khugepaged_enter_vma_merge(prev);
89582@@ -1051,12 +1117,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
89583 mpol_equal(policy, vma_policy(next)) &&
89584 can_vma_merge_before(next, vm_flags,
89585 anon_vma, file, pgoff+pglen)) {
89586- if (prev && addr < prev->vm_end) /* case 4 */
89587+ if (prev && addr < prev->vm_end) { /* case 4 */
89588 err = vma_adjust(prev, prev->vm_start,
89589 addr, prev->vm_pgoff, NULL);
89590- else /* cases 3, 8 */
89591+
89592+#ifdef CONFIG_PAX_SEGMEXEC
89593+ if (!err && prev_m)
89594+ err = vma_adjust(prev_m, prev_m->vm_start,
89595+ addr_m, prev_m->vm_pgoff, NULL);
89596+#endif
89597+
89598+ } else { /* cases 3, 8 */
89599 err = vma_adjust(area, addr, next->vm_end,
89600 next->vm_pgoff - pglen, NULL);
89601+
89602+#ifdef CONFIG_PAX_SEGMEXEC
89603+ if (!err && area_m)
89604+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
89605+ next_m->vm_pgoff - pglen, NULL);
89606+#endif
89607+
89608+ }
89609 if (err)
89610 return NULL;
89611 khugepaged_enter_vma_merge(area);
89612@@ -1165,8 +1246,10 @@ none:
89613 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
89614 struct file *file, long pages)
89615 {
89616- const unsigned long stack_flags
89617- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
89618+
89619+#ifdef CONFIG_PAX_RANDMMAP
89620+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
89621+#endif
89622
89623 mm->total_vm += pages;
89624
89625@@ -1174,7 +1257,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
89626 mm->shared_vm += pages;
89627 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
89628 mm->exec_vm += pages;
89629- } else if (flags & stack_flags)
89630+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
89631 mm->stack_vm += pages;
89632 }
89633 #endif /* CONFIG_PROC_FS */
89634@@ -1212,7 +1295,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
89635 * (the exception is when the underlying filesystem is noexec
89636 * mounted, in which case we dont add PROT_EXEC.)
89637 */
89638- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
89639+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
89640 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
89641 prot |= PROT_EXEC;
89642
89643@@ -1238,7 +1321,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
89644 /* Obtain the address to map to. we verify (or select) it and ensure
89645 * that it represents a valid section of the address space.
89646 */
89647- addr = get_unmapped_area(file, addr, len, pgoff, flags);
89648+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
89649 if (addr & ~PAGE_MASK)
89650 return addr;
89651
89652@@ -1249,6 +1332,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
89653 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
89654 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
89655
89656+#ifdef CONFIG_PAX_MPROTECT
89657+ if (mm->pax_flags & MF_PAX_MPROTECT) {
89658+
89659+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
89660+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
89661+ mm->binfmt->handle_mmap)
89662+ mm->binfmt->handle_mmap(file);
89663+#endif
89664+
89665+#ifndef CONFIG_PAX_MPROTECT_COMPAT
89666+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
89667+ gr_log_rwxmmap(file);
89668+
89669+#ifdef CONFIG_PAX_EMUPLT
89670+ vm_flags &= ~VM_EXEC;
89671+#else
89672+ return -EPERM;
89673+#endif
89674+
89675+ }
89676+
89677+ if (!(vm_flags & VM_EXEC))
89678+ vm_flags &= ~VM_MAYEXEC;
89679+#else
89680+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
89681+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
89682+#endif
89683+ else
89684+ vm_flags &= ~VM_MAYWRITE;
89685+ }
89686+#endif
89687+
89688+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
89689+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
89690+ vm_flags &= ~VM_PAGEEXEC;
89691+#endif
89692+
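
Net effect of the MPROTECT block above: a mapping requesting both VM_WRITE and VM_EXEC is logged and either loses VM_EXEC (with EMUPLT) or is refused outright, and a mapping created without VM_EXEC also loses VM_MAYEXEC, so it cannot be made executable later via mprotect(). A userspace probe of the first rule (the outcome depends on the running kernel's PaX configuration):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)   /* PaX MPROTECT kernel: EPERM */
        printf("W|X mapping refused: %s\n", strerror(errno));
    else                   /* stock kernel: succeeds */
        printf("W|X mapping allowed at %p\n", p);
    return 0;
}
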
89693 if (flags & MAP_LOCKED)
89694 if (!can_do_mlock())
89695 return -EPERM;
89696@@ -1260,6 +1380,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
89697 locked += mm->locked_vm;
89698 lock_limit = rlimit(RLIMIT_MEMLOCK);
89699 lock_limit >>= PAGE_SHIFT;
89700+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
89701 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
89702 return -EAGAIN;
89703 }
89704@@ -1344,6 +1465,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
89705 vm_flags |= VM_NORESERVE;
89706 }
89707
89708+ if (!gr_acl_handle_mmap(file, prot))
89709+ return -EACCES;
89710+
89711 addr = mmap_region(file, addr, len, vm_flags, pgoff);
89712 if (!IS_ERR_VALUE(addr) &&
89713 ((vm_flags & VM_LOCKED) ||
89714@@ -1437,7 +1561,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
89715 vm_flags_t vm_flags = vma->vm_flags;
89716
89717 /* If it was private or non-writable, the write bit is already clear */
89718- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
89719+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
89720 return 0;
89721
89722 /* The backer wishes to know when pages are first written to? */
89723@@ -1483,7 +1607,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
89724 struct rb_node **rb_link, *rb_parent;
89725 unsigned long charged = 0;
89726
89727+#ifdef CONFIG_PAX_SEGMEXEC
89728+ struct vm_area_struct *vma_m = NULL;
89729+#endif
89730+
89731+ /*
89732+ * mm->mmap_sem is required to protect against another thread
89733+ * changing the mappings in case we sleep.
89734+ */
89735+ verify_mm_writelocked(mm);
89736+
89737 /* Check against address space limit. */
89738+
89739+#ifdef CONFIG_PAX_RANDMMAP
89740+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
89741+#endif
89742+
89743 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
89744 unsigned long nr_pages;
89745
89746@@ -1502,11 +1641,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
89747
89748 /* Clear old maps */
89749 error = -ENOMEM;
89750-munmap_back:
89751 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
89752 if (do_munmap(mm, addr, len))
89753 return -ENOMEM;
89754- goto munmap_back;
89755+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
89756 }
89757
89758 /*
89759@@ -1537,6 +1675,16 @@ munmap_back:
89760 goto unacct_error;
89761 }
89762
89763+#ifdef CONFIG_PAX_SEGMEXEC
89764+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
89765+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
89766+ if (!vma_m) {
89767+ error = -ENOMEM;
89768+ goto free_vma;
89769+ }
89770+ }
89771+#endif
89772+
89773 vma->vm_mm = mm;
89774 vma->vm_start = addr;
89775 vma->vm_end = addr + len;
89776@@ -1556,6 +1704,13 @@ munmap_back:
89777 if (error)
89778 goto unmap_and_free_vma;
89779
89780+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
89781+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
89782+ vma->vm_flags |= VM_PAGEEXEC;
89783+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
89784+ }
89785+#endif
89786+
89787 /* Can addr have changed??
89788 *
89789 * Answer: Yes, several device drivers can do it in their
89790@@ -1589,6 +1744,12 @@ munmap_back:
89791 }
89792
89793 vma_link(mm, vma, prev, rb_link, rb_parent);
89794+
89795+#ifdef CONFIG_PAX_SEGMEXEC
89796+ if (vma_m)
89797+ BUG_ON(pax_mirror_vma(vma_m, vma));
89798+#endif
89799+
89800 /* Once vma denies write, undo our temporary denial count */
89801 if (vm_flags & VM_DENYWRITE)
89802 allow_write_access(file);
89803@@ -1597,6 +1758,7 @@ out:
89804 perf_event_mmap(vma);
89805
89806 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
89807+ track_exec_limit(mm, addr, addr + len, vm_flags);
89808 if (vm_flags & VM_LOCKED) {
89809 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
89810 vma == get_gate_vma(current->mm)))
89811@@ -1629,6 +1791,12 @@ unmap_and_free_vma:
89812 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
89813 charged = 0;
89814 free_vma:
89815+
89816+#ifdef CONFIG_PAX_SEGMEXEC
89817+ if (vma_m)
89818+ kmem_cache_free(vm_area_cachep, vma_m);
89819+#endif
89820+
89821 kmem_cache_free(vm_area_cachep, vma);
89822 unacct_error:
89823 if (charged)
89824@@ -1636,7 +1804,63 @@ unacct_error:
89825 return error;
89826 }
89827
89828-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
89829+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
89830+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
89831+{
89832+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
89833+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
89834+
89835+ return 0;
89836+}
89837+#endif
89838+
89839+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
89840+{
89841+ if (!vma) {
89842+#ifdef CONFIG_STACK_GROWSUP
89843+ if (addr > sysctl_heap_stack_gap)
89844+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
89845+ else
89846+ vma = find_vma(current->mm, 0);
89847+ if (vma && (vma->vm_flags & VM_GROWSUP))
89848+ return false;
89849+#endif
89850+ return true;
89851+ }
89852+
89853+ if (addr + len > vma->vm_start)
89854+ return false;
89855+
89856+ if (vma->vm_flags & VM_GROWSDOWN)
89857+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
89858+#ifdef CONFIG_STACK_GROWSUP
89859+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
89860+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
89861+#endif
89862+ else if (offset)
89863+ return offset <= vma->vm_start - addr - len;
89864+
89865+ return true;
89866+}
89867+
89868+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
89869+{
89870+ if (vma->vm_start < len)
89871+ return -ENOMEM;
89872+
89873+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
89874+ if (offset <= vma->vm_start - len)
89875+ return vma->vm_start - len - offset;
89876+ else
89877+ return -ENOMEM;
89878+ }
89879+
89880+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
89881+ return vma->vm_start - len - sysctl_heap_stack_gap;
89882+ return -ENOMEM;
89883+}
89884+
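
check_heap_stack_gap() and skip_heap_stack_gap() above replace the per-fault check_stack_guard_page() logic deleted earlier in this patch: the gap below a VM_GROWSDOWN stack (sysctl_heap_stack_gap, 64 KiB by default per the hunk above) is enforced when addresses are handed out rather than when the stack faults. A worked userspace sketch of the stack-neighbour case:

#include <stdbool.h>
#include <stdio.h>

#define HEAP_STACK_GAP (64UL * 1024)   /* default from the hunk above */

/* [addr, addr+len) is acceptable below a VM_GROWSDOWN vma only if at
 * least HEAP_STACK_GAP bytes remain under vm_start */
static bool gap_ok(unsigned long vm_start, unsigned long addr, unsigned long len)
{
    if (addr + len > vm_start)
        return false;                  /* would overlap the stack vma */
    return HEAP_STACK_GAP <= vm_start - addr - len;
}

int main(void)
{
    unsigned long stack_start = 0x7fff0000UL;
    printf("%d\n", gap_ok(stack_start, 0x7ffd0000UL, 0x10000)); /* 64 KiB left -> 1 */
    printf("%d\n", gap_ok(stack_start, 0x7ffe0000UL, 0x10000)); /* no gap -> 0 */
    return 0;
}
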
89885+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
89886 {
89887 /*
89888 * We implement the search by looking for an rbtree node that
89889@@ -1684,11 +1908,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
89890 }
89891 }
89892
89893- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
89894+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
89895 check_current:
89896 /* Check if current node has a suitable gap */
89897 if (gap_start > high_limit)
89898 return -ENOMEM;
89899+
89900+ if (gap_end - gap_start > info->threadstack_offset)
89901+ gap_start += info->threadstack_offset;
89902+ else
89903+ gap_start = gap_end;
89904+
89905+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
89906+ if (gap_end - gap_start > sysctl_heap_stack_gap)
89907+ gap_start += sysctl_heap_stack_gap;
89908+ else
89909+ gap_start = gap_end;
89910+ }
89911+ if (vma->vm_flags & VM_GROWSDOWN) {
89912+ if (gap_end - gap_start > sysctl_heap_stack_gap)
89913+ gap_end -= sysctl_heap_stack_gap;
89914+ else
89915+ gap_end = gap_start;
89916+ }
89917 if (gap_end >= low_limit && gap_end - gap_start >= length)
89918 goto found;
89919
89920@@ -1738,7 +1980,7 @@ found:
89921 return gap_start;
89922 }
89923
89924-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
89925+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
89926 {
89927 struct mm_struct *mm = current->mm;
89928 struct vm_area_struct *vma;
89929@@ -1792,6 +2034,24 @@ check_current:
89930 gap_end = vma->vm_start;
89931 if (gap_end < low_limit)
89932 return -ENOMEM;
89933+
89934+ if (gap_end - gap_start > info->threadstack_offset)
89935+ gap_end -= info->threadstack_offset;
89936+ else
89937+ gap_end = gap_start;
89938+
89939+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
89940+ if (gap_end - gap_start > sysctl_heap_stack_gap)
89941+ gap_start += sysctl_heap_stack_gap;
89942+ else
89943+ gap_start = gap_end;
89944+ }
89945+ if (vma->vm_flags & VM_GROWSDOWN) {
89946+ if (gap_end - gap_start > sysctl_heap_stack_gap)
89947+ gap_end -= sysctl_heap_stack_gap;
89948+ else
89949+ gap_end = gap_start;
89950+ }
89951 if (gap_start <= high_limit && gap_end - gap_start >= length)
89952 goto found;
89953
89954@@ -1855,6 +2115,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
89955 struct mm_struct *mm = current->mm;
89956 struct vm_area_struct *vma;
89957 struct vm_unmapped_area_info info;
89958+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
89959
89960 if (len > TASK_SIZE - mmap_min_addr)
89961 return -ENOMEM;
89962@@ -1862,19 +2123,30 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
89963 if (flags & MAP_FIXED)
89964 return addr;
89965
89966+#ifdef CONFIG_PAX_RANDMMAP
89967+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
89968+#endif
89969+
89970 if (addr) {
89971 addr = PAGE_ALIGN(addr);
89972 vma = find_vma(mm, addr);
89973 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
89974- (!vma || addr + len <= vma->vm_start))
89975+ check_heap_stack_gap(vma, addr, len, offset))
89976 return addr;
89977 }
89978
89979 info.flags = 0;
89980 info.length = len;
89981 info.low_limit = TASK_UNMAPPED_BASE;
89982+
89983+#ifdef CONFIG_PAX_RANDMMAP
89984+ if (mm->pax_flags & MF_PAX_RANDMMAP)
89985+ info.low_limit += mm->delta_mmap;
89986+#endif
89987+
89988 info.high_limit = TASK_SIZE;
89989 info.align_mask = 0;
89990+ info.threadstack_offset = offset;
89991 return vm_unmapped_area(&info);
89992 }
89993 #endif
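
With MF_PAX_RANDMMAP set, the hunk above ignores caller-supplied address hints and raises the search floor by mm->delta_mmap, a per-mm random offset chosen at exec time. An illustrative sketch of the effect (base and delta width are made up for the demo, not the kernel's exact values):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define TASK_UNMAPPED_BASE 0x40000000UL   /* illustrative i386-style base */

int main(void)
{
    srand((unsigned)time(NULL));
    /* delta_mmap: a page-aligned random offset, here up to ~256 MiB */
    unsigned long delta_mmap = ((unsigned long)rand() & 0xFFFFUL) << 12;
    unsigned long low_limit = TASK_UNMAPPED_BASE + delta_mmap;

    printf("low_limit for this run: %#lx\n", low_limit);
    return 0;
}
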
89994@@ -1893,6 +2165,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
89995 struct mm_struct *mm = current->mm;
89996 unsigned long addr = addr0;
89997 struct vm_unmapped_area_info info;
89998+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
89999
90000 /* requested length too big for entire address space */
90001 if (len > TASK_SIZE - mmap_min_addr)
90002@@ -1901,12 +2174,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
90003 if (flags & MAP_FIXED)
90004 return addr;
90005
90006+#ifdef CONFIG_PAX_RANDMMAP
90007+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
90008+#endif
90009+
90010 /* requesting a specific address */
90011 if (addr) {
90012 addr = PAGE_ALIGN(addr);
90013 vma = find_vma(mm, addr);
90014 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
90015- (!vma || addr + len <= vma->vm_start))
90016+ check_heap_stack_gap(vma, addr, len, offset))
90017 return addr;
90018 }
90019
90020@@ -1915,6 +2192,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
90021 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
90022 info.high_limit = mm->mmap_base;
90023 info.align_mask = 0;
90024+ info.threadstack_offset = offset;
90025 addr = vm_unmapped_area(&info);
90026
90027 /*
90028@@ -1927,6 +2205,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
90029 VM_BUG_ON(addr != -ENOMEM);
90030 info.flags = 0;
90031 info.low_limit = TASK_UNMAPPED_BASE;
90032+
90033+#ifdef CONFIG_PAX_RANDMMAP
90034+ if (mm->pax_flags & MF_PAX_RANDMMAP)
90035+ info.low_limit += mm->delta_mmap;
90036+#endif
90037+
90038 info.high_limit = TASK_SIZE;
90039 addr = vm_unmapped_area(&info);
90040 }
90041@@ -2028,6 +2312,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
90042 return vma;
90043 }
90044
90045+#ifdef CONFIG_PAX_SEGMEXEC
90046+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
90047+{
90048+ struct vm_area_struct *vma_m;
90049+
90050+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
90051+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
90052+ BUG_ON(vma->vm_mirror);
90053+ return NULL;
90054+ }
90055+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
90056+ vma_m = vma->vm_mirror;
90057+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
90058+ BUG_ON(vma->vm_file != vma_m->vm_file);
90059+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
90060+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
90061+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
90062+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
90063+ return vma_m;
90064+}
90065+#endif
90066+
90067 /*
90068 * Verify that the stack growth is acceptable and
90069 * update accounting. This is shared with both the
90070@@ -2044,6 +2350,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
90071 return -ENOMEM;
90072
90073 /* Stack limit test */
90074+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
90075 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
90076 return -ENOMEM;
90077
90078@@ -2054,6 +2361,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
90079 locked = mm->locked_vm + grow;
90080 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
90081 limit >>= PAGE_SHIFT;
90082+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
90083 if (locked > limit && !capable(CAP_IPC_LOCK))
90084 return -ENOMEM;
90085 }
90086@@ -2083,37 +2391,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
90087 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
90088 * vma is the last one with address > vma->vm_end. Have to extend vma.
90089 */
90090+#ifndef CONFIG_IA64
90091+static
90092+#endif
90093 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
90094 {
90095 int error;
90096+ bool locknext;
90097
90098 if (!(vma->vm_flags & VM_GROWSUP))
90099 return -EFAULT;
90100
90101+ /* Also guard against wrapping around to address 0. */
90102+ if (address < PAGE_ALIGN(address+1))
90103+ address = PAGE_ALIGN(address+1);
90104+ else
90105+ return -ENOMEM;
90106+
90107 /*
90108 * We must make sure the anon_vma is allocated
90109 * so that the anon_vma locking is not a noop.
90110 */
90111 if (unlikely(anon_vma_prepare(vma)))
90112 return -ENOMEM;
90113+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
90114+ if (locknext && anon_vma_prepare(vma->vm_next))
90115+ return -ENOMEM;
90116 vma_lock_anon_vma(vma);
90117+ if (locknext)
90118+ vma_lock_anon_vma(vma->vm_next);
90119
90120 /*
90121 * vma->vm_start/vm_end cannot change under us because the caller
90122 * is required to hold the mmap_sem in read mode. We need the
90123- * anon_vma lock to serialize against concurrent expand_stacks.
90124- * Also guard against wrapping around to address 0.
90125+ * anon_vma locks to serialize against concurrent expand_stacks
90126+ * and expand_upwards.
90127 */
90128- if (address < PAGE_ALIGN(address+4))
90129- address = PAGE_ALIGN(address+4);
90130- else {
90131- vma_unlock_anon_vma(vma);
90132- return -ENOMEM;
90133- }
90134 error = 0;
90135
90136 /* Somebody else might have raced and expanded it already */
90137- if (address > vma->vm_end) {
90138+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
90139+ error = -ENOMEM;
90140+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
90141 unsigned long size, grow;
90142
90143 size = address - vma->vm_start;
90144@@ -2148,6 +2467,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
90145 }
90146 }
90147 }
90148+ if (locknext)
90149+ vma_unlock_anon_vma(vma->vm_next);
90150 vma_unlock_anon_vma(vma);
90151 khugepaged_enter_vma_merge(vma);
90152 validate_mm(vma->vm_mm);
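
The reworked expand_upwards() above moves the wrap-around guard before any locking and bases it on PAGE_ALIGN(address+1): for an address inside the topmost page, the aligned value wraps to 0, so "aligned <= original" detects overflow without magic offsets or an unlock path. A small demonstration of the predicate:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
    unsigned long a = ~0UL - 100;          /* inside the top page */
    unsigned long b = PAGE_ALIGN(a + 1);   /* wraps to 0 */
    printf("%#lx -> %#lx (%s)\n", a, b, a < b ? "ok" : "wrapped");
    return 0;
}
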
90153@@ -2162,6 +2483,8 @@ int expand_downwards(struct vm_area_struct *vma,
90154 unsigned long address)
90155 {
90156 int error;
90157+ bool lockprev = false;
90158+ struct vm_area_struct *prev;
90159
90160 /*
90161 * We must make sure the anon_vma is allocated
90162@@ -2175,6 +2498,15 @@ int expand_downwards(struct vm_area_struct *vma,
90163 if (error)
90164 return error;
90165
90166+ prev = vma->vm_prev;
90167+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
90168+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
90169+#endif
90170+ if (lockprev && anon_vma_prepare(prev))
90171+ return -ENOMEM;
90172+ if (lockprev)
90173+ vma_lock_anon_vma(prev);
90174+
90175 vma_lock_anon_vma(vma);
90176
90177 /*
90178@@ -2184,9 +2516,17 @@ int expand_downwards(struct vm_area_struct *vma,
90179 */
90180
90181 /* Somebody else might have raced and expanded it already */
90182- if (address < vma->vm_start) {
90183+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
90184+ error = -ENOMEM;
90185+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
90186 unsigned long size, grow;
90187
90188+#ifdef CONFIG_PAX_SEGMEXEC
90189+ struct vm_area_struct *vma_m;
90190+
90191+ vma_m = pax_find_mirror_vma(vma);
90192+#endif
90193+
90194 size = vma->vm_end - address;
90195 grow = (vma->vm_start - address) >> PAGE_SHIFT;
90196
90197@@ -2211,13 +2551,27 @@ int expand_downwards(struct vm_area_struct *vma,
90198 vma->vm_pgoff -= grow;
90199 anon_vma_interval_tree_post_update_vma(vma);
90200 vma_gap_update(vma);
90201+
90202+#ifdef CONFIG_PAX_SEGMEXEC
90203+ if (vma_m) {
90204+ anon_vma_interval_tree_pre_update_vma(vma_m);
90205+ vma_m->vm_start -= grow << PAGE_SHIFT;
90206+ vma_m->vm_pgoff -= grow;
90207+ anon_vma_interval_tree_post_update_vma(vma_m);
90208+ vma_gap_update(vma_m);
90209+ }
90210+#endif
90211+
90212 spin_unlock(&vma->vm_mm->page_table_lock);
90213
90214+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
90215 perf_event_mmap(vma);
90216 }
90217 }
90218 }
90219 vma_unlock_anon_vma(vma);
90220+ if (lockprev)
90221+ vma_unlock_anon_vma(prev);
90222 khugepaged_enter_vma_merge(vma);
90223 validate_mm(vma->vm_mm);
90224 return error;
90225@@ -2315,6 +2669,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
90226 do {
90227 long nrpages = vma_pages(vma);
90228
90229+#ifdef CONFIG_PAX_SEGMEXEC
90230+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
90231+ vma = remove_vma(vma);
90232+ continue;
90233+ }
90234+#endif
90235+
90236 if (vma->vm_flags & VM_ACCOUNT)
90237 nr_accounted += nrpages;
90238 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
90239@@ -2359,6 +2720,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
90240 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
90241 vma->vm_prev = NULL;
90242 do {
90243+
90244+#ifdef CONFIG_PAX_SEGMEXEC
90245+ if (vma->vm_mirror) {
90246+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
90247+ vma->vm_mirror->vm_mirror = NULL;
90248+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
90249+ vma->vm_mirror = NULL;
90250+ }
90251+#endif
90252+
90253 vma_rb_erase(vma, &mm->mm_rb);
90254 mm->map_count--;
90255 tail_vma = vma;
90256@@ -2384,14 +2755,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
90257 struct vm_area_struct *new;
90258 int err = -ENOMEM;
90259
90260+#ifdef CONFIG_PAX_SEGMEXEC
90261+ struct vm_area_struct *vma_m, *new_m = NULL;
90262+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
90263+#endif
90264+
90265 if (is_vm_hugetlb_page(vma) && (addr &
90266 ~(huge_page_mask(hstate_vma(vma)))))
90267 return -EINVAL;
90268
90269+#ifdef CONFIG_PAX_SEGMEXEC
90270+ vma_m = pax_find_mirror_vma(vma);
90271+#endif
90272+
90273 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
90274 if (!new)
90275 goto out_err;
90276
90277+#ifdef CONFIG_PAX_SEGMEXEC
90278+ if (vma_m) {
90279+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
90280+ if (!new_m) {
90281+ kmem_cache_free(vm_area_cachep, new);
90282+ goto out_err;
90283+ }
90284+ }
90285+#endif
90286+
90287 /* most fields are the same, copy all, and then fixup */
90288 *new = *vma;
90289
90290@@ -2404,6 +2794,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
90291 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
90292 }
90293
90294+#ifdef CONFIG_PAX_SEGMEXEC
90295+ if (vma_m) {
90296+ *new_m = *vma_m;
90297+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
90298+ new_m->vm_mirror = new;
90299+ new->vm_mirror = new_m;
90300+
90301+ if (new_below)
90302+ new_m->vm_end = addr_m;
90303+ else {
90304+ new_m->vm_start = addr_m;
90305+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
90306+ }
90307+ }
90308+#endif
90309+
90310 err = vma_dup_policy(vma, new);
90311 if (err)
90312 goto out_free_vma;
90313@@ -2423,6 +2829,38 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
90314 else
90315 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
90316
90317+#ifdef CONFIG_PAX_SEGMEXEC
90318+ if (!err && vma_m) {
90319+ struct mempolicy *pol = vma_policy(new);
90320+
90321+ if (anon_vma_clone(new_m, vma_m))
90322+ goto out_free_mpol;
90323+
90324+ mpol_get(pol);
90325+ set_vma_policy(new_m, pol);
90326+
90327+ if (new_m->vm_file)
90328+ get_file(new_m->vm_file);
90329+
90330+ if (new_m->vm_ops && new_m->vm_ops->open)
90331+ new_m->vm_ops->open(new_m);
90332+
90333+ if (new_below)
90334+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
90335+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
90336+ else
90337+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
90338+
90339+ if (err) {
90340+ if (new_m->vm_ops && new_m->vm_ops->close)
90341+ new_m->vm_ops->close(new_m);
90342+ if (new_m->vm_file)
90343+ fput(new_m->vm_file);
90344+ mpol_put(pol);
90345+ }
90346+ }
90347+#endif
90348+
90349 /* Success. */
90350 if (!err)
90351 return 0;
90352@@ -2432,10 +2870,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
90353 new->vm_ops->close(new);
90354 if (new->vm_file)
90355 fput(new->vm_file);
90356- unlink_anon_vmas(new);
90357 out_free_mpol:
90358 mpol_put(vma_policy(new));
90359 out_free_vma:
90360+
90361+#ifdef CONFIG_PAX_SEGMEXEC
90362+ if (new_m) {
90363+ unlink_anon_vmas(new_m);
90364+ kmem_cache_free(vm_area_cachep, new_m);
90365+ }
90366+#endif
90367+
90368+ unlink_anon_vmas(new);
90369 kmem_cache_free(vm_area_cachep, new);
90370 out_err:
90371 return err;
90372@@ -2448,6 +2894,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
90373 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
90374 unsigned long addr, int new_below)
90375 {
90376+
90377+#ifdef CONFIG_PAX_SEGMEXEC
90378+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
90379+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
90380+ if (mm->map_count >= sysctl_max_map_count-1)
90381+ return -ENOMEM;
90382+ } else
90383+#endif
90384+
90385 if (mm->map_count >= sysctl_max_map_count)
90386 return -ENOMEM;
90387
90388@@ -2459,11 +2914,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
90389 * work. This now handles partial unmappings.
90390 * Jeremy Fitzhardinge <jeremy@goop.org>
90391 */
90392+#ifdef CONFIG_PAX_SEGMEXEC
90393 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
90394 {
90395+ int ret = __do_munmap(mm, start, len);
90396+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
90397+ return ret;
90398+
90399+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
90400+}
90401+
90402+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
90403+#else
90404+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
90405+#endif
90406+{
90407 unsigned long end;
90408 struct vm_area_struct *vma, *prev, *last;
90409
90410+ /*
90411+ * mm->mmap_sem is required to protect against another thread
90412+ * changing the mappings in case we sleep.
90413+ */
90414+ verify_mm_writelocked(mm);
90415+
90416 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
90417 return -EINVAL;
90418
90419@@ -2538,6 +3012,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
90420 /* Fix up all other VM information */
90421 remove_vma_list(mm, vma);
90422
90423+ track_exec_limit(mm, start, end, 0UL);
90424+
90425 return 0;
90426 }
90427
90428@@ -2546,6 +3022,13 @@ int vm_munmap(unsigned long start, size_t len)
90429 int ret;
90430 struct mm_struct *mm = current->mm;
90431
90432+
90433+#ifdef CONFIG_PAX_SEGMEXEC
90434+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
90435+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
90436+ return -EINVAL;
90437+#endif
90438+
90439 down_write(&mm->mmap_sem);
90440 ret = do_munmap(mm, start, len);
90441 up_write(&mm->mmap_sem);
90442@@ -2559,16 +3042,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
90443 return vm_munmap(addr, len);
90444 }
90445
90446-static inline void verify_mm_writelocked(struct mm_struct *mm)
90447-{
90448-#ifdef CONFIG_DEBUG_VM
90449- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
90450- WARN_ON(1);
90451- up_read(&mm->mmap_sem);
90452- }
90453-#endif
90454-}
90455-
90456 /*
90457 * this is really a simplified "do_mmap". it only handles
90458 * anonymous maps. eventually we may be able to do some
90459@@ -2582,6 +3055,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
90460 struct rb_node ** rb_link, * rb_parent;
90461 pgoff_t pgoff = addr >> PAGE_SHIFT;
90462 int error;
90463+ unsigned long charged;
90464
90465 len = PAGE_ALIGN(len);
90466 if (!len)
90467@@ -2589,16 +3063,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
90468
90469 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
90470
90471+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
90472+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
90473+ flags &= ~VM_EXEC;
90474+
90475+#ifdef CONFIG_PAX_MPROTECT
90476+ if (mm->pax_flags & MF_PAX_MPROTECT)
90477+ flags &= ~VM_MAYEXEC;
90478+#endif
90479+
90480+ }
90481+#endif
90482+
90483 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
90484 if (error & ~PAGE_MASK)
90485 return error;
90486
90487+ charged = len >> PAGE_SHIFT;
90488+
90489 /*
90490 * mlock MCL_FUTURE?
90491 */
90492 if (mm->def_flags & VM_LOCKED) {
90493 unsigned long locked, lock_limit;
90494- locked = len >> PAGE_SHIFT;
90495+ locked = charged;
90496 locked += mm->locked_vm;
90497 lock_limit = rlimit(RLIMIT_MEMLOCK);
90498 lock_limit >>= PAGE_SHIFT;
90499@@ -2615,21 +3103,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
90500 /*
90501 * Clear old maps. this also does some error checking for us
90502 */
90503- munmap_back:
90504 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
90505 if (do_munmap(mm, addr, len))
90506 return -ENOMEM;
90507- goto munmap_back;
90508+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
90509 }
90510
90511 /* Check against address space limits *after* clearing old maps... */
90512- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
90513+ if (!may_expand_vm(mm, charged))
90514 return -ENOMEM;
90515
90516 if (mm->map_count > sysctl_max_map_count)
90517 return -ENOMEM;
90518
90519- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
90520+ if (security_vm_enough_memory_mm(mm, charged))
90521 return -ENOMEM;
90522
90523 /* Can we just expand an old private anonymous mapping? */
90524@@ -2643,7 +3130,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
90525 */
90526 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
90527 if (!vma) {
90528- vm_unacct_memory(len >> PAGE_SHIFT);
90529+ vm_unacct_memory(charged);
90530 return -ENOMEM;
90531 }
90532
90533@@ -2657,10 +3144,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
90534 vma_link(mm, vma, prev, rb_link, rb_parent);
90535 out:
90536 perf_event_mmap(vma);
90537- mm->total_vm += len >> PAGE_SHIFT;
90538+ mm->total_vm += charged;
90539 if (flags & VM_LOCKED)
90540- mm->locked_vm += (len >> PAGE_SHIFT);
90541+ mm->locked_vm += charged;
90542 vma->vm_flags |= VM_SOFTDIRTY;
90543+ track_exec_limit(mm, addr, addr + len, flags);
90544 return addr;
90545 }
90546
90547@@ -2722,6 +3210,7 @@ void exit_mmap(struct mm_struct *mm)
90548 while (vma) {
90549 if (vma->vm_flags & VM_ACCOUNT)
90550 nr_accounted += vma_pages(vma);
90551+ vma->vm_mirror = NULL;
90552 vma = remove_vma(vma);
90553 }
90554 vm_unacct_memory(nr_accounted);
90555@@ -2738,6 +3227,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
90556 struct vm_area_struct *prev;
90557 struct rb_node **rb_link, *rb_parent;
90558
90559+#ifdef CONFIG_PAX_SEGMEXEC
90560+ struct vm_area_struct *vma_m = NULL;
90561+#endif
90562+
90563+ if (security_mmap_addr(vma->vm_start))
90564+ return -EPERM;
90565+
90566 /*
90567 * The vm_pgoff of a purely anonymous vma should be irrelevant
90568 * until its first write fault, when page's anon_vma and index
90569@@ -2761,7 +3257,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
90570 security_vm_enough_memory_mm(mm, vma_pages(vma)))
90571 return -ENOMEM;
90572
90573+#ifdef CONFIG_PAX_SEGMEXEC
90574+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
90575+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
90576+ if (!vma_m)
90577+ return -ENOMEM;
90578+ }
90579+#endif
90580+
90581 vma_link(mm, vma, prev, rb_link, rb_parent);
90582+
90583+#ifdef CONFIG_PAX_SEGMEXEC
90584+ if (vma_m)
90585+ BUG_ON(pax_mirror_vma(vma_m, vma));
90586+#endif
90587+
90588 return 0;
90589 }
90590
90591@@ -2780,6 +3290,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
90592 struct rb_node **rb_link, *rb_parent;
90593 bool faulted_in_anon_vma = true;
90594
90595+ BUG_ON(vma->vm_mirror);
90596+
90597 /*
90598 * If anonymous vma has not yet been faulted, update new pgoff
90599 * to match new location, to increase its chance of merging.
90600@@ -2844,6 +3356,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
90601 return NULL;
90602 }
90603
90604+#ifdef CONFIG_PAX_SEGMEXEC
90605+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
90606+{
90607+ struct vm_area_struct *prev_m;
90608+ struct rb_node **rb_link_m, *rb_parent_m;
90609+ struct mempolicy *pol_m;
90610+
90611+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
90612+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
90613+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
90614+ *vma_m = *vma;
90615+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
90616+ if (anon_vma_clone(vma_m, vma))
90617+ return -ENOMEM;
90618+ pol_m = vma_policy(vma_m);
90619+ mpol_get(pol_m);
90620+ set_vma_policy(vma_m, pol_m);
90621+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
90622+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
90623+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
90624+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
90625+ if (vma_m->vm_file)
90626+ get_file(vma_m->vm_file);
90627+ if (vma_m->vm_ops && vma_m->vm_ops->open)
90628+ vma_m->vm_ops->open(vma_m);
90629+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
90630+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
90631+ vma_m->vm_mirror = vma;
90632+ vma->vm_mirror = vma_m;
90633+ return 0;
90634+}
90635+#endif
90636+
90637 /*
90638 * Return true if the calling process may expand its vm space by the passed
90639 * number of pages
90640@@ -2855,6 +3400,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
90641
90642 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
90643
90644+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
90645 if (cur + npages > lim)
90646 return 0;
90647 return 1;
90648@@ -2925,6 +3471,22 @@ int install_special_mapping(struct mm_struct *mm,
90649 vma->vm_start = addr;
90650 vma->vm_end = addr + len;
90651
90652+#ifdef CONFIG_PAX_MPROTECT
90653+ if (mm->pax_flags & MF_PAX_MPROTECT) {
90654+#ifndef CONFIG_PAX_MPROTECT_COMPAT
90655+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
90656+ return -EPERM;
90657+ if (!(vm_flags & VM_EXEC))
90658+ vm_flags &= ~VM_MAYEXEC;
90659+#else
90660+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
90661+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
90662+#endif
90663+ else
90664+ vm_flags &= ~VM_MAYWRITE;
90665+ }
90666+#endif
90667+
90668 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
90669 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
90670
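The mm/mmap.c hunks above revolve around two PaX mechanisms: SEGMEXEC halves the 32-bit address space and keeps an executable mirror of each exec vma in the upper half (pax_mirror_vma), while track_exec_limit feeds the NX emulation implemented in mm/mprotect.c below. A minimal user-space sketch of the address arithmetic follows; the SEGMEXEC_TASK_SIZE value and the helper names are assumptions for illustration, not taken from the patch.

#include <stdio.h>
#include <stdbool.h>

/* Stand-in for the i386 SEGMEXEC split point (TASK_SIZE / 2); the exact
 * value is assumed here, not read out of the patch. */
#define SEGMEXEC_TASK_SIZE 0x60000000UL

/* Models the range check added to vm_munmap() above: under SEGMEXEC the
 * usable address space ends at SEGMEXEC_TASK_SIZE, so a request reaching
 * past it is rejected before mmap_sem is even taken. */
static bool segmexec_range_ok(unsigned long start, unsigned long len)
{
	return len <= SEGMEXEC_TASK_SIZE && start <= SEGMEXEC_TASK_SIZE - len;
}

/* An executable vma at [start, end) gets a read/exec twin shifted up by
 * SEGMEXEC_TASK_SIZE; pax_mirror_vma() above links that twin in. */
static void print_mirror(unsigned long start, unsigned long end)
{
	printf("vma   [%#lx, %#lx)\n", start, end);
	printf("vma_m [%#lx, %#lx)\n", start + SEGMEXEC_TASK_SIZE,
	       end + SEGMEXEC_TASK_SIZE);
}

int main(void)
{
	printf("unmap low range ok:  %d\n", segmexec_range_ok(0x100000UL, 4096));
	printf("unmap high range ok: %d\n", segmexec_range_ok(0x5ffff000UL, 8192));
	print_mirror(0x08048000UL, 0x08050000UL);
	return 0;
}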
90671diff --git a/mm/mprotect.c b/mm/mprotect.c
90672index 6c3f56f..b2340b0 100644
90673--- a/mm/mprotect.c
90674+++ b/mm/mprotect.c
90675@@ -23,10 +23,18 @@
90676 #include <linux/mmu_notifier.h>
90677 #include <linux/migrate.h>
90678 #include <linux/perf_event.h>
90679+#include <linux/sched/sysctl.h>
90680+
90681+#ifdef CONFIG_PAX_MPROTECT
90682+#include <linux/elf.h>
90683+#include <linux/binfmts.h>
90684+#endif
90685+
90686 #include <asm/uaccess.h>
90687 #include <asm/pgtable.h>
90688 #include <asm/cacheflush.h>
90689 #include <asm/tlbflush.h>
90690+#include <asm/mmu_context.h>
90691
90692 #ifndef pgprot_modify
90693 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
90694@@ -241,6 +249,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
90695 return pages;
90696 }
90697
90698+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
90699+/* called while holding the mmap semaphore for writing, except during stack expansion */
90700+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
90701+{
90702+ unsigned long oldlimit, newlimit = 0UL;
90703+
90704+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
90705+ return;
90706+
90707+ spin_lock(&mm->page_table_lock);
90708+ oldlimit = mm->context.user_cs_limit;
90709+ if ((prot & VM_EXEC) && oldlimit < end)
90710+ /* USER_CS limit moved up */
90711+ newlimit = end;
90712+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
90713+ /* USER_CS limit moved down */
90714+ newlimit = start;
90715+
90716+ if (newlimit) {
90717+ mm->context.user_cs_limit = newlimit;
90718+
90719+#ifdef CONFIG_SMP
90720+ wmb();
90721+ cpus_clear(mm->context.cpu_user_cs_mask);
90722+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
90723+#endif
90724+
90725+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
90726+ }
90727+ spin_unlock(&mm->page_table_lock);
90728+ if (newlimit == end) {
90729+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
90730+
90731+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
90732+ if (is_vm_hugetlb_page(vma))
90733+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
90734+ else
90735+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
90736+ }
90737+}
90738+#endif
90739+
90740 int
90741 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
90742 unsigned long start, unsigned long end, unsigned long newflags)
90743@@ -253,11 +303,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
90744 int error;
90745 int dirty_accountable = 0;
90746
90747+#ifdef CONFIG_PAX_SEGMEXEC
90748+ struct vm_area_struct *vma_m = NULL;
90749+ unsigned long start_m, end_m;
90750+
90751+ start_m = start + SEGMEXEC_TASK_SIZE;
90752+ end_m = end + SEGMEXEC_TASK_SIZE;
90753+#endif
90754+
90755 if (newflags == oldflags) {
90756 *pprev = vma;
90757 return 0;
90758 }
90759
90760+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
90761+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
90762+
90763+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
90764+ return -ENOMEM;
90765+
90766+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
90767+ return -ENOMEM;
90768+ }
90769+
90770 /*
90771 * If we make a private mapping writable we increase our commit;
90772 * but (without finer accounting) cannot reduce our commit if we
90773@@ -274,6 +342,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
90774 }
90775 }
90776
90777+#ifdef CONFIG_PAX_SEGMEXEC
90778+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
90779+ if (start != vma->vm_start) {
90780+ error = split_vma(mm, vma, start, 1);
90781+ if (error)
90782+ goto fail;
90783+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
90784+ *pprev = (*pprev)->vm_next;
90785+ }
90786+
90787+ if (end != vma->vm_end) {
90788+ error = split_vma(mm, vma, end, 0);
90789+ if (error)
90790+ goto fail;
90791+ }
90792+
90793+ if (pax_find_mirror_vma(vma)) {
90794+ error = __do_munmap(mm, start_m, end_m - start_m);
90795+ if (error)
90796+ goto fail;
90797+ } else {
90798+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
90799+ if (!vma_m) {
90800+ error = -ENOMEM;
90801+ goto fail;
90802+ }
90803+ vma->vm_flags = newflags;
90804+ error = pax_mirror_vma(vma_m, vma);
90805+ if (error) {
90806+ vma->vm_flags = oldflags;
90807+ goto fail;
90808+ }
90809+ }
90810+ }
90811+#endif
90812+
90813 /*
90814 * First try to merge with previous and/or next vma.
90815 */
90816@@ -304,9 +408,21 @@ success:
90817 * vm_flags and vm_page_prot are protected by the mmap_sem
90818 * held in write mode.
90819 */
90820+
90821+#ifdef CONFIG_PAX_SEGMEXEC
90822+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
90823+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
90824+#endif
90825+
90826 vma->vm_flags = newflags;
90827+
90828+#ifdef CONFIG_PAX_MPROTECT
90829+ if (mm->binfmt && mm->binfmt->handle_mprotect)
90830+ mm->binfmt->handle_mprotect(vma, newflags);
90831+#endif
90832+
90833 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
90834- vm_get_page_prot(newflags));
90835+ vm_get_page_prot(vma->vm_flags));
90836
90837 if (vma_wants_writenotify(vma)) {
90838 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
90839@@ -345,6 +461,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
90840 end = start + len;
90841 if (end <= start)
90842 return -ENOMEM;
90843+
90844+#ifdef CONFIG_PAX_SEGMEXEC
90845+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
90846+ if (end > SEGMEXEC_TASK_SIZE)
90847+ return -EINVAL;
90848+ } else
90849+#endif
90850+
90851+ if (end > TASK_SIZE)
90852+ return -EINVAL;
90853+
90854 if (!arch_validate_prot(prot))
90855 return -EINVAL;
90856
90857@@ -352,7 +479,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
90858 /*
90859 * Does the application expect PROT_READ to imply PROT_EXEC:
90860 */
90861- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
90862+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
90863 prot |= PROT_EXEC;
90864
90865 vm_flags = calc_vm_prot_bits(prot);
90866@@ -384,6 +511,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
90867 if (start > vma->vm_start)
90868 prev = vma;
90869
90870+#ifdef CONFIG_PAX_MPROTECT
90871+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
90872+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
90873+#endif
90874+
90875 for (nstart = start ; ; ) {
90876 unsigned long newflags;
90877
90878@@ -394,6 +526,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
90879
90880 /* newflags >> 4 shift VM_MAY% in place of VM_% */
90881 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
90882+ if (prot & (PROT_WRITE | PROT_EXEC))
90883+ gr_log_rwxmprotect(vma);
90884+
90885+ error = -EACCES;
90886+ goto out;
90887+ }
90888+
90889+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
90890 error = -EACCES;
90891 goto out;
90892 }
90893@@ -408,6 +548,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
90894 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
90895 if (error)
90896 goto out;
90897+
90898+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
90899+
90900 nstart = tmp;
90901
90902 if (nstart < prev->vm_end)
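The track_exec_limit() added above is the heart of the PAGEEXEC segment-limit emulation: on CPUs without hardware NX, the USER_CS limit is kept at the highest executable address, and every mapping change that grants or revokes VM_EXEC may move it. The newlimit computation can be modeled in user space as below; the VM_EXEC value matches the kernel's flag bit, everything else is a simplified stand-in.

#include <stdio.h>

#define VM_EXEC 0x00000004UL

/* User-space model of the newlimit computation in track_exec_limit()
 * above. The limit only moves when an exec mapping appears above it
 * (up), or the mapping that defined it loses exec (down). Returns 0 for
 * "unchanged". */
static unsigned long new_cs_limit(unsigned long oldlimit,
				  unsigned long start, unsigned long end,
				  unsigned long prot)
{
	if ((prot & VM_EXEC) && oldlimit < end)
		return end;	/* USER_CS limit moved up */
	if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
		return start;	/* USER_CS limit moved down */
	return 0;
}

int main(void)
{
	unsigned long limit = 0x08050000UL;

	/* new exec mapping above the current limit: limit moves up */
	printf("%#lx\n", new_cs_limit(limit, 0x40000000UL, 0x40010000UL, VM_EXEC));
	/* the defining mapping loses exec: limit moves down to start */
	printf("%#lx\n", new_cs_limit(limit, 0x08048000UL, 0x08060000UL, 0));
	return 0;
}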
90903diff --git a/mm/mremap.c b/mm/mremap.c
90904index 0843feb..4f5b2e6 100644
90905--- a/mm/mremap.c
90906+++ b/mm/mremap.c
90907@@ -144,6 +144,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
90908 continue;
90909 pte = ptep_get_and_clear(mm, old_addr, old_pte);
90910 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
90911+
90912+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
90913+ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
90914+ pte = pte_exprotect(pte);
90915+#endif
90916+
90917 pte = move_soft_dirty_pte(pte);
90918 set_pte_at(mm, new_addr, new_pte, pte);
90919 }
90920@@ -337,6 +343,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
90921 if (is_vm_hugetlb_page(vma))
90922 goto Einval;
90923
90924+#ifdef CONFIG_PAX_SEGMEXEC
90925+ if (pax_find_mirror_vma(vma))
90926+ goto Einval;
90927+#endif
90928+
90929 /* We can't remap across vm area boundaries */
90930 if (old_len > vma->vm_end - addr)
90931 goto Efault;
90932@@ -392,20 +403,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
90933 unsigned long ret = -EINVAL;
90934 unsigned long charged = 0;
90935 unsigned long map_flags;
90936+ unsigned long pax_task_size = TASK_SIZE;
90937
90938 if (new_addr & ~PAGE_MASK)
90939 goto out;
90940
90941- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
90942+#ifdef CONFIG_PAX_SEGMEXEC
90943+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
90944+ pax_task_size = SEGMEXEC_TASK_SIZE;
90945+#endif
90946+
90947+ pax_task_size -= PAGE_SIZE;
90948+
90949+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
90950 goto out;
90951
90952 /* Check if the location we're moving into overlaps the
90953 * old location at all, and fail if it does.
90954 */
90955- if ((new_addr <= addr) && (new_addr+new_len) > addr)
90956- goto out;
90957-
90958- if ((addr <= new_addr) && (addr+old_len) > new_addr)
90959+ if (addr + old_len > new_addr && new_addr + new_len > addr)
90960 goto out;
90961
90962 ret = do_munmap(mm, new_addr, new_len);
90963@@ -474,6 +490,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
90964 unsigned long ret = -EINVAL;
90965 unsigned long charged = 0;
90966 bool locked = false;
90967+ unsigned long pax_task_size = TASK_SIZE;
90968
90969 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
90970 return ret;
90971@@ -495,6 +512,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
90972 if (!new_len)
90973 return ret;
90974
90975+#ifdef CONFIG_PAX_SEGMEXEC
90976+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
90977+ pax_task_size = SEGMEXEC_TASK_SIZE;
90978+#endif
90979+
90980+ pax_task_size -= PAGE_SIZE;
90981+
90982+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
90983+ old_len > pax_task_size || addr > pax_task_size-old_len)
90984+ return ret;
90985+
90986 down_write(&current->mm->mmap_sem);
90987
90988 if (flags & MREMAP_FIXED) {
90989@@ -545,6 +573,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
90990 new_addr = addr;
90991 }
90992 ret = addr;
90993+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
90994 goto out;
90995 }
90996 }
90997@@ -568,7 +597,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
90998 goto out;
90999 }
91000
91001+ map_flags = vma->vm_flags;
91002 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
91003+ if (!(ret & ~PAGE_MASK)) {
91004+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
91005+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
91006+ }
91007 }
91008 out:
91009 if (ret & ~PAGE_MASK)
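The mremap_to() hunk above collapses two directional overlap checks into the standard half-open interval test. The two forms are equivalent for non-zero lengths, which the following self-contained sketch verifies by brute force over a small grid (names here are illustrative, not from the patch):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* The two deleted checks in mremap_to() above, kept for comparison. */
static bool overlaps_old(unsigned long addr, unsigned long old_len,
			 unsigned long new_addr, unsigned long new_len)
{
	if (new_addr <= addr && new_addr + new_len > addr)
		return true;
	if (addr <= new_addr && addr + old_len > new_addr)
		return true;
	return false;
}

/* The single replacement condition: two half-open ranges intersect iff
 * each starts before the other ends. */
static bool overlaps_new(unsigned long addr, unsigned long old_len,
			 unsigned long new_addr, unsigned long new_len)
{
	return addr + old_len > new_addr && new_addr + new_len > addr;
}

int main(void)
{
	/* Exhaust a small parameter grid (lengths >= 1) to show agreement. */
	for (unsigned long a = 0; a < 8; a++)
		for (unsigned long ol = 1; ol < 8; ol++)
			for (unsigned long n = 0; n < 8; n++)
				for (unsigned long nl = 1; nl < 8; nl++)
					assert(overlaps_old(a, ol, n, nl) ==
					       overlaps_new(a, ol, n, nl));
	puts("both overlap tests agree");
	return 0;
}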
91010diff --git a/mm/nommu.c b/mm/nommu.c
91011index ecd1f15..77039bd 100644
91012--- a/mm/nommu.c
91013+++ b/mm/nommu.c
91014@@ -64,7 +64,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
91015 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
91016 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
91017 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
91018-int heap_stack_gap = 0;
91019
91020 atomic_long_t mmap_pages_allocated;
91021
91022@@ -844,15 +843,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
91023 EXPORT_SYMBOL(find_vma);
91024
91025 /*
91026- * find a VMA
91027- * - we don't extend stack VMAs under NOMMU conditions
91028- */
91029-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
91030-{
91031- return find_vma(mm, addr);
91032-}
91033-
91034-/*
91035 * expand a stack to a given address
91036 * - not supported under NOMMU conditions
91037 */
91038@@ -1563,6 +1553,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
91039
91040 /* most fields are the same, copy all, and then fixup */
91041 *new = *vma;
91042+ INIT_LIST_HEAD(&new->anon_vma_chain);
91043 *region = *vma->vm_region;
91044 new->vm_region = region;
91045
91046@@ -1993,8 +1984,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
91047 }
91048 EXPORT_SYMBOL(generic_file_remap_pages);
91049
91050-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
91051- unsigned long addr, void *buf, int len, int write)
91052+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
91053+ unsigned long addr, void *buf, size_t len, int write)
91054 {
91055 struct vm_area_struct *vma;
91056
91057@@ -2035,8 +2026,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
91058 *
91059 * The caller must hold a reference on @mm.
91060 */
91061-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
91062- void *buf, int len, int write)
91063+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
91064+ void *buf, size_t len, int write)
91065 {
91066 return __access_remote_vm(NULL, mm, addr, buf, len, write);
91067 }
91068@@ -2045,7 +2036,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
91069 * Access another process' address space.
91070 * - source/target buffer must be kernel space
91071 */
91072-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
91073+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
91074 {
91075 struct mm_struct *mm;
91076
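The mm/nommu.c prototype changes above widen the access_*_vm() length from int to size_t/ssize_t. The motivation is ordinary integer truncation, which a two-line user-space demo makes concrete (values chosen for illustration only):

#include <stdio.h>

/* Why the hunks above widen the length parameter: a length at or above
 * 2GB wraps negative when squeezed into an int, turning a huge remote
 * access into a bogus (or short) one. */
int main(void)
{
	size_t len = 0x80000000UL;	/* a 2GB request */
	int truncated = (int)len;	/* what the old int prototype saw */

	printf("size_t len = %zu, as int = %d\n", len, truncated);
	return 0;
}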
91077diff --git a/mm/page-writeback.c b/mm/page-writeback.c
91078index 6380758..4064aec 100644
91079--- a/mm/page-writeback.c
91080+++ b/mm/page-writeback.c
91081@@ -690,7 +690,7 @@ static inline long long pos_ratio_polynom(unsigned long setpoint,
91082 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
91083 * - the bdi dirty thresh drops quickly due to change of JBOD workload
91084 */
91085-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
91086+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
91087 unsigned long thresh,
91088 unsigned long bg_thresh,
91089 unsigned long dirty,
91090diff --git a/mm/page_alloc.c b/mm/page_alloc.c
91091index dd886fa..7686339 100644
91092--- a/mm/page_alloc.c
91093+++ b/mm/page_alloc.c
91094@@ -61,6 +61,7 @@
91095 #include <linux/page-debug-flags.h>
91096 #include <linux/hugetlb.h>
91097 #include <linux/sched/rt.h>
91098+#include <linux/random.h>
91099
91100 #include <asm/sections.h>
91101 #include <asm/tlbflush.h>
91102@@ -354,7 +355,7 @@ out:
91103 * This usage means that zero-order pages may not be compound.
91104 */
91105
91106-static void free_compound_page(struct page *page)
91107+void free_compound_page(struct page *page)
91108 {
91109 __free_pages_ok(page, compound_order(page));
91110 }
91111@@ -712,6 +713,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
91112 int i;
91113 int bad = 0;
91114
91115+#ifdef CONFIG_PAX_MEMORY_SANITIZE
91116+ unsigned long index = 1UL << order;
91117+#endif
91118+
91119 trace_mm_page_free(page, order);
91120 kmemcheck_free_shadow(page, order);
91121
91122@@ -728,6 +733,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
91123 debug_check_no_obj_freed(page_address(page),
91124 PAGE_SIZE << order);
91125 }
91126+
91127+#ifdef CONFIG_PAX_MEMORY_SANITIZE
91128+ for (; index; --index)
91129+ sanitize_highpage(page + index - 1);
91130+#endif
91131+
91132 arch_free_page(page, order);
91133 kernel_map_pages(page, 1 << order, 0);
91134
91135@@ -750,6 +761,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
91136 local_irq_restore(flags);
91137 }
91138
91139+#ifdef CONFIG_PAX_LATENT_ENTROPY
91140+bool __meminitdata extra_latent_entropy;
91141+
91142+static int __init setup_pax_extra_latent_entropy(char *str)
91143+{
91144+ extra_latent_entropy = true;
91145+ return 0;
91146+}
91147+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
91148+
91149+volatile u64 latent_entropy __latent_entropy;
91150+EXPORT_SYMBOL(latent_entropy);
91151+#endif
91152+
91153 void __init __free_pages_bootmem(struct page *page, unsigned int order)
91154 {
91155 unsigned int nr_pages = 1 << order;
91156@@ -765,6 +790,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
91157 __ClearPageReserved(p);
91158 set_page_count(p, 0);
91159
91160+#ifdef CONFIG_PAX_LATENT_ENTROPY
91161+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
91162+ u64 hash = 0;
91163+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
91164+ const u64 *data = lowmem_page_address(page);
91165+
91166+ for (index = 0; index < end; index++)
91167+ hash ^= hash + data[index];
91168+ latent_entropy ^= hash;
91169+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
91170+ }
91171+#endif
91172+
91173 page_zone(page)->managed_pages += nr_pages;
91174 set_page_refcounted(page);
91175 __free_pages(page, order);
91176@@ -870,8 +908,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
91177 arch_alloc_page(page, order);
91178 kernel_map_pages(page, 1 << order, 1);
91179
91180+#ifndef CONFIG_PAX_MEMORY_SANITIZE
91181 if (gfp_flags & __GFP_ZERO)
91182 prep_zero_page(page, order, gfp_flags);
91183+#endif
91184
91185 if (order && (gfp_flags & __GFP_COMP))
91186 prep_compound_page(page, order);
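The CONFIG_PAX_LATENT_ENTROPY hunk in __free_pages_bootmem() above folds the contents of low boot-time pages into a running hash that is then mixed into the random pool via add_device_randomness(). The mixing step itself is a one-liner; the sketch below models it in user space over a fake page (PAGE_SIZE value and the fill pattern are assumptions):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Fold one page of 64-bit words into a hash using the same mixing step
 * as the patch: hash ^= hash + data[index]. */
static uint64_t fold_page(const uint64_t *data)
{
	uint64_t hash = 0;
	size_t end = PAGE_SIZE / sizeof(hash);

	for (size_t index = 0; index < end; index++)
		hash ^= hash + data[index];
	return hash;
}

int main(void)
{
	uint64_t page[PAGE_SIZE / sizeof(uint64_t)];

	memset(page, 0x5a, sizeof(page));	/* stand-in page contents */
	printf("folded hash: %#llx\n", (unsigned long long)fold_page(page));
	return 0;
}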
91187diff --git a/mm/page_io.c b/mm/page_io.c
91188index 8c79a47..a689e0d 100644
91189--- a/mm/page_io.c
91190+++ b/mm/page_io.c
91191@@ -260,7 +260,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
91192 struct file *swap_file = sis->swap_file;
91193 struct address_space *mapping = swap_file->f_mapping;
91194 struct iovec iov = {
91195- .iov_base = kmap(page),
91196+ .iov_base = (void __force_user *)kmap(page),
91197 .iov_len = PAGE_SIZE,
91198 };
91199
91200diff --git a/mm/percpu.c b/mm/percpu.c
91201index 8c8e08f..73a5cda 100644
91202--- a/mm/percpu.c
91203+++ b/mm/percpu.c
91204@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
91205 static unsigned int pcpu_high_unit_cpu __read_mostly;
91206
91207 /* the address of the first chunk which starts with the kernel static area */
91208-void *pcpu_base_addr __read_mostly;
91209+void *pcpu_base_addr __read_only;
91210 EXPORT_SYMBOL_GPL(pcpu_base_addr);
91211
91212 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
91213diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
91214index fd26d04..0cea1b0 100644
91215--- a/mm/process_vm_access.c
91216+++ b/mm/process_vm_access.c
91217@@ -13,6 +13,7 @@
91218 #include <linux/uio.h>
91219 #include <linux/sched.h>
91220 #include <linux/highmem.h>
91221+#include <linux/security.h>
91222 #include <linux/ptrace.h>
91223 #include <linux/slab.h>
91224 #include <linux/syscalls.h>
91225@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
91226 size_t iov_l_curr_offset = 0;
91227 ssize_t iov_len;
91228
91229+ return -ENOSYS; // PaX: until properly audited
91230+
91231 /*
91232 * Work out how many pages of struct pages we're going to need
91233 * when eventually calling get_user_pages
91234 */
91235 for (i = 0; i < riovcnt; i++) {
91236 iov_len = rvec[i].iov_len;
91237- if (iov_len > 0) {
91238- nr_pages_iov = ((unsigned long)rvec[i].iov_base
91239- + iov_len)
91240- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
91241- / PAGE_SIZE + 1;
91242- nr_pages = max(nr_pages, nr_pages_iov);
91243- }
91244+ if (iov_len <= 0)
91245+ continue;
91246+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
91247+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
91248+ nr_pages = max(nr_pages, nr_pages_iov);
91249 }
91250
91251 if (nr_pages == 0)
91252@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
91253 goto free_proc_pages;
91254 }
91255
91256+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
91257+ rc = -EPERM;
91258+ goto put_task_struct;
91259+ }
91260+
91261 mm = mm_access(task, PTRACE_MODE_ATTACH);
91262 if (!mm || IS_ERR(mm)) {
91263 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
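The restructured loop in process_vm_rw_core() above sizes the pinned-page array per iovec: the page index of the last byte minus the page index of the first byte, plus one. A small arithmetic sketch (PAGE_SIZE assumed to be 4096) shows the behavior, including the deliberate overshoot on exact page boundaries:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Upper bound on pages an iovec buffer can touch. It can overshoot by
 * one when base + len ends exactly on a page boundary, which is harmless
 * in the kernel code above since only an array upper bound is needed. */
static unsigned long pages_spanned(unsigned long base, unsigned long len)
{
	return (base + len) / PAGE_SIZE - base / PAGE_SIZE + 1;
}

int main(void)
{
	printf("%lu\n", pages_spanned(0x1000, 1));	/* 1: within one page */
	printf("%lu\n", pages_spanned(0x1ffc, 8));	/* 2: straddles a boundary */
	printf("%lu\n", pages_spanned(0x1000, 0x1000));	/* 2: boundary overshoot */
	return 0;
}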
91264diff --git a/mm/rmap.c b/mm/rmap.c
91265index fd3ee7a..e4baa1f 100644
91266--- a/mm/rmap.c
91267+++ b/mm/rmap.c
91268@@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
91269 struct anon_vma *anon_vma = vma->anon_vma;
91270 struct anon_vma_chain *avc;
91271
91272+#ifdef CONFIG_PAX_SEGMEXEC
91273+ struct anon_vma_chain *avc_m = NULL;
91274+#endif
91275+
91276 might_sleep();
91277 if (unlikely(!anon_vma)) {
91278 struct mm_struct *mm = vma->vm_mm;
91279@@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
91280 if (!avc)
91281 goto out_enomem;
91282
91283+#ifdef CONFIG_PAX_SEGMEXEC
91284+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
91285+ if (!avc_m)
91286+ goto out_enomem_free_avc;
91287+#endif
91288+
91289 anon_vma = find_mergeable_anon_vma(vma);
91290 allocated = NULL;
91291 if (!anon_vma) {
91292@@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
91293 /* page_table_lock to protect against threads */
91294 spin_lock(&mm->page_table_lock);
91295 if (likely(!vma->anon_vma)) {
91296+
91297+#ifdef CONFIG_PAX_SEGMEXEC
91298+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
91299+
91300+ if (vma_m) {
91301+ BUG_ON(vma_m->anon_vma);
91302+ vma_m->anon_vma = anon_vma;
91303+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
91304+ avc_m = NULL;
91305+ }
91306+#endif
91307+
91308 vma->anon_vma = anon_vma;
91309 anon_vma_chain_link(vma, avc, anon_vma);
91310 allocated = NULL;
91311@@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
91312
91313 if (unlikely(allocated))
91314 put_anon_vma(allocated);
91315+
91316+#ifdef CONFIG_PAX_SEGMEXEC
91317+ if (unlikely(avc_m))
91318+ anon_vma_chain_free(avc_m);
91319+#endif
91320+
91321 if (unlikely(avc))
91322 anon_vma_chain_free(avc);
91323 }
91324 return 0;
91325
91326 out_enomem_free_avc:
91327+
91328+#ifdef CONFIG_PAX_SEGMEXEC
91329+ if (avc_m)
91330+ anon_vma_chain_free(avc_m);
91331+#endif
91332+
91333 anon_vma_chain_free(avc);
91334 out_enomem:
91335 return -ENOMEM;
91336@@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
91337 * Attach the anon_vmas from src to dst.
91338 * Returns 0 on success, -ENOMEM on failure.
91339 */
91340-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
91341+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
91342 {
91343 struct anon_vma_chain *avc, *pavc;
91344 struct anon_vma *root = NULL;
91345@@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
91346 * the corresponding VMA in the parent process is attached to.
91347 * Returns 0 on success, non-zero on failure.
91348 */
91349-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
91350+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
91351 {
91352 struct anon_vma_chain *avc;
91353 struct anon_vma *anon_vma;
91354@@ -373,8 +407,10 @@ static void anon_vma_ctor(void *data)
91355 void __init anon_vma_init(void)
91356 {
91357 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
91358- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
91359- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
91360+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
91361+ anon_vma_ctor);
91362+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
91363+ SLAB_PANIC|SLAB_NO_SANITIZE);
91364 }
91365
91366 /*
91367@@ -600,7 +636,11 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
91368 spinlock_t *ptl;
91369
91370 if (unlikely(PageHuge(page))) {
91371+ /* when pud is not present, pte will be NULL */
91372 pte = huge_pte_offset(mm, address);
91373+ if (!pte)
91374+ return NULL;
91375+
91376 ptl = &mm->page_table_lock;
91377 goto check;
91378 }
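The mm/rmap.c hunks above follow a classic kernel pattern: the mirror vma's anon_vma_chain (avc_m) must be allocated with GFP_KERNEL before page_table_lock is taken (no sleeping under a spinlock), attached under the lock only if a mirror actually exists, and freed afterwards if it went unused. A user-space caricature of the pattern, with malloc standing in for the chain allocator and plain comments for the lock:

#include <stdlib.h>
#include <stdio.h>

struct avc { int linked; };	/* placeholder for anon_vma_chain */

int main(void)
{
	struct avc *avc_m = malloc(sizeof(*avc_m));	/* before the lock */
	int have_mirror = 0;	/* pretend pax_find_mirror_vma() found none */

	if (!avc_m)
		return 1;
	/* ... spin_lock(&mm->page_table_lock) ... */
	if (have_mirror) {
		avc_m->linked = 1;	/* anon_vma_chain_link() */
		avc_m = NULL;		/* ownership transferred */
	}
	/* ... spin_unlock(&mm->page_table_lock) ... */
	if (avc_m)
		free(avc_m);		/* unused pre-allocation, release it */
	puts("done");
	return 0;
}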
91379diff --git a/mm/shmem.c b/mm/shmem.c
91380index 8297623..6b9dfe9 100644
91381--- a/mm/shmem.c
91382+++ b/mm/shmem.c
91383@@ -33,7 +33,7 @@
91384 #include <linux/swap.h>
91385 #include <linux/aio.h>
91386
91387-static struct vfsmount *shm_mnt;
91388+struct vfsmount *shm_mnt;
91389
91390 #ifdef CONFIG_SHMEM
91391 /*
91392@@ -77,7 +77,7 @@ static struct vfsmount *shm_mnt;
91393 #define BOGO_DIRENT_SIZE 20
91394
91395 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
91396-#define SHORT_SYMLINK_LEN 128
91397+#define SHORT_SYMLINK_LEN 64
91398
91399 /*
91400 * shmem_fallocate and shmem_writepage communicate via inode->i_private
91401@@ -2232,6 +2232,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
91402 static int shmem_xattr_validate(const char *name)
91403 {
91404 struct { const char *prefix; size_t len; } arr[] = {
91405+
91406+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
91407+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
91408+#endif
91409+
91410 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
91411 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
91412 };
91413@@ -2287,6 +2292,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
91414 if (err)
91415 return err;
91416
91417+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
91418+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
91419+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
91420+ return -EOPNOTSUPP;
91421+ if (size > 8)
91422+ return -EINVAL;
91423+ }
91424+#endif
91425+
91426 return simple_xattr_set(&info->xattrs, name, value, size, flags);
91427 }
91428
91429@@ -2599,8 +2613,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
91430 int err = -ENOMEM;
91431
91432 /* Round up to L1_CACHE_BYTES to resist false sharing */
91433- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
91434- L1_CACHE_BYTES), GFP_KERNEL);
91435+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
91436 if (!sbinfo)
91437 return -ENOMEM;
91438
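The shmem xattr hunks above open up the "user." namespace on tmpfs just far enough to carry PaX flags: only user.pax.flags is accepted, and its value is capped at 8 bytes. The validation logic can be lifted almost verbatim into a user-space sketch; the string constants mirror the kernel's XATTR_* macros but are re-declared here for self-containment:

#include <stdio.h>
#include <string.h>
#include <errno.h>

#define XATTR_USER_PREFIX "user."
#define XATTR_NAME_PAX_FLAGS "user.pax.flags"

/* Model of the shmem_setxattr() check above: any other "user." attribute
 * is refused, and the flags value must stay short. */
static int validate_pax_xattr(const char *name, size_t size)
{
	if (!strncmp(name, XATTR_USER_PREFIX, strlen(XATTR_USER_PREFIX))) {
		if (strcmp(name, XATTR_NAME_PAX_FLAGS))
			return -EOPNOTSUPP;	/* only the PaX flags attr */
		if (size > 8)
			return -EINVAL;		/* flags string is short */
	}
	return 0;
}

int main(void)
{
	printf("user.pax.flags/4  -> %d\n", validate_pax_xattr("user.pax.flags", 4));
	printf("user.comment/4    -> %d\n", validate_pax_xattr("user.comment", 4));
	printf("user.pax.flags/16 -> %d\n", validate_pax_xattr("user.pax.flags", 16));
	return 0;
}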
91439diff --git a/mm/slab.c b/mm/slab.c
91440index 2580db0..0523956 100644
91441--- a/mm/slab.c
91442+++ b/mm/slab.c
91443@@ -366,10 +366,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
91444 if ((x)->max_freeable < i) \
91445 (x)->max_freeable = i; \
91446 } while (0)
91447-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
91448-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
91449-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
91450-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
91451+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
91452+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
91453+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
91454+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
91455+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
91456+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
91457 #else
91458 #define STATS_INC_ACTIVE(x) do { } while (0)
91459 #define STATS_DEC_ACTIVE(x) do { } while (0)
91460@@ -386,6 +388,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
91461 #define STATS_INC_ALLOCMISS(x) do { } while (0)
91462 #define STATS_INC_FREEHIT(x) do { } while (0)
91463 #define STATS_INC_FREEMISS(x) do { } while (0)
91464+#define STATS_INC_SANITIZED(x) do { } while (0)
91465+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
91466 #endif
91467
91468 #if DEBUG
91469@@ -477,7 +481,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
91470 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
91471 */
91472 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
91473- const struct slab *slab, void *obj)
91474+ const struct slab *slab, const void *obj)
91475 {
91476 u32 offset = (obj - slab->s_mem);
91477 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
91478@@ -1571,12 +1575,12 @@ void __init kmem_cache_init(void)
91479 */
91480
91481 kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
91482- kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
91483+ kmalloc_size(INDEX_AC), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
91484
91485 if (INDEX_AC != INDEX_NODE)
91486 kmalloc_caches[INDEX_NODE] =
91487 create_kmalloc_cache("kmalloc-node",
91488- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
91489+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
91490
91491 slab_early_init = 0;
91492
91493@@ -3577,6 +3581,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
91494 struct array_cache *ac = cpu_cache_get(cachep);
91495
91496 check_irq_off();
91497+
91498+#ifdef CONFIG_PAX_MEMORY_SANITIZE
91499+ if (pax_sanitize_slab) {
91500+ if (!(cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))) {
91501+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
91502+
91503+ if (cachep->ctor)
91504+ cachep->ctor(objp);
91505+
91506+ STATS_INC_SANITIZED(cachep);
91507+ } else
91508+ STATS_INC_NOT_SANITIZED(cachep);
91509+ }
91510+#endif
91511+
91512 kmemleak_free_recursive(objp, cachep->flags);
91513 objp = cache_free_debugcheck(cachep, objp, caller);
91514
91515@@ -3805,6 +3824,7 @@ void kfree(const void *objp)
91516
91517 if (unlikely(ZERO_OR_NULL_PTR(objp)))
91518 return;
91519+ VM_BUG_ON(!virt_addr_valid(objp));
91520 local_irq_save(flags);
91521 kfree_debugcheck(objp);
91522 c = virt_to_cache(objp);
91523@@ -4246,14 +4266,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
91524 }
91525 /* cpu stats */
91526 {
91527- unsigned long allochit = atomic_read(&cachep->allochit);
91528- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
91529- unsigned long freehit = atomic_read(&cachep->freehit);
91530- unsigned long freemiss = atomic_read(&cachep->freemiss);
91531+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
91532+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
91533+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
91534+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
91535
91536 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
91537 allochit, allocmiss, freehit, freemiss);
91538 }
91539+#ifdef CONFIG_PAX_MEMORY_SANITIZE
91540+ {
91541+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
91542+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
91543+
91544+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
91545+ }
91546+#endif
91547 #endif
91548 }
91549
91550@@ -4471,13 +4499,71 @@ static const struct file_operations proc_slabstats_operations = {
91551 static int __init slab_proc_init(void)
91552 {
91553 #ifdef CONFIG_DEBUG_SLAB_LEAK
91554- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
91555+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
91556 #endif
91557 return 0;
91558 }
91559 module_init(slab_proc_init);
91560 #endif
91561
91562+bool is_usercopy_object(const void *ptr)
91563+{
91564+ struct page *page;
91565+ struct kmem_cache *cachep;
91566+
91567+ if (ZERO_OR_NULL_PTR(ptr))
91568+ return false;
91569+
91570+ if (!slab_is_available())
91571+ return false;
91572+
91573+ if (!virt_addr_valid(ptr))
91574+ return false;
91575+
91576+ page = virt_to_head_page(ptr);
91577+
91578+ if (!PageSlab(page))
91579+ return false;
91580+
91581+ cachep = page->slab_cache;
91582+ return cachep->flags & SLAB_USERCOPY;
91583+}
91584+
91585+#ifdef CONFIG_PAX_USERCOPY
91586+const char *check_heap_object(const void *ptr, unsigned long n)
91587+{
91588+ struct page *page;
91589+ struct kmem_cache *cachep;
91590+ struct slab *slabp;
91591+ unsigned int objnr;
91592+ unsigned long offset;
91593+
91594+ if (ZERO_OR_NULL_PTR(ptr))
91595+ return "<null>";
91596+
91597+ if (!virt_addr_valid(ptr))
91598+ return NULL;
91599+
91600+ page = virt_to_head_page(ptr);
91601+
91602+ if (!PageSlab(page))
91603+ return NULL;
91604+
91605+ cachep = page->slab_cache;
91606+ if (!(cachep->flags & SLAB_USERCOPY))
91607+ return cachep->name;
91608+
91609+ slabp = page->slab_page;
91610+ objnr = obj_to_index(cachep, slabp, ptr);
91611+ BUG_ON(objnr >= cachep->num);
91612+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
91613+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
91614+ return NULL;
91615+
91616+ return cachep->name;
91617+}
91618+#endif
91619+
91620 /**
91621 * ksize - get the actual amount of memory allocated for a given object
91622 * @objp: Pointer to the object
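The check_heap_object() implementations added for SLAB above (and for SLUB further down) all reduce to the same bounds test: a usercopy of n bytes starting at ptr is allowed only if it stays inside the single slab object it starts in. The arithmetic is worth isolating; in the sketch below the object base and object_size are stand-ins for what the allocator derives from the page and slab metadata:

#include <stdio.h>
#include <stdbool.h>

/* Core PAX_USERCOPY bounds test: offset of ptr into its object, then
 * check that n bytes fit in the remainder of that object. */
static bool usercopy_ok(unsigned long obj_base, unsigned long object_size,
			unsigned long ptr, unsigned long n)
{
	unsigned long offset = ptr - obj_base;

	return offset <= object_size && n <= object_size - offset;
}

int main(void)
{
	/* a 64-byte object at 0x1000 */
	printf("%d\n", usercopy_ok(0x1000, 64, 0x1010, 16));	/* 1: inside */
	printf("%d\n", usercopy_ok(0x1000, 64, 0x1010, 64));	/* 0: spills over */
	return 0;
}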
91623diff --git a/mm/slab.h b/mm/slab.h
91624index a535033..2f98fe5 100644
91625--- a/mm/slab.h
91626+++ b/mm/slab.h
91627@@ -32,6 +32,15 @@ extern struct list_head slab_caches;
91628 /* The slab cache that manages slab cache information */
91629 extern struct kmem_cache *kmem_cache;
91630
91631+#ifdef CONFIG_PAX_MEMORY_SANITIZE
91632+#ifdef CONFIG_X86_64
91633+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
91634+#else
91635+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
91636+#endif
91637+extern bool pax_sanitize_slab;
91638+#endif
91639+
91640 unsigned long calculate_alignment(unsigned long flags,
91641 unsigned long align, unsigned long size);
91642
91643@@ -67,7 +76,8 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
91644
91645 /* Legal flag mask for kmem_cache_create(), for various configurations */
91646 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
91647- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
91648+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
91649+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
91650
91651 #if defined(CONFIG_DEBUG_SLAB)
91652 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
91653@@ -231,6 +241,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
91654 return s;
91655
91656 page = virt_to_head_page(x);
91657+
91658+ BUG_ON(!PageSlab(page));
91659+
91660 cachep = page->slab_cache;
91661 if (slab_equal_or_root(cachep, s))
91662 return cachep;
91663diff --git a/mm/slab_common.c b/mm/slab_common.c
91664index e2e98af..3b1a163 100644
91665--- a/mm/slab_common.c
91666+++ b/mm/slab_common.c
91667@@ -23,11 +23,22 @@
91668
91669 #include "slab.h"
91670
91671-enum slab_state slab_state;
91672+enum slab_state slab_state __read_only;
91673 LIST_HEAD(slab_caches);
91674 DEFINE_MUTEX(slab_mutex);
91675 struct kmem_cache *kmem_cache;
91676
91677+#ifdef CONFIG_PAX_MEMORY_SANITIZE
91678+bool pax_sanitize_slab __read_only = true;
91679+static int __init pax_sanitize_slab_setup(char *str)
91680+{
91681+ pax_sanitize_slab = !!simple_strtol(str, NULL, 0);
91682+ printk("%sabled PaX slab sanitization\n", pax_sanitize_slab ? "En" : "Dis");
91683+ return 1;
91684+}
91685+__setup("pax_sanitize_slab=", pax_sanitize_slab_setup);
91686+#endif
91687+
91688 #ifdef CONFIG_DEBUG_VM
91689 static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
91690 size_t size)
91691@@ -212,7 +223,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
91692
91693 err = __kmem_cache_create(s, flags);
91694 if (!err) {
91695- s->refcount = 1;
91696+ atomic_set(&s->refcount, 1);
91697 list_add(&s->list, &slab_caches);
91698 memcg_cache_list_add(memcg, s);
91699 } else {
91700@@ -258,8 +269,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
91701
91702 get_online_cpus();
91703 mutex_lock(&slab_mutex);
91704- s->refcount--;
91705- if (!s->refcount) {
91706+ if (atomic_dec_and_test(&s->refcount)) {
91707 list_del(&s->list);
91708
91709 if (!__kmem_cache_shutdown(s)) {
91710@@ -305,7 +315,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
91711 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
91712 name, size, err);
91713
91714- s->refcount = -1; /* Exempt from merging for now */
91715+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
91716 }
91717
91718 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
91719@@ -318,7 +328,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
91720
91721 create_boot_cache(s, name, size, flags);
91722 list_add(&s->list, &slab_caches);
91723- s->refcount = 1;
91724+ atomic_set(&s->refcount, 1);
91725 return s;
91726 }
91727
91728@@ -330,6 +340,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
91729 EXPORT_SYMBOL(kmalloc_dma_caches);
91730 #endif
91731
91732+#ifdef CONFIG_PAX_USERCOPY_SLABS
91733+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
91734+EXPORT_SYMBOL(kmalloc_usercopy_caches);
91735+#endif
91736+
91737 /*
91738 * Conversion table for small slabs sizes / 8 to the index in the
91739 * kmalloc array. This is necessary for slabs < 192 since we have non power
91740@@ -394,6 +409,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
91741 return kmalloc_dma_caches[index];
91742
91743 #endif
91744+
91745+#ifdef CONFIG_PAX_USERCOPY_SLABS
91746+ if (unlikely((flags & GFP_USERCOPY)))
91747+ return kmalloc_usercopy_caches[index];
91748+
91749+#endif
91750+
91751 return kmalloc_caches[index];
91752 }
91753
91754@@ -450,7 +472,7 @@ void __init create_kmalloc_caches(unsigned long flags)
91755 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
91756 if (!kmalloc_caches[i]) {
91757 kmalloc_caches[i] = create_kmalloc_cache(NULL,
91758- 1 << i, flags);
91759+ 1 << i, SLAB_USERCOPY | flags);
91760 }
91761
91762 /*
91763@@ -459,10 +481,10 @@ void __init create_kmalloc_caches(unsigned long flags)
91764 * earlier power of two caches
91765 */
91766 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
91767- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
91768+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
91769
91770 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
91771- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
91772+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
91773 }
91774
91775 /* Kmalloc array is now usable */
91776@@ -495,6 +517,23 @@ void __init create_kmalloc_caches(unsigned long flags)
91777 }
91778 }
91779 #endif
91780+
91781+#ifdef CONFIG_PAX_USERCOPY_SLABS
91782+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
91783+ struct kmem_cache *s = kmalloc_caches[i];
91784+
91785+ if (s) {
91786+ int size = kmalloc_size(i);
91787+ char *n = kasprintf(GFP_NOWAIT,
91788+ "usercopy-kmalloc-%d", size);
91789+
91790+ BUG_ON(!n);
91791+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
91792+ size, SLAB_USERCOPY | flags);
91793+ }
91794+ }
91795+#endif
91796+
91797 }
91798 #endif /* !CONFIG_SLOB */
91799
91800@@ -535,6 +574,9 @@ void print_slabinfo_header(struct seq_file *m)
91801 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
91802 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
91803 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
91804+#ifdef CONFIG_PAX_MEMORY_SANITIZE
91805+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
91806+#endif
91807 #endif
91808 seq_putc(m, '\n');
91809 }
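The pax_sanitize_slab_setup() handler added above wires the sanitization default to a boot parameter: any integer is accepted and !!value collapses it to on/off, so "pax_sanitize_slab=0" on the kernel command line disables the feature. A user-space model of the parsing, with strtol standing in for the kernel's simple_strtol:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

static bool pax_sanitize_slab = true;	/* default, as in the patch */

static void parse_opt(const char *str)
{
	pax_sanitize_slab = !!strtol(str, NULL, 0);
	printf("%sabled PaX slab sanitization\n",
	       pax_sanitize_slab ? "En" : "Dis");
}

int main(void)
{
	parse_opt("0");		/* pax_sanitize_slab=0 on the command line */
	parse_opt("1");		/* pax_sanitize_slab=1 */
	return 0;
}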
91810diff --git a/mm/slob.c b/mm/slob.c
91811index 4bf8809..98a6914 100644
91812--- a/mm/slob.c
91813+++ b/mm/slob.c
91814@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
91815 /*
91816 * Return the size of a slob block.
91817 */
91818-static slobidx_t slob_units(slob_t *s)
91819+static slobidx_t slob_units(const slob_t *s)
91820 {
91821 if (s->units > 0)
91822 return s->units;
91823@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
91824 /*
91825 * Return the next free slob block pointer after this one.
91826 */
91827-static slob_t *slob_next(slob_t *s)
91828+static slob_t *slob_next(const slob_t *s)
91829 {
91830 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
91831 slobidx_t next;
91832@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
91833 /*
91834 * Returns true if s is the last free block in its page.
91835 */
91836-static int slob_last(slob_t *s)
91837+static int slob_last(const slob_t *s)
91838 {
91839 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
91840 }
91841
91842-static void *slob_new_pages(gfp_t gfp, int order, int node)
91843+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
91844 {
91845- void *page;
91846+ struct page *page;
91847
91848 #ifdef CONFIG_NUMA
91849 if (node != NUMA_NO_NODE)
91850@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
91851 if (!page)
91852 return NULL;
91853
91854- return page_address(page);
91855+ __SetPageSlab(page);
91856+ return page;
91857 }
91858
91859-static void slob_free_pages(void *b, int order)
91860+static void slob_free_pages(struct page *sp, int order)
91861 {
91862 if (current->reclaim_state)
91863 current->reclaim_state->reclaimed_slab += 1 << order;
91864- free_pages((unsigned long)b, order);
91865+ __ClearPageSlab(sp);
91866+ page_mapcount_reset(sp);
91867+ sp->private = 0;
91868+ __free_pages(sp, order);
91869 }
91870
91871 /*
91872@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
91873
91874 /* Not enough space: must allocate a new page */
91875 if (!b) {
91876- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
91877- if (!b)
91878+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
91879+ if (!sp)
91880 return NULL;
91881- sp = virt_to_page(b);
91882- __SetPageSlab(sp);
91883+ b = page_address(sp);
91884
91885 spin_lock_irqsave(&slob_lock, flags);
91886 sp->units = SLOB_UNITS(PAGE_SIZE);
91887 sp->freelist = b;
91888+ sp->private = 0;
91889 INIT_LIST_HEAD(&sp->list);
91890 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
91891 set_slob_page_free(sp, slob_list);
91892@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
91893 if (slob_page_free(sp))
91894 clear_slob_page_free(sp);
91895 spin_unlock_irqrestore(&slob_lock, flags);
91896- __ClearPageSlab(sp);
91897- page_mapcount_reset(sp);
91898- slob_free_pages(b, 0);
91899+ slob_free_pages(sp, 0);
91900 return;
91901 }
91902
91903+#ifdef CONFIG_PAX_MEMORY_SANITIZE
91904+ if (pax_sanitize_slab)
91905+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
91906+#endif
91907+
91908 if (!slob_page_free(sp)) {
91909 /* This slob page is about to become partially free. Easy! */
91910 sp->units = units;
91911@@ -424,11 +431,10 @@ out:
91912 */
91913
91914 static __always_inline void *
91915-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
91916+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
91917 {
91918- unsigned int *m;
91919- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
91920- void *ret;
91921+ slob_t *m;
91922+ void *ret = NULL;
91923
91924 gfp &= gfp_allowed_mask;
91925
91926@@ -442,23 +448,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
91927
91928 if (!m)
91929 return NULL;
91930- *m = size;
91931+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
91932+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
91933+ m[0].units = size;
91934+ m[1].units = align;
91935 ret = (void *)m + align;
91936
91937 trace_kmalloc_node(caller, ret,
91938 size, size + align, gfp, node);
91939 } else {
91940 unsigned int order = get_order(size);
91941+ struct page *page;
91942
91943 if (likely(order))
91944 gfp |= __GFP_COMP;
91945- ret = slob_new_pages(gfp, order, node);
91946+ page = slob_new_pages(gfp, order, node);
91947+ if (page) {
91948+ ret = page_address(page);
91949+ page->private = size;
91950+ }
91951
91952 trace_kmalloc_node(caller, ret,
91953 size, PAGE_SIZE << order, gfp, node);
91954 }
91955
91956- kmemleak_alloc(ret, size, 1, gfp);
91957+ return ret;
91958+}
91959+
91960+static __always_inline void *
91961+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
91962+{
91963+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
91964+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
91965+
91966+ if (!ZERO_OR_NULL_PTR(ret))
91967+ kmemleak_alloc(ret, size, 1, gfp);
91968 return ret;
91969 }
91970
91971@@ -493,34 +517,112 @@ void kfree(const void *block)
91972 return;
91973 kmemleak_free(block);
91974
91975+ VM_BUG_ON(!virt_addr_valid(block));
91976 sp = virt_to_page(block);
91977- if (PageSlab(sp)) {
91978+ VM_BUG_ON(!PageSlab(sp));
91979+ if (!sp->private) {
91980 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
91981- unsigned int *m = (unsigned int *)(block - align);
91982- slob_free(m, *m + align);
91983- } else
91984+ slob_t *m = (slob_t *)(block - align);
91985+ slob_free(m, m[0].units + align);
91986+ } else {
91987+ __ClearPageSlab(sp);
91988+ page_mapcount_reset(sp);
91989+ sp->private = 0;
91990 __free_pages(sp, compound_order(sp));
91991+ }
91992 }
91993 EXPORT_SYMBOL(kfree);
91994
91995+bool is_usercopy_object(const void *ptr)
91996+{
91997+ if (!slab_is_available())
91998+ return false;
91999+
92000+ // PAX: TODO
92001+
92002+ return false;
92003+}
92004+
92005+#ifdef CONFIG_PAX_USERCOPY
92006+const char *check_heap_object(const void *ptr, unsigned long n)
92007+{
92008+ struct page *page;
92009+ const slob_t *free;
92010+ const void *base;
92011+ unsigned long flags;
92012+
92013+ if (ZERO_OR_NULL_PTR(ptr))
92014+ return "<null>";
92015+
92016+ if (!virt_addr_valid(ptr))
92017+ return NULL;
92018+
92019+ page = virt_to_head_page(ptr);
92020+ if (!PageSlab(page))
92021+ return NULL;
92022+
92023+ if (page->private) {
92024+ base = page;
92025+ if (base <= ptr && n <= page->private - (ptr - base))
92026+ return NULL;
92027+ return "<slob>";
92028+ }
92029+
92030+ /* some tricky double walking to find the chunk */
92031+ spin_lock_irqsave(&slob_lock, flags);
92032+ base = (void *)((unsigned long)ptr & PAGE_MASK);
92033+ free = page->freelist;
92034+
92035+ while (!slob_last(free) && (void *)free <= ptr) {
92036+ base = free + slob_units(free);
92037+ free = slob_next(free);
92038+ }
92039+
92040+ while (base < (void *)free) {
92041+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
92042+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
92043+ int offset;
92044+
92045+ if (ptr < base + align)
92046+ break;
92047+
92048+ offset = ptr - base - align;
92049+ if (offset >= m) {
92050+ base += size;
92051+ continue;
92052+ }
92053+
92054+ if (n > m - offset)
92055+ break;
92056+
92057+ spin_unlock_irqrestore(&slob_lock, flags);
92058+ return NULL;
92059+ }
92060+
92061+ spin_unlock_irqrestore(&slob_lock, flags);
92062+ return "<slob>";
92063+}
92064+#endif
92065+
92066 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
92067 size_t ksize(const void *block)
92068 {
92069 struct page *sp;
92070 int align;
92071- unsigned int *m;
92072+ slob_t *m;
92073
92074 BUG_ON(!block);
92075 if (unlikely(block == ZERO_SIZE_PTR))
92076 return 0;
92077
92078 sp = virt_to_page(block);
92079- if (unlikely(!PageSlab(sp)))
92080- return PAGE_SIZE << compound_order(sp);
92081+ VM_BUG_ON(!PageSlab(sp));
92082+ if (sp->private)
92083+ return sp->private;
92084
92085 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
92086- m = (unsigned int *)(block - align);
92087- return SLOB_UNITS(*m) * SLOB_UNIT;
92088+ m = (slob_t *)(block - align);
92089+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
92090 }
92091 EXPORT_SYMBOL(ksize);
92092
92093@@ -536,23 +638,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
92094
92095 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
92096 {
92097- void *b;
92098+ void *b = NULL;
92099
92100 flags &= gfp_allowed_mask;
92101
92102 lockdep_trace_alloc(flags);
92103
92104+#ifdef CONFIG_PAX_USERCOPY_SLABS
92105+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
92106+#else
92107 if (c->size < PAGE_SIZE) {
92108 b = slob_alloc(c->size, flags, c->align, node);
92109 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
92110 SLOB_UNITS(c->size) * SLOB_UNIT,
92111 flags, node);
92112 } else {
92113- b = slob_new_pages(flags, get_order(c->size), node);
92114+ struct page *sp;
92115+
92116+ sp = slob_new_pages(flags, get_order(c->size), node);
92117+ if (sp) {
92118+ b = page_address(sp);
92119+ sp->private = c->size;
92120+ }
92121 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
92122 PAGE_SIZE << get_order(c->size),
92123 flags, node);
92124 }
92125+#endif
92126
92127 if (b && c->ctor)
92128 c->ctor(b);
92129@@ -584,10 +696,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
92130
92131 static void __kmem_cache_free(void *b, int size)
92132 {
92133- if (size < PAGE_SIZE)
92134+ struct page *sp;
92135+
92136+ sp = virt_to_page(b);
92137+ BUG_ON(!PageSlab(sp));
92138+ if (!sp->private)
92139 slob_free(b, size);
92140 else
92141- slob_free_pages(b, get_order(size));
92142+ slob_free_pages(sp, get_order(size));
92143 }
92144
92145 static void kmem_rcu_free(struct rcu_head *head)
92146@@ -600,17 +716,31 @@ static void kmem_rcu_free(struct rcu_head *head)
92147
92148 void kmem_cache_free(struct kmem_cache *c, void *b)
92149 {
92150+ int size = c->size;
92151+
92152+#ifdef CONFIG_PAX_USERCOPY_SLABS
92153+ if (size + c->align < PAGE_SIZE) {
92154+ size += c->align;
92155+ b -= c->align;
92156+ }
92157+#endif
92158+
92159 kmemleak_free_recursive(b, c->flags);
92160 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
92161 struct slob_rcu *slob_rcu;
92162- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
92163- slob_rcu->size = c->size;
92164+ slob_rcu = b + (size - sizeof(struct slob_rcu));
92165+ slob_rcu->size = size;
92166 call_rcu(&slob_rcu->head, kmem_rcu_free);
92167 } else {
92168- __kmem_cache_free(b, c->size);
92169+ __kmem_cache_free(b, size);
92170 }
92171
92172+#ifdef CONFIG_PAX_USERCOPY_SLABS
92173+ trace_kfree(_RET_IP_, b);
92174+#else
92175 trace_kmem_cache_free(_RET_IP_, b);
92176+#endif
92177+
92178 }
92179 EXPORT_SYMBOL(kmem_cache_free);
92180
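The SLOB rework above replaces the old single unsigned int size prefix with a two-slob_t header in front of each small allocation, recording both size and alignment so that kfree() and ksize() can recover them as m[0].units and m[1].units. A toy user-space version of the header layout, with malloc standing in for the page-level allocator and a simplified slob_t:

#include <stdio.h>
#include <stdlib.h>

typedef struct { long units; } slob_t;	/* simplified stand-in */

/* Allocate size bytes with an align-byte header gap; store size and
 * align in the two header units, as the patched __do_kmalloc_node_align()
 * does. */
static void *toy_kmalloc(size_t size, size_t align)
{
	slob_t *m = malloc(align + size);

	if (!m)
		return NULL;
	m[0].units = (long)size;	/* size, as before */
	m[1].units = (long)align;	/* alignment, newly recorded */
	return (char *)m + align;
}

/* Recover the stored size, mirroring the patched ksize()/kfree() reads. */
static size_t toy_ksize(const void *block, size_t align)
{
	const slob_t *m = (const slob_t *)((const char *)block - align);

	return (size_t)m[0].units;
}

int main(void)
{
	size_t align = 2 * sizeof(slob_t);	/* >= 2*SLOB_UNIT, per the BUILD_BUG_ONs */
	void *p = toy_kmalloc(100, align);

	if (!p)
		return 1;
	printf("ksize = %zu\n", toy_ksize(p, align));
	free((char *)p - align);
	return 0;
}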
92181diff --git a/mm/slub.c b/mm/slub.c
92182index 96f2169..9111a59 100644
92183--- a/mm/slub.c
92184+++ b/mm/slub.c
92185@@ -207,7 +207,7 @@ struct track {
92186
92187 enum track_item { TRACK_ALLOC, TRACK_FREE };
92188
92189-#ifdef CONFIG_SYSFS
92190+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
92191 static int sysfs_slab_add(struct kmem_cache *);
92192 static int sysfs_slab_alias(struct kmem_cache *, const char *);
92193 static void sysfs_slab_remove(struct kmem_cache *);
92194@@ -530,7 +530,7 @@ static void print_track(const char *s, struct track *t)
92195 if (!t->addr)
92196 return;
92197
92198- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
92199+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
92200 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
92201 #ifdef CONFIG_STACKTRACE
92202 {
92203@@ -2616,6 +2616,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
92204
92205 slab_free_hook(s, x);
92206
92207+#ifdef CONFIG_PAX_MEMORY_SANITIZE
92208+ if (pax_sanitize_slab && !(s->flags & SLAB_NO_SANITIZE)) {
92209+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
92210+ if (s->ctor)
92211+ s->ctor(x);
92212+ }
92213+#endif
92214+
92215 redo:
92216 /*
92217 * Determine the currently cpus per cpu slab.
92218@@ -2683,7 +2691,7 @@ static int slub_min_objects;
92219 * Merge control. If this is set then no merging of slab caches will occur.
92220 * (Could be removed. This was introduced to pacify the merge skeptics.)
92221 */
92222-static int slub_nomerge;
92223+static int slub_nomerge = 1;
92224
92225 /*
92226 * Calculate the order of allocation given an slab object size.
92227@@ -2960,6 +2968,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
92228 s->inuse = size;
92229
92230 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
92231+#ifdef CONFIG_PAX_MEMORY_SANITIZE
92232+ (pax_sanitize_slab && !(flags & SLAB_NO_SANITIZE)) ||
92233+#endif
92234 s->ctor)) {
92235 /*
92236 * Relocate free pointer after the object if it is not
92237@@ -3305,6 +3316,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
92238 EXPORT_SYMBOL(__kmalloc_node);
92239 #endif
92240
92241+bool is_usercopy_object(const void *ptr)
92242+{
92243+ struct page *page;
92244+ struct kmem_cache *s;
92245+
92246+ if (ZERO_OR_NULL_PTR(ptr))
92247+ return false;
92248+
92249+ if (!slab_is_available())
92250+ return false;
92251+
92252+ if (!virt_addr_valid(ptr))
92253+ return false;
92254+
92255+ page = virt_to_head_page(ptr);
92256+
92257+ if (!PageSlab(page))
92258+ return false;
92259+
92260+ s = page->slab_cache;
92261+ return s->flags & SLAB_USERCOPY;
92262+}
92263+
92264+#ifdef CONFIG_PAX_USERCOPY
92265+const char *check_heap_object(const void *ptr, unsigned long n)
92266+{
92267+ struct page *page;
92268+ struct kmem_cache *s;
92269+ unsigned long offset;
92270+
92271+ if (ZERO_OR_NULL_PTR(ptr))
92272+ return "<null>";
92273+
92274+ if (!virt_addr_valid(ptr))
92275+ return NULL;
92276+
92277+ page = virt_to_head_page(ptr);
92278+
92279+ if (!PageSlab(page))
92280+ return NULL;
92281+
92282+ s = page->slab_cache;
92283+ if (!(s->flags & SLAB_USERCOPY))
92284+ return s->name;
92285+
92286+ offset = (ptr - page_address(page)) % s->size;
92287+ if (offset <= s->object_size && n <= s->object_size - offset)
92288+ return NULL;
92289+
92290+ return s->name;
92291+}
92292+#endif
92293+
92294 size_t ksize(const void *object)
92295 {
92296 struct page *page;
92297@@ -3333,6 +3397,7 @@ void kfree(const void *x)
92298 if (unlikely(ZERO_OR_NULL_PTR(x)))
92299 return;
92300
92301+ VM_BUG_ON(!virt_addr_valid(x));
92302 page = virt_to_head_page(x);
92303 if (unlikely(!PageSlab(page))) {
92304 BUG_ON(!PageCompound(page));
92305@@ -3638,7 +3703,7 @@ static int slab_unmergeable(struct kmem_cache *s)
92306 /*
92307 * We may have set a slab to be unmergeable during bootstrap.
92308 */
92309- if (s->refcount < 0)
92310+ if (atomic_read(&s->refcount) < 0)
92311 return 1;
92312
92313 return 0;
92314@@ -3696,7 +3761,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
92315
92316 s = find_mergeable(memcg, size, align, flags, name, ctor);
92317 if (s) {
92318- s->refcount++;
92319+ atomic_inc(&s->refcount);
92320 /*
92321 * Adjust the object sizes so that we clear
92322 * the complete object on kzalloc.
92323@@ -3705,7 +3770,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
92324 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
92325
92326 if (sysfs_slab_alias(s, name)) {
92327- s->refcount--;
92328+ atomic_dec(&s->refcount);
92329 s = NULL;
92330 }
92331 }
92332@@ -3825,7 +3890,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
92333 }
92334 #endif
92335
92336-#ifdef CONFIG_SYSFS
92337+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
92338 static int count_inuse(struct page *page)
92339 {
92340 return page->inuse;
92341@@ -4214,12 +4279,12 @@ static void resiliency_test(void)
92342 validate_slab_cache(kmalloc_caches[9]);
92343 }
92344 #else
92345-#ifdef CONFIG_SYSFS
92346+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
92347 static void resiliency_test(void) {};
92348 #endif
92349 #endif
92350
92351-#ifdef CONFIG_SYSFS
92352+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
92353 enum slab_stat_type {
92354 SL_ALL, /* All slabs */
92355 SL_PARTIAL, /* Only partially allocated slabs */
92356@@ -4459,7 +4524,7 @@ SLAB_ATTR_RO(ctor);
92357
92358 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
92359 {
92360- return sprintf(buf, "%d\n", s->refcount - 1);
92361+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
92362 }
92363 SLAB_ATTR_RO(aliases);
92364
92365@@ -4547,6 +4612,14 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
92366 SLAB_ATTR_RO(cache_dma);
92367 #endif
92368
92369+#ifdef CONFIG_PAX_USERCOPY_SLABS
92370+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
92371+{
92372+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
92373+}
92374+SLAB_ATTR_RO(usercopy);
92375+#endif
92376+
92377 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
92378 {
92379 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
92380@@ -4881,6 +4954,9 @@ static struct attribute *slab_attrs[] = {
92381 #ifdef CONFIG_ZONE_DMA
92382 &cache_dma_attr.attr,
92383 #endif
92384+#ifdef CONFIG_PAX_USERCOPY_SLABS
92385+ &usercopy_attr.attr,
92386+#endif
92387 #ifdef CONFIG_NUMA
92388 &remote_node_defrag_ratio_attr.attr,
92389 #endif
92390@@ -5113,6 +5189,7 @@ static char *create_unique_id(struct kmem_cache *s)
92391 return name;
92392 }
92393
92394+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
92395 static int sysfs_slab_add(struct kmem_cache *s)
92396 {
92397 int err;
92398@@ -5136,7 +5213,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
92399 }
92400
92401 s->kobj.kset = slab_kset;
92402- err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
92403+ err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
92404 if (err) {
92405 kobject_put(&s->kobj);
92406 return err;
92407@@ -5170,6 +5247,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
92408 kobject_del(&s->kobj);
92409 kobject_put(&s->kobj);
92410 }
92411+#endif
92412
92413 /*
92414 * Need to buffer aliases during bootup until sysfs becomes
92415@@ -5183,6 +5261,7 @@ struct saved_alias {
92416
92417 static struct saved_alias *alias_list;
92418
92419+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
92420 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
92421 {
92422 struct saved_alias *al;
92423@@ -5205,6 +5284,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
92424 alias_list = al;
92425 return 0;
92426 }
92427+#endif
92428
92429 static int __init slab_sysfs_init(void)
92430 {
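Editorial note on the slub hunks above: check_heap_object() is the slub backend of the PAX_USERCOPY check. It locates the slab slot containing ptr, computes ptr's offset inside that slot as (ptr - page_address(page)) % s->size, and only permits a copy of n bytes that stays within the object payload; otherwise it returns the cache name, which the caller uses in the violation report. The core bounds test, restated as a self-contained predicate:

#include <stdbool.h>
#include <stddef.h>

/* mirrors: offset <= s->object_size && n <= s->object_size - offset */
static bool copy_fits_object(size_t offset, size_t n, size_t object_size)
{
	return offset <= object_size && n <= object_size - offset;
}

The subtraction cannot underflow because the first comparison short-circuits the second.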
92431diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
92432index 27eeab3..7c3f7f2 100644
92433--- a/mm/sparse-vmemmap.c
92434+++ b/mm/sparse-vmemmap.c
92435@@ -130,7 +130,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
92436 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
92437 if (!p)
92438 return NULL;
92439- pud_populate(&init_mm, pud, p);
92440+ pud_populate_kernel(&init_mm, pud, p);
92441 }
92442 return pud;
92443 }
92444@@ -142,7 +142,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
92445 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
92446 if (!p)
92447 return NULL;
92448- pgd_populate(&init_mm, pgd, p);
92449+ pgd_populate_kernel(&init_mm, pgd, p);
92450 }
92451 return pgd;
92452 }
92453diff --git a/mm/sparse.c b/mm/sparse.c
92454index 4ac1d7e..bbfcb1f 100644
92455--- a/mm/sparse.c
92456+++ b/mm/sparse.c
92457@@ -745,7 +745,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
92458
92459 for (i = 0; i < PAGES_PER_SECTION; i++) {
92460 if (PageHWPoison(&memmap[i])) {
92461- atomic_long_sub(1, &num_poisoned_pages);
92462+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
92463 ClearPageHWPoison(&memmap[i]);
92464 }
92465 }
92466diff --git a/mm/swap.c b/mm/swap.c
92467index 759c3ca..7c1a5b4 100644
92468--- a/mm/swap.c
92469+++ b/mm/swap.c
92470@@ -77,6 +77,8 @@ static void __put_compound_page(struct page *page)
92471
92472 __page_cache_release(page);
92473 dtor = get_compound_page_dtor(page);
92474+ if (!PageHuge(page))
92475+ BUG_ON(dtor != free_compound_page);
92476 (*dtor)(page);
92477 }
92478
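Editorial note on the swap.c hunk above: it pins the compound-page destructor. For anything that is not a hugetlb page, the only legitimate dtor is free_compound_page(), so any other value indicates the function pointer stored in the page metadata was corrupted. The same assert-before-indirect-call shape in plain C (all names here are illustrative):

#include <assert.h>

typedef void (*dtor_t)(void *);

static void expected_dtor(void *page) { (void)page; }

static void call_dtor_checked(dtor_t dtor, void *page, int is_huge)
{
	if (!is_huge)
		assert(dtor == expected_dtor); /* refuse a hijacked pointer */
	dtor(page);
}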
92479diff --git a/mm/swapfile.c b/mm/swapfile.c
92480index de7c904..c84bf11 100644
92481--- a/mm/swapfile.c
92482+++ b/mm/swapfile.c
92483@@ -66,7 +66,7 @@ static DEFINE_MUTEX(swapon_mutex);
92484
92485 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
92486 /* Activity counter to indicate that a swapon or swapoff has occurred */
92487-static atomic_t proc_poll_event = ATOMIC_INIT(0);
92488+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
92489
92490 static inline unsigned char swap_count(unsigned char ent)
92491 {
92492@@ -1949,7 +1949,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
92493 }
92494 filp_close(swap_file, NULL);
92495 err = 0;
92496- atomic_inc(&proc_poll_event);
92497+ atomic_inc_unchecked(&proc_poll_event);
92498 wake_up_interruptible(&proc_poll_wait);
92499
92500 out_dput:
92501@@ -1966,8 +1966,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
92502
92503 poll_wait(file, &proc_poll_wait, wait);
92504
92505- if (seq->poll_event != atomic_read(&proc_poll_event)) {
92506- seq->poll_event = atomic_read(&proc_poll_event);
92507+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
92508+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
92509 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
92510 }
92511
92512@@ -2065,7 +2065,7 @@ static int swaps_open(struct inode *inode, struct file *file)
92513 return ret;
92514
92515 seq = file->private_data;
92516- seq->poll_event = atomic_read(&proc_poll_event);
92517+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
92518 return 0;
92519 }
92520
92521@@ -2524,7 +2524,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
92522 (frontswap_map) ? "FS" : "");
92523
92524 mutex_unlock(&swapon_mutex);
92525- atomic_inc(&proc_poll_event);
92526+ atomic_inc_unchecked(&proc_poll_event);
92527 wake_up_interruptible(&proc_poll_wait);
92528
92529 if (S_ISREG(inode->i_mode))
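Editorial note on the swapfile.c conversions above: they follow the PAX_REFCOUNT convention used throughout this patch. Ordinary atomic_t overflow is trapped to catch reference-count bugs, so counters that are allowed to wrap, like proc_poll_event, which is only ever compared for (in)equality, opt out via the _unchecked variants. A userspace sketch of why wrap-around is harmless for such an event counter:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint poll_event; /* wraps mod 2^32 by design */

static void note_swap_change(void)
{
	atomic_fetch_add(&poll_event, 1); /* like atomic_inc_unchecked() */
}

static bool event_changed(unsigned int *seen)
{
	unsigned int now = atomic_load(&poll_event);
	if (now == *seen)
		return false;
	*seen = now; /* only equality is tested, so wrapping never lies */
	return true;
}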
92530diff --git a/mm/util.c b/mm/util.c
92531index eaf63fc2..32b2629 100644
92532--- a/mm/util.c
92533+++ b/mm/util.c
92534@@ -294,6 +294,12 @@ done:
92535 void arch_pick_mmap_layout(struct mm_struct *mm)
92536 {
92537 mm->mmap_base = TASK_UNMAPPED_BASE;
92538+
92539+#ifdef CONFIG_PAX_RANDMMAP
92540+ if (mm->pax_flags & MF_PAX_RANDMMAP)
92541+ mm->mmap_base += mm->delta_mmap;
92542+#endif
92543+
92544 mm->get_unmapped_area = arch_get_unmapped_area;
92545 }
92546 #endif
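Editorial note on the util.c hunk above: under PAX_RANDMMAP the mmap base is biased by a per-process random delta (mm->delta_mmap) computed elsewhere in the patch. A toy version of the idea; the delta width and alignment below are invented for the example, not the patch's actual values:

#include <stdint.h>

#define TASK_UNMAPPED_BASE_EG 0x40000000UL /* illustrative constant */

static uintptr_t pick_mmap_base(uint32_t rnd, int randmmap)
{
	/* hypothetical: page-aligned delta built from 16 random bits */
	uintptr_t delta = randmmap ? (((uintptr_t)rnd & 0xFFFFUL) << 12) : 0;
	return TASK_UNMAPPED_BASE_EG + delta; /* mmap_base += mm->delta_mmap */
}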
92547diff --git a/mm/vmalloc.c b/mm/vmalloc.c
92548index 1074543..136dbe0 100644
92549--- a/mm/vmalloc.c
92550+++ b/mm/vmalloc.c
92551@@ -59,8 +59,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
92552
92553 pte = pte_offset_kernel(pmd, addr);
92554 do {
92555- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
92556- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
92557+
92558+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
92559+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
92560+ BUG_ON(!pte_exec(*pte));
92561+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
92562+ continue;
92563+ }
92564+#endif
92565+
92566+ {
92567+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
92568+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
92569+ }
92570 } while (pte++, addr += PAGE_SIZE, addr != end);
92571 }
92572
92573@@ -120,16 +131,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
92574 pte = pte_alloc_kernel(pmd, addr);
92575 if (!pte)
92576 return -ENOMEM;
92577+
92578+ pax_open_kernel();
92579 do {
92580 struct page *page = pages[*nr];
92581
92582- if (WARN_ON(!pte_none(*pte)))
92583+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
92584+ if (pgprot_val(prot) & _PAGE_NX)
92585+#endif
92586+
92587+ if (!pte_none(*pte)) {
92588+ pax_close_kernel();
92589+ WARN_ON(1);
92590 return -EBUSY;
92591- if (WARN_ON(!page))
92592+ }
92593+ if (!page) {
92594+ pax_close_kernel();
92595+ WARN_ON(1);
92596 return -ENOMEM;
92597+ }
92598 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
92599 (*nr)++;
92600 } while (pte++, addr += PAGE_SIZE, addr != end);
92601+ pax_close_kernel();
92602 return 0;
92603 }
92604
92605@@ -139,7 +163,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
92606 pmd_t *pmd;
92607 unsigned long next;
92608
92609- pmd = pmd_alloc(&init_mm, pud, addr);
92610+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
92611 if (!pmd)
92612 return -ENOMEM;
92613 do {
92614@@ -156,7 +180,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
92615 pud_t *pud;
92616 unsigned long next;
92617
92618- pud = pud_alloc(&init_mm, pgd, addr);
92619+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
92620 if (!pud)
92621 return -ENOMEM;
92622 do {
92623@@ -216,6 +240,12 @@ int is_vmalloc_or_module_addr(const void *x)
92624 if (addr >= MODULES_VADDR && addr < MODULES_END)
92625 return 1;
92626 #endif
92627+
92628+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
92629+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
92630+ return 1;
92631+#endif
92632+
92633 return is_vmalloc_addr(x);
92634 }
92635
92636@@ -236,8 +266,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
92637
92638 if (!pgd_none(*pgd)) {
92639 pud_t *pud = pud_offset(pgd, addr);
92640+#ifdef CONFIG_X86
92641+ if (!pud_large(*pud))
92642+#endif
92643 if (!pud_none(*pud)) {
92644 pmd_t *pmd = pmd_offset(pud, addr);
92645+#ifdef CONFIG_X86
92646+ if (!pmd_large(*pmd))
92647+#endif
92648 if (!pmd_none(*pmd)) {
92649 pte_t *ptep, pte;
92650
92651@@ -1303,6 +1339,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
92652 struct vm_struct *area;
92653
92654 BUG_ON(in_interrupt());
92655+
92656+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
92657+ if (flags & VM_KERNEXEC) {
92658+ if (start != VMALLOC_START || end != VMALLOC_END)
92659+ return NULL;
92660+ start = (unsigned long)MODULES_EXEC_VADDR;
92661+ end = (unsigned long)MODULES_EXEC_END;
92662+ }
92663+#endif
92664+
92665 if (flags & VM_IOREMAP)
92666 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
92667
92668@@ -1528,6 +1574,11 @@ void *vmap(struct page **pages, unsigned int count,
92669 if (count > totalram_pages)
92670 return NULL;
92671
92672+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
92673+ if (!(pgprot_val(prot) & _PAGE_NX))
92674+ flags |= VM_KERNEXEC;
92675+#endif
92676+
92677 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
92678 __builtin_return_address(0));
92679 if (!area)
92680@@ -1629,6 +1680,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
92681 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
92682 goto fail;
92683
92684+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
92685+ if (!(pgprot_val(prot) & _PAGE_NX))
92686+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | VM_KERNEXEC,
92687+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
92688+ else
92689+#endif
92690+
92691 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
92692 start, end, node, gfp_mask, caller);
92693 if (!area)
92694@@ -1805,10 +1863,9 @@ EXPORT_SYMBOL(vzalloc_node);
92695 * For tight control over page level allocator and protection flags
92696 * use __vmalloc() instead.
92697 */
92698-
92699 void *vmalloc_exec(unsigned long size)
92700 {
92701- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
92702+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
92703 NUMA_NO_NODE, __builtin_return_address(0));
92704 }
92705
92706@@ -2115,6 +2172,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
92707 {
92708 struct vm_struct *area;
92709
92710+ BUG_ON(vma->vm_mirror);
92711+
92712 size = PAGE_ALIGN(size);
92713
92714 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
92715@@ -2600,7 +2659,11 @@ static int s_show(struct seq_file *m, void *p)
92716 v->addr, v->addr + v->size, v->size);
92717
92718 if (v->caller)
92719+#ifdef CONFIG_GRKERNSEC_HIDESYM
92720+ seq_printf(m, " %pK", v->caller);
92721+#else
92722 seq_printf(m, " %pS", v->caller);
92723+#endif
92724
92725 if (v->nr_pages)
92726 seq_printf(m, " pages=%d", v->nr_pages);
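Editorial note on the vmalloc.c changes above: they implement the KERNEXEC policy that executable kernel mappings may only live in the dedicated MODULES_EXEC window. vmap() and __vmalloc_node_range() tag any request whose prot lacks _PAGE_NX with VM_KERNEXEC, and __get_vm_area_node() then rewrites the search range. The range rewrite, restated (the bounds are parameters here because the real ones are arch-specific):

struct va_range { unsigned long start, end; };

static int reroute_exec_range(struct va_range *r, int kernexec,
			      unsigned long vstart, unsigned long vend,
			      unsigned long mstart, unsigned long mend)
{
	if (!kernexec)
		return 0;
	if (r->start != vstart || r->end != vend)
		return -1; /* only whole-range requests are rerouted */
	r->start = mstart; /* MODULES_EXEC_VADDR */
	r->end = mend;     /* MODULES_EXEC_END */
	return 0;
}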
92727diff --git a/mm/vmstat.c b/mm/vmstat.c
92728index 5a442a7..5eb281e 100644
92729--- a/mm/vmstat.c
92730+++ b/mm/vmstat.c
92731@@ -79,7 +79,7 @@ void vm_events_fold_cpu(int cpu)
92732 *
92733 * vm_stat contains the global counters
92734 */
92735-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
92736+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
92737 EXPORT_SYMBOL(vm_stat);
92738
92739 #ifdef CONFIG_SMP
92740@@ -423,7 +423,7 @@ static inline void fold_diff(int *diff)
92741
92742 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
92743 if (diff[i])
92744- atomic_long_add(diff[i], &vm_stat[i]);
92745+ atomic_long_add_unchecked(diff[i], &vm_stat[i]);
92746 }
92747
92748 /*
92749@@ -455,7 +455,7 @@ static void refresh_cpu_vm_stats(void)
92750 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
92751 if (v) {
92752
92753- atomic_long_add(v, &zone->vm_stat[i]);
92754+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
92755 global_diff[i] += v;
92756 #ifdef CONFIG_NUMA
92757 /* 3 seconds idle till flush */
92758@@ -517,7 +517,7 @@ void cpu_vm_stats_fold(int cpu)
92759
92760 v = p->vm_stat_diff[i];
92761 p->vm_stat_diff[i] = 0;
92762- atomic_long_add(v, &zone->vm_stat[i]);
92763+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
92764 global_diff[i] += v;
92765 }
92766 }
92767@@ -537,8 +537,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
92768 if (pset->vm_stat_diff[i]) {
92769 int v = pset->vm_stat_diff[i];
92770 pset->vm_stat_diff[i] = 0;
92771- atomic_long_add(v, &zone->vm_stat[i]);
92772- atomic_long_add(v, &vm_stat[i]);
92773+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
92774+ atomic_long_add_unchecked(v, &vm_stat[i]);
92775 }
92776 }
92777 #endif
92778@@ -1281,10 +1281,20 @@ static int __init setup_vmstat(void)
92779 start_cpu_timer(cpu);
92780 #endif
92781 #ifdef CONFIG_PROC_FS
92782- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
92783- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
92784- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
92785- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
92786+ {
92787+ mode_t gr_mode = S_IRUGO;
92788+#ifdef CONFIG_GRKERNSEC_PROC_ADD
92789+ gr_mode = S_IRUSR;
92790+#endif
92791+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
92792+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
92793+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
92794+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
92795+#else
92796+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
92797+#endif
92798+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
92799+ }
92800 #endif
92801 return 0;
92802 }
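Editorial note on the setup_vmstat() hunk above: it is the recurring GRKERNSEC_PROC_ADD pattern. /proc files drop from world-readable S_IRUGO to owner-only S_IRUSR, with GRKERNSEC_PROC_USERGROUP optionally restoring group read on selected files (here vmstat). The mode selection as a standalone helper (flag names shortened for the sketch):

#include <sys/stat.h>

static mode_t pick_proc_mode(int proc_add, int usergroup, int is_vmstat)
{
	mode_t gr_mode = proc_add ? S_IRUSR : (S_IRUSR | S_IRGRP | S_IROTH);
	if (is_vmstat && usergroup)
		gr_mode |= S_IRGRP; /* vmstat stays readable by the proc group */
	return gr_mode;
}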
92803diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
92804index 61fc573..b5e47d0 100644
92805--- a/net/8021q/vlan.c
92806+++ b/net/8021q/vlan.c
92807@@ -472,7 +472,7 @@ out:
92808 return NOTIFY_DONE;
92809 }
92810
92811-static struct notifier_block vlan_notifier_block __read_mostly = {
92812+static struct notifier_block vlan_notifier_block = {
92813 .notifier_call = vlan_device_event,
92814 };
92815
92816@@ -547,8 +547,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
92817 err = -EPERM;
92818 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
92819 break;
92820- if ((args.u.name_type >= 0) &&
92821- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
92822+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
92823 struct vlan_net *vn;
92824
92825 vn = net_generic(net, vlan_net_id);
92826diff --git a/net/9p/mod.c b/net/9p/mod.c
92827index 6ab36ae..6f1841b 100644
92828--- a/net/9p/mod.c
92829+++ b/net/9p/mod.c
92830@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
92831 void v9fs_register_trans(struct p9_trans_module *m)
92832 {
92833 spin_lock(&v9fs_trans_lock);
92834- list_add_tail(&m->list, &v9fs_trans_list);
92835+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
92836 spin_unlock(&v9fs_trans_lock);
92837 }
92838 EXPORT_SYMBOL(v9fs_register_trans);
92839@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
92840 void v9fs_unregister_trans(struct p9_trans_module *m)
92841 {
92842 spin_lock(&v9fs_trans_lock);
92843- list_del_init(&m->list);
92844+ pax_list_del_init((struct list_head *)&m->list);
92845 spin_unlock(&v9fs_trans_lock);
92846 }
92847 EXPORT_SYMBOL(v9fs_unregister_trans);
92848diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
92849index 3ffda1b..fceac96 100644
92850--- a/net/9p/trans_fd.c
92851+++ b/net/9p/trans_fd.c
92852@@ -432,7 +432,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
92853 oldfs = get_fs();
92854 set_fs(get_ds());
92855 /* The cast to a user pointer is valid due to the set_fs() */
92856- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
92857+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
92858 set_fs(oldfs);
92859
92860 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
92861diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
92862index 876fbe8..8bbea9f 100644
92863--- a/net/atm/atm_misc.c
92864+++ b/net/atm/atm_misc.c
92865@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
92866 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
92867 return 1;
92868 atm_return(vcc, truesize);
92869- atomic_inc(&vcc->stats->rx_drop);
92870+ atomic_inc_unchecked(&vcc->stats->rx_drop);
92871 return 0;
92872 }
92873 EXPORT_SYMBOL(atm_charge);
92874@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
92875 }
92876 }
92877 atm_return(vcc, guess);
92878- atomic_inc(&vcc->stats->rx_drop);
92879+ atomic_inc_unchecked(&vcc->stats->rx_drop);
92880 return NULL;
92881 }
92882 EXPORT_SYMBOL(atm_alloc_charge);
92883@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
92884
92885 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
92886 {
92887-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
92888+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
92889 __SONET_ITEMS
92890 #undef __HANDLE_ITEM
92891 }
92892@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
92893
92894 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
92895 {
92896-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
92897+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
92898 __SONET_ITEMS
92899 #undef __HANDLE_ITEM
92900 }
92901diff --git a/net/atm/lec.h b/net/atm/lec.h
92902index 4149db1..f2ab682 100644
92903--- a/net/atm/lec.h
92904+++ b/net/atm/lec.h
92905@@ -48,7 +48,7 @@ struct lane2_ops {
92906 const u8 *tlvs, u32 sizeoftlvs);
92907 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
92908 const u8 *tlvs, u32 sizeoftlvs);
92909-};
92910+} __no_const;
92911
92912 /*
92913 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
92914diff --git a/net/atm/proc.c b/net/atm/proc.c
92915index bbb6461..cf04016 100644
92916--- a/net/atm/proc.c
92917+++ b/net/atm/proc.c
92918@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
92919 const struct k_atm_aal_stats *stats)
92920 {
92921 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
92922- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
92923- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
92924- atomic_read(&stats->rx_drop));
92925+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
92926+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
92927+ atomic_read_unchecked(&stats->rx_drop));
92928 }
92929
92930 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
92931diff --git a/net/atm/resources.c b/net/atm/resources.c
92932index 0447d5d..3cf4728 100644
92933--- a/net/atm/resources.c
92934+++ b/net/atm/resources.c
92935@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
92936 static void copy_aal_stats(struct k_atm_aal_stats *from,
92937 struct atm_aal_stats *to)
92938 {
92939-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
92940+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
92941 __AAL_STAT_ITEMS
92942 #undef __HANDLE_ITEM
92943 }
92944@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
92945 static void subtract_aal_stats(struct k_atm_aal_stats *from,
92946 struct atm_aal_stats *to)
92947 {
92948-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
92949+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
92950 __AAL_STAT_ITEMS
92951 #undef __HANDLE_ITEM
92952 }
92953diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
92954index 919a5ce..cc6b444 100644
92955--- a/net/ax25/sysctl_net_ax25.c
92956+++ b/net/ax25/sysctl_net_ax25.c
92957@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
92958 {
92959 char path[sizeof("net/ax25/") + IFNAMSIZ];
92960 int k;
92961- struct ctl_table *table;
92962+ ctl_table_no_const *table;
92963
92964 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
92965 if (!table)
92966diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
92967index 0a8a80c..f7e89aa 100644
92968--- a/net/batman-adv/bat_iv_ogm.c
92969+++ b/net/batman-adv/bat_iv_ogm.c
92970@@ -121,7 +121,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
92971
92972 /* randomize initial seqno to avoid collision */
92973 get_random_bytes(&random_seqno, sizeof(random_seqno));
92974- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
92975+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
92976
92977 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
92978 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
92979@@ -703,9 +703,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
92980 batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
92981
92982 /* change sequence number to network order */
92983- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
92984+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
92985 batadv_ogm_packet->seqno = htonl(seqno);
92986- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
92987+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
92988
92989 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
92990 batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
92991@@ -1111,7 +1111,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
92992 return;
92993
92994 /* could be changed by schedule_own_packet() */
92995- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
92996+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
92997
92998 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
92999 has_directlink_flag = 1;
93000diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
93001index c478e6b..469fd2f 100644
93002--- a/net/batman-adv/hard-interface.c
93003+++ b/net/batman-adv/hard-interface.c
93004@@ -453,7 +453,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
93005 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
93006 dev_add_pack(&hard_iface->batman_adv_ptype);
93007
93008- atomic_set(&hard_iface->frag_seqno, 1);
93009+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
93010 batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
93011 hard_iface->net_dev->name);
93012
93013diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
93014index 813db4e..847edac 100644
93015--- a/net/batman-adv/soft-interface.c
93016+++ b/net/batman-adv/soft-interface.c
93017@@ -263,7 +263,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
93018 primary_if->net_dev->dev_addr, ETH_ALEN);
93019
93020 /* set broadcast sequence number */
93021- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
93022+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
93023 bcast_packet->seqno = htonl(seqno);
93024
93025 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
93026@@ -483,7 +483,7 @@ static int batadv_softif_init_late(struct net_device *dev)
93027 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
93028
93029 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
93030- atomic_set(&bat_priv->bcast_seqno, 1);
93031+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
93032 atomic_set(&bat_priv->tt.vn, 0);
93033 atomic_set(&bat_priv->tt.local_changes, 0);
93034 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
93035diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
93036index b2c94e1..3d47e07 100644
93037--- a/net/batman-adv/types.h
93038+++ b/net/batman-adv/types.h
93039@@ -51,7 +51,7 @@
93040 struct batadv_hard_iface_bat_iv {
93041 unsigned char *ogm_buff;
93042 int ogm_buff_len;
93043- atomic_t ogm_seqno;
93044+ atomic_unchecked_t ogm_seqno;
93045 };
93046
93047 /**
93048@@ -76,7 +76,7 @@ struct batadv_hard_iface {
93049 int16_t if_num;
93050 char if_status;
93051 struct net_device *net_dev;
93052- atomic_t frag_seqno;
93053+ atomic_unchecked_t frag_seqno;
93054 uint8_t num_bcasts;
93055 struct kobject *hardif_obj;
93056 atomic_t refcount;
93057@@ -560,7 +560,7 @@ struct batadv_priv {
93058 #ifdef CONFIG_BATMAN_ADV_DEBUG
93059 atomic_t log_level;
93060 #endif
93061- atomic_t bcast_seqno;
93062+ atomic_unchecked_t bcast_seqno;
93063 atomic_t bcast_queue_left;
93064 atomic_t batman_queue_left;
93065 char num_ifaces;
93066diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
93067index 48b31d3..62a0bcb 100644
93068--- a/net/batman-adv/unicast.c
93069+++ b/net/batman-adv/unicast.c
93070@@ -272,7 +272,7 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
93071 frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
93072 frag2->flags = large_tail;
93073
93074- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
93075+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
93076 frag1->seqno = htons(seqno - 1);
93077 frag2->seqno = htons(seqno);
93078
93079diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
93080index fa4bf66..e92948f 100644
93081--- a/net/bluetooth/hci_sock.c
93082+++ b/net/bluetooth/hci_sock.c
93083@@ -932,7 +932,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
93084 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
93085 }
93086
93087- len = min_t(unsigned int, len, sizeof(uf));
93088+ len = min((size_t)len, sizeof(uf));
93089 if (copy_from_user(&uf, optval, len)) {
93090 err = -EFAULT;
93091 break;
93092diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
93093index 63fa111..b166ec6 100644
93094--- a/net/bluetooth/l2cap_core.c
93095+++ b/net/bluetooth/l2cap_core.c
93096@@ -3511,8 +3511,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
93097 break;
93098
93099 case L2CAP_CONF_RFC:
93100- if (olen == sizeof(rfc))
93101- memcpy(&rfc, (void *)val, olen);
93102+ if (olen != sizeof(rfc))
93103+ break;
93104+
93105+ memcpy(&rfc, (void *)val, olen);
93106
93107 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
93108 rfc.mode != chan->mode)
93109diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
93110index 0098af8..fb5a31f 100644
93111--- a/net/bluetooth/l2cap_sock.c
93112+++ b/net/bluetooth/l2cap_sock.c
93113@@ -485,7 +485,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
93114 struct sock *sk = sock->sk;
93115 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
93116 struct l2cap_options opts;
93117- int len, err = 0;
93118+ int err = 0;
93119+ size_t len = optlen;
93120 u32 opt;
93121
93122 BT_DBG("sk %p", sk);
93123@@ -507,7 +508,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
93124 opts.max_tx = chan->max_tx;
93125 opts.txwin_size = chan->tx_win;
93126
93127- len = min_t(unsigned int, sizeof(opts), optlen);
93128+ len = min(sizeof(opts), len);
93129 if (copy_from_user((char *) &opts, optval, len)) {
93130 err = -EFAULT;
93131 break;
93132@@ -587,7 +588,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
93133 struct bt_security sec;
93134 struct bt_power pwr;
93135 struct l2cap_conn *conn;
93136- int len, err = 0;
93137+ int err = 0;
93138+ size_t len = optlen;
93139 u32 opt;
93140
93141 BT_DBG("sk %p", sk);
93142@@ -610,7 +612,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
93143
93144 sec.level = BT_SECURITY_LOW;
93145
93146- len = min_t(unsigned int, sizeof(sec), optlen);
93147+ len = min(sizeof(sec), len);
93148 if (copy_from_user((char *) &sec, optval, len)) {
93149 err = -EFAULT;
93150 break;
93151@@ -707,7 +709,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
93152
93153 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
93154
93155- len = min_t(unsigned int, sizeof(pwr), optlen);
93156+ len = min(sizeof(pwr), len);
93157 if (copy_from_user((char *) &pwr, optval, len)) {
93158 err = -EFAULT;
93159 break;
93160diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
93161index c1c6028..17e8dcc 100644
93162--- a/net/bluetooth/rfcomm/sock.c
93163+++ b/net/bluetooth/rfcomm/sock.c
93164@@ -665,7 +665,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
93165 struct sock *sk = sock->sk;
93166 struct bt_security sec;
93167 int err = 0;
93168- size_t len;
93169+ size_t len = optlen;
93170 u32 opt;
93171
93172 BT_DBG("sk %p", sk);
93173@@ -687,7 +687,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
93174
93175 sec.level = BT_SECURITY_LOW;
93176
93177- len = min_t(unsigned int, sizeof(sec), optlen);
93178+ len = min(sizeof(sec), len);
93179 if (copy_from_user((char *) &sec, optval, len)) {
93180 err = -EFAULT;
93181 break;
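Editorial note on the l2cap and rfcomm setsockopt hunks above: they replace an int len clamped with min_t(unsigned int, ...) by a size_t initialized from optlen and clamped with plain min(). As I read it this is conversion hygiene: the length stays in one unsigned, pointer-width type from the clamp through copy_from_user(), with no narrowing to int in between. The resulting shape:

#include <stddef.h>
#include <string.h>

static int copy_opt(void *dst, size_t dst_size,
		    const void *optval, size_t optlen)
{
	size_t len = optlen < dst_size ? optlen : dst_size; /* min(), one type */
	memcpy(dst, optval, len); /* stands in for copy_from_user() */
	return 0;
}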
93182diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
93183index 84fcf9f..e389b27 100644
93184--- a/net/bluetooth/rfcomm/tty.c
93185+++ b/net/bluetooth/rfcomm/tty.c
93186@@ -684,7 +684,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
93187 BT_DBG("tty %p id %d", tty, tty->index);
93188
93189 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
93190- dev->channel, dev->port.count);
93191+ dev->channel, atomic_read(&dev->port.count));
93192
93193 err = tty_port_open(&dev->port, tty, filp);
93194 if (err)
93195@@ -707,7 +707,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
93196 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
93197
93198 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
93199- dev->port.count);
93200+ atomic_read(&dev->port.count));
93201
93202 tty_port_close(&dev->port, tty, filp);
93203 }
93204diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
93205index 5180938..7c470c3 100644
93206--- a/net/bridge/netfilter/ebt_ulog.c
93207+++ b/net/bridge/netfilter/ebt_ulog.c
93208@@ -181,6 +181,7 @@ static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
93209 ub->qlen++;
93210
93211 pm = nlmsg_data(nlh);
93212+ memset(pm, 0, sizeof(*pm));
93213
93214 /* Fill in the ulog data */
93215 pm->version = EBT_ULOG_VERSION;
93216@@ -193,8 +194,6 @@ static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
93217 pm->hook = hooknr;
93218 if (uloginfo->prefix != NULL)
93219 strcpy(pm->prefix, uloginfo->prefix);
93220- else
93221- *(pm->prefix) = '\0';
93222
93223 if (in) {
93224 strcpy(pm->physindev, in->name);
93225@@ -204,16 +203,14 @@ static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
93226 strcpy(pm->indev, br_port_get_rcu(in)->br->dev->name);
93227 else
93228 strcpy(pm->indev, in->name);
93229- } else
93230- pm->indev[0] = pm->physindev[0] = '\0';
93231+ }
93232
93233 if (out) {
93234 /* If out exists, then out is a bridge port */
93235 strcpy(pm->physoutdev, out->name);
93236 /* rcu_read_lock()ed by nf_hook_slow */
93237 strcpy(pm->outdev, br_port_get_rcu(out)->br->dev->name);
93238- } else
93239- pm->outdev[0] = pm->physoutdev[0] = '\0';
93240+ }
93241
93242 if (skb_copy_bits(skb, -ETH_HLEN, pm->data, copy_len) < 0)
93243 BUG();
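Editorial note on the ebt_ulog hunks above: ebt_ulog_packet() now zeroes the whole netlink payload once instead of clearing individual string fields on the else branches; without that, struct padding and any field missed on some path would carry stale kernel heap bytes out to userspace. The pattern in plain C (struct layout invented for the example):

#include <stdio.h>
#include <string.h>

struct ulog_msg { char prefix[32]; char indev[16]; int hook; };

static void fill_msg(struct ulog_msg *pm, const char *prefix, int hook)
{
	memset(pm, 0, sizeof(*pm)); /* one memset, no forgotten field or padding */
	if (prefix)
		snprintf(pm->prefix, sizeof(pm->prefix), "%s", prefix);
	pm->hook = hook;
}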
93244diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
93245index ac78024..161a80c 100644
93246--- a/net/bridge/netfilter/ebtables.c
93247+++ b/net/bridge/netfilter/ebtables.c
93248@@ -1525,7 +1525,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
93249 tmp.valid_hooks = t->table->valid_hooks;
93250 }
93251 mutex_unlock(&ebt_mutex);
93252- if (copy_to_user(user, &tmp, *len) != 0){
93253+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
93254 BUGPRINT("c2u Didn't work\n");
93255 ret = -EFAULT;
93256 break;
93257@@ -2331,7 +2331,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
93258 goto out;
93259 tmp.valid_hooks = t->valid_hooks;
93260
93261- if (copy_to_user(user, &tmp, *len) != 0) {
93262+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
93263 ret = -EFAULT;
93264 break;
93265 }
93266@@ -2342,7 +2342,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
93267 tmp.entries_size = t->table->entries_size;
93268 tmp.valid_hooks = t->table->valid_hooks;
93269
93270- if (copy_to_user(user, &tmp, *len) != 0) {
93271+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
93272 ret = -EFAULT;
93273 break;
93274 }
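Editorial note on the three ebtables hunks above: each adds the same guard, *len > sizeof(tmp), in front of copy_to_user(). *len is a userspace-controlled byte count, and honoring a larger value would copy kernel stack memory beyond tmp back to the caller. A minimal analogue of the fixed path (reply struct invented for the example):

#include <string.h>

struct getinfo_reply { char name[32]; unsigned int entries_size; };

static int copy_reply(void *user_buf, size_t user_len)
{
	struct getinfo_reply tmp = { "filter", 0 };
	if (user_len > sizeof(tmp))
		return -1; /* the added bounds check: refuse a stack overread */
	memcpy(user_buf, &tmp, user_len); /* stands in for copy_to_user() */
	return 0;
}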
93275diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
93276index 0f45522..dab651f 100644
93277--- a/net/caif/cfctrl.c
93278+++ b/net/caif/cfctrl.c
93279@@ -10,6 +10,7 @@
93280 #include <linux/spinlock.h>
93281 #include <linux/slab.h>
93282 #include <linux/pkt_sched.h>
93283+#include <linux/sched.h>
93284 #include <net/caif/caif_layer.h>
93285 #include <net/caif/cfpkt.h>
93286 #include <net/caif/cfctrl.h>
93287@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
93288 memset(&dev_info, 0, sizeof(dev_info));
93289 dev_info.id = 0xff;
93290 cfsrvl_init(&this->serv, 0, &dev_info, false);
93291- atomic_set(&this->req_seq_no, 1);
93292- atomic_set(&this->rsp_seq_no, 1);
93293+ atomic_set_unchecked(&this->req_seq_no, 1);
93294+ atomic_set_unchecked(&this->rsp_seq_no, 1);
93295 this->serv.layer.receive = cfctrl_recv;
93296 sprintf(this->serv.layer.name, "ctrl");
93297 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
93298@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
93299 struct cfctrl_request_info *req)
93300 {
93301 spin_lock_bh(&ctrl->info_list_lock);
93302- atomic_inc(&ctrl->req_seq_no);
93303- req->sequence_no = atomic_read(&ctrl->req_seq_no);
93304+ atomic_inc_unchecked(&ctrl->req_seq_no);
93305+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
93306 list_add_tail(&req->list, &ctrl->list);
93307 spin_unlock_bh(&ctrl->info_list_lock);
93308 }
93309@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
93310 if (p != first)
93311 pr_warn("Requests are not received in order\n");
93312
93313- atomic_set(&ctrl->rsp_seq_no,
93314+ atomic_set_unchecked(&ctrl->rsp_seq_no,
93315 p->sequence_no);
93316 list_del(&p->list);
93317 goto out;
93318diff --git a/net/can/af_can.c b/net/can/af_can.c
93319index 3ab8dd2..b9aef13 100644
93320--- a/net/can/af_can.c
93321+++ b/net/can/af_can.c
93322@@ -862,7 +862,7 @@ static const struct net_proto_family can_family_ops = {
93323 };
93324
93325 /* notifier block for netdevice event */
93326-static struct notifier_block can_netdev_notifier __read_mostly = {
93327+static struct notifier_block can_netdev_notifier = {
93328 .notifier_call = can_notifier,
93329 };
93330
93331diff --git a/net/can/gw.c b/net/can/gw.c
93332index 3f9b0f3..fc6d4fa 100644
93333--- a/net/can/gw.c
93334+++ b/net/can/gw.c
93335@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
93336 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
93337
93338 static HLIST_HEAD(cgw_list);
93339-static struct notifier_block notifier;
93340
93341 static struct kmem_cache *cgw_cache __read_mostly;
93342
93343@@ -954,6 +953,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
93344 return err;
93345 }
93346
93347+static struct notifier_block notifier = {
93348+ .notifier_call = cgw_notifier
93349+};
93350+
93351 static __init int cgw_module_init(void)
93352 {
93353 /* sanitize given module parameter */
93354@@ -969,7 +972,6 @@ static __init int cgw_module_init(void)
93355 return -ENOMEM;
93356
93357 /* set notifier */
93358- notifier.notifier_call = cgw_notifier;
93359 register_netdevice_notifier(&notifier);
93360
93361 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
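Editorial note on the can/gw.c hunk above: it converts a notifier_block from runtime assembly (notifier.notifier_call = cgw_notifier in the init function) to a static initializer. That matters under this patch's constification: an object initialized entirely at build time can be placed in memory that is read-only after boot, so its function pointer can no longer be overwritten. Simplified stand-ins for the kernel types:

struct notifier_block_eg {
	int (*notifier_call)(struct notifier_block_eg *nb,
			     unsigned long action, void *data);
};

static int cgw_notifier_eg(struct notifier_block_eg *nb,
			   unsigned long action, void *data)
{
	(void)nb; (void)action; (void)data;
	return 0;
}

/* build-time init: no runtime store of a function pointer is needed */
static struct notifier_block_eg notifier_eg = {
	.notifier_call = cgw_notifier_eg,
};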
93362diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
93363index 4a5df7b..9ad1f1d 100644
93364--- a/net/ceph/messenger.c
93365+++ b/net/ceph/messenger.c
93366@@ -186,7 +186,7 @@ static void con_fault(struct ceph_connection *con);
93367 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
93368
93369 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
93370-static atomic_t addr_str_seq = ATOMIC_INIT(0);
93371+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
93372
93373 static struct page *zero_page; /* used in certain error cases */
93374
93375@@ -197,7 +197,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
93376 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
93377 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
93378
93379- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
93380+ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
93381 s = addr_str[i];
93382
93383 switch (ss->ss_family) {
93384diff --git a/net/compat.c b/net/compat.c
93385index dd32e34..6066f87 100644
93386--- a/net/compat.c
93387+++ b/net/compat.c
93388@@ -73,9 +73,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
93389 return -EFAULT;
93390 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
93391 kmsg->msg_namelen = sizeof(struct sockaddr_storage);
93392- kmsg->msg_name = compat_ptr(tmp1);
93393- kmsg->msg_iov = compat_ptr(tmp2);
93394- kmsg->msg_control = compat_ptr(tmp3);
93395+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
93396+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
93397+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
93398 return 0;
93399 }
93400
93401@@ -87,7 +87,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
93402
93403 if (kern_msg->msg_namelen) {
93404 if (mode == VERIFY_READ) {
93405- int err = move_addr_to_kernel(kern_msg->msg_name,
93406+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
93407 kern_msg->msg_namelen,
93408 kern_address);
93409 if (err < 0)
93410@@ -99,7 +99,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
93411 kern_msg->msg_name = NULL;
93412
93413 tot_len = iov_from_user_compat_to_kern(kern_iov,
93414- (struct compat_iovec __user *)kern_msg->msg_iov,
93415+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
93416 kern_msg->msg_iovlen);
93417 if (tot_len >= 0)
93418 kern_msg->msg_iov = kern_iov;
93419@@ -119,20 +119,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
93420
93421 #define CMSG_COMPAT_FIRSTHDR(msg) \
93422 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
93423- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
93424+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
93425 (struct compat_cmsghdr __user *)NULL)
93426
93427 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
93428 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
93429 (ucmlen) <= (unsigned long) \
93430 ((mhdr)->msg_controllen - \
93431- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
93432+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
93433
93434 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
93435 struct compat_cmsghdr __user *cmsg, int cmsg_len)
93436 {
93437 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
93438- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
93439+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
93440 msg->msg_controllen)
93441 return NULL;
93442 return (struct compat_cmsghdr __user *)ptr;
93443@@ -222,7 +222,7 @@ Efault:
93444
93445 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
93446 {
93447- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
93448+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
93449 struct compat_cmsghdr cmhdr;
93450 struct compat_timeval ctv;
93451 struct compat_timespec cts[3];
93452@@ -278,7 +278,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
93453
93454 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
93455 {
93456- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
93457+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
93458 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
93459 int fdnum = scm->fp->count;
93460 struct file **fp = scm->fp->fp;
93461@@ -366,7 +366,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
93462 return -EFAULT;
93463 old_fs = get_fs();
93464 set_fs(KERNEL_DS);
93465- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
93466+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
93467 set_fs(old_fs);
93468
93469 return err;
93470@@ -427,7 +427,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
93471 len = sizeof(ktime);
93472 old_fs = get_fs();
93473 set_fs(KERNEL_DS);
93474- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
93475+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
93476 set_fs(old_fs);
93477
93478 if (!err) {
93479@@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
93480 case MCAST_JOIN_GROUP:
93481 case MCAST_LEAVE_GROUP:
93482 {
93483- struct compat_group_req __user *gr32 = (void *)optval;
93484+ struct compat_group_req __user *gr32 = (void __user *)optval;
93485 struct group_req __user *kgr =
93486 compat_alloc_user_space(sizeof(struct group_req));
93487 u32 interface;
93488@@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
93489 case MCAST_BLOCK_SOURCE:
93490 case MCAST_UNBLOCK_SOURCE:
93491 {
93492- struct compat_group_source_req __user *gsr32 = (void *)optval;
93493+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
93494 struct group_source_req __user *kgsr = compat_alloc_user_space(
93495 sizeof(struct group_source_req));
93496 u32 interface;
93497@@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
93498 }
93499 case MCAST_MSFILTER:
93500 {
93501- struct compat_group_filter __user *gf32 = (void *)optval;
93502+ struct compat_group_filter __user *gf32 = (void __user *)optval;
93503 struct group_filter __user *kgf;
93504 u32 interface, fmode, numsrc;
93505
93506@@ -650,7 +650,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
93507 char __user *optval, int __user *optlen,
93508 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
93509 {
93510- struct compat_group_filter __user *gf32 = (void *)optval;
93511+ struct compat_group_filter __user *gf32 = (void __user *)optval;
93512 struct group_filter __user *kgf;
93513 int __user *koptlen;
93514 u32 interface, fmode, numsrc;
93515@@ -808,7 +808,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
93516
93517 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
93518 return -EINVAL;
93519- if (copy_from_user(a, args, nas[call]))
93520+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
93521 return -EFAULT;
93522 a0 = a[0];
93523 a1 = a[1];
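Editorial note on the compat_sys_socketcall() hunk above: the syscall copies nas[call] bytes of arguments into a fixed on-stack array a[], and the added nas[call] > sizeof a test means even a wrong table entry can never overflow that array. The same defensive shape (sizes invented for the example):

#include <string.h>

#define NCALLS 3

static const unsigned char nas_eg[NCALLS] = { 12, 12, 24 }; /* bytes per call */

static int fetch_call_args(unsigned long *a, size_t a_bytes,
			   const void *uargs, unsigned int call)
{
	if (call >= NCALLS || nas_eg[call] > a_bytes)
		return -1; /* mirrors: nas[call] > sizeof a */
	memcpy(a, uargs, nas_eg[call]); /* stands in for copy_from_user() */
	return 0;
}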
93524diff --git a/net/core/datagram.c b/net/core/datagram.c
93525index af814e7..3d761de 100644
93526--- a/net/core/datagram.c
93527+++ b/net/core/datagram.c
93528@@ -301,7 +301,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
93529 }
93530
93531 kfree_skb(skb);
93532- atomic_inc(&sk->sk_drops);
93533+ atomic_inc_unchecked(&sk->sk_drops);
93534 sk_mem_reclaim_partial(sk);
93535
93536 return err;
93537diff --git a/net/core/dev.c b/net/core/dev.c
93538index 3d13874..6e78dc7 100644
93539--- a/net/core/dev.c
93540+++ b/net/core/dev.c
93541@@ -1680,14 +1680,14 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
93542 {
93543 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
93544 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
93545- atomic_long_inc(&dev->rx_dropped);
93546+ atomic_long_inc_unchecked(&dev->rx_dropped);
93547 kfree_skb(skb);
93548 return NET_RX_DROP;
93549 }
93550 }
93551
93552 if (unlikely(!is_skb_forwardable(dev, skb))) {
93553- atomic_long_inc(&dev->rx_dropped);
93554+ atomic_long_inc_unchecked(&dev->rx_dropped);
93555 kfree_skb(skb);
93556 return NET_RX_DROP;
93557 }
93558@@ -2428,7 +2428,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
93559
93560 struct dev_gso_cb {
93561 void (*destructor)(struct sk_buff *skb);
93562-};
93563+} __no_const;
93564
93565 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
93566
93567@@ -3203,7 +3203,7 @@ enqueue:
93568
93569 local_irq_restore(flags);
93570
93571- atomic_long_inc(&skb->dev->rx_dropped);
93572+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
93573 kfree_skb(skb);
93574 return NET_RX_DROP;
93575 }
93576@@ -3275,7 +3275,7 @@ int netif_rx_ni(struct sk_buff *skb)
93577 }
93578 EXPORT_SYMBOL(netif_rx_ni);
93579
93580-static void net_tx_action(struct softirq_action *h)
93581+static __latent_entropy void net_tx_action(void)
93582 {
93583 struct softnet_data *sd = &__get_cpu_var(softnet_data);
93584
93585@@ -3609,7 +3609,7 @@ ncls:
93586 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
93587 } else {
93588 drop:
93589- atomic_long_inc(&skb->dev->rx_dropped);
93590+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
93591 kfree_skb(skb);
93592 		/* Jamal, now you will not be able to escape explaining
93593 		 * to me how you were going to use this. :-)
93594@@ -4269,7 +4269,7 @@ void netif_napi_del(struct napi_struct *napi)
93595 }
93596 EXPORT_SYMBOL(netif_napi_del);
93597
93598-static void net_rx_action(struct softirq_action *h)
93599+static __latent_entropy void net_rx_action(void)
93600 {
93601 struct softnet_data *sd = &__get_cpu_var(softnet_data);
93602 unsigned long time_limit = jiffies + 2;
93603@@ -5973,7 +5973,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
93604 } else {
93605 netdev_stats_to_stats64(storage, &dev->stats);
93606 }
93607- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
93608+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
93609 return storage;
93610 }
93611 EXPORT_SYMBOL(dev_get_stats);
93612diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
93613index 5b7d0e1..cb960fc 100644
93614--- a/net/core/dev_ioctl.c
93615+++ b/net/core/dev_ioctl.c
93616@@ -365,9 +365,13 @@ void dev_load(struct net *net, const char *name)
93617 if (no_module && capable(CAP_NET_ADMIN))
93618 no_module = request_module("netdev-%s", name);
93619 if (no_module && capable(CAP_SYS_MODULE)) {
93620+#ifdef CONFIG_GRKERNSEC_MODHARDEN
93621+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
93622+#else
93623 if (!request_module("%s", name))
93624 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
93625 name);
93626+#endif
93627 }
93628 }
93629 EXPORT_SYMBOL(dev_load);
93630diff --git a/net/core/flow.c b/net/core/flow.c
93631index dfa602c..3103d88 100644
93632--- a/net/core/flow.c
93633+++ b/net/core/flow.c
93634@@ -61,7 +61,7 @@ struct flow_cache {
93635 struct timer_list rnd_timer;
93636 };
93637
93638-atomic_t flow_cache_genid = ATOMIC_INIT(0);
93639+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
93640 EXPORT_SYMBOL(flow_cache_genid);
93641 static struct flow_cache flow_cache_global;
93642 static struct kmem_cache *flow_cachep __read_mostly;
93643@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
93644
93645 static int flow_entry_valid(struct flow_cache_entry *fle)
93646 {
93647- if (atomic_read(&flow_cache_genid) != fle->genid)
93648+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
93649 return 0;
93650 if (fle->object && !fle->object->ops->check(fle->object))
93651 return 0;
93652@@ -258,7 +258,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
93653 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
93654 fcp->hash_count++;
93655 }
93656- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
93657+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
93658 flo = fle->object;
93659 if (!flo)
93660 goto ret_object;
93661@@ -279,7 +279,7 @@ nocache:
93662 }
93663 flo = resolver(net, key, family, dir, flo, ctx);
93664 if (fle) {
93665- fle->genid = atomic_read(&flow_cache_genid);
93666+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
93667 if (!IS_ERR(flo))
93668 fle->object = flo;
93669 else
93670diff --git a/net/core/iovec.c b/net/core/iovec.c
93671index 7d84ea1..55385ae 100644
93672--- a/net/core/iovec.c
93673+++ b/net/core/iovec.c
93674@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
93675 if (m->msg_namelen) {
93676 if (mode == VERIFY_READ) {
93677 void __user *namep;
93678- namep = (void __user __force *) m->msg_name;
93679+ namep = (void __force_user *) m->msg_name;
93680 err = move_addr_to_kernel(namep, m->msg_namelen,
93681 address);
93682 if (err < 0)
93683@@ -55,7 +55,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
93684 }
93685
93686 size = m->msg_iovlen * sizeof(struct iovec);
93687- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
93688+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
93689 return -EFAULT;
93690
93691 m->msg_iov = iov;
93692diff --git a/net/core/neighbour.c b/net/core/neighbour.c
93693index 6072610..7374c18 100644
93694--- a/net/core/neighbour.c
93695+++ b/net/core/neighbour.c
93696@@ -2774,7 +2774,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
93697 void __user *buffer, size_t *lenp, loff_t *ppos)
93698 {
93699 int size, ret;
93700- struct ctl_table tmp = *ctl;
93701+ ctl_table_no_const tmp = *ctl;
93702
93703 tmp.extra1 = &zero;
93704 tmp.extra2 = &unres_qlen_max;
93705diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
93706index 2bf8329..7960607 100644
93707--- a/net/core/net-procfs.c
93708+++ b/net/core/net-procfs.c
93709@@ -283,8 +283,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
93710 else
93711 seq_printf(seq, "%04x", ntohs(pt->type));
93712
93713+#ifdef CONFIG_GRKERNSEC_HIDESYM
93714+ seq_printf(seq, " %-8s %pf\n",
93715+ pt->dev ? pt->dev->name : "", NULL);
93716+#else
93717 seq_printf(seq, " %-8s %pf\n",
93718 pt->dev ? pt->dev->name : "", pt->func);
93719+#endif
93720 }
93721
93722 return 0;
93723diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
93724index d954b56..b0a0f7a 100644
93725--- a/net/core/net-sysfs.c
93726+++ b/net/core/net-sysfs.c
93727@@ -1356,7 +1356,7 @@ void netdev_class_remove_file(struct class_attribute *class_attr)
93728 }
93729 EXPORT_SYMBOL(netdev_class_remove_file);
93730
93731-int netdev_kobject_init(void)
93732+int __init netdev_kobject_init(void)
93733 {
93734 kobj_ns_type_register(&net_ns_type_operations);
93735 return class_register(&net_class);
93736diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
93737index 81d3a9a..a0bd7a8 100644
93738--- a/net/core/net_namespace.c
93739+++ b/net/core/net_namespace.c
93740@@ -443,7 +443,7 @@ static int __register_pernet_operations(struct list_head *list,
93741 int error;
93742 LIST_HEAD(net_exit_list);
93743
93744- list_add_tail(&ops->list, list);
93745+ pax_list_add_tail((struct list_head *)&ops->list, list);
93746 if (ops->init || (ops->id && ops->size)) {
93747 for_each_net(net) {
93748 error = ops_init(ops, net);
93749@@ -456,7 +456,7 @@ static int __register_pernet_operations(struct list_head *list,
93750
93751 out_undo:
93752 /* If I have an error cleanup all namespaces I initialized */
93753- list_del(&ops->list);
93754+ pax_list_del((struct list_head *)&ops->list);
93755 ops_exit_list(ops, &net_exit_list);
93756 ops_free_list(ops, &net_exit_list);
93757 return error;
93758@@ -467,7 +467,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
93759 struct net *net;
93760 LIST_HEAD(net_exit_list);
93761
93762- list_del(&ops->list);
93763+ pax_list_del((struct list_head *)&ops->list);
93764 for_each_net(net)
93765 list_add_tail(&net->exit_list, &net_exit_list);
93766 ops_exit_list(ops, &net_exit_list);
93767@@ -601,7 +601,7 @@ int register_pernet_device(struct pernet_operations *ops)
93768 mutex_lock(&net_mutex);
93769 error = register_pernet_operations(&pernet_list, ops);
93770 if (!error && (first_device == &pernet_list))
93771- first_device = &ops->list;
93772+ first_device = (struct list_head *)&ops->list;
93773 mutex_unlock(&net_mutex);
93774 return error;
93775 }
93776diff --git a/net/core/netpoll.c b/net/core/netpoll.c
93777index fc75c9e..8c8e9be 100644
93778--- a/net/core/netpoll.c
93779+++ b/net/core/netpoll.c
93780@@ -428,7 +428,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
93781 struct udphdr *udph;
93782 struct iphdr *iph;
93783 struct ethhdr *eth;
93784- static atomic_t ip_ident;
93785+ static atomic_unchecked_t ip_ident;
93786 struct ipv6hdr *ip6h;
93787
93788 udp_len = len + sizeof(*udph);
93789@@ -499,7 +499,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
93790 put_unaligned(0x45, (unsigned char *)iph);
93791 iph->tos = 0;
93792 put_unaligned(htons(ip_len), &(iph->tot_len));
93793- iph->id = htons(atomic_inc_return(&ip_ident));
93794+ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
93795 iph->frag_off = 0;
93796 iph->ttl = 64;
93797 iph->protocol = IPPROTO_UDP;
93798diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
93799index 2a0e21d..6ad7642 100644
93800--- a/net/core/rtnetlink.c
93801+++ b/net/core/rtnetlink.c
93802@@ -58,7 +58,7 @@ struct rtnl_link {
93803 rtnl_doit_func doit;
93804 rtnl_dumpit_func dumpit;
93805 rtnl_calcit_func calcit;
93806-};
93807+} __no_const;
93808
93809 static DEFINE_MUTEX(rtnl_mutex);
93810
93811@@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
93812 if (rtnl_link_ops_get(ops->kind))
93813 return -EEXIST;
93814
93815- if (!ops->dellink)
93816- ops->dellink = unregister_netdevice_queue;
93817+ if (!ops->dellink) {
93818+ pax_open_kernel();
93819+ *(void **)&ops->dellink = unregister_netdevice_queue;
93820+ pax_close_kernel();
93821+ }
93822
93823- list_add_tail(&ops->list, &link_ops);
93824+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
93825 return 0;
93826 }
93827 EXPORT_SYMBOL_GPL(__rtnl_link_register);
93828@@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
93829 for_each_net(net) {
93830 __rtnl_kill_links(net, ops);
93831 }
93832- list_del(&ops->list);
93833+ pax_list_del((struct list_head *)&ops->list);
93834 }
93835 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
93836
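
Because KERNEXEC/constify place ops structures in read-only memory, the one-off write that installs a default dellink handler is bracketed with pax_open_kernel()/pax_close_kernel(), and the *(void **)& cast defeats the constified field type. A user-space analogue of the same dance using mprotect(); the kernel version instead toggles write protection (CR0.WP on x86) around the store:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    struct ops { void (*dellink)(void); };

    static void default_dellink(void) { puts("default dellink"); }

    int main(void)
    {
        long pg = sysconf(_SC_PAGESIZE);
        /* Stand-in for an ops struct placed in a read-only section. */
        struct ops *ops = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ops == MAP_FAILED)
            return 1;
        memset(ops, 0, sizeof(*ops));
        mprotect(ops, pg, PROT_READ);              /* now "constified" */

        mprotect(ops, pg, PROT_READ | PROT_WRITE); /* pax_open_kernel() */
        ops->dellink = default_dellink;
        mprotect(ops, pg, PROT_READ);              /* pax_close_kernel() */

        ops->dellink();
        return 0;
    }
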
93837diff --git a/net/core/scm.c b/net/core/scm.c
93838index b442e7e..6f5b5a2 100644
93839--- a/net/core/scm.c
93840+++ b/net/core/scm.c
93841@@ -210,7 +210,7 @@ EXPORT_SYMBOL(__scm_send);
93842 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
93843 {
93844 struct cmsghdr __user *cm
93845- = (__force struct cmsghdr __user *)msg->msg_control;
93846+ = (struct cmsghdr __force_user *)msg->msg_control;
93847 struct cmsghdr cmhdr;
93848 int cmlen = CMSG_LEN(len);
93849 int err;
93850@@ -233,7 +233,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
93851 err = -EFAULT;
93852 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
93853 goto out;
93854- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
93855+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
93856 goto out;
93857 cmlen = CMSG_SPACE(len);
93858 if (msg->msg_controllen < cmlen)
93859@@ -249,7 +249,7 @@ EXPORT_SYMBOL(put_cmsg);
93860 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
93861 {
93862 struct cmsghdr __user *cm
93863- = (__force struct cmsghdr __user*)msg->msg_control;
93864+ = (struct cmsghdr __force_user *)msg->msg_control;
93865
93866 int fdmax = 0;
93867 int fdnum = scm->fp->count;
93868@@ -269,7 +269,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
93869 if (fdnum < fdmax)
93870 fdmax = fdnum;
93871
93872- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
93873+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
93874 i++, cmfptr++)
93875 {
93876 struct socket *sock;
93877diff --git a/net/core/skbuff.c b/net/core/skbuff.c
93878index c28c7fe..a399a6d 100644
93879--- a/net/core/skbuff.c
93880+++ b/net/core/skbuff.c
93881@@ -3104,13 +3104,15 @@ void __init skb_init(void)
93882 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
93883 sizeof(struct sk_buff),
93884 0,
93885- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
93886+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
93887+ SLAB_NO_SANITIZE,
93888 NULL);
93889 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
93890 (2*sizeof(struct sk_buff)) +
93891 sizeof(atomic_t),
93892 0,
93893- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
93894+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
93895+ SLAB_NO_SANITIZE,
93896 NULL);
93897 }
93898
93899@@ -3541,6 +3543,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
93900 skb->tstamp.tv64 = 0;
93901 skb->pkt_type = PACKET_HOST;
93902 skb->skb_iif = 0;
93903+ skb->local_df = 0;
93904 skb_dst_drop(skb);
93905 skb->mark = 0;
93906 secpath_reset(skb);
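
Tagging the skbuff caches SLAB_NO_SANITIZE exempts them from PAX_MEMORY_SANITIZE, which otherwise wipes slab objects on free to curb use-after-free data leaks; sk_buff churn is high enough that the wipe is presumably skipped here for performance. Conceptually the free path looks like this (sketch, not the real slab internals):

    #include <stdlib.h>
    #include <string.h>

    #define SLAB_NO_SANITIZE 0x1

    struct kmem_cache { unsigned int flags; size_t size; };

    /* Sanitizing free wipes the object unless the cache opted out. */
    static void cache_free(struct kmem_cache *c, void *obj)
    {
        if (!(c->flags & SLAB_NO_SANITIZE))
            memset(obj, 0xfe, c->size);   /* poison stale data before reuse */
        free(obj);
    }

    int main(void)
    {
        struct kmem_cache skb_cache = { SLAB_NO_SANITIZE, 232 };
        cache_free(&skb_cache, malloc(skb_cache.size));
        return 0;
    }
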
93907diff --git a/net/core/sock.c b/net/core/sock.c
93908index 0b39e7a..5e9f91e 100644
93909--- a/net/core/sock.c
93910+++ b/net/core/sock.c
93911@@ -393,7 +393,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
93912 struct sk_buff_head *list = &sk->sk_receive_queue;
93913
93914 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
93915- atomic_inc(&sk->sk_drops);
93916+ atomic_inc_unchecked(&sk->sk_drops);
93917 trace_sock_rcvqueue_full(sk, skb);
93918 return -ENOMEM;
93919 }
93920@@ -403,7 +403,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
93921 return err;
93922
93923 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
93924- atomic_inc(&sk->sk_drops);
93925+ atomic_inc_unchecked(&sk->sk_drops);
93926 return -ENOBUFS;
93927 }
93928
93929@@ -423,7 +423,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
93930 skb_dst_force(skb);
93931
93932 spin_lock_irqsave(&list->lock, flags);
93933- skb->dropcount = atomic_read(&sk->sk_drops);
93934+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
93935 __skb_queue_tail(list, skb);
93936 spin_unlock_irqrestore(&list->lock, flags);
93937
93938@@ -443,7 +443,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
93939 skb->dev = NULL;
93940
93941 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
93942- atomic_inc(&sk->sk_drops);
93943+ atomic_inc_unchecked(&sk->sk_drops);
93944 goto discard_and_relse;
93945 }
93946 if (nested)
93947@@ -461,7 +461,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
93948 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
93949 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
93950 bh_unlock_sock(sk);
93951- atomic_inc(&sk->sk_drops);
93952+ atomic_inc_unchecked(&sk->sk_drops);
93953 goto discard_and_relse;
93954 }
93955
93956@@ -949,12 +949,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
93957 struct timeval tm;
93958 } v;
93959
93960- int lv = sizeof(int);
93961- int len;
93962+ unsigned int lv = sizeof(int);
93963+ unsigned int len;
93964
93965 if (get_user(len, optlen))
93966 return -EFAULT;
93967- if (len < 0)
93968+ if (len > INT_MAX)
93969 return -EINVAL;
93970
93971 memset(&v, 0, sizeof(v));
93972@@ -1106,11 +1106,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
93973
93974 case SO_PEERNAME:
93975 {
93976- char address[128];
93977+ char address[_K_SS_MAXSIZE];
93978
93979 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
93980 return -ENOTCONN;
93981- if (lv < len)
93982+ if (lv < len || sizeof address < len)
93983 return -EINVAL;
93984 if (copy_to_user(optval, address, len))
93985 return -EFAULT;
93986@@ -1183,7 +1183,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
93987
93988 if (len > lv)
93989 len = lv;
93990- if (copy_to_user(optval, &v, len))
93991+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
93992 return -EFAULT;
93993 lenout:
93994 if (put_user(len, optlen))
93995@@ -2326,7 +2326,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
93996 */
93997 smp_wmb();
93998 atomic_set(&sk->sk_refcnt, 1);
93999- atomic_set(&sk->sk_drops, 0);
94000+ atomic_set_unchecked(&sk->sk_drops, 0);
94001 }
94002 EXPORT_SYMBOL(sock_init_data);
94003
94004@@ -2451,6 +2451,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
94005 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
94006 int level, int type)
94007 {
94008+ struct sock_extended_err ee;
94009 struct sock_exterr_skb *serr;
94010 struct sk_buff *skb, *skb2;
94011 int copied, err;
94012@@ -2472,7 +2473,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
94013 sock_recv_timestamp(msg, sk, skb);
94014
94015 serr = SKB_EXT_ERR(skb);
94016- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
94017+ ee = serr->ee;
94018+ put_cmsg(msg, level, type, sizeof ee, &ee);
94019
94020 msg->msg_flags |= MSG_ERRQUEUE;
94021 err = copied;
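
The sock_getsockopt and sock_recv_errqueue changes are all copy-bounds hardening: the user-supplied length becomes unsigned and is clamped against the kernel-side object on every copy_to_user, and the extended-error struct is bounced through a stack copy so the user copy is sourced from an exactly sized local rather than skb storage (that last rationale is my reading; the patch does not say). The recurring clamp-before-copy rule, reduced to a runnable sketch:

    #include <stdio.h>
    #include <string.h>

    /* Never let a user-controlled length exceed the kernel object. */
    static int copy_option(void *dst, const void *obj, size_t objlen,
                           unsigned int len)
    {
        if (len > objlen)         /* reject instead of over-reading */
            return -1;            /* -EINVAL in the kernel */
        memcpy(dst, obj, len);    /* stands in for copy_to_user() */
        return 0;
    }

    int main(void)
    {
        int v = 42;
        char out[64];
        printf("%d\n", copy_option(out, &v, sizeof(v), 200)); /* rejected */
        printf("%d\n", copy_option(out, &v, sizeof(v), 4));   /* ok */
        return 0;
    }
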
94022diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
94023index a0e9cf6..ef7f9ed 100644
94024--- a/net/core/sock_diag.c
94025+++ b/net/core/sock_diag.c
94026@@ -9,26 +9,33 @@
94027 #include <linux/inet_diag.h>
94028 #include <linux/sock_diag.h>
94029
94030-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
94031+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
94032 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
94033 static DEFINE_MUTEX(sock_diag_table_mutex);
94034
94035 int sock_diag_check_cookie(void *sk, __u32 *cookie)
94036 {
94037+#ifndef CONFIG_GRKERNSEC_HIDESYM
94038 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
94039 cookie[1] != INET_DIAG_NOCOOKIE) &&
94040 ((u32)(unsigned long)sk != cookie[0] ||
94041 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
94042 return -ESTALE;
94043 else
94044+#endif
94045 return 0;
94046 }
94047 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
94048
94049 void sock_diag_save_cookie(void *sk, __u32 *cookie)
94050 {
94051+#ifdef CONFIG_GRKERNSEC_HIDESYM
94052+ cookie[0] = 0;
94053+ cookie[1] = 0;
94054+#else
94055 cookie[0] = (u32)(unsigned long)sk;
94056 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
94057+#endif
94058 }
94059 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
94060
94061@@ -113,8 +120,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
94062 mutex_lock(&sock_diag_table_mutex);
94063 if (sock_diag_handlers[hndl->family])
94064 err = -EBUSY;
94065- else
94066+ else {
94067+ pax_open_kernel();
94068 sock_diag_handlers[hndl->family] = hndl;
94069+ pax_close_kernel();
94070+ }
94071 mutex_unlock(&sock_diag_table_mutex);
94072
94073 return err;
94074@@ -130,7 +140,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
94075
94076 mutex_lock(&sock_diag_table_mutex);
94077 BUG_ON(sock_diag_handlers[family] != hnld);
94078+ pax_open_kernel();
94079 sock_diag_handlers[family] = NULL;
94080+ pax_close_kernel();
94081 mutex_unlock(&sock_diag_table_mutex);
94082 }
94083 EXPORT_SYMBOL_GPL(sock_diag_unregister);
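
sock_diag cookies are derived from the socket's kernel address, which GRKERNSEC_HIDESYM treats as an address leak: with the option set the saved cookie is zeroed and the staleness check is compiled out, trading the cookie's validity guarantee for not handing heap pointers to user space (the handler table also becomes __read_only, hence the pax_open_kernel() bracket). What the unhidden cookie gives away, in miniature:

    #include <stdint.h>
    #include <stdio.h>

    /* The diag cookie is the object address split into two u32 halves. */
    static void save_cookie(const void *sk, uint32_t cookie[2])
    {
        uintptr_t p = (uintptr_t)sk;
        cookie[0] = (uint32_t)p;
        cookie[1] = (uint32_t)((p >> 31) >> 1);  /* avoids UB on 32-bit */
    }

    int main(void)
    {
        uint32_t c[2];
        int dummy;
        save_cookie(&dummy, c);
        printf("leaked address: %08x%08x\n", (unsigned)c[1], (unsigned)c[0]);
        return 0;
    }
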
94084diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
94085index cca4441..5e616de 100644
94086--- a/net/core/sysctl_net_core.c
94087+++ b/net/core/sysctl_net_core.c
94088@@ -32,7 +32,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
94089 {
94090 unsigned int orig_size, size;
94091 int ret, i;
94092- struct ctl_table tmp = {
94093+ ctl_table_no_const tmp = {
94094 .data = &size,
94095 .maxlen = sizeof(size),
94096 .mode = table->mode
94097@@ -199,7 +199,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
94098 void __user *buffer, size_t *lenp, loff_t *ppos)
94099 {
94100 char id[IFNAMSIZ];
94101- struct ctl_table tbl = {
94102+ ctl_table_no_const tbl = {
94103 .data = id,
94104 .maxlen = IFNAMSIZ,
94105 };
94106@@ -378,13 +378,12 @@ static struct ctl_table netns_core_table[] = {
94107
94108 static __net_init int sysctl_core_net_init(struct net *net)
94109 {
94110- struct ctl_table *tbl;
94111+ ctl_table_no_const *tbl = NULL;
94112
94113 net->core.sysctl_somaxconn = SOMAXCONN;
94114
94115- tbl = netns_core_table;
94116 if (!net_eq(net, &init_net)) {
94117- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
94118+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
94119 if (tbl == NULL)
94120 goto err_dup;
94121
94122@@ -394,17 +393,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
94123 if (net->user_ns != &init_user_ns) {
94124 tbl[0].procname = NULL;
94125 }
94126- }
94127-
94128- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
94129+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
94130+ } else
94131+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
94132 if (net->core.sysctl_hdr == NULL)
94133 goto err_reg;
94134
94135 return 0;
94136
94137 err_reg:
94138- if (tbl != netns_core_table)
94139- kfree(tbl);
94140+ kfree(tbl);
94141 err_dup:
94142 return -ENOMEM;
94143 }
94144@@ -419,7 +417,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
94145 kfree(tbl);
94146 }
94147
94148-static __net_initdata struct pernet_operations sysctl_core_ops = {
94149+static __net_initconst struct pernet_operations sysctl_core_ops = {
94150 .init = sysctl_core_net_init,
94151 .exit = sysctl_core_net_exit,
94152 };
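
sysctl_core_net_init shows the template the patch applies to every per-netns sysctl registration: init_net registers the const master table directly, other namespaces register their kmemdup'd ctl_table_no_const copy, and the error path may kfree(tbl) unconditionally because tbl stays NULL in the init_net case. The reshaped control flow, condensed into a standalone sketch (stub registration, hypothetical names):

    #include <stdlib.h>
    #include <string.h>

    struct ctl_table { const char *procname; };
    static const struct ctl_table master[2] = { { "somaxconn" }, { NULL } };

    static int register_tbl(const struct ctl_table *t) { (void)t; return 0; }

    static int net_init(int is_init_net)
    {
        struct ctl_table *tbl = NULL;      /* stays NULL for init_net */

        if (!is_init_net) {
            tbl = malloc(sizeof(master));
            if (!tbl)
                goto err;
            memcpy(tbl, master, sizeof(master));
            if (register_tbl(tbl) < 0)     /* register the writable copy */
                goto err;
        } else if (register_tbl(master) < 0) {
            goto err;                      /* const master, nothing to free */
        }
        return 0;                          /* copy stays live while registered */
    err:
        free(tbl);                         /* free(NULL) is a no-op */
        return -1;
    }

    int main(void) { return net_init(1) || net_init(0); }
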
94153diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
94154index dd4d506..fb2fb87 100644
94155--- a/net/decnet/af_decnet.c
94156+++ b/net/decnet/af_decnet.c
94157@@ -465,6 +465,7 @@ static struct proto dn_proto = {
94158 .sysctl_rmem = sysctl_decnet_rmem,
94159 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
94160 .obj_size = sizeof(struct dn_sock),
94161+ .slab_flags = SLAB_USERCOPY,
94162 };
94163
94164 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
94165diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
94166index 5325b54..a0d4d69 100644
94167--- a/net/decnet/sysctl_net_decnet.c
94168+++ b/net/decnet/sysctl_net_decnet.c
94169@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
94170
94171 if (len > *lenp) len = *lenp;
94172
94173- if (copy_to_user(buffer, addr, len))
94174+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
94175 return -EFAULT;
94176
94177 *lenp = len;
94178@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
94179
94180 if (len > *lenp) len = *lenp;
94181
94182- if (copy_to_user(buffer, devname, len))
94183+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
94184 return -EFAULT;
94185
94186 *lenp = len;
94187diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
94188index 008f337..b03b8c9 100644
94189--- a/net/ieee802154/6lowpan.c
94190+++ b/net/ieee802154/6lowpan.c
94191@@ -548,7 +548,7 @@ static int lowpan_header_create(struct sk_buff *skb,
94192 hc06_ptr += 3;
94193 } else {
94194 /* compress nothing */
94195- memcpy(hc06_ptr, &hdr, 4);
94196+ memcpy(hc06_ptr, hdr, 4);
94197 /* replace the top byte with new ECN | DSCP format */
94198 *hc06_ptr = tmp;
94199 hc06_ptr += 4;
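
The 6lowpan hunk is a plain bug fix rather than hardening: hdr is already a pointer to the IPv6 header here, so memcpy(hc06_ptr, &hdr, 4) copied the first four bytes of the pointer variable itself; dropping the & copies the intended header bytes. The difference in miniature:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char header[8] = { 0x60, 0x03, 0x12, 0x34 };
        unsigned char *hdr = header;   /* like the ipv6hdr pointer */
        unsigned char out[4];

        memcpy(out, &hdr, 4);  /* BUG: copies bytes of the pointer value */
        memcpy(out, hdr, 4);   /* fixed: copies the header bytes */
        printf("%02x %02x %02x %02x\n", out[0], out[1], out[2], out[3]);
        return 0;
    }
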
94200diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
94201index cfeb85c..385989a 100644
94202--- a/net/ipv4/af_inet.c
94203+++ b/net/ipv4/af_inet.c
94204@@ -1675,13 +1675,9 @@ static int __init inet_init(void)
94205
94206 BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
94207
94208- sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
94209- if (!sysctl_local_reserved_ports)
94210- goto out;
94211-
94212 rc = proto_register(&tcp_prot, 1);
94213 if (rc)
94214- goto out_free_reserved_ports;
94215+ goto out;
94216
94217 rc = proto_register(&udp_prot, 1);
94218 if (rc)
94219@@ -1790,8 +1786,6 @@ out_unregister_udp_proto:
94220 proto_unregister(&udp_prot);
94221 out_unregister_tcp_proto:
94222 proto_unregister(&tcp_prot);
94223-out_free_reserved_ports:
94224- kfree(sysctl_local_reserved_ports);
94225 goto out;
94226 }
94227
94228diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
94229index a1b5bcb..62ec5c6 100644
94230--- a/net/ipv4/devinet.c
94231+++ b/net/ipv4/devinet.c
94232@@ -1533,7 +1533,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
94233 idx = 0;
94234 head = &net->dev_index_head[h];
94235 rcu_read_lock();
94236- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
94237+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
94238 net->dev_base_seq;
94239 hlist_for_each_entry_rcu(dev, head, index_hlist) {
94240 if (idx < s_idx)
94241@@ -1844,7 +1844,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
94242 idx = 0;
94243 head = &net->dev_index_head[h];
94244 rcu_read_lock();
94245- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
94246+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
94247 net->dev_base_seq;
94248 hlist_for_each_entry_rcu(dev, head, index_hlist) {
94249 if (idx < s_idx)
94250@@ -2069,7 +2069,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
94251 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
94252 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
94253
94254-static struct devinet_sysctl_table {
94255+static const struct devinet_sysctl_table {
94256 struct ctl_table_header *sysctl_header;
94257 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
94258 } devinet_sysctl = {
94259@@ -2191,7 +2191,7 @@ static __net_init int devinet_init_net(struct net *net)
94260 int err;
94261 struct ipv4_devconf *all, *dflt;
94262 #ifdef CONFIG_SYSCTL
94263- struct ctl_table *tbl = ctl_forward_entry;
94264+ ctl_table_no_const *tbl = NULL;
94265 struct ctl_table_header *forw_hdr;
94266 #endif
94267
94268@@ -2209,7 +2209,7 @@ static __net_init int devinet_init_net(struct net *net)
94269 goto err_alloc_dflt;
94270
94271 #ifdef CONFIG_SYSCTL
94272- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
94273+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
94274 if (tbl == NULL)
94275 goto err_alloc_ctl;
94276
94277@@ -2229,7 +2229,10 @@ static __net_init int devinet_init_net(struct net *net)
94278 goto err_reg_dflt;
94279
94280 err = -ENOMEM;
94281- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
94282+ if (!net_eq(net, &init_net))
94283+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
94284+ else
94285+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
94286 if (forw_hdr == NULL)
94287 goto err_reg_ctl;
94288 net->ipv4.forw_hdr = forw_hdr;
94289@@ -2245,8 +2248,7 @@ err_reg_ctl:
94290 err_reg_dflt:
94291 __devinet_sysctl_unregister(all);
94292 err_reg_all:
94293- if (tbl != ctl_forward_entry)
94294- kfree(tbl);
94295+ kfree(tbl);
94296 err_alloc_ctl:
94297 #endif
94298 if (dflt != &ipv4_devconf_dflt)
94299diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
94300index b3f627a..b0f3e99 100644
94301--- a/net/ipv4/fib_frontend.c
94302+++ b/net/ipv4/fib_frontend.c
94303@@ -1017,12 +1017,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
94304 #ifdef CONFIG_IP_ROUTE_MULTIPATH
94305 fib_sync_up(dev);
94306 #endif
94307- atomic_inc(&net->ipv4.dev_addr_genid);
94308+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
94309 rt_cache_flush(dev_net(dev));
94310 break;
94311 case NETDEV_DOWN:
94312 fib_del_ifaddr(ifa, NULL);
94313- atomic_inc(&net->ipv4.dev_addr_genid);
94314+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
94315 if (ifa->ifa_dev->ifa_list == NULL) {
94316 /* Last address was deleted from this interface.
94317 * Disable IP.
94318@@ -1058,7 +1058,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
94319 #ifdef CONFIG_IP_ROUTE_MULTIPATH
94320 fib_sync_up(dev);
94321 #endif
94322- atomic_inc(&net->ipv4.dev_addr_genid);
94323+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
94324 rt_cache_flush(net);
94325 break;
94326 case NETDEV_DOWN:
94327diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
94328index d5dbca5..6251d5f 100644
94329--- a/net/ipv4/fib_semantics.c
94330+++ b/net/ipv4/fib_semantics.c
94331@@ -766,7 +766,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
94332 nh->nh_saddr = inet_select_addr(nh->nh_dev,
94333 nh->nh_gw,
94334 nh->nh_parent->fib_scope);
94335- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
94336+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
94337
94338 return nh->nh_saddr;
94339 }
94340diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
94341index 6acb541..9ea617d 100644
94342--- a/net/ipv4/inet_connection_sock.c
94343+++ b/net/ipv4/inet_connection_sock.c
94344@@ -37,7 +37,7 @@ struct local_ports sysctl_local_ports __read_mostly = {
94345 .range = { 32768, 61000 },
94346 };
94347
94348-unsigned long *sysctl_local_reserved_ports;
94349+unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
94350 EXPORT_SYMBOL(sysctl_local_reserved_ports);
94351
94352 void inet_get_local_port_range(int *low, int *high)
94353diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
94354index 5f64875..31cf54d 100644
94355--- a/net/ipv4/inet_diag.c
94356+++ b/net/ipv4/inet_diag.c
94357@@ -106,6 +106,10 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
94358
94359 r->id.idiag_sport = inet->inet_sport;
94360 r->id.idiag_dport = inet->inet_dport;
94361+
94362+ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
94363+ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
94364+
94365 r->id.idiag_src[0] = inet->inet_rcv_saddr;
94366 r->id.idiag_dst[0] = inet->inet_daddr;
94367
94368@@ -240,12 +244,19 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
94369
94370 r->idiag_family = tw->tw_family;
94371 r->idiag_retrans = 0;
94372+
94373 r->id.idiag_if = tw->tw_bound_dev_if;
94374 sock_diag_save_cookie(tw, r->id.idiag_cookie);
94375+
94376 r->id.idiag_sport = tw->tw_sport;
94377 r->id.idiag_dport = tw->tw_dport;
94378+
94379+ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
94380+ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
94381+
94382 r->id.idiag_src[0] = tw->tw_rcv_saddr;
94383 r->id.idiag_dst[0] = tw->tw_daddr;
94384+
94385 r->idiag_state = tw->tw_substate;
94386 r->idiag_timer = 3;
94387 r->idiag_expires = DIV_ROUND_UP(tmo * 1000, HZ);
94388@@ -732,8 +743,13 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
94389
94390 r->id.idiag_sport = inet->inet_sport;
94391 r->id.idiag_dport = ireq->rmt_port;
94392+
94393+ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
94394+ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
94395+
94396 r->id.idiag_src[0] = ireq->loc_addr;
94397 r->id.idiag_dst[0] = ireq->rmt_addr;
94398+
94399 r->idiag_expires = jiffies_to_msecs(tmo);
94400 r->idiag_rqueue = 0;
94401 r->idiag_wqueue = 0;
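
The inet_diag hunks close a classic netlink infoleak: idiag_src and idiag_dst are 16-byte fields sized for IPv6, but IPv4 sockets fill only the first 4 bytes, so 12 bytes of uninitialized kernel memory per field went out to user space; zeroing the whole field first plugs the hole (a fix of the same shape later landed upstream). The shape of the bug:

    #include <stdio.h>
    #include <string.h>

    struct diag_id {
        unsigned int src[4];   /* room for IPv6; IPv4 uses src[0] only */
    };

    int main(void)
    {
        struct diag_id id;             /* uninitialized stack memory */
        memset(&id, 0, sizeof(id));    /* the fix: zero before filling */
        id.src[0] = 0x0100007f;        /* 127.0.0.1 */
        /* Without the memset, src[1..3] would carry whatever was on
         * the stack into the netlink reply copied to user space. */
        printf("%08x %08x\n", id.src[0], id.src[1]);
        return 0;
    }
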
94402diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
94403index 96da9c7..b956690 100644
94404--- a/net/ipv4/inet_hashtables.c
94405+++ b/net/ipv4/inet_hashtables.c
94406@@ -18,12 +18,15 @@
94407 #include <linux/sched.h>
94408 #include <linux/slab.h>
94409 #include <linux/wait.h>
94410+#include <linux/security.h>
94411
94412 #include <net/inet_connection_sock.h>
94413 #include <net/inet_hashtables.h>
94414 #include <net/secure_seq.h>
94415 #include <net/ip.h>
94416
94417+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
94418+
94419 /*
94420 * Allocate and initialize a new local port bind bucket.
94421 * The bindhash mutex for snum's hash chain must be held here.
94422@@ -554,6 +557,8 @@ ok:
94423 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
94424 spin_unlock(&head->lock);
94425
94426+ gr_update_task_in_ip_table(current, inet_sk(sk));
94427+
94428 if (tw) {
94429 inet_twsk_deschedule(tw, death_row);
94430 while (twrefcnt) {
94431diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
94432index 33d5537..da337a4 100644
94433--- a/net/ipv4/inetpeer.c
94434+++ b/net/ipv4/inetpeer.c
94435@@ -503,8 +503,8 @@ relookup:
94436 if (p) {
94437 p->daddr = *daddr;
94438 atomic_set(&p->refcnt, 1);
94439- atomic_set(&p->rid, 0);
94440- atomic_set(&p->ip_id_count,
94441+ atomic_set_unchecked(&p->rid, 0);
94442+ atomic_set_unchecked(&p->ip_id_count,
94443 (daddr->family == AF_INET) ?
94444 secure_ip_id(daddr->addr.a4) :
94445 secure_ipv6_id(daddr->addr.a6));
94446diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
94447index b66910a..cfe416e 100644
94448--- a/net/ipv4/ip_fragment.c
94449+++ b/net/ipv4/ip_fragment.c
94450@@ -282,7 +282,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
94451 return 0;
94452
94453 start = qp->rid;
94454- end = atomic_inc_return(&peer->rid);
94455+ end = atomic_inc_return_unchecked(&peer->rid);
94456 qp->rid = end;
94457
94458 rc = qp->q.fragments && (end - start) > max;
94459@@ -759,12 +759,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
94460
94461 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
94462 {
94463- struct ctl_table *table;
94464+ ctl_table_no_const *table = NULL;
94465 struct ctl_table_header *hdr;
94466
94467- table = ip4_frags_ns_ctl_table;
94468 if (!net_eq(net, &init_net)) {
94469- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
94470+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
94471 if (table == NULL)
94472 goto err_alloc;
94473
94474@@ -775,9 +774,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
94475 /* Don't export sysctls to unprivileged users */
94476 if (net->user_ns != &init_user_ns)
94477 table[0].procname = NULL;
94478- }
94479+ hdr = register_net_sysctl(net, "net/ipv4", table);
94480+ } else
94481+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
94482
94483- hdr = register_net_sysctl(net, "net/ipv4", table);
94484 if (hdr == NULL)
94485 goto err_reg;
94486
94487@@ -785,8 +785,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
94488 return 0;
94489
94490 err_reg:
94491- if (!net_eq(net, &init_net))
94492- kfree(table);
94493+ kfree(table);
94494 err_alloc:
94495 return -ENOMEM;
94496 }
94497diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
94498index d7aea4c..a8ee872 100644
94499--- a/net/ipv4/ip_gre.c
94500+++ b/net/ipv4/ip_gre.c
94501@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
94502 module_param(log_ecn_error, bool, 0644);
94503 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
94504
94505-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
94506+static struct rtnl_link_ops ipgre_link_ops;
94507 static int ipgre_tunnel_init(struct net_device *dev);
94508
94509 static int ipgre_net_id __read_mostly;
94510@@ -731,7 +731,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
94511 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
94512 };
94513
94514-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
94515+static struct rtnl_link_ops ipgre_link_ops = {
94516 .kind = "gre",
94517 .maxtype = IFLA_GRE_MAX,
94518 .policy = ipgre_policy,
94519@@ -745,7 +745,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
94520 .fill_info = ipgre_fill_info,
94521 };
94522
94523-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
94524+static struct rtnl_link_ops ipgre_tap_ops = {
94525 .kind = "gretap",
94526 .maxtype = IFLA_GRE_MAX,
94527 .policy = ipgre_policy,
94528diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
94529index 23e6ab0..be67a57 100644
94530--- a/net/ipv4/ip_sockglue.c
94531+++ b/net/ipv4/ip_sockglue.c
94532@@ -1153,7 +1153,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
94533 len = min_t(unsigned int, len, opt->optlen);
94534 if (put_user(len, optlen))
94535 return -EFAULT;
94536- if (copy_to_user(optval, opt->__data, len))
94537+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
94538+ copy_to_user(optval, opt->__data, len))
94539 return -EFAULT;
94540 return 0;
94541 }
94542@@ -1284,7 +1285,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
94543 if (sk->sk_type != SOCK_STREAM)
94544 return -ENOPROTOOPT;
94545
94546- msg.msg_control = optval;
94547+ msg.msg_control = (void __force_kernel *)optval;
94548 msg.msg_controllen = len;
94549 msg.msg_flags = flags;
94550
94551diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
94552index 26847e1..75d2d2f 100644
94553--- a/net/ipv4/ip_vti.c
94554+++ b/net/ipv4/ip_vti.c
94555@@ -44,7 +44,7 @@
94556 #include <net/net_namespace.h>
94557 #include <net/netns/generic.h>
94558
94559-static struct rtnl_link_ops vti_link_ops __read_mostly;
94560+static struct rtnl_link_ops vti_link_ops;
94561
94562 static int vti_net_id __read_mostly;
94563 static int vti_tunnel_init(struct net_device *dev);
94564@@ -425,7 +425,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
94565 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
94566 };
94567
94568-static struct rtnl_link_ops vti_link_ops __read_mostly = {
94569+static struct rtnl_link_ops vti_link_ops = {
94570 .kind = "vti",
94571 .maxtype = IFLA_VTI_MAX,
94572 .policy = vti_policy,
94573diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
94574index efa1138..20dbba0 100644
94575--- a/net/ipv4/ipconfig.c
94576+++ b/net/ipv4/ipconfig.c
94577@@ -334,7 +334,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
94578
94579 mm_segment_t oldfs = get_fs();
94580 set_fs(get_ds());
94581- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
94582+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
94583 set_fs(oldfs);
94584 return res;
94585 }
94586@@ -345,7 +345,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
94587
94588 mm_segment_t oldfs = get_fs();
94589 set_fs(get_ds());
94590- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
94591+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
94592 set_fs(oldfs);
94593 return res;
94594 }
94595@@ -356,7 +356,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
94596
94597 mm_segment_t oldfs = get_fs();
94598 set_fs(get_ds());
94599- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
94600+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
94601 set_fs(oldfs);
94602 return res;
94603 }
94604diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
94605index 7f80fb4..b0328f6 100644
94606--- a/net/ipv4/ipip.c
94607+++ b/net/ipv4/ipip.c
94608@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
94609 static int ipip_net_id __read_mostly;
94610
94611 static int ipip_tunnel_init(struct net_device *dev);
94612-static struct rtnl_link_ops ipip_link_ops __read_mostly;
94613+static struct rtnl_link_ops ipip_link_ops;
94614
94615 static int ipip_err(struct sk_buff *skb, u32 info)
94616 {
94617@@ -408,7 +408,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
94618 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
94619 };
94620
94621-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
94622+static struct rtnl_link_ops ipip_link_ops = {
94623 .kind = "ipip",
94624 .maxtype = IFLA_IPTUN_MAX,
94625 .policy = ipip_policy,
94626diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
94627index 85a4f21..1beb1f5 100644
94628--- a/net/ipv4/netfilter/arp_tables.c
94629+++ b/net/ipv4/netfilter/arp_tables.c
94630@@ -880,14 +880,14 @@ static int compat_table_info(const struct xt_table_info *info,
94631 #endif
94632
94633 static int get_info(struct net *net, void __user *user,
94634- const int *len, int compat)
94635+ int len, int compat)
94636 {
94637 char name[XT_TABLE_MAXNAMELEN];
94638 struct xt_table *t;
94639 int ret;
94640
94641- if (*len != sizeof(struct arpt_getinfo)) {
94642- duprintf("length %u != %Zu\n", *len,
94643+ if (len != sizeof(struct arpt_getinfo)) {
94644+ duprintf("length %u != %Zu\n", len,
94645 sizeof(struct arpt_getinfo));
94646 return -EINVAL;
94647 }
94648@@ -924,7 +924,7 @@ static int get_info(struct net *net, void __user *user,
94649 info.size = private->size;
94650 strcpy(info.name, name);
94651
94652- if (copy_to_user(user, &info, *len) != 0)
94653+ if (copy_to_user(user, &info, len) != 0)
94654 ret = -EFAULT;
94655 else
94656 ret = 0;
94657@@ -1683,7 +1683,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
94658
94659 switch (cmd) {
94660 case ARPT_SO_GET_INFO:
94661- ret = get_info(sock_net(sk), user, len, 1);
94662+ ret = get_info(sock_net(sk), user, *len, 1);
94663 break;
94664 case ARPT_SO_GET_ENTRIES:
94665 ret = compat_get_entries(sock_net(sk), user, len);
94666@@ -1728,7 +1728,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
94667
94668 switch (cmd) {
94669 case ARPT_SO_GET_INFO:
94670- ret = get_info(sock_net(sk), user, len, 0);
94671+ ret = get_info(sock_net(sk), user, *len, 0);
94672 break;
94673
94674 case ARPT_SO_GET_ENTRIES:
94675diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
94676index d23118d..6ad7277 100644
94677--- a/net/ipv4/netfilter/ip_tables.c
94678+++ b/net/ipv4/netfilter/ip_tables.c
94679@@ -1068,14 +1068,14 @@ static int compat_table_info(const struct xt_table_info *info,
94680 #endif
94681
94682 static int get_info(struct net *net, void __user *user,
94683- const int *len, int compat)
94684+ int len, int compat)
94685 {
94686 char name[XT_TABLE_MAXNAMELEN];
94687 struct xt_table *t;
94688 int ret;
94689
94690- if (*len != sizeof(struct ipt_getinfo)) {
94691- duprintf("length %u != %zu\n", *len,
94692+ if (len != sizeof(struct ipt_getinfo)) {
94693+ duprintf("length %u != %zu\n", len,
94694 sizeof(struct ipt_getinfo));
94695 return -EINVAL;
94696 }
94697@@ -1112,7 +1112,7 @@ static int get_info(struct net *net, void __user *user,
94698 info.size = private->size;
94699 strcpy(info.name, name);
94700
94701- if (copy_to_user(user, &info, *len) != 0)
94702+ if (copy_to_user(user, &info, len) != 0)
94703 ret = -EFAULT;
94704 else
94705 ret = 0;
94706@@ -1966,7 +1966,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
94707
94708 switch (cmd) {
94709 case IPT_SO_GET_INFO:
94710- ret = get_info(sock_net(sk), user, len, 1);
94711+ ret = get_info(sock_net(sk), user, *len, 1);
94712 break;
94713 case IPT_SO_GET_ENTRIES:
94714 ret = compat_get_entries(sock_net(sk), user, len);
94715@@ -2013,7 +2013,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
94716
94717 switch (cmd) {
94718 case IPT_SO_GET_INFO:
94719- ret = get_info(sock_net(sk), user, len, 0);
94720+ ret = get_info(sock_net(sk), user, *len, 0);
94721 break;
94722
94723 case IPT_SO_GET_ENTRIES:
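
arp_tables and ip_tables receive the same treatment: get_info used to take a pointer to the length and dereference it once to validate and again to copy, and the patch passes the value instead so the validated and used lengths cannot diverge; this reads as read-once hygiene, and possibly as help for the size-overflow plugin's tracking of copy lengths (the motivation is my inference, not stated in the patch). The generic read-once idiom:

    #include <string.h>

    /* Capture a length into a local before validating, then use only
     * the local for the copy. */
    static int get_info(char *dst, const char *info, size_t infolen,
                        const volatile size_t *lenp)
    {
        size_t len = *lenp;        /* single fetch */
        if (len != infolen)
            return -1;
        memcpy(dst, info, len);    /* cannot be re-widened behind our back */
        return 0;
    }

    int main(void)
    {
        char dst[8];
        size_t len = 4;
        return get_info(dst, "abcd", 4, &len);
    }
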
94724diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
94725index cbc2215..9cb993c 100644
94726--- a/net/ipv4/netfilter/ipt_ULOG.c
94727+++ b/net/ipv4/netfilter/ipt_ULOG.c
94728@@ -220,6 +220,7 @@ static void ipt_ulog_packet(struct net *net,
94729 ub->qlen++;
94730
94731 pm = nlmsg_data(nlh);
94732+ memset(pm, 0, sizeof(*pm));
94733
94734 /* We might not have a timestamp, get one */
94735 if (skb->tstamp.tv64 == 0)
94736@@ -238,8 +239,6 @@ static void ipt_ulog_packet(struct net *net,
94737 }
94738 else if (loginfo->prefix[0] != '\0')
94739 strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));
94740- else
94741- *(pm->prefix) = '\0';
94742
94743 if (in && in->hard_header_len > 0 &&
94744 skb->mac_header != skb->network_header &&
94745@@ -251,13 +250,9 @@ static void ipt_ulog_packet(struct net *net,
94746
94747 if (in)
94748 strncpy(pm->indev_name, in->name, sizeof(pm->indev_name));
94749- else
94750- pm->indev_name[0] = '\0';
94751
94752 if (out)
94753 strncpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
94754- else
94755- pm->outdev_name[0] = '\0';
94756
94757 /* copy_len <= skb->len, so can't fail. */
94758 if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0)
94759diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
94760index c482f7c..2784262 100644
94761--- a/net/ipv4/ping.c
94762+++ b/net/ipv4/ping.c
94763@@ -55,7 +55,7 @@
94764
94765
94766 struct ping_table ping_table;
94767-struct pingv6_ops pingv6_ops;
94768+struct pingv6_ops *pingv6_ops;
94769 EXPORT_SYMBOL_GPL(pingv6_ops);
94770
94771 static u16 ping_port_rover;
94772@@ -335,7 +335,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
94773 return -ENODEV;
94774 }
94775 }
94776- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
94777+ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
94778 scoped);
94779 rcu_read_unlock();
94780
94781@@ -541,7 +541,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
94782 }
94783 #if IS_ENABLED(CONFIG_IPV6)
94784 } else if (skb->protocol == htons(ETH_P_IPV6)) {
94785- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
94786+ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
94787 #endif
94788 }
94789
94790@@ -559,7 +559,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
94791 info, (u8 *)icmph);
94792 #if IS_ENABLED(CONFIG_IPV6)
94793 } else if (family == AF_INET6) {
94794- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
94795+ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
94796 info, (u8 *)icmph);
94797 #endif
94798 }
94799@@ -841,7 +841,7 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
94800 return ip_recv_error(sk, msg, len, addr_len);
94801 #if IS_ENABLED(CONFIG_IPV6)
94802 } else if (family == AF_INET6) {
94803- return pingv6_ops.ipv6_recv_error(sk, msg, len,
94804+ return pingv6_ops->ipv6_recv_error(sk, msg, len,
94805 addr_len);
94806 #endif
94807 }
94808@@ -900,7 +900,7 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
94809 }
94810
94811 if (inet6_sk(sk)->rxopt.all)
94812- pingv6_ops.ip6_datagram_recv_ctl(sk, msg, skb);
94813+ pingv6_ops->ip6_datagram_recv_ctl(sk, msg, skb);
94814 #endif
94815 } else {
94816 BUG();
94817@@ -1090,7 +1090,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
94818 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
94819 0, sock_i_ino(sp),
94820 atomic_read(&sp->sk_refcnt), sp,
94821- atomic_read(&sp->sk_drops), len);
94822+ atomic_read_unchecked(&sp->sk_drops), len);
94823 }
94824
94825 static int ping_v4_seq_show(struct seq_file *seq, void *v)
94826diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
94827index 7d3db78..9fd511d 100644
94828--- a/net/ipv4/raw.c
94829+++ b/net/ipv4/raw.c
94830@@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
94831 int raw_rcv(struct sock *sk, struct sk_buff *skb)
94832 {
94833 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
94834- atomic_inc(&sk->sk_drops);
94835+ atomic_inc_unchecked(&sk->sk_drops);
94836 kfree_skb(skb);
94837 return NET_RX_DROP;
94838 }
94839@@ -746,16 +746,20 @@ static int raw_init(struct sock *sk)
94840
94841 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
94842 {
94843+ struct icmp_filter filter;
94844+
94845 if (optlen > sizeof(struct icmp_filter))
94846 optlen = sizeof(struct icmp_filter);
94847- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
94848+ if (copy_from_user(&filter, optval, optlen))
94849 return -EFAULT;
94850+ raw_sk(sk)->filter = filter;
94851 return 0;
94852 }
94853
94854 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
94855 {
94856 int len, ret = -EFAULT;
94857+ struct icmp_filter filter;
94858
94859 if (get_user(len, optlen))
94860 goto out;
94861@@ -765,8 +769,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
94862 if (len > sizeof(struct icmp_filter))
94863 len = sizeof(struct icmp_filter);
94864 ret = -EFAULT;
94865- if (put_user(len, optlen) ||
94866- copy_to_user(optval, &raw_sk(sk)->filter, len))
94867+ filter = raw_sk(sk)->filter;
94868+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
94869 goto out;
94870 ret = 0;
94871 out: return ret;
94872@@ -995,7 +999,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
94873 0, 0L, 0,
94874 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
94875 0, sock_i_ino(sp),
94876- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
94877+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
94878 }
94879
94880 static int raw_seq_show(struct seq_file *seq, void *v)
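
raw_seticmpfilter and raw_geticmpfilter show another recurring move: rather than letting copy_from_user/copy_to_user operate directly on raw_sk(sk)->filter inside the sock object, the data is bounced through an exactly sized stack struct, which keeps the user-copy window to sizeof(struct icmp_filter) and plays well with the USERCOPY slab checks. The write side, as a self-contained sketch with a stubbed user copy:

    #include <string.h>

    struct icmp_filter { unsigned int data; };
    struct raw_sock { int other_state; struct icmp_filter filter; };

    /* copy_from_user stand-in for the sketch */
    static int copy_from_user_stub(void *dst, const void *src, size_t n)
    {
        memcpy(dst, src, n);
        return 0;
    }

    static int seticmpfilter(struct raw_sock *rs, const void *optval,
                             size_t optlen)
    {
        struct icmp_filter filter;          /* stack bounce buffer */

        if (optlen > sizeof(filter))
            optlen = sizeof(filter);
        if (copy_from_user_stub(&filter, optval, optlen))
            return -14;                     /* -EFAULT */
        rs->filter = filter;                /* commit to the sock last */
        return 0;
    }

    int main(void)
    {
        struct raw_sock rs = { 0 };
        struct icmp_filter f = { 0xff };
        return seticmpfilter(&rs, &f, sizeof(f));
    }
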
94881diff --git a/net/ipv4/route.c b/net/ipv4/route.c
94882index 62290b5..f0d944f 100644
94883--- a/net/ipv4/route.c
94884+++ b/net/ipv4/route.c
94885@@ -2617,34 +2617,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
94886 .maxlen = sizeof(int),
94887 .mode = 0200,
94888 .proc_handler = ipv4_sysctl_rtcache_flush,
94889+ .extra1 = &init_net,
94890 },
94891 { },
94892 };
94893
94894 static __net_init int sysctl_route_net_init(struct net *net)
94895 {
94896- struct ctl_table *tbl;
94897+ ctl_table_no_const *tbl = NULL;
94898
94899- tbl = ipv4_route_flush_table;
94900 if (!net_eq(net, &init_net)) {
94901- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
94902+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
94903 if (tbl == NULL)
94904 goto err_dup;
94905
94906 /* Don't export sysctls to unprivileged users */
94907 if (net->user_ns != &init_user_ns)
94908 tbl[0].procname = NULL;
94909- }
94910- tbl[0].extra1 = net;
94911+ tbl[0].extra1 = net;
94912+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
94913+ } else
94914+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
94915
94916- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
94917 if (net->ipv4.route_hdr == NULL)
94918 goto err_reg;
94919 return 0;
94920
94921 err_reg:
94922- if (tbl != ipv4_route_flush_table)
94923- kfree(tbl);
94924+ kfree(tbl);
94925 err_dup:
94926 return -ENOMEM;
94927 }
94928@@ -2667,8 +2667,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
94929
94930 static __net_init int rt_genid_init(struct net *net)
94931 {
94932- atomic_set(&net->ipv4.rt_genid, 0);
94933- atomic_set(&net->fnhe_genid, 0);
94934+ atomic_set_unchecked(&net->ipv4.rt_genid, 0);
94935+ atomic_set_unchecked(&net->fnhe_genid, 0);
94936 get_random_bytes(&net->ipv4.dev_addr_genid,
94937 sizeof(net->ipv4.dev_addr_genid));
94938 return 0;
94939diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
94940index 540279f..9855b16 100644
94941--- a/net/ipv4/sysctl_net_ipv4.c
94942+++ b/net/ipv4/sysctl_net_ipv4.c
94943@@ -58,7 +58,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
94944 {
94945 int ret;
94946 int range[2];
94947- struct ctl_table tmp = {
94948+ ctl_table_no_const tmp = {
94949 .data = &range,
94950 .maxlen = sizeof(range),
94951 .mode = table->mode,
94952@@ -111,7 +111,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
94953 int ret;
94954 gid_t urange[2];
94955 kgid_t low, high;
94956- struct ctl_table tmp = {
94957+ ctl_table_no_const tmp = {
94958 .data = &urange,
94959 .maxlen = sizeof(urange),
94960 .mode = table->mode,
94961@@ -142,7 +142,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
94962 void __user *buffer, size_t *lenp, loff_t *ppos)
94963 {
94964 char val[TCP_CA_NAME_MAX];
94965- struct ctl_table tbl = {
94966+ ctl_table_no_const tbl = {
94967 .data = val,
94968 .maxlen = TCP_CA_NAME_MAX,
94969 };
94970@@ -161,7 +161,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
94971 void __user *buffer, size_t *lenp,
94972 loff_t *ppos)
94973 {
94974- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
94975+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
94976 int ret;
94977
94978 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
94979@@ -178,7 +178,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
94980 void __user *buffer, size_t *lenp,
94981 loff_t *ppos)
94982 {
94983- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
94984+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
94985 int ret;
94986
94987 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
94988@@ -204,15 +204,17 @@ static int ipv4_tcp_mem(struct ctl_table *ctl, int write,
94989 struct mem_cgroup *memcg;
94990 #endif
94991
94992- struct ctl_table tmp = {
94993+ ctl_table_no_const tmp = {
94994 .data = &vec,
94995 .maxlen = sizeof(vec),
94996 .mode = ctl->mode,
94997 };
94998
94999 if (!write) {
95000- ctl->data = &net->ipv4.sysctl_tcp_mem;
95001- return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
95002+ ctl_table_no_const tcp_mem = *ctl;
95003+
95004+ tcp_mem.data = &net->ipv4.sysctl_tcp_mem;
95005+ return proc_doulongvec_minmax(&tcp_mem, write, buffer, lenp, ppos);
95006 }
95007
95008 ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
95009@@ -240,7 +242,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
95010 void __user *buffer, size_t *lenp,
95011 loff_t *ppos)
95012 {
95013- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
95014+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
95015 struct tcp_fastopen_context *ctxt;
95016 int ret;
95017 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
95018@@ -483,7 +485,7 @@ static struct ctl_table ipv4_table[] = {
95019 },
95020 {
95021 .procname = "ip_local_reserved_ports",
95022- .data = NULL, /* initialized in sysctl_ipv4_init */
95023+ .data = sysctl_local_reserved_ports,
95024 .maxlen = 65536,
95025 .mode = 0644,
95026 .proc_handler = proc_do_large_bitmap,
95027@@ -864,11 +866,10 @@ static struct ctl_table ipv4_net_table[] = {
95028
95029 static __net_init int ipv4_sysctl_init_net(struct net *net)
95030 {
95031- struct ctl_table *table;
95032+ ctl_table_no_const *table = NULL;
95033
95034- table = ipv4_net_table;
95035 if (!net_eq(net, &init_net)) {
95036- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
95037+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
95038 if (table == NULL)
95039 goto err_alloc;
95040
95041@@ -903,15 +904,17 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
95042
95043 tcp_init_mem(net);
95044
95045- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
95046+ if (!net_eq(net, &init_net))
95047+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
95048+ else
95049+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
95050 if (net->ipv4.ipv4_hdr == NULL)
95051 goto err_reg;
95052
95053 return 0;
95054
95055 err_reg:
95056- if (!net_eq(net, &init_net))
95057- kfree(table);
95058+ kfree(table);
95059 err_alloc:
95060 return -ENOMEM;
95061 }
95062@@ -933,16 +936,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
95063 static __init int sysctl_ipv4_init(void)
95064 {
95065 struct ctl_table_header *hdr;
95066- struct ctl_table *i;
95067-
95068- for (i = ipv4_table; i->procname; i++) {
95069- if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
95070- i->data = sysctl_local_reserved_ports;
95071- break;
95072- }
95073- }
95074- if (!i->procname)
95075- return -EINVAL;
95076
95077 hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
95078 if (hdr == NULL)
95079diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
95080index 068c8fb..a755c52 100644
95081--- a/net/ipv4/tcp_input.c
95082+++ b/net/ipv4/tcp_input.c
95083@@ -4435,7 +4435,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
95084 * simplifies code)
95085 */
95086 static void
95087-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
95088+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
95089 struct sk_buff *head, struct sk_buff *tail,
95090 u32 start, u32 end)
95091 {
95092@@ -5520,6 +5520,7 @@ discard:
95093 tcp_paws_reject(&tp->rx_opt, 0))
95094 goto discard_and_undo;
95095
95096+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
95097 if (th->syn) {
95098 /* We see SYN without ACK. It is attempt of
95099 * simultaneous connect with crossed SYNs.
95100@@ -5570,6 +5571,7 @@ discard:
95101 goto discard;
95102 #endif
95103 }
95104+#endif
95105 /* "fifth, if neither of the SYN or RST bits is set then
95106 * drop the segment and return."
95107 */
95108@@ -5616,7 +5618,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
95109 goto discard;
95110
95111 if (th->syn) {
95112- if (th->fin)
95113+ if (th->fin || th->urg || th->psh)
95114 goto discard;
95115 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
95116 return 1;
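
In tcp_rcv_state_process the listener now discards SYNs that also carry URG or PSH, not just SYN+FIN; such flag combinations are chiefly used by scanners for OS fingerprinting, so extending the existing SYN+FIN discard costs nothing legitimate (the NO_SIMULT_CONNECT block above similarly compiles out simultaneous-open handling). A toy version of the check:

    #include <stdio.h>

    struct tcphdr_flags { unsigned syn:1, fin:1, urg:1, psh:1; };

    /* A listener drops SYNs carrying exotic companion flags. */
    static int accept_syn(struct tcphdr_flags th)
    {
        if (th.syn && (th.fin || th.urg || th.psh))
            return 0;              /* discard: likely a scan probe */
        return th.syn;             /* plain SYN proceeds to conn_request */
    }

    int main(void)
    {
        struct tcphdr_flags xmas = { 1, 1, 1, 1 }, plain = { 1, 0, 0, 0 };
        printf("%d %d\n", accept_syn(xmas), accept_syn(plain));
        return 0;
    }
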
95117diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
95118index 5031f68..91569e2 100644
95119--- a/net/ipv4/tcp_ipv4.c
95120+++ b/net/ipv4/tcp_ipv4.c
95121@@ -91,6 +91,10 @@ int sysctl_tcp_low_latency __read_mostly;
95122 EXPORT_SYMBOL(sysctl_tcp_low_latency);
95123
95124
95125+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95126+extern int grsec_enable_blackhole;
95127+#endif
95128+
95129 #ifdef CONFIG_TCP_MD5SIG
95130 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
95131 __be32 daddr, __be32 saddr, const struct tcphdr *th);
95132@@ -1829,6 +1833,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
95133 return 0;
95134
95135 reset:
95136+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95137+ if (!grsec_enable_blackhole)
95138+#endif
95139 tcp_v4_send_reset(rsk, skb);
95140 discard:
95141 kfree_skb(skb);
95142@@ -1974,12 +1981,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
95143 TCP_SKB_CB(skb)->sacked = 0;
95144
95145 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
95146- if (!sk)
95147+ if (!sk) {
95148+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95149+ ret = 1;
95150+#endif
95151 goto no_tcp_socket;
95152-
95153+ }
95154 process:
95155- if (sk->sk_state == TCP_TIME_WAIT)
95156+ if (sk->sk_state == TCP_TIME_WAIT) {
95157+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95158+ ret = 2;
95159+#endif
95160 goto do_time_wait;
95161+ }
95162
95163 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
95164 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
95165@@ -2033,6 +2047,10 @@ csum_error:
95166 bad_packet:
95167 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
95168 } else {
95169+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95170+ if (!grsec_enable_blackhole || (ret == 1 &&
95171+ (skb->dev->flags & IFF_LOOPBACK)))
95172+#endif
95173 tcp_v4_send_reset(NULL, skb);
95174 }
95175
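
The TCP changes wire in GRKERNSEC_BLACKHOLE: when the sysctl is enabled, segments that would normally draw an RST, such as those aimed at closed ports, are dropped silently so port scanners get no feedback, and the ret bookkeeping records why the lookup failed so the loopback exemption applies only in the no-socket case. The decision, reduced to its skeleton:

    #include <stdbool.h>
    #include <stdio.h>

    static bool blackhole_enabled = true;

    /* Sketch of the drop-instead-of-reset decision in tcp_v4_rcv(). */
    static void on_bad_segment(bool no_socket, bool from_loopback)
    {
        if (!blackhole_enabled || (no_socket && from_loopback))
            puts("send RST");      /* normal RFC 793 behaviour */
        else
            puts("drop silently"); /* scanner learns nothing */
    }

    int main(void)
    {
        on_bad_segment(true, false);   /* remote scan: silence */
        on_bad_segment(true, true);    /* local tools still get RSTs */
        return 0;
    }
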
95176diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
95177index 58a3e69..7f0626e 100644
95178--- a/net/ipv4/tcp_minisocks.c
95179+++ b/net/ipv4/tcp_minisocks.c
95180@@ -27,6 +27,10 @@
95181 #include <net/inet_common.h>
95182 #include <net/xfrm.h>
95183
95184+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95185+extern int grsec_enable_blackhole;
95186+#endif
95187+
95188 int sysctl_tcp_syncookies __read_mostly = 1;
95189 EXPORT_SYMBOL(sysctl_tcp_syncookies);
95190
95191@@ -711,7 +715,10 @@ embryonic_reset:
95192 * avoid becoming vulnerable to outside attack aiming at
95193 * resetting legit local connections.
95194 */
95195- req->rsk_ops->send_reset(sk, skb);
95196+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95197+ if (!grsec_enable_blackhole)
95198+#endif
95199+ req->rsk_ops->send_reset(sk, skb);
95200 } else if (fastopen) { /* received a valid RST pkt */
95201 reqsk_fastopen_remove(sk, req, true);
95202 tcp_reset(sk);
95203diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
95204index 611beab..c4b6e1d 100644
95205--- a/net/ipv4/tcp_probe.c
95206+++ b/net/ipv4/tcp_probe.c
95207@@ -245,7 +245,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
95208 if (cnt + width >= len)
95209 break;
95210
95211- if (copy_to_user(buf + cnt, tbuf, width))
95212+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
95213 return -EFAULT;
95214 cnt += width;
95215 }
95216diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
95217index 4b85e6f..22f9ac9 100644
95218--- a/net/ipv4/tcp_timer.c
95219+++ b/net/ipv4/tcp_timer.c
95220@@ -22,6 +22,10 @@
95221 #include <linux/gfp.h>
95222 #include <net/tcp.h>
95223
95224+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95225+extern int grsec_lastack_retries;
95226+#endif
95227+
95228 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
95229 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
95230 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
95231@@ -185,6 +189,13 @@ static int tcp_write_timeout(struct sock *sk)
95232 }
95233 }
95234
95235+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95236+ if ((sk->sk_state == TCP_LAST_ACK) &&
95237+ (grsec_lastack_retries > 0) &&
95238+ (grsec_lastack_retries < retry_until))
95239+ retry_until = grsec_lastack_retries;
95240+#endif
95241+
95242 if (retransmits_timed_out(sk, retry_until,
95243 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
95244 /* Has it gone just too far? */
95245diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
95246index 5e2c2f1..6473c22 100644
95247--- a/net/ipv4/udp.c
95248+++ b/net/ipv4/udp.c
95249@@ -87,6 +87,7 @@
95250 #include <linux/types.h>
95251 #include <linux/fcntl.h>
95252 #include <linux/module.h>
95253+#include <linux/security.h>
95254 #include <linux/socket.h>
95255 #include <linux/sockios.h>
95256 #include <linux/igmp.h>
95257@@ -112,6 +113,10 @@
95258 #include <net/busy_poll.h>
95259 #include "udp_impl.h"
95260
95261+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95262+extern int grsec_enable_blackhole;
95263+#endif
95264+
95265 struct udp_table udp_table __read_mostly;
95266 EXPORT_SYMBOL(udp_table);
95267
95268@@ -595,6 +600,9 @@ found:
95269 return s;
95270 }
95271
95272+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
95273+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
95274+
95275 /*
95276 * This routine is called by the ICMP module when it gets some
95277 * sort of error condition. If err < 0 then the socket should
95278@@ -892,9 +900,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
95279 dport = usin->sin_port;
95280 if (dport == 0)
95281 return -EINVAL;
95282+
95283+ err = gr_search_udp_sendmsg(sk, usin);
95284+ if (err)
95285+ return err;
95286 } else {
95287 if (sk->sk_state != TCP_ESTABLISHED)
95288 return -EDESTADDRREQ;
95289+
95290+ err = gr_search_udp_sendmsg(sk, NULL);
95291+ if (err)
95292+ return err;
95293+
95294 daddr = inet->inet_daddr;
95295 dport = inet->inet_dport;
95296 /* Open fast path for connected socket.
95297@@ -1141,7 +1158,7 @@ static unsigned int first_packet_length(struct sock *sk)
95298 IS_UDPLITE(sk));
95299 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
95300 IS_UDPLITE(sk));
95301- atomic_inc(&sk->sk_drops);
95302+ atomic_inc_unchecked(&sk->sk_drops);
95303 __skb_unlink(skb, rcvq);
95304 __skb_queue_tail(&list_kill, skb);
95305 }
95306@@ -1221,6 +1238,10 @@ try_again:
95307 if (!skb)
95308 goto out;
95309
95310+ err = gr_search_udp_recvmsg(sk, skb);
95311+ if (err)
95312+ goto out_free;
95313+
95314 ulen = skb->len - sizeof(struct udphdr);
95315 copied = len;
95316 if (copied > ulen)
95317@@ -1254,7 +1275,7 @@ try_again:
95318 if (unlikely(err)) {
95319 trace_kfree_skb(skb, udp_recvmsg);
95320 if (!peeked) {
95321- atomic_inc(&sk->sk_drops);
95322+ atomic_inc_unchecked(&sk->sk_drops);
95323 UDP_INC_STATS_USER(sock_net(sk),
95324 UDP_MIB_INERRORS, is_udplite);
95325 }
95326@@ -1542,7 +1563,7 @@ csum_error:
95327 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
95328 drop:
95329 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
95330- atomic_inc(&sk->sk_drops);
95331+ atomic_inc_unchecked(&sk->sk_drops);
95332 kfree_skb(skb);
95333 return -1;
95334 }
95335@@ -1561,7 +1582,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
95336 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
95337
95338 if (!skb1) {
95339- atomic_inc(&sk->sk_drops);
95340+ atomic_inc_unchecked(&sk->sk_drops);
95341 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
95342 IS_UDPLITE(sk));
95343 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
95344@@ -1733,6 +1754,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
95345 goto csum_error;
95346
95347 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
95348+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95349+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
95350+#endif
95351 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
95352
95353 /*
95354@@ -2165,7 +2189,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
95355 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
95356 0, sock_i_ino(sp),
95357 atomic_read(&sp->sk_refcnt), sp,
95358- atomic_read(&sp->sk_drops), len);
95359+ atomic_read_unchecked(&sp->sk_drops), len);
95360 }
95361
95362 int udp4_seq_show(struct seq_file *seq, void *v)
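
Two recurring patterns appear in the udp.c hunks above. First, every sk_drops update moves to the *_unchecked atomic variants: under PAX_REFCOUNT, plain atomic_t gets overflow detection meant for reference counts, and statistics counters such as sk_drops opt out of it via atomic_unchecked_t. Second, the blackhole guard suppresses ICMP port-unreachable replies unless the option is off or the packet arrived on loopback. A compact sketch of the guard (grsec_enable_blackhole and the IFF_LOOPBACK value are stand-ins here):

	/* Sketch: reply with port-unreachable only when allowed. */
	#define IFF_LOOPBACK 0x8		/* as in linux/if.h */
	static int grsec_enable_blackhole;	/* sysctl-controlled in grsecurity */

	static int may_send_port_unreach(unsigned int dev_flags)
	{
		/* Loopback is exempt so local diagnostics keep working. */
		return !grsec_enable_blackhole || (dev_flags & IFF_LOOPBACK);
	}
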
95363diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
95364index adf9983..8e45d0a 100644
95365--- a/net/ipv4/xfrm4_policy.c
95366+++ b/net/ipv4/xfrm4_policy.c
95367@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
95368 fl4->flowi4_tos = iph->tos;
95369 }
95370
95371-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
95372+static int xfrm4_garbage_collect(struct dst_ops *ops)
95373 {
95374 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
95375
95376- xfrm4_policy_afinfo.garbage_collect(net);
95377+ xfrm_garbage_collect_deferred(net);
95378 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
95379 }
95380
95381@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
95382
95383 static int __net_init xfrm4_net_init(struct net *net)
95384 {
95385- struct ctl_table *table;
95386+ ctl_table_no_const *table = NULL;
95387 struct ctl_table_header *hdr;
95388
95389- table = xfrm4_policy_table;
95390 if (!net_eq(net, &init_net)) {
95391- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
95392+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
95393 if (!table)
95394 goto err_alloc;
95395
95396 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
95397- }
95398-
95399- hdr = register_net_sysctl(net, "net/ipv4", table);
95400+ hdr = register_net_sysctl(net, "net/ipv4", table);
95401+ } else
95402+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
95403 if (!hdr)
95404 goto err_reg;
95405
95406@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
95407 return 0;
95408
95409 err_reg:
95410- if (!net_eq(net, &init_net))
95411- kfree(table);
95412+ kfree(table);
95413 err_alloc:
95414 return -ENOMEM;
95415 }
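
The xfrm4_net_init() rewrite above is the template for a change repeated below for nf_conntrack_reasm.c, the IPv6 reassembly.c, and xfrm6_policy.c (the conntrack sysctl files get the simpler ctl_table_no_const substitution): the shared template table becomes effectively read-only, init_net registers it directly, and only other namespaces receive a writable kmemdup() copy. ctl_table_no_const is a grsecurity-introduced typedef for a non-const ctl_table. A hedged sketch of the shape, with example_template and net->example_value as made-up names:

	/* Sketch of the per-netns sysctl pattern used throughout this patch. */
	static int __net_init example_net_init(struct net *net)
	{
		ctl_table_no_const *table = NULL;  /* writable copy, non-init nets only */
		struct ctl_table_header *hdr;

		if (!net_eq(net, &init_net)) {
			table = kmemdup(example_template, sizeof(example_template),
					GFP_KERNEL);
			if (!table)
				return -ENOMEM;
			table[0].data = &net->example_value;
			hdr = register_net_sysctl(net, "net/example", table);
		} else
			hdr = register_net_sysctl(net, "net/example", example_template);

		if (!hdr) {
			kfree(table);	/* kfree(NULL) is a no-op, hence no net_eq check */
			return -ENOMEM;
		}
		return 0;
	}
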
95416diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
95417index cd3fb30..b7dfef7 100644
95418--- a/net/ipv6/addrconf.c
95419+++ b/net/ipv6/addrconf.c
95420@@ -586,7 +586,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
95421 idx = 0;
95422 head = &net->dev_index_head[h];
95423 rcu_read_lock();
95424- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
95425+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
95426 net->dev_base_seq;
95427 hlist_for_each_entry_rcu(dev, head, index_hlist) {
95428 if (idx < s_idx)
95429@@ -2364,7 +2364,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
95430 p.iph.ihl = 5;
95431 p.iph.protocol = IPPROTO_IPV6;
95432 p.iph.ttl = 64;
95433- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
95434+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
95435
95436 if (ops->ndo_do_ioctl) {
95437 mm_segment_t oldfs = get_fs();
95438@@ -3977,7 +3977,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
95439 s_ip_idx = ip_idx = cb->args[2];
95440
95441 rcu_read_lock();
95442- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
95443+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
95444 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
95445 idx = 0;
95446 head = &net->dev_index_head[h];
95447@@ -4603,7 +4603,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
95448 dst_free(&ifp->rt->dst);
95449 break;
95450 }
95451- atomic_inc(&net->ipv6.dev_addr_genid);
95452+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
95453 rt_genid_bump_ipv6(net);
95454 }
95455
95456@@ -4624,7 +4624,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
95457 int *valp = ctl->data;
95458 int val = *valp;
95459 loff_t pos = *ppos;
95460- struct ctl_table lctl;
95461+ ctl_table_no_const lctl;
95462 int ret;
95463
95464 /*
95465@@ -4709,7 +4709,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
95466 int *valp = ctl->data;
95467 int val = *valp;
95468 loff_t pos = *ppos;
95469- struct ctl_table lctl;
95470+ ctl_table_no_const lctl;
95471 int ret;
95472
95473 /*
95474diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
95475index 8132b44..b8eca70 100644
95476--- a/net/ipv6/af_inet6.c
95477+++ b/net/ipv6/af_inet6.c
95478@@ -767,7 +767,7 @@ static int __net_init inet6_net_init(struct net *net)
95479
95480 net->ipv6.sysctl.bindv6only = 0;
95481 net->ipv6.sysctl.icmpv6_time = 1*HZ;
95482- atomic_set(&net->ipv6.rt_genid, 0);
95483+ atomic_set_unchecked(&net->ipv6.rt_genid, 0);
95484
95485 err = ipv6_init_mibs(net);
95486 if (err)
95487diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
95488index c66c6df..f375d3c 100644
95489--- a/net/ipv6/datagram.c
95490+++ b/net/ipv6/datagram.c
95491@@ -908,5 +908,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
95492 0,
95493 sock_i_ino(sp),
95494 atomic_read(&sp->sk_refcnt), sp,
95495- atomic_read(&sp->sk_drops));
95496+ atomic_read_unchecked(&sp->sk_drops));
95497 }
95498diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
95499index eef8d94..cfa1852 100644
95500--- a/net/ipv6/icmp.c
95501+++ b/net/ipv6/icmp.c
95502@@ -997,7 +997,7 @@ struct ctl_table ipv6_icmp_table_template[] = {
95503
95504 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
95505 {
95506- struct ctl_table *table;
95507+ ctl_table_no_const *table;
95508
95509 table = kmemdup(ipv6_icmp_table_template,
95510 sizeof(ipv6_icmp_table_template),
95511diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
95512index bf4a9a0..e5f6ac5 100644
95513--- a/net/ipv6/ip6_gre.c
95514+++ b/net/ipv6/ip6_gre.c
95515@@ -74,7 +74,7 @@ struct ip6gre_net {
95516 struct net_device *fb_tunnel_dev;
95517 };
95518
95519-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
95520+static struct rtnl_link_ops ip6gre_link_ops;
95521 static int ip6gre_tunnel_init(struct net_device *dev);
95522 static void ip6gre_tunnel_setup(struct net_device *dev);
95523 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
95524@@ -1286,7 +1286,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
95525 }
95526
95527
95528-static struct inet6_protocol ip6gre_protocol __read_mostly = {
95529+static struct inet6_protocol ip6gre_protocol = {
95530 .handler = ip6gre_rcv,
95531 .err_handler = ip6gre_err,
95532 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
95533@@ -1622,7 +1622,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
95534 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
95535 };
95536
95537-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
95538+static struct rtnl_link_ops ip6gre_link_ops = {
95539 .kind = "ip6gre",
95540 .maxtype = IFLA_GRE_MAX,
95541 .policy = ip6gre_policy,
95542@@ -1635,7 +1635,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
95543 .fill_info = ip6gre_fill_info,
95544 };
95545
95546-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
95547+static struct rtnl_link_ops ip6gre_tap_ops = {
95548 .kind = "ip6gretap",
95549 .maxtype = IFLA_GRE_MAX,
95550 .policy = ip6gre_policy,
95551diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
95552index c1e11b5..568e633 100644
95553--- a/net/ipv6/ip6_tunnel.c
95554+++ b/net/ipv6/ip6_tunnel.c
95555@@ -89,7 +89,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
95556
95557 static int ip6_tnl_dev_init(struct net_device *dev);
95558 static void ip6_tnl_dev_setup(struct net_device *dev);
95559-static struct rtnl_link_ops ip6_link_ops __read_mostly;
95560+static struct rtnl_link_ops ip6_link_ops;
95561
95562 static int ip6_tnl_net_id __read_mostly;
95563 struct ip6_tnl_net {
95564@@ -1699,7 +1699,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
95565 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
95566 };
95567
95568-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
95569+static struct rtnl_link_ops ip6_link_ops = {
95570 .kind = "ip6tnl",
95571 .maxtype = IFLA_IPTUN_MAX,
95572 .policy = ip6_tnl_policy,
95573diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
95574index d1e2e8e..51c19ae 100644
95575--- a/net/ipv6/ipv6_sockglue.c
95576+++ b/net/ipv6/ipv6_sockglue.c
95577@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
95578 if (sk->sk_type != SOCK_STREAM)
95579 return -ENOPROTOOPT;
95580
95581- msg.msg_control = optval;
95582+ msg.msg_control = (void __force_kernel *)optval;
95583 msg.msg_controllen = len;
95584 msg.msg_flags = flags;
95585
95586diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
95587index 44400c2..8e11f52 100644
95588--- a/net/ipv6/netfilter/ip6_tables.c
95589+++ b/net/ipv6/netfilter/ip6_tables.c
95590@@ -1078,14 +1078,14 @@ static int compat_table_info(const struct xt_table_info *info,
95591 #endif
95592
95593 static int get_info(struct net *net, void __user *user,
95594- const int *len, int compat)
95595+ int len, int compat)
95596 {
95597 char name[XT_TABLE_MAXNAMELEN];
95598 struct xt_table *t;
95599 int ret;
95600
95601- if (*len != sizeof(struct ip6t_getinfo)) {
95602- duprintf("length %u != %zu\n", *len,
95603+ if (len != sizeof(struct ip6t_getinfo)) {
95604+ duprintf("length %u != %zu\n", len,
95605 sizeof(struct ip6t_getinfo));
95606 return -EINVAL;
95607 }
95608@@ -1122,7 +1122,7 @@ static int get_info(struct net *net, void __user *user,
95609 info.size = private->size;
95610 strcpy(info.name, name);
95611
95612- if (copy_to_user(user, &info, *len) != 0)
95613+ if (copy_to_user(user, &info, len) != 0)
95614 ret = -EFAULT;
95615 else
95616 ret = 0;
95617@@ -1976,7 +1976,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
95618
95619 switch (cmd) {
95620 case IP6T_SO_GET_INFO:
95621- ret = get_info(sock_net(sk), user, len, 1);
95622+ ret = get_info(sock_net(sk), user, *len, 1);
95623 break;
95624 case IP6T_SO_GET_ENTRIES:
95625 ret = compat_get_entries(sock_net(sk), user, len);
95626@@ -2023,7 +2023,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
95627
95628 switch (cmd) {
95629 case IP6T_SO_GET_INFO:
95630- ret = get_info(sock_net(sk), user, len, 0);
95631+ ret = get_info(sock_net(sk), user, *len, 0);
95632 break;
95633
95634 case IP6T_SO_GET_ENTRIES:
95635diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
95636index 253566a..9fa50c7 100644
95637--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
95638+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
95639@@ -90,12 +90,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
95640
95641 static int nf_ct_frag6_sysctl_register(struct net *net)
95642 {
95643- struct ctl_table *table;
95644+ ctl_table_no_const *table = NULL;
95645 struct ctl_table_header *hdr;
95646
95647- table = nf_ct_frag6_sysctl_table;
95648 if (!net_eq(net, &init_net)) {
95649- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
95650+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
95651 GFP_KERNEL);
95652 if (table == NULL)
95653 goto err_alloc;
95654@@ -103,9 +102,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
95655 table[0].data = &net->nf_frag.frags.timeout;
95656 table[1].data = &net->nf_frag.frags.low_thresh;
95657 table[2].data = &net->nf_frag.frags.high_thresh;
95658- }
95659-
95660- hdr = register_net_sysctl(net, "net/netfilter", table);
95661+ hdr = register_net_sysctl(net, "net/netfilter", table);
95662+ } else
95663+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
95664 if (hdr == NULL)
95665 goto err_reg;
95666
95667@@ -113,8 +112,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
95668 return 0;
95669
95670 err_reg:
95671- if (!net_eq(net, &init_net))
95672- kfree(table);
95673+ kfree(table);
95674 err_alloc:
95675 return -ENOMEM;
95676 }
95677diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
95678index 827f795..7e28e82 100644
95679--- a/net/ipv6/output_core.c
95680+++ b/net/ipv6/output_core.c
95681@@ -9,8 +9,8 @@
95682
95683 void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
95684 {
95685- static atomic_t ipv6_fragmentation_id;
95686- int old, new;
95687+ static atomic_unchecked_t ipv6_fragmentation_id;
95688+ int id;
95689
95690 #if IS_ENABLED(CONFIG_IPV6)
95691 if (rt && !(rt->dst.flags & DST_NOPEER)) {
95692@@ -26,13 +26,10 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
95693 }
95694 }
95695 #endif
95696- do {
95697- old = atomic_read(&ipv6_fragmentation_id);
95698- new = old + 1;
95699- if (!new)
95700- new = 1;
95701- } while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
95702- fhdr->identification = htonl(new);
95703+ id = atomic_inc_return_unchecked(&ipv6_fragmentation_id);
95704+ if (!id)
95705+ id = atomic_inc_return_unchecked(&ipv6_fragmentation_id);
95706+ fhdr->identification = htonl(id);
95707 }
95708 EXPORT_SYMBOL(ipv6_select_ident);
95709
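
The output_core.c hunk above replaces a cmpxchg retry loop with a single atomic increment: atomic_inc_return_unchecked() already hands every caller a unique value, and the only special case left is skipping the reserved id 0 after a 32-bit wraparound. In miniature:

	/* Sketch: non-zero fragment ids from one atomic counter. */
	static atomic_unchecked_t frag_id;

	static __be32 next_frag_id(void)
	{
		u32 id = atomic_inc_return_unchecked(&frag_id);
		if (!id)	/* wrapped to the reserved value, take the next one */
			id = atomic_inc_return_unchecked(&frag_id);
		return htonl(id);
	}
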
95710diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
95711index 7856e96..75ebc7f 100644
95712--- a/net/ipv6/ping.c
95713+++ b/net/ipv6/ping.c
95714@@ -246,6 +246,22 @@ static struct pernet_operations ping_v6_net_ops = {
95715 };
95716 #endif
95717
95718+static struct pingv6_ops real_pingv6_ops = {
95719+ .ipv6_recv_error = ipv6_recv_error,
95720+ .ip6_datagram_recv_ctl = ip6_datagram_recv_ctl,
95721+ .icmpv6_err_convert = icmpv6_err_convert,
95722+ .ipv6_icmp_error = ipv6_icmp_error,
95723+ .ipv6_chk_addr = ipv6_chk_addr,
95724+};
95725+
95726+static struct pingv6_ops dummy_pingv6_ops = {
95727+ .ipv6_recv_error = dummy_ipv6_recv_error,
95728+ .ip6_datagram_recv_ctl = dummy_ip6_datagram_recv_ctl,
95729+ .icmpv6_err_convert = dummy_icmpv6_err_convert,
95730+ .ipv6_icmp_error = dummy_ipv6_icmp_error,
95731+ .ipv6_chk_addr = dummy_ipv6_chk_addr,
95732+};
95733+
95734 int __init pingv6_init(void)
95735 {
95736 #ifdef CONFIG_PROC_FS
95737@@ -253,11 +269,7 @@ int __init pingv6_init(void)
95738 if (ret)
95739 return ret;
95740 #endif
95741- pingv6_ops.ipv6_recv_error = ipv6_recv_error;
95742- pingv6_ops.ip6_datagram_recv_ctl = ip6_datagram_recv_ctl;
95743- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
95744- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
95745- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
95746+ pingv6_ops = &real_pingv6_ops;
95747 return inet6_register_protosw(&pingv6_protosw);
95748 }
95749
95750@@ -266,11 +278,7 @@ int __init pingv6_init(void)
95751 */
95752 void pingv6_exit(void)
95753 {
95754- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
95755- pingv6_ops.ip6_datagram_recv_ctl = dummy_ip6_datagram_recv_ctl;
95756- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
95757- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
95758- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
95759+ pingv6_ops = &dummy_pingv6_ops;
95760 #ifdef CONFIG_PROC_FS
95761 unregister_pernet_subsys(&ping_v6_net_ops);
95762 #endif
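
The ping.c change above is a constification idiom: rather than poking five function pointers into a writable global at init and exit time, the patch keeps two fully-initialized static ops tables and flips a single pointer between them (the pingv6_ops declaration itself becomes a pointer in a header hunk outside this excerpt, which is implied by the assignments shown). Fewer writable function pointers means less for an attacker with a kernel write primitive to aim at. A generic sketch:

	/* Sketch: swap one pointer between static ops tables. */
	struct example_ops {
		int (*handler)(void);
	};
	static int real_handler(void)  { return 0; }
	static int dummy_handler(void) { return -EOPNOTSUPP; }

	static const struct example_ops real_ops  = { .handler = real_handler  };
	static const struct example_ops dummy_ops = { .handler = dummy_handler };
	static const struct example_ops *active_ops = &dummy_ops;

	static void example_init(void) { active_ops = &real_ops;  }
	static void example_exit(void) { active_ops = &dummy_ops; }
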
95763diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
95764index 430067c..4adf088 100644
95765--- a/net/ipv6/raw.c
95766+++ b/net/ipv6/raw.c
95767@@ -385,7 +385,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
95768 {
95769 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
95770 skb_checksum_complete(skb)) {
95771- atomic_inc(&sk->sk_drops);
95772+ atomic_inc_unchecked(&sk->sk_drops);
95773 kfree_skb(skb);
95774 return NET_RX_DROP;
95775 }
95776@@ -413,7 +413,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
95777 struct raw6_sock *rp = raw6_sk(sk);
95778
95779 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
95780- atomic_inc(&sk->sk_drops);
95781+ atomic_inc_unchecked(&sk->sk_drops);
95782 kfree_skb(skb);
95783 return NET_RX_DROP;
95784 }
95785@@ -437,7 +437,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
95786
95787 if (inet->hdrincl) {
95788 if (skb_checksum_complete(skb)) {
95789- atomic_inc(&sk->sk_drops);
95790+ atomic_inc_unchecked(&sk->sk_drops);
95791 kfree_skb(skb);
95792 return NET_RX_DROP;
95793 }
95794@@ -607,7 +607,7 @@ out:
95795 return err;
95796 }
95797
95798-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
95799+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
95800 struct flowi6 *fl6, struct dst_entry **dstp,
95801 unsigned int flags)
95802 {
95803@@ -920,12 +920,15 @@ do_confirm:
95804 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
95805 char __user *optval, int optlen)
95806 {
95807+ struct icmp6_filter filter;
95808+
95809 switch (optname) {
95810 case ICMPV6_FILTER:
95811 if (optlen > sizeof(struct icmp6_filter))
95812 optlen = sizeof(struct icmp6_filter);
95813- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
95814+ if (copy_from_user(&filter, optval, optlen))
95815 return -EFAULT;
95816+ raw6_sk(sk)->filter = filter;
95817 return 0;
95818 default:
95819 return -ENOPROTOOPT;
95820@@ -938,6 +941,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
95821 char __user *optval, int __user *optlen)
95822 {
95823 int len;
95824+ struct icmp6_filter filter;
95825
95826 switch (optname) {
95827 case ICMPV6_FILTER:
95828@@ -949,7 +953,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
95829 len = sizeof(struct icmp6_filter);
95830 if (put_user(len, optlen))
95831 return -EFAULT;
95832- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
95833+ filter = raw6_sk(sk)->filter;
95834+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
95835 return -EFAULT;
95836 return 0;
95837 default:
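
The raw.c get/setsockopt hunks above introduce a bounce buffer: user data is copied into a stack struct of known size first and only then assigned to the socket, and on the read side the length is checked against the size of the stack copy before copy_to_user(). Both directions are thereby provably bounded by sizeof(struct icmp6_filter). The setter side in isolation (a sketch, with the socket field abstracted to a destination pointer):

	/* Sketch: bounded copy via a fixed-size stack struct. */
	static int set_icmp6_filter(struct icmp6_filter *dst,
				    const char __user *optval, int optlen)
	{
		struct icmp6_filter filter;

		if (optlen > sizeof(filter))
			optlen = sizeof(filter);
		if (copy_from_user(&filter, optval, optlen))
			return -EFAULT;
		*dst = filter;	/* whole-struct assignment, never oversized */
		return 0;
	}
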
95838diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
95839index 1aeb473..bea761c 100644
95840--- a/net/ipv6/reassembly.c
95841+++ b/net/ipv6/reassembly.c
95842@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
95843
95844 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
95845 {
95846- struct ctl_table *table;
95847+ ctl_table_no_const *table = NULL;
95848 struct ctl_table_header *hdr;
95849
95850- table = ip6_frags_ns_ctl_table;
95851 if (!net_eq(net, &init_net)) {
95852- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
95853+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
95854 if (table == NULL)
95855 goto err_alloc;
95856
95857@@ -642,9 +641,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
95858 /* Don't export sysctls to unprivileged users */
95859 if (net->user_ns != &init_user_ns)
95860 table[0].procname = NULL;
95861- }
95862+ hdr = register_net_sysctl(net, "net/ipv6", table);
95863+ } else
95864+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
95865
95866- hdr = register_net_sysctl(net, "net/ipv6", table);
95867 if (hdr == NULL)
95868 goto err_reg;
95869
95870@@ -652,8 +652,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
95871 return 0;
95872
95873 err_reg:
95874- if (!net_eq(net, &init_net))
95875- kfree(table);
95876+ kfree(table);
95877 err_alloc:
95878 return -ENOMEM;
95879 }
95880diff --git a/net/ipv6/route.c b/net/ipv6/route.c
95881index 77308af..36ed509 100644
95882--- a/net/ipv6/route.c
95883+++ b/net/ipv6/route.c
95884@@ -3009,7 +3009,7 @@ struct ctl_table ipv6_route_table_template[] = {
95885
95886 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
95887 {
95888- struct ctl_table *table;
95889+ ctl_table_no_const *table;
95890
95891 table = kmemdup(ipv6_route_table_template,
95892 sizeof(ipv6_route_table_template),
95893diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
95894index b433884..3875266 100644
95895--- a/net/ipv6/sit.c
95896+++ b/net/ipv6/sit.c
95897@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
95898 static void ipip6_dev_free(struct net_device *dev);
95899 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
95900 __be32 *v4dst);
95901-static struct rtnl_link_ops sit_link_ops __read_mostly;
95902+static struct rtnl_link_ops sit_link_ops;
95903
95904 static int sit_net_id __read_mostly;
95905 struct sit_net {
95906@@ -1603,7 +1603,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
95907 unregister_netdevice_queue(dev, head);
95908 }
95909
95910-static struct rtnl_link_ops sit_link_ops __read_mostly = {
95911+static struct rtnl_link_ops sit_link_ops = {
95912 .kind = "sit",
95913 .maxtype = IFLA_IPTUN_MAX,
95914 .policy = ipip6_policy,
95915diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
95916index 107b2f1..72741a9 100644
95917--- a/net/ipv6/sysctl_net_ipv6.c
95918+++ b/net/ipv6/sysctl_net_ipv6.c
95919@@ -40,7 +40,7 @@ static struct ctl_table ipv6_rotable[] = {
95920
95921 static int __net_init ipv6_sysctl_net_init(struct net *net)
95922 {
95923- struct ctl_table *ipv6_table;
95924+ ctl_table_no_const *ipv6_table;
95925 struct ctl_table *ipv6_route_table;
95926 struct ctl_table *ipv6_icmp_table;
95927 int err;
95928diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
95929index 5c71501..7e8d5d3 100644
95930--- a/net/ipv6/tcp_ipv6.c
95931+++ b/net/ipv6/tcp_ipv6.c
95932@@ -104,6 +104,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
95933 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
95934 }
95935
95936+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95937+extern int grsec_enable_blackhole;
95938+#endif
95939+
95940 static void tcp_v6_hash(struct sock *sk)
95941 {
95942 if (sk->sk_state != TCP_CLOSE) {
95943@@ -1397,6 +1401,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
95944 return 0;
95945
95946 reset:
95947+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95948+ if (!grsec_enable_blackhole)
95949+#endif
95950 tcp_v6_send_reset(sk, skb);
95951 discard:
95952 if (opt_skb)
95953@@ -1479,12 +1486,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
95954 TCP_SKB_CB(skb)->sacked = 0;
95955
95956 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
95957- if (!sk)
95958+ if (!sk) {
95959+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95960+ ret = 1;
95961+#endif
95962 goto no_tcp_socket;
95963+ }
95964
95965 process:
95966- if (sk->sk_state == TCP_TIME_WAIT)
95967+ if (sk->sk_state == TCP_TIME_WAIT) {
95968+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95969+ ret = 2;
95970+#endif
95971 goto do_time_wait;
95972+ }
95973
95974 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
95975 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
95976@@ -1536,6 +1551,10 @@ csum_error:
95977 bad_packet:
95978 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
95979 } else {
95980+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95981+ if (!grsec_enable_blackhole || (ret == 1 &&
95982+ (skb->dev->flags & IFF_LOOPBACK)))
95983+#endif
95984 tcp_v6_send_reset(NULL, skb);
95985 }
95986
95987diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
95988index 3d2758d..626c422 100644
95989--- a/net/ipv6/udp.c
95990+++ b/net/ipv6/udp.c
95991@@ -53,6 +53,10 @@
95992 #include <trace/events/skb.h>
95993 #include "udp_impl.h"
95994
95995+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95996+extern int grsec_enable_blackhole;
95997+#endif
95998+
95999 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
96000 {
96001 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
96002@@ -417,7 +421,7 @@ try_again:
96003 if (unlikely(err)) {
96004 trace_kfree_skb(skb, udpv6_recvmsg);
96005 if (!peeked) {
96006- atomic_inc(&sk->sk_drops);
96007+ atomic_inc_unchecked(&sk->sk_drops);
96008 if (is_udp4)
96009 UDP_INC_STATS_USER(sock_net(sk),
96010 UDP_MIB_INERRORS,
96011@@ -665,7 +669,7 @@ csum_error:
96012 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
96013 drop:
96014 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
96015- atomic_inc(&sk->sk_drops);
96016+ atomic_inc_unchecked(&sk->sk_drops);
96017 kfree_skb(skb);
96018 return -1;
96019 }
96020@@ -723,7 +727,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
96021 if (likely(skb1 == NULL))
96022 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
96023 if (!skb1) {
96024- atomic_inc(&sk->sk_drops);
96025+ atomic_inc_unchecked(&sk->sk_drops);
96026 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
96027 IS_UDPLITE(sk));
96028 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
96029@@ -863,6 +867,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
96030 goto csum_error;
96031
96032 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
96033+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
96034+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
96035+#endif
96036 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
96037
96038 kfree_skb(skb);
96039diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
96040index 550b195..6acea83 100644
96041--- a/net/ipv6/xfrm6_policy.c
96042+++ b/net/ipv6/xfrm6_policy.c
96043@@ -212,11 +212,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
96044 }
96045 }
96046
96047-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
96048+static int xfrm6_garbage_collect(struct dst_ops *ops)
96049 {
96050 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
96051
96052- xfrm6_policy_afinfo.garbage_collect(net);
96053+ xfrm_garbage_collect_deferred(net);
96054 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
96055 }
96056
96057@@ -329,19 +329,19 @@ static struct ctl_table xfrm6_policy_table[] = {
96058
96059 static int __net_init xfrm6_net_init(struct net *net)
96060 {
96061- struct ctl_table *table;
96062+ ctl_table_no_const *table = NULL;
96063 struct ctl_table_header *hdr;
96064
96065- table = xfrm6_policy_table;
96066 if (!net_eq(net, &init_net)) {
96067- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
96068+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
96069 if (!table)
96070 goto err_alloc;
96071
96072 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
96073- }
96074+ hdr = register_net_sysctl(net, "net/ipv6", table);
96075+ } else
96076+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
96077
96078- hdr = register_net_sysctl(net, "net/ipv6", table);
96079 if (!hdr)
96080 goto err_reg;
96081
96082@@ -349,8 +349,7 @@ static int __net_init xfrm6_net_init(struct net *net)
96083 return 0;
96084
96085 err_reg:
96086- if (!net_eq(net, &init_net))
96087- kfree(table);
96088+ kfree(table);
96089 err_alloc:
96090 return -ENOMEM;
96091 }
96092diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
96093index 41ac7938..75e3bb1 100644
96094--- a/net/irda/ircomm/ircomm_tty.c
96095+++ b/net/irda/ircomm/ircomm_tty.c
96096@@ -319,11 +319,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
96097 add_wait_queue(&port->open_wait, &wait);
96098
96099 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
96100- __FILE__, __LINE__, tty->driver->name, port->count);
96101+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
96102
96103 spin_lock_irqsave(&port->lock, flags);
96104 if (!tty_hung_up_p(filp))
96105- port->count--;
96106+ atomic_dec(&port->count);
96107 port->blocked_open++;
96108 spin_unlock_irqrestore(&port->lock, flags);
96109
96110@@ -358,7 +358,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
96111 }
96112
96113 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
96114- __FILE__, __LINE__, tty->driver->name, port->count);
96115+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
96116
96117 schedule();
96118 }
96119@@ -368,12 +368,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
96120
96121 spin_lock_irqsave(&port->lock, flags);
96122 if (!tty_hung_up_p(filp))
96123- port->count++;
96124+ atomic_inc(&port->count);
96125 port->blocked_open--;
96126 spin_unlock_irqrestore(&port->lock, flags);
96127
96128 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
96129- __FILE__, __LINE__, tty->driver->name, port->count);
96130+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
96131
96132 if (!retval)
96133 port->flags |= ASYNC_NORMAL_ACTIVE;
96134@@ -447,12 +447,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
96135
96136 /* ++ is not atomic, so this should be protected - Jean II */
96137 spin_lock_irqsave(&self->port.lock, flags);
96138- self->port.count++;
96139+ atomic_inc(&self->port.count);
96140 spin_unlock_irqrestore(&self->port.lock, flags);
96141 tty_port_tty_set(&self->port, tty);
96142
96143 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
96144- self->line, self->port.count);
96145+ self->line, atomic_read(&self->port.count));
96146
96147 /* Not really used by us, but lets do it anyway */
96148 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
96149@@ -989,7 +989,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
96150 tty_kref_put(port->tty);
96151 }
96152 port->tty = NULL;
96153- port->count = 0;
96154+ atomic_set(&port->count, 0);
96155 spin_unlock_irqrestore(&port->lock, flags);
96156
96157 wake_up_interruptible(&port->open_wait);
96158@@ -1346,7 +1346,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
96159 seq_putc(m, '\n');
96160
96161 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
96162- seq_printf(m, "Open count: %d\n", self->port.count);
96163+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
96164 seq_printf(m, "Max data size: %d\n", self->max_data_size);
96165 seq_printf(m, "Max header size: %d\n", self->max_header_size);
96166
96167diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
96168index c4b7218..3e83259 100644
96169--- a/net/iucv/af_iucv.c
96170+++ b/net/iucv/af_iucv.c
96171@@ -773,10 +773,10 @@ static int iucv_sock_autobind(struct sock *sk)
96172
96173 write_lock_bh(&iucv_sk_list.lock);
96174
96175- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
96176+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
96177 while (__iucv_get_sock_by_name(name)) {
96178 sprintf(name, "%08x",
96179- atomic_inc_return(&iucv_sk_list.autobind_name));
96180+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
96181 }
96182
96183 write_unlock_bh(&iucv_sk_list.lock);
96184diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
96185index cd5b8ec..f205e6b 100644
96186--- a/net/iucv/iucv.c
96187+++ b/net/iucv/iucv.c
96188@@ -690,7 +690,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
96189 return NOTIFY_OK;
96190 }
96191
96192-static struct notifier_block __refdata iucv_cpu_notifier = {
96193+static struct notifier_block iucv_cpu_notifier = {
96194 .notifier_call = iucv_cpu_notify,
96195 };
96196
96197diff --git a/net/key/af_key.c b/net/key/af_key.c
96198index 545f047..9757a9d 100644
96199--- a/net/key/af_key.c
96200+++ b/net/key/af_key.c
96201@@ -3041,10 +3041,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
96202 static u32 get_acqseq(void)
96203 {
96204 u32 res;
96205- static atomic_t acqseq;
96206+ static atomic_unchecked_t acqseq;
96207
96208 do {
96209- res = atomic_inc_return(&acqseq);
96210+ res = atomic_inc_return_unchecked(&acqseq);
96211 } while (!res);
96212 return res;
96213 }
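
get_acqseq() above shows the atomic_unchecked_t conversion in its purest form, and the same substitution covers sk_drops, the IPVS in_pkts counters, the IUCV autobind name, and nfnetlink_log's global_seq throughout this section: these are monotonically growing counters where wrapping is harmless, so they opt out of the PAX_REFCOUNT overflow trap that plain atomic_t would receive. The shape of the function:

	/* Sketch: a never-zero sequence number from an unchecked atomic. */
	static u32 next_acqseq(void)
	{
		static atomic_unchecked_t seq;
		u32 res;

		do {
			res = atomic_inc_return_unchecked(&seq);
		} while (!res);	/* 0 is reserved, skip it on wraparound */
		return res;
	}
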
96214diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
96215index 9903ee5..18978be 100644
96216--- a/net/mac80211/cfg.c
96217+++ b/net/mac80211/cfg.c
96218@@ -826,7 +826,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
96219 ret = ieee80211_vif_use_channel(sdata, chandef,
96220 IEEE80211_CHANCTX_EXCLUSIVE);
96221 }
96222- } else if (local->open_count == local->monitors) {
96223+ } else if (local_read(&local->open_count) == local->monitors) {
96224 local->_oper_chandef = *chandef;
96225 ieee80211_hw_config(local, 0);
96226 }
96227@@ -3124,7 +3124,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
96228 else
96229 local->probe_req_reg--;
96230
96231- if (!local->open_count)
96232+ if (!local_read(&local->open_count))
96233 break;
96234
96235 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
96236@@ -3587,8 +3587,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
96237 if (chanctx_conf) {
96238 *chandef = chanctx_conf->def;
96239 ret = 0;
96240- } else if (local->open_count > 0 &&
96241- local->open_count == local->monitors &&
96242+ } else if (local_read(&local->open_count) > 0 &&
96243+ local_read(&local->open_count) == local->monitors &&
96244 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
96245 if (local->use_chanctx)
96246 *chandef = local->monitor_chandef;
96247diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
96248index 611abfc..8c2c7e4 100644
96249--- a/net/mac80211/ieee80211_i.h
96250+++ b/net/mac80211/ieee80211_i.h
96251@@ -28,6 +28,7 @@
96252 #include <net/ieee80211_radiotap.h>
96253 #include <net/cfg80211.h>
96254 #include <net/mac80211.h>
96255+#include <asm/local.h>
96256 #include "key.h"
96257 #include "sta_info.h"
96258 #include "debug.h"
96259@@ -945,7 +946,7 @@ struct ieee80211_local {
96260 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
96261 spinlock_t queue_stop_reason_lock;
96262
96263- int open_count;
96264+ local_t open_count;
96265 int monitors, cooked_mntrs;
96266 /* number of interfaces with corresponding FIF_ flags */
96267 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
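
In the mac80211 hunks above and below, local->open_count changes from a plain int to local_t, and every access is routed through the local_read()/local_inc()/local_dec() accessors (note the new asm/local.h include). The sketch below shows only the accessor discipline the patch enforces, without speculating on motivation beyond grsecurity's general hardening of shared counters:

	/* Sketch: counter accessed only through local_* helpers. */
	#include <asm/local.h>

	static local_t open_count;

	static void dev_opened(void) { local_inc(&open_count); }
	static void dev_closed(void) { local_dec(&open_count); }
	static long devs_open(void)  { return local_read(&open_count); }
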
96268diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
96269index fcecd63..a404454 100644
96270--- a/net/mac80211/iface.c
96271+++ b/net/mac80211/iface.c
96272@@ -519,7 +519,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
96273 break;
96274 }
96275
96276- if (local->open_count == 0) {
96277+ if (local_read(&local->open_count) == 0) {
96278 res = drv_start(local);
96279 if (res)
96280 goto err_del_bss;
96281@@ -566,7 +566,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
96282 res = drv_add_interface(local, sdata);
96283 if (res)
96284 goto err_stop;
96285- } else if (local->monitors == 0 && local->open_count == 0) {
96286+ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
96287 res = ieee80211_add_virtual_monitor(local);
96288 if (res)
96289 goto err_stop;
96290@@ -675,7 +675,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
96291 atomic_inc(&local->iff_promiscs);
96292
96293 if (coming_up)
96294- local->open_count++;
96295+ local_inc(&local->open_count);
96296
96297 if (hw_reconf_flags)
96298 ieee80211_hw_config(local, hw_reconf_flags);
96299@@ -713,7 +713,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
96300 err_del_interface:
96301 drv_remove_interface(local, sdata);
96302 err_stop:
96303- if (!local->open_count)
96304+ if (!local_read(&local->open_count))
96305 drv_stop(local);
96306 err_del_bss:
96307 sdata->bss = NULL;
96308@@ -852,7 +852,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
96309 }
96310
96311 if (going_down)
96312- local->open_count--;
96313+ local_dec(&local->open_count);
96314
96315 switch (sdata->vif.type) {
96316 case NL80211_IFTYPE_AP_VLAN:
96317@@ -919,7 +919,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
96318 }
96319 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
96320
96321- if (local->open_count == 0)
96322+ if (local_read(&local->open_count) == 0)
96323 ieee80211_clear_tx_pending(local);
96324
96325 /*
96326@@ -959,7 +959,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
96327
96328 ieee80211_recalc_ps(local, -1);
96329
96330- if (local->open_count == 0) {
96331+ if (local_read(&local->open_count) == 0) {
96332 ieee80211_stop_device(local);
96333
96334 /* no reconfiguring after stop! */
96335@@ -970,7 +970,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
96336 ieee80211_configure_filter(local);
96337 ieee80211_hw_config(local, hw_reconf_flags);
96338
96339- if (local->monitors == local->open_count)
96340+ if (local->monitors == local_read(&local->open_count))
96341 ieee80211_add_virtual_monitor(local);
96342 }
96343
96344diff --git a/net/mac80211/main.c b/net/mac80211/main.c
96345index e765f77..dfd72e7 100644
96346--- a/net/mac80211/main.c
96347+++ b/net/mac80211/main.c
96348@@ -172,7 +172,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
96349 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
96350 IEEE80211_CONF_CHANGE_POWER);
96351
96352- if (changed && local->open_count) {
96353+ if (changed && local_read(&local->open_count)) {
96354 ret = drv_config(local, changed);
96355 /*
96356 * Goal:
96357diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
96358index 3401262..d5cd68d 100644
96359--- a/net/mac80211/pm.c
96360+++ b/net/mac80211/pm.c
96361@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
96362 struct ieee80211_sub_if_data *sdata;
96363 struct sta_info *sta;
96364
96365- if (!local->open_count)
96366+ if (!local_read(&local->open_count))
96367 goto suspend;
96368
96369 ieee80211_scan_cancel(local);
96370@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
96371 cancel_work_sync(&local->dynamic_ps_enable_work);
96372 del_timer_sync(&local->dynamic_ps_timer);
96373
96374- local->wowlan = wowlan && local->open_count;
96375+ local->wowlan = wowlan && local_read(&local->open_count);
96376 if (local->wowlan) {
96377 int err = drv_suspend(local, wowlan);
96378 if (err < 0) {
96379@@ -116,7 +116,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
96380 WARN_ON(!list_empty(&local->chanctx_list));
96381
96382 /* stop hardware - this must stop RX */
96383- if (local->open_count)
96384+ if (local_read(&local->open_count))
96385 ieee80211_stop_device(local);
96386
96387 suspend:
96388diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
96389index e126605..73d2c39 100644
96390--- a/net/mac80211/rate.c
96391+++ b/net/mac80211/rate.c
96392@@ -725,7 +725,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
96393
96394 ASSERT_RTNL();
96395
96396- if (local->open_count)
96397+ if (local_read(&local->open_count))
96398 return -EBUSY;
96399
96400 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
96401diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
96402index c97a065..ff61928 100644
96403--- a/net/mac80211/rc80211_pid_debugfs.c
96404+++ b/net/mac80211/rc80211_pid_debugfs.c
96405@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
96406
96407 spin_unlock_irqrestore(&events->lock, status);
96408
96409- if (copy_to_user(buf, pb, p))
96410+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
96411 return -EFAULT;
96412
96413 return p;
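
The rc80211_pid_debugfs.c hunk above adds a sanity bound: the computed length p must not exceed the kernel buffer before copy_to_user() runs, turning a would-be over-read into -EFAULT. Generalized:

	/* Sketch: cap a computed length by the source buffer size. */
	static ssize_t copy_out(char __user *buf, const char *kbuf,
				size_t len, size_t kbuf_size)
	{
		if (len > kbuf_size || copy_to_user(buf, kbuf, len))
			return -EFAULT;
		return len;
	}
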
96414diff --git a/net/mac80211/util.c b/net/mac80211/util.c
96415index 69e4ef5..e8e4b92 100644
96416--- a/net/mac80211/util.c
96417+++ b/net/mac80211/util.c
96418@@ -1470,7 +1470,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
96419 }
96420 #endif
96421 /* everything else happens only if HW was up & running */
96422- if (!local->open_count)
96423+ if (!local_read(&local->open_count))
96424 goto wake_up;
96425
96426 /*
96427@@ -1695,7 +1695,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
96428 local->in_reconfig = false;
96429 barrier();
96430
96431- if (local->monitors == local->open_count && local->monitors > 0)
96432+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
96433 ieee80211_add_virtual_monitor(local);
96434
96435 /*
96436diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
96437index 6e839b6..002a233 100644
96438--- a/net/netfilter/Kconfig
96439+++ b/net/netfilter/Kconfig
96440@@ -950,6 +950,16 @@ config NETFILTER_XT_MATCH_ESP
96441
96442 To compile it as a module, choose M here. If unsure, say N.
96443
96444+config NETFILTER_XT_MATCH_GRADM
96445+ tristate '"gradm" match support'
96446+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
96447+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
96448+ ---help---
96449+ The gradm match allows to match on grsecurity RBAC being enabled.
96450+ It is useful when iptables rules are applied early on bootup to
96451+ prevent connections to the machine (except from a trusted host)
96452+ while the RBAC system is disabled.
96453+
96454 config NETFILTER_XT_MATCH_HASHLIMIT
96455 tristate '"hashlimit" match support'
96456 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
96457diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
96458index c3a0a12..90b587f 100644
96459--- a/net/netfilter/Makefile
96460+++ b/net/netfilter/Makefile
96461@@ -112,6 +112,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
96462 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
96463 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
96464 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
96465+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
96466 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
96467 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
96468 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
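
The Kconfig and Makefile hunks above wire a new xtables match into the build; the match body itself lives in the new net/netfilter/xt_gradm.c added at the end of this excerpt (and truncated there). For orientation, a hedged sketch of the boilerplate any such match module carries; the gradm-specific names and struct xt_gradm_mtinfo are assumptions about the truncated file, not quoted from it:

	/* Sketch: minimal xtables match registration skeleton. */
	static bool gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
	{
		/* gradm-specific logic would test RBAC state here */
		return false;
	}

	static struct xt_match gradm_mt_reg __read_mostly = {
		.name		= "gradm",
		.revision	= 0,
		.family		= NFPROTO_UNSPEC,
		.match		= gradm_mt,
		.matchsize	= XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
		.me		= THIS_MODULE,
	};

	static int __init gradm_mt_init(void)
	{
		return xt_register_match(&gradm_mt_reg);
	}

	static void __exit gradm_mt_exit(void)
	{
		xt_unregister_match(&gradm_mt_reg);
	}

	module_init(gradm_mt_init);
	module_exit(gradm_mt_exit);
	MODULE_LICENSE("GPL");
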
96469diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
96470index f2e30fb..f131862 100644
96471--- a/net/netfilter/ipset/ip_set_core.c
96472+++ b/net/netfilter/ipset/ip_set_core.c
96473@@ -1819,7 +1819,7 @@ done:
96474 return ret;
96475 }
96476
96477-static struct nf_sockopt_ops so_set __read_mostly = {
96478+static struct nf_sockopt_ops so_set = {
96479 .pf = PF_INET,
96480 .get_optmin = SO_IP_SET,
96481 .get_optmax = SO_IP_SET + 1,
96482diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
96483index 4c8e5c0..5a79b4d 100644
96484--- a/net/netfilter/ipvs/ip_vs_conn.c
96485+++ b/net/netfilter/ipvs/ip_vs_conn.c
96486@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
96487 /* Increase the refcnt counter of the dest */
96488 ip_vs_dest_hold(dest);
96489
96490- conn_flags = atomic_read(&dest->conn_flags);
96491+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
96492 if (cp->protocol != IPPROTO_UDP)
96493 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
96494 flags = cp->flags;
96495@@ -900,7 +900,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
96496
96497 cp->control = NULL;
96498 atomic_set(&cp->n_control, 0);
96499- atomic_set(&cp->in_pkts, 0);
96500+ atomic_set_unchecked(&cp->in_pkts, 0);
96501
96502 cp->packet_xmit = NULL;
96503 cp->app = NULL;
96504@@ -1188,7 +1188,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
96505
96506 /* Don't drop the entry if its number of incoming packets is not
96507 located in [0, 8] */
96508- i = atomic_read(&cp->in_pkts);
96509+ i = atomic_read_unchecked(&cp->in_pkts);
96510 if (i > 8 || i < 0) return 0;
96511
96512 if (!todrop_rate[i]) return 0;
96513diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
96514index 3581736..c0453e9 100644
96515--- a/net/netfilter/ipvs/ip_vs_core.c
96516+++ b/net/netfilter/ipvs/ip_vs_core.c
96517@@ -567,7 +567,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
96518 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
96519 /* do not touch skb anymore */
96520
96521- atomic_inc(&cp->in_pkts);
96522+ atomic_inc_unchecked(&cp->in_pkts);
96523 ip_vs_conn_put(cp);
96524 return ret;
96525 }
96526@@ -1706,7 +1706,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
96527 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
96528 pkts = sysctl_sync_threshold(ipvs);
96529 else
96530- pkts = atomic_add_return(1, &cp->in_pkts);
96531+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
96532
96533 if (ipvs->sync_state & IP_VS_STATE_MASTER)
96534 ip_vs_sync_conn(net, cp, pkts);
96535diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
96536index a3df9bd..895ae09 100644
96537--- a/net/netfilter/ipvs/ip_vs_ctl.c
96538+++ b/net/netfilter/ipvs/ip_vs_ctl.c
96539@@ -794,7 +794,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
96540 */
96541 ip_vs_rs_hash(ipvs, dest);
96542 }
96543- atomic_set(&dest->conn_flags, conn_flags);
96544+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
96545
96546 /* bind the service */
96547 old_svc = rcu_dereference_protected(dest->svc, 1);
96548@@ -1641,7 +1641,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
96549 * align with netns init in ip_vs_control_net_init()
96550 */
96551
96552-static struct ctl_table vs_vars[] = {
96553+static ctl_table_no_const vs_vars[] __read_only = {
96554 {
96555 .procname = "amemthresh",
96556 .maxlen = sizeof(int),
96557@@ -2062,7 +2062,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
96558 " %-7s %-6d %-10d %-10d\n",
96559 &dest->addr.in6,
96560 ntohs(dest->port),
96561- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
96562+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
96563 atomic_read(&dest->weight),
96564 atomic_read(&dest->activeconns),
96565 atomic_read(&dest->inactconns));
96566@@ -2073,7 +2073,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
96567 "%-7s %-6d %-10d %-10d\n",
96568 ntohl(dest->addr.ip),
96569 ntohs(dest->port),
96570- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
96571+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
96572 atomic_read(&dest->weight),
96573 atomic_read(&dest->activeconns),
96574 atomic_read(&dest->inactconns));
96575@@ -2551,7 +2551,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
96576
96577 entry.addr = dest->addr.ip;
96578 entry.port = dest->port;
96579- entry.conn_flags = atomic_read(&dest->conn_flags);
96580+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
96581 entry.weight = atomic_read(&dest->weight);
96582 entry.u_threshold = dest->u_threshold;
96583 entry.l_threshold = dest->l_threshold;
96584@@ -3094,7 +3094,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
96585 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
96586 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
96587 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
96588- (atomic_read(&dest->conn_flags) &
96589+ (atomic_read_unchecked(&dest->conn_flags) &
96590 IP_VS_CONN_F_FWD_MASK)) ||
96591 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
96592 atomic_read(&dest->weight)) ||
96593@@ -3684,7 +3684,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
96594 {
96595 int idx;
96596 struct netns_ipvs *ipvs = net_ipvs(net);
96597- struct ctl_table *tbl;
96598+ ctl_table_no_const *tbl;
96599
96600 atomic_set(&ipvs->dropentry, 0);
96601 spin_lock_init(&ipvs->dropentry_lock);
96602diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
96603index eff13c9..c1aab3e 100644
96604--- a/net/netfilter/ipvs/ip_vs_lblc.c
96605+++ b/net/netfilter/ipvs/ip_vs_lblc.c
96606@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
96607 * IPVS LBLC sysctl table
96608 */
96609 #ifdef CONFIG_SYSCTL
96610-static struct ctl_table vs_vars_table[] = {
96611+static ctl_table_no_const vs_vars_table[] __read_only = {
96612 {
96613 .procname = "lblc_expiration",
96614 .data = NULL,
96615diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
96616index 0b85500..8513fa5 100644
96617--- a/net/netfilter/ipvs/ip_vs_lblcr.c
96618+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
96619@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
96620 * IPVS LBLCR sysctl table
96621 */
96622
96623-static struct ctl_table vs_vars_table[] = {
96624+static ctl_table_no_const vs_vars_table[] __read_only = {
96625 {
96626 .procname = "lblcr_expiration",
96627 .data = NULL,
96628diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
96629index f448471..995f131 100644
96630--- a/net/netfilter/ipvs/ip_vs_sync.c
96631+++ b/net/netfilter/ipvs/ip_vs_sync.c
96632@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
96633 cp = cp->control;
96634 if (cp) {
96635 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
96636- pkts = atomic_add_return(1, &cp->in_pkts);
96637+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
96638 else
96639 pkts = sysctl_sync_threshold(ipvs);
96640 ip_vs_sync_conn(net, cp->control, pkts);
96641@@ -771,7 +771,7 @@ control:
96642 if (!cp)
96643 return;
96644 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
96645- pkts = atomic_add_return(1, &cp->in_pkts);
96646+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
96647 else
96648 pkts = sysctl_sync_threshold(ipvs);
96649 goto sloop;
96650@@ -895,7 +895,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
96651
96652 if (opt)
96653 memcpy(&cp->in_seq, opt, sizeof(*opt));
96654- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
96655+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
96656 cp->state = state;
96657 cp->old_state = cp->state;
96658 /*
96659diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
96660index c47444e..b0961c6 100644
96661--- a/net/netfilter/ipvs/ip_vs_xmit.c
96662+++ b/net/netfilter/ipvs/ip_vs_xmit.c
96663@@ -1102,7 +1102,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
96664 else
96665 rc = NF_ACCEPT;
96666 /* do not touch skb anymore */
96667- atomic_inc(&cp->in_pkts);
96668+ atomic_inc_unchecked(&cp->in_pkts);
96669 goto out;
96670 }
96671
96672@@ -1194,7 +1194,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
96673 else
96674 rc = NF_ACCEPT;
96675 /* do not touch skb anymore */
96676- atomic_inc(&cp->in_pkts);
96677+ atomic_inc_unchecked(&cp->in_pkts);
96678 goto out;
96679 }
96680
96681diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
96682index 2d3030a..7ba1c0a 100644
96683--- a/net/netfilter/nf_conntrack_acct.c
96684+++ b/net/netfilter/nf_conntrack_acct.c
96685@@ -60,7 +60,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
96686 #ifdef CONFIG_SYSCTL
96687 static int nf_conntrack_acct_init_sysctl(struct net *net)
96688 {
96689- struct ctl_table *table;
96690+ ctl_table_no_const *table;
96691
96692 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
96693 GFP_KERNEL);
96694diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
96695index 5d892fe..d2fc9d8 100644
96696--- a/net/netfilter/nf_conntrack_core.c
96697+++ b/net/netfilter/nf_conntrack_core.c
96698@@ -1600,6 +1600,10 @@ void nf_conntrack_init_end(void)
96699 #define DYING_NULLS_VAL ((1<<30)+1)
96700 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
96701
96702+#ifdef CONFIG_GRKERNSEC_HIDESYM
96703+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
96704+#endif
96705+
96706 int nf_conntrack_init_net(struct net *net)
96707 {
96708 int ret;
96709@@ -1614,7 +1618,11 @@ int nf_conntrack_init_net(struct net *net)
96710 goto err_stat;
96711 }
96712
96713+#ifdef CONFIG_GRKERNSEC_HIDESYM
96714+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
96715+#else
96716 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
96717+#endif
96718 if (!net->ct.slabname) {
96719 ret = -ENOMEM;
96720 goto err_slabname;
96721diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
96722index 1df1761..ce8b88a 100644
96723--- a/net/netfilter/nf_conntrack_ecache.c
96724+++ b/net/netfilter/nf_conntrack_ecache.c
96725@@ -188,7 +188,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
96726 #ifdef CONFIG_SYSCTL
96727 static int nf_conntrack_event_init_sysctl(struct net *net)
96728 {
96729- struct ctl_table *table;
96730+ ctl_table_no_const *table;
96731
96732 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
96733 GFP_KERNEL);
96734diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
96735index 974a2a4..52cc6ff 100644
96736--- a/net/netfilter/nf_conntrack_helper.c
96737+++ b/net/netfilter/nf_conntrack_helper.c
96738@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
96739
96740 static int nf_conntrack_helper_init_sysctl(struct net *net)
96741 {
96742- struct ctl_table *table;
96743+ ctl_table_no_const *table;
96744
96745 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
96746 GFP_KERNEL);
96747diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
96748index ce30041..3861b5d 100644
96749--- a/net/netfilter/nf_conntrack_proto.c
96750+++ b/net/netfilter/nf_conntrack_proto.c
96751@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
96752
96753 static void
96754 nf_ct_unregister_sysctl(struct ctl_table_header **header,
96755- struct ctl_table **table,
96756+ ctl_table_no_const **table,
96757 unsigned int users)
96758 {
96759 if (users > 0)
96760diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
96761index a99b6c3..cb372f9 100644
96762--- a/net/netfilter/nf_conntrack_proto_dccp.c
96763+++ b/net/netfilter/nf_conntrack_proto_dccp.c
96764@@ -428,7 +428,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
96765 const char *msg;
96766 u_int8_t state;
96767
96768- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
96769+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
96770 BUG_ON(dh == NULL);
96771
96772 state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
96773@@ -457,7 +457,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
96774 out_invalid:
96775 if (LOG_INVALID(net, IPPROTO_DCCP))
96776 nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL,
96777- NULL, msg);
96778+ NULL, "%s", msg);
96779 return false;
96780 }
96781
96782@@ -486,7 +486,7 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
96783 u_int8_t type, old_state, new_state;
96784 enum ct_dccp_roles role;
96785
96786- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
96787+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
96788 BUG_ON(dh == NULL);
96789 type = dh->dccph_type;
96790
96791@@ -577,7 +577,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
96792 unsigned int cscov;
96793 const char *msg;
96794
96795- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
96796+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
96797 if (dh == NULL) {
96798 msg = "nf_ct_dccp: short packet ";
96799 goto out_invalid;
96800@@ -614,7 +614,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
96801
96802 out_invalid:
96803 if (LOG_INVALID(net, IPPROTO_DCCP))
96804- nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, msg);
96805+ nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", msg);
96806 return -NF_ACCEPT;
96807 }
96808
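
The nf_conntrack_proto_dccp.c hunks above fix two genuine bugs rather than apply a hardening pattern. First, skb_header_pointer() was being handed &dh, the address of the pointer variable, instead of &_dh, the stack buffer it is supposed to fill; on a non-linear skb the copy would overwrite the pointer and adjacent stack (it mostly "worked" only because linear skbs skip the copy and return a direct pointer). Second, the invalid-packet message was passed to nf_log_packet() as the format string itself; "%s", msg is the safe form. The format-string half, demonstrated in plain userspace C:

	/* Sketch: why a non-literal format argument is dangerous. */
	#include <stdio.h>

	static void log_msg(const char *msg)
	{
		/* printf(msg) would interpret any %-directives inside msg
		 * (and trips -Wformat-security); this form prints it verbatim. */
		printf("%s", msg);
	}
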
96809diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
96810index f641751..d3c5b51 100644
96811--- a/net/netfilter/nf_conntrack_standalone.c
96812+++ b/net/netfilter/nf_conntrack_standalone.c
96813@@ -471,7 +471,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
96814
96815 static int nf_conntrack_standalone_init_sysctl(struct net *net)
96816 {
96817- struct ctl_table *table;
96818+ ctl_table_no_const *table;
96819
96820 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
96821 GFP_KERNEL);
96822diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
96823index 902fb0a..87f7fdb 100644
96824--- a/net/netfilter/nf_conntrack_timestamp.c
96825+++ b/net/netfilter/nf_conntrack_timestamp.c
96826@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
96827 #ifdef CONFIG_SYSCTL
96828 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
96829 {
96830- struct ctl_table *table;
96831+ ctl_table_no_const *table;
96832
96833 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
96834 GFP_KERNEL);
96835diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
96836index 85296d4..8becdec 100644
96837--- a/net/netfilter/nf_log.c
96838+++ b/net/netfilter/nf_log.c
96839@@ -243,7 +243,7 @@ static const struct file_operations nflog_file_ops = {
96840
96841 #ifdef CONFIG_SYSCTL
96842 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
96843-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
96844+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
96845
96846 static int nf_log_proc_dostring(struct ctl_table *table, int write,
96847 void __user *buffer, size_t *lenp, loff_t *ppos)
96848@@ -274,14 +274,16 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
96849 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
96850 mutex_unlock(&nf_log_mutex);
96851 } else {
96852+ ctl_table_no_const nf_log_table = *table;
96853+
96854 mutex_lock(&nf_log_mutex);
96855 logger = rcu_dereference_protected(net->nf.nf_loggers[tindex],
96856 lockdep_is_held(&nf_log_mutex));
96857 if (!logger)
96858- table->data = "NONE";
96859+ nf_log_table.data = "NONE";
96860 else
96861- table->data = logger->name;
96862- r = proc_dostring(table, write, buffer, lenp, ppos);
96863+ nf_log_table.data = logger->name;
96864+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
96865 mutex_unlock(&nf_log_mutex);
96866 }
96867
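
In nf_log_proc_dostring() above, the read path used to point the shared table's ->data at the current logger name before calling proc_dostring(). With the table now read-only, the patch copies the entry into a stack-local ctl_table_no_const, mutates the copy, and passes that instead. Condensed:

	/* Sketch: mutate a stack copy, never the shared read-only table. */
	static int read_logger_name(struct ctl_table *table, void __user *buf,
				    size_t *lenp, loff_t *ppos, const char *name)
	{
		ctl_table_no_const tmp = *table;	/* struct copy */

		tmp.data = (void *)(name ? name : "NONE");
		return proc_dostring(&tmp, 0, buf, lenp, ppos);
	}
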
96868diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
96869index f042ae5..30ea486 100644
96870--- a/net/netfilter/nf_sockopt.c
96871+++ b/net/netfilter/nf_sockopt.c
96872@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
96873 }
96874 }
96875
96876- list_add(&reg->list, &nf_sockopts);
96877+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
96878 out:
96879 mutex_unlock(&nf_sockopt_mutex);
96880 return ret;
96881@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
96882 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
96883 {
96884 mutex_lock(&nf_sockopt_mutex);
96885- list_del(&reg->list);
96886+ pax_list_del((struct list_head *)&reg->list);
96887 mutex_unlock(&nf_sockopt_mutex);
96888 }
96889 EXPORT_SYMBOL(nf_unregister_sockopt);
96890diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
96891index d92cc31..e46f350 100644
96892--- a/net/netfilter/nfnetlink_log.c
96893+++ b/net/netfilter/nfnetlink_log.c
96894@@ -82,7 +82,7 @@ static int nfnl_log_net_id __read_mostly;
96895 struct nfnl_log_net {
96896 spinlock_t instances_lock;
96897 struct hlist_head instance_table[INSTANCE_BUCKETS];
96898- atomic_t global_seq;
96899+ atomic_unchecked_t global_seq;
96900 };
96901
96902 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
96903@@ -563,7 +563,7 @@ __build_packet_message(struct nfnl_log_net *log,
96904 /* global sequence number */
96905 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
96906 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
96907- htonl(atomic_inc_return(&log->global_seq))))
96908+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
96909 goto nla_put_failure;
96910
96911 if (data_len) {
96912diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
96913new file mode 100644
96914index 0000000..c566332
96915--- /dev/null
96916+++ b/net/netfilter/xt_gradm.c
96917@@ -0,0 +1,51 @@
96918+/*
96919+ * gradm match for netfilter
96920