diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index b89a739..9aa2627 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -2,9 +2,11 @@
 *.aux
 *.bin
 *.bz2
+*.c.[012]*.*
 *.cis
 *.cpio
 *.csp
+*.dbg
 *.dsp
 *.dvi
 *.elf
@@ -14,6 +16,7 @@
 *.gcov
 *.gen.S
 *.gif
+*.gmo
 *.grep
 *.grp
 *.gz
@@ -48,14 +51,17 @@
 *.tab.h
 *.tex
 *.ver
+*.vim
 *.xml
 *.xz
 *_MODULES
+*_reg_safe.h
 *_vga16.c
 *~
 \#*#
 *.9
-.*
+.[^g]*
+.gen*
 .*.d
 .mm
 53c700_d.h
@@ -69,9 +75,11 @@ Image
 Module.markers
 Module.symvers
 PENDING
+PERF*
 SCCS
 System.map*
 TAGS
+TRACEEVENT-CFLAGS
 aconf
 af_names.h
 aic7*reg.h*
@@ -80,6 +88,7 @@ aic7*seq.h*
 aicasm
 aicdb.h*
 altivec*.c
+ashldi3.S
 asm-offsets.h
 asm_offsets.h
 autoconf.h*
@@ -92,32 +101,40 @@ bounds.h
 bsetup
 btfixupprep
 build
+builtin-policy.h
 bvmlinux
 bzImage*
 capability_names.h
 capflags.c
 classlist.h*
+clut_vga16.c
+common-cmds.h
 comp*.log
 compile.h*
 conf
 config
 config-*
 config_data.h*
+config.c
 config.mak
 config.mak.autogen
+config.tmp
 conmakehash
 consolemap_deftbl.c*
 cpustr.h
 crc32table.h*
 cscope.*
 defkeymap.c
+devicetable-offsets.h
 devlist.h*
 dnotify_test
 docproc
 dslm
+dtc-lexer.lex.c
 elf2ecoff
 elfconfig.h*
 evergreen_reg_safe.h
+exception_policy.conf
 fixdep
 flask.h
 fore200e_mkfirm
@@ -125,12 +142,15 @@ fore200e_pca_fw.c*
 gconf
 gconf.glade.h
 gen-devlist
+gen-kdb_cmds.c
 gen_crc32table
 gen_init_cpio
 generated
 genheaders
 genksyms
 *_gray256.c
+hash
+hid-example
 hpet_example
 hugepage-mmap
 hugepage-shm
@@ -145,14 +165,14 @@ int32.c
 int4.c
 int8.c
 kallsyms
-kconfig
+kern_constants.h
 keywords.c
 ksym.c*
 ksym.h*
 kxgettext
 lex.c
 lex.*.c
-linux
+lib1funcs.S
 logo_*.c
 logo_*_clut224.c
 logo_*_mono.c
@@ -162,14 +182,15 @@ mach-types.h
 machtypes.h
 map
 map_hugetlb
-media
 mconf
+mdp
 miboot*
 mk_elfconfig
 mkboot
 mkbugboot
 mkcpustr
 mkdep
+mkpiggy
 mkprep
 mkregtable
 mktables
@@ -185,6 +206,8 @@ oui.c*
 page-types
 parse.c
 parse.h
+parse-events*
+pasyms.h
 patches*
 pca200e.bin
 pca200e_ecd.bin2
@@ -194,6 +217,7 @@ perf-archive
 piggyback
 piggy.gzip
 piggy.S
+pmu-*
 pnmtologo
 ppc_defs.h*
 pss_boot.h
@@ -203,7 +227,12 @@ r200_reg_safe.h
 r300_reg_safe.h
 r420_reg_safe.h
 r600_reg_safe.h
+randomize_layout_hash.data
+randomize_layout_seed.h
+realmode.lds
+realmode.relocs
 recordmcount
+regdb.c
 relocs
 rlim_names.h
 rn50_reg_safe.h
@@ -213,8 +242,12 @@ series
 setup
 setup.bin
 setup.elf
+signing_key*
+size_overflow_hash.h
 sImage
+slabinfo
 sm_tbl*
+sortextable
 split-include
 syscalltab.h
 tables.c
@@ -224,6 +257,7 @@ tftpboot.img
 timeconst.h
 times.h*
 trix_boot.h
+user_constants.h
 utsrelease.h*
 vdso-syms.lds
 vdso.lds
@@ -235,13 +269,17 @@ vdso32.lds
 vdso32.so.dbg
 vdso64.lds
 vdso64.so.dbg
+vdsox32.lds
+vdsox32-syms.lds
 version.h*
 vmImage
 vmlinux
 vmlinux-*
 vmlinux.aout
 vmlinux.bin.all
+vmlinux.bin.bz2
 vmlinux.lds
+vmlinux.relocs
 vmlinuz
 voffset.h
 vsyscall.lds
@@ -249,9 +287,12 @@ vsyscall_32.lds
 wanxlfw.inc
 uImage
 unifdef
+utsrelease.h
 wakeup.bin
 wakeup.elf
 wakeup.lds
+x509*
 zImage*
 zconf.hash.c
+zconf.lex.c
 zoffset.h
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index b9e9bd8..bf49b92 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1033,6 +1033,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
 Default: 1024

+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
+ ignore grsecurity's /proc restrictions
+
+
 hashdist= [KNL,NUMA] Large hashes allocated during boot
 are distributed across NUMA nodes. Defaults on
 for 64-bit NUMA, off otherwise.
@@ -2018,6 +2022,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 noexec=on: enable non-executable mappings (default)
 noexec=off: disable non-executable mappings

+ nopcid [X86-64]
+ Disable PCID (Process-Context IDentifier) even if it
+ is supported by the processor.
+
 nosmap [X86]
 Disable SMAP (Supervisor Mode Access Prevention)
 even if it is supported by processor.
@@ -2285,6 +2293,25 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 the specified number of seconds. This is to be used if
 your oopses keep scrolling off the screen.

+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
+ virtualization environments that don't cope well with the
+ expand down segment used by UDEREF on X86-32 or the frequent
+ page table updates on X86-64.
+
+ pax_sanitize_slab=
+ 0/1 to disable/enable slab object sanitization (enabled by
+ default).
+
+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
+
+ pax_extra_latent_entropy
+ Enable a very simple form of latent entropy extraction
+ from the first 4GB of memory as the bootmem allocator
+ passes the memory pages to the buddy allocator.
+
+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
+ when the processor supports PCID.
+
 pcbit= [HW,ISDN]

 pcd. [PARIDE]
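The grsec_proc_gid and pax_* entries above are ordinary boot parameters, so this hardening behaviour can be tuned without rebuilding the kernel. An illustrative command-line fragment (the GID value 1001 is an invented example, not from the patch):

    pax_sanitize_slab=1 pax_softmode=0 grsec_proc_gid=1001

appended to the bootloader's kernel line keeps slab sanitization and PaX enforcement on while exempting one group from the /proc restrictions.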
diff --git a/Makefile b/Makefile
index a7fd5d9..84ed0df 100644
--- a/Makefile
+++ b/Makefile
@@ -244,8 +244,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \

 HOSTCC = gcc
 HOSTCXX = g++
-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
-HOSTCXXFLAGS = -O2
+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks

 # Decide whether to build built-in, modular, or both.
 # Normally, just do built-in.
@@ -311,9 +312,15 @@ endif
 # If the user is running make -s (silent mode), suppress echoing of
 # commands

+ifneq ($(filter 4.%,$(MAKE_VERSION)),) # make-4
+ifneq ($(filter %s ,$(firstword x$(MAKEFLAGS))),)
+ quiet=silent_
+endif
+else # make-3.8x
 ifneq ($(filter s% -s%,$(MAKEFLAGS)),)
 quiet=silent_
 endif
+endif

 export quiet Q KBUILD_VERBOSE

@@ -417,8 +424,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
 # Rules shared between *config targets and build targets

 # Basic helpers built in scripts/
-PHONY += scripts_basic
-scripts_basic:
+PHONY += scripts_basic gcc-plugins
+scripts_basic: gcc-plugins
 $(Q)$(MAKE) $(build)=scripts/basic
 $(Q)rm -f .tmp_quiet_recordmcount

@@ -579,6 +586,74 @@ else
 KBUILD_CFLAGS += -O2
 endif

+ifndef DISABLE_PAX_PLUGINS
+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
+else
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
+endif
+ifneq ($(PLUGINCC),)
+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
+endif
+ifdef CONFIG_PAX_MEMORY_STACKLEAK
+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
+endif
+ifdef CONFIG_KALLOCSTAT_PLUGIN
+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
+endif
+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
+endif
+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
+RANDSTRUCT_HASHED_SEED := $(shell cat "$(objtree)/tools/gcc/randomize_layout_hash.data")
+RANDSTRUCT_PLUGIN_CFLAGS += -DRANDSTRUCT_HASHED_SEED="\"$(RANDSTRUCT_HASHED_SEED)\""
+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
+endif
+endif
+ifdef CONFIG_CHECKER_PLUGIN
+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
+endif
+endif
+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
+ifdef CONFIG_PAX_SIZE_OVERFLOW
+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
+endif
+ifdef CONFIG_PAX_LATENT_ENTROPY
+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
+endif
+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
+endif
+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
+ifeq ($(KBUILD_EXTMOD),)
+gcc-plugins:
+ $(Q)$(MAKE) $(build)=tools/gcc
+else
+gcc-plugins: ;
+endif
+else
+gcc-plugins:
+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)
+else
+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
+endif
+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
+endif
+endif
+
 include $(srctree)/arch/$(SRCARCH)/Makefile

 ifdef CONFIG_READABLE_ASM
@@ -619,7 +694,7 @@ endif

 ifdef CONFIG_DEBUG_INFO
 KBUILD_CFLAGS += -g
-KBUILD_AFLAGS += -gdwarf-2
+KBUILD_AFLAGS += -Wa,--gdwarf-2
 endif

 ifdef CONFIG_DEBUG_INFO_REDUCED
@@ -754,7 +829,7 @@ export mod_sign_cmd


 ifeq ($(KBUILD_EXTMOD),)
-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/

 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
@@ -803,6 +878,8 @@ endif

 # The actual objects are generated when descending,
 # make sure no implicit rule kicks in
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;

 # Handle descending into subdirectories listed in $(vmlinux-dirs)
@@ -812,7 +889,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
 # Error messages still appears in the original language

 PHONY += $(vmlinux-dirs)
-$(vmlinux-dirs): prepare scripts
+$(vmlinux-dirs): gcc-plugins prepare scripts
 $(Q)$(MAKE) $(build)=$@

 define filechk_kernel.release
@@ -855,10 +932,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \

 archprepare: archheaders archscripts prepare1 scripts_basic

+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 prepare0: archprepare FORCE
 $(Q)$(MAKE) $(build)=.

 # All the preparing..
+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
 prepare: prepare0

 # Generate some files
@@ -966,6 +1046,8 @@ all: modules
 # using awk while concatenating to the final file.

 PHONY += modules
+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
 @$(kecho) ' Building modules, stage 2.';
@@ -981,7 +1063,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)

 # Target to prepare building external modules
 PHONY += modules_prepare
-modules_prepare: prepare scripts
+modules_prepare: gcc-plugins prepare scripts

 # Target to install modules
 PHONY += modules_install
@@ -1047,7 +1129,8 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
 signing_key.priv signing_key.x509 x509.genkey \
 extra_certificates signing_key.x509.keyid \
- signing_key.x509.signer
+ signing_key.x509.signer tools/gcc/size_overflow_hash.h \
+ tools/gcc/randomize_layout_seed.h tools/gcc/randomize_layout_hash.data

 # clean - Delete most, but leave enough to build external modules
 #
@@ -1087,6 +1170,7 @@ distclean: mrproper
 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
 -o -name '.*.rej' \
+ -o -name '.*.rej' -o -name '*.so' \
 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
 -type f -print | xargs rm -f

@@ -1248,6 +1332,8 @@ PHONY += $(module-dirs) modules
 $(module-dirs): crmodverdir $(objtree)/Module.symvers
 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)

+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 modules: $(module-dirs)
 @$(kecho) ' Building modules, stage 2.';
 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
@@ -1387,17 +1473,21 @@ else
 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
 endif

-%.s: %.c prepare scripts FORCE
+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.s: %.c gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.i: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.c prepare scripts FORCE
+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.o: %.c gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.lst: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.s: %.S prepare scripts FORCE
+%.s: %.S gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.S prepare scripts FORCE
+%.o: %.S gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.symtypes: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
@@ -1407,11 +1497,15 @@ endif
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir)
-%/: prepare scripts FORCE
+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%/: gcc-plugins prepare scripts FORCE
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir)
-%.ko: prepare scripts FORCE
+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.ko: gcc-plugins prepare scripts FORCE
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir) $(@:.ko=.o)
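Each plugin block above does two things: it loads the shared object into gcc via -fplugin= and defines a matching preprocessor symbol (CONSTIFY_PLUGIN, SIZE_OVERFLOW_PLUGIN, and so on), which lets headers adapt to whether a plugin is active. A hedged sketch of the consumer side, assuming the attribute names the constify plugin registers (the real definitions live in the compiler headers elsewhere in this patch, not in this hunk):

    /* When constify_plugin.so is not loaded these attributes would be
     * unknown to gcc, so they must compile away to nothing. */
    #ifdef CONSTIFY_PLUGIN
    #define __do_const __attribute__((do_const))
    #define __no_const __attribute__((no_const))
    #else
    #define __do_const
    #define __no_const
    #endif

This is why later hunks can tag ops structures with __no_const or __do_const unconditionally.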
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 78b03ef..da28a51 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -292,6 +292,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 #define atomic_dec(v) atomic_sub(1,(v))
 #define atomic64_dec(v) atomic64_sub(1,(v))

+#define atomic64_read_unchecked(v) atomic64_read(v)
+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v) atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v) atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
+
 #define smp_mb__before_atomic_dec() smp_mb()
 #define smp_mb__after_atomic_dec() smp_mb()
 #define smp_mb__before_atomic_inc() smp_mb()
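On alpha the _unchecked names are plain aliases because the PAX_REFCOUNT overflow instrumentation is not implemented for this architecture; on instrumented architectures the plain atomic ops trap on signed overflow while the _unchecked variants keep ordinary wrapping semantics. A usage sketch (the counter is invented for illustration):

    /* A statistics counter that may legitimately wrap: use the
     * _unchecked API so overflow detection never fires on it. */
    static atomic64_unchecked_t tx_bytes = ATOMIC64_INIT(0);

    static inline void account_tx(long n)
    {
            atomic64_add_unchecked(n, &tx_bytes);
    }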
diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
index ad368a9..fbe0f25 100644
--- a/arch/alpha/include/asm/cache.h
+++ b/arch/alpha/include/asm/cache.h
@@ -4,19 +4,19 @@
 #ifndef __ARCH_ALPHA_CACHE_H
 #define __ARCH_ALPHA_CACHE_H

+#include <linux/const.h>

 /* Bytes per L1 (data) cache line. */
 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
-# define L1_CACHE_BYTES 64
 # define L1_CACHE_SHIFT 6
 #else
 /* Both EV4 and EV5 are write-through, read-allocate,
 direct-mapped, physical.
 */
-# define L1_CACHE_BYTES 32
 # define L1_CACHE_SHIFT 5
 #endif

+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 #define SMP_CACHE_BYTES L1_CACHE_BYTES

 #endif
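Deriving L1_CACHE_BYTES from L1_CACHE_SHIFT via _AC() keeps the two in sync and makes the constant usable from both C and assembly. For reference, _AC() from include/uapi/linux/const.h behaves like this:

    #ifdef __ASSEMBLY__
    #define _AC(X,Y)   X            /* assemblers reject the UL suffix */
    #else
    #define __AC(X,Y)  (X##Y)
    #define _AC(X,Y)   __AC(X,Y)    /* C sees the typed constant 1UL */
    #endif

so on EV6 the macro expands to (1UL << 6) == 64 in C and to (1 << 6) in .S files.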
diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
index 968d999..d36b2df 100644
--- a/arch/alpha/include/asm/elf.h
+++ b/arch/alpha/include/asm/elf.h
@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];

 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)

+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
+
+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
+#endif
+
 /* $0 is set by ld.so to a pointer to a function which might be
 registered using atexit. This provides a mean for the dynamic
 linker to call DT_FINI functions for shared libraries that have
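PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN are bit counts: the number of random bits PaX applies at page granularity when PAX_ASLR randomizes the mmap and stack bases. A sketch of how the randomization code elsewhere in the series consumes them (an assumption based on the generic PAX_RANDMMAP logic, not part of this hunk):

    delta_mmap = (pax_get_random_long() &
                  ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;

With 28 bits and alpha's 8 KB pages that spreads the base over roughly a 2^41-byte window; the 32-bit personality falls back to 14 bits.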
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
index aab14a0..b4fa3e7 100644
--- a/arch/alpha/include/asm/pgalloc.h
+++ b/arch/alpha/include/asm/pgalloc.h
@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 pgd_set(pgd, pmd);
 }

+static inline void
+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+{
+ pgd_populate(mm, pgd, pmd);
+}
+
 extern pgd_t *pgd_alloc(struct mm_struct *mm);

 static inline void
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index d8f9b7e..f6222fa 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -102,6 +102,17 @@ struct vm_area_struct;
 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+
+#ifdef CONFIG_PAX_PAGEEXEC
+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+#else
+# define PAGE_SHARED_NOEXEC PAGE_SHARED
+# define PAGE_COPY_NOEXEC PAGE_COPY
+# define PAGE_READONLY_NOEXEC PAGE_READONLY
+#endif
+
 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
index 2fd00b7..cfd5069 100644
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,

 /* The small sections were sorted to the end of the segment.
 The following should definitely cover them. */
- gp = (u64)me->module_core + me->core_size - 0x8000;
+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
 got = sechdrs[me->arch.gotsecindex].sh_addr;

 for (i = 0; i < n; i++) {
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 1402fcc..0b1abd2 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1298,10 +1298,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
 generic version except that we know how to honor ADDR_LIMIT_32BIT. */

 static unsigned long
-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
- unsigned long limit)
+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
+ unsigned long limit, unsigned long flags)
 {
 struct vm_unmapped_area_info info;
+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);

 info.flags = 0;
 info.length = len;
@@ -1309,6 +1310,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
 info.high_limit = limit;
 info.align_mask = 0;
 info.align_offset = 0;
+ info.threadstack_offset = offset;
 return vm_unmapped_area(&info);
 }

@@ -1341,20 +1343,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 merely specific addresses, but regions of memory -- perhaps
 this feature should be incorporated into all ports? */

+#ifdef CONFIG_PAX_RANDMMAP
+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 if (addr) {
- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
 if (addr != (unsigned long) -ENOMEM)
 return addr;
 }

 /* Next, try allocating at TASK_UNMAPPED_BASE. */
- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
- len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
+
 if (addr != (unsigned long) -ENOMEM)
 return addr;

 /* Finally, try allocating in low memory. */
- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);

 return addr;
 }
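Two things change in the fallback path above: the search honours the grsec thread-stack gap via info.threadstack_offset, and it starts from current->mm->mmap_base rather than the fixed TASK_UNMAPPED_BASE. The latter matters because under PAX_RANDMMAP the per-process base already carries the random delta chosen at exec time, roughly (a sketch; the setup code lives elsewhere in the series):

    if (mm->pax_flags & MF_PAX_RANDMMAP)
            mm->mmap_base += mm->delta_mmap;   /* applied once at exec */

so searching from mmap_base is what actually applies mmap randomization here.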
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 98838a0..b304fb4 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
 __reload_thread(pcb);
 }

+#ifdef CONFIG_PAX_PAGEEXEC
+/*
+ * PaX: decide what to do with offenders (regs->pc = fault address)
+ *
+ * returns 1 when task should be killed
+ * 2 when patched PLT trampoline was detected
+ * 3 when unpatched PLT trampoline was detected
+ */
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+
+#ifdef CONFIG_PAX_EMUPLT
+ int err;
+
+ do { /* PaX: patched PLT emulation #1 */
+ unsigned int ldah, ldq, jmp;
+
+ err = get_user(ldah, (unsigned int *)regs->pc);
+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
+
+ if (err)
+ break;
+
+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
+ jmp == 0x6BFB0000U)
+ {
+ unsigned long r27, addr;
+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
+
+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+ err = get_user(r27, (unsigned long *)addr);
+ if (err)
+ break;
+
+ regs->r27 = r27;
+ regs->pc = r27;
+ return 2;
+ }
+ } while (0);
+
+ do { /* PaX: patched PLT emulation #2 */
+ unsigned int ldah, lda, br;
+
+ err = get_user(ldah, (unsigned int *)regs->pc);
+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
+ err |= get_user(br, (unsigned int *)(regs->pc+8));
+
+ if (err)
+ break;
+
+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
+ (br & 0xFFE00000U) == 0xC3E00000U)
+ {
+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
+
+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+ return 2;
+ }
+ } while (0);
+
+ do { /* PaX: unpatched PLT emulation */
+ unsigned int br;
+
+ err = get_user(br, (unsigned int *)regs->pc);
+
+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
+ unsigned int br2, ldq, nop, jmp;
+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
+
+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+ err = get_user(br2, (unsigned int *)addr);
+ err |= get_user(ldq, (unsigned int *)(addr+4));
+ err |= get_user(nop, (unsigned int *)(addr+8));
+ err |= get_user(jmp, (unsigned int *)(addr+12));
+ err |= get_user(resolver, (unsigned long *)(addr+16));
+
+ if (err)
+ break;
+
+ if (br2 == 0xC3600000U &&
+ ldq == 0xA77B000CU &&
+ nop == 0x47FF041FU &&
+ jmp == 0x6B7B0000U)
+ {
+ regs->r28 = regs->pc+4;
+ regs->r27 = addr+16;
+ regs->pc = resolver;
+ return 3;
+ }
+ }
+ } while (0);
+#endif
+
+ return 1;
+}
+
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+ unsigned long i;
+
+ printk(KERN_ERR "PAX: bytes at PC: ");
+ for (i = 0; i < 5; i++) {
+ unsigned int c;
+ if (get_user(c, (unsigned int *)pc+i))
+ printk(KERN_CONT "???????? ");
+ else
+ printk(KERN_CONT "%08x ", c);
+ }
+ printk("\n");
+}
+#endif

 /*
 * This routine handles page faults. It determines the address,
@@ -133,8 +251,29 @@ retry:
 good_area:
 si_code = SEGV_ACCERR;
 if (cause < 0) {
- if (!(vma->vm_flags & VM_EXEC))
+ if (!(vma->vm_flags & VM_EXEC)) {
+
+#ifdef CONFIG_PAX_PAGEEXEC
+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
+ goto bad_area;
+
+ up_read(&mm->mmap_sem);
+ switch (pax_handle_fetch_fault(regs)) {
+
+#ifdef CONFIG_PAX_EMUPLT
+ case 2:
+ case 3:
+ return;
+#endif
+
+ }
+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
+ do_group_exit(SIGKILL);
+#else
 goto bad_area;
+#endif
+
+ }
 } else if (!cause) {
 /* Allow reads even for write-only mappings */
 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
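The PLT-emulation arithmetic above repeatedly uses ((x ^ C) + C) with C = 0x8000 or 0x80000000UL after OR-ing the immediate's high bits to all-ones: that pair of operations is a branch-free sign extension of the 16-bit (lda/ldah) or 21-bit (br) displacement field. A standalone illustration of the 16-bit case (equivalent logic, not taken from the patch):

    #include <stdint.h>

    /* sign-extend the low 16 bits of insn into a 64-bit value */
    static int64_t sext16(uint64_t insn)
    {
            uint64_t x = insn | 0xFFFFFFFFFFFF0000ULL; /* mirror the ldq/lda masking */
            /* bit 15 set:   xor+add round-trips, the high ones stay -> negative */
            /* bit 15 clear: the add's carry ripples up and clears the high ones */
            return (int64_t)((x ^ 0x8000ULL) + 0x8000ULL);
    }
    /* sext16(0x7fff) == 32767, sext16(0x8000) == -32768 */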
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c1f1a7e..554b0cd 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1828,7 +1828,7 @@ config ALIGNMENT_TRAP

 config UACCESS_WITH_MEMCPY
 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
- depends on MMU
+ depends on MMU && !PAX_MEMORY_UDEREF
 default y if CPU_FEROCEON
 help
 Implement faster copy_to_user and clear_user methods for CPU
@@ -2100,6 +2100,7 @@ config XIP_PHYS_ADDR
 config KEXEC
 bool "Kexec system call (EXPERIMENTAL)"
 depends on (!SMP || PM_SLEEP_SMP)
+ depends on !GRKERNSEC_KMEM
 help
 kexec is a system call that implements the ability to shutdown your
 current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 62d2cb5..7a13651 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -18,17 +18,35 @@
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>

+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
 #define ATOMIC_INIT(i) { (i) }

 #ifdef __KERNEL__

+#define _ASM_EXTABLE(from, to) \
+" .pushsection __ex_table,\"a\"\n"\
+" .align 3\n" \
+" .long " #from ", " #to"\n" \
+" .popsection"
+
 /*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
 #define atomic_read(v) (*(volatile int *)&(v)->counter)
+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
+{
+ return v->counter;
+}
 #define atomic_set(v,i) (((v)->counter) = (i))
+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
+{
+ v->counter = i;
+}

 #if __LINUX_ARM_ARCH__ >= 6

@@ -44,6 +62,36 @@ static inline void atomic_add(int i, atomic_t *v)

 prefetchw(&v->counter);
 __asm__ __volatile__("@ atomic_add\n"
+"1: ldrex %1, [%3]\n"
+" adds %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+}
+
+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ prefetchw(&v->counter);
+ __asm__ __volatile__("@ atomic_add_unchecked\n"
 "1: ldrex %0, [%3]\n"
 " add %0, %0, %4\n"
 " strex %1, %0, [%3]\n"
@@ -62,6 +110,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
 smp_mb();

 __asm__ __volatile__("@ atomic_add_return\n"
+"1: ldrex %1, [%3]\n"
+" adds %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+
+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
 "1: ldrex %0, [%3]\n"
 " add %0, %0, %4\n"
 " strex %1, %0, [%3]\n"
@@ -83,6 +167,36 @@ static inline void atomic_sub(int i, atomic_t *v)

 prefetchw(&v->counter);
 __asm__ __volatile__("@ atomic_sub\n"
+"1: ldrex %1, [%3]\n"
+" subs %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+}
+
+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ prefetchw(&v->counter);
+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
 "1: ldrex %0, [%3]\n"
 " sub %0, %0, %4\n"
 " strex %1, %0, [%3]\n"
@@ -101,11 +215,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 smp_mb();

 __asm__ __volatile__("@ atomic_sub_return\n"
-"1: ldrex %0, [%3]\n"
-" sub %0, %0, %4\n"
+"1: ldrex %1, [%3]\n"
+" subs %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " strex %1, %0, [%3]\n"
 " teq %1, #0\n"
 " bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "Ir" (i)
 : "cc");
@@ -138,6 +266,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 return oldval;
 }

+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
+{
+ unsigned long oldval, res;
+
+ smp_mb();
+
+ do {
+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
+ "ldrex %1, [%3]\n"
+ "mov %0, #0\n"
+ "teq %1, %4\n"
+ "strexeq %0, %5, [%3]\n"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
+ : "cc");
+ } while (res);
+
+ smp_mb();
+
+ return oldval;
+}
+
 #else /* ARM_ARCH_6 */

 #ifdef CONFIG_SMP
@@ -156,7 +306,17 @@ static inline int atomic_add_return(int i, atomic_t *v)

 return val;
 }
+
+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
+{
+ return atomic_add_return(i, v);
+}
+
 #define atomic_add(i, v) (void) atomic_add_return(i, v)
+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
+{
+ (void) atomic_add_return(i, v);
+}

 static inline int atomic_sub_return(int i, atomic_t *v)
 {
@@ -171,6 +331,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 return val;
 }
 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
+{
+ (void) atomic_sub_return(i, v);
+}

 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
@@ -186,9 +350,18 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 return ret;
 }

+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
+{
+ return atomic_cmpxchg(v, old, new);
+}
+
 #endif /* __LINUX_ARM_ARCH__ */

 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
+{
+ return xchg(&v->counter, new);
+}

 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
@@ -201,11 +374,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 }

 #define atomic_inc(v) atomic_add(1, v)
+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
+{
+ atomic_add_unchecked(1, v);
+}
 #define atomic_dec(v) atomic_sub(1, v)
+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
+{
+ atomic_sub_unchecked(1, v);
+}

 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
+{
+ return atomic_add_return_unchecked(1, v) == 0;
+}
 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
 #define atomic_inc_return(v) (atomic_add_return(1, v))
+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
+{
+ return atomic_add_return_unchecked(1, v);
+}
 #define atomic_dec_return(v) (atomic_sub_return(1, v))
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

@@ -221,6 +410,14 @@ typedef struct {
 long long counter;
 } atomic64_t;

+#ifdef CONFIG_PAX_REFCOUNT
+typedef struct {
+ long long counter;
+} atomic64_unchecked_t;
+#else
+typedef atomic64_t atomic64_unchecked_t;
+#endif
+
 #define ATOMIC64_INIT(i) { (i) }

 #ifdef CONFIG_ARM_LPAE
@@ -237,6 +434,19 @@ static inline long long atomic64_read(const atomic64_t *v)
 return result;
 }

+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+ long long result;
+
+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
+" ldrd %0, %H0, [%1]"
+ : "=&r" (result)
+ : "r" (&v->counter), "Qo" (v->counter)
+ );
+
+ return result;
+}
+
 static inline void atomic64_set(atomic64_t *v, long long i)
 {
 __asm__ __volatile__("@ atomic64_set\n"
@@ -245,6 +455,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 : "r" (&v->counter), "r" (i)
 );
 }
+
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
+{
+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
+" strd %2, %H2, [%1]"
+ : "=Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ );
+}
 #else
 static inline long long atomic64_read(const atomic64_t *v)
 {
@@ -259,6 +478,19 @@ static inline long long atomic64_read(const atomic64_t *v)
 return result;
 }

+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+ long long result;
+
+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
+" ldrexd %0, %H0, [%1]"
+ : "=&r" (result)
+ : "r" (&v->counter), "Qo" (v->counter)
+ );
+
+ return result;
+}
+
 static inline void atomic64_set(atomic64_t *v, long long i)
 {
 long long tmp;
@@ -273,6 +505,21 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 : "r" (&v->counter), "r" (i)
 : "cc");
 }
+
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
+{
+ long long tmp;
+
+ prefetchw(&v->counter);
+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
+"1: ldrexd %0, %H0, [%2]\n"
+" strexd %0, %3, %H3, [%2]\n"
+" teq %0, #0\n"
+" bne 1b"
+ : "=&r" (tmp), "=Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
 #endif

 static inline void atomic64_add(long long i, atomic64_t *v)
@@ -284,6 +531,37 @@ static inline void atomic64_add(long long i, atomic64_t *v)
 __asm__ __volatile__("@ atomic64_add\n"
 "1: ldrexd %0, %H0, [%3]\n"
 " adds %Q0, %Q0, %Q4\n"
+" adcs %R0, %R0, %R4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strexd %1, %0, %H0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
+
+static inline void atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
+{
+ long long result;
+ unsigned long tmp;
+
+ prefetchw(&v->counter);
+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
+"1: ldrexd %0, %H0, [%3]\n"
+" adds %Q0, %Q0, %Q4\n"
 " adc %R0, %R0, %R4\n"
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
@@ -303,6 +581,44 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
 __asm__ __volatile__("@ atomic64_add_return\n"
 "1: ldrexd %0, %H0, [%3]\n"
 " adds %Q0, %Q0, %Q4\n"
+" adcs %R0, %R0, %R4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+" mov %H0, %H1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strexd %1, %0, %H0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+
+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
+{
+ long long result;
+ unsigned long tmp;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
+"1: ldrexd %0, %H0, [%3]\n"
+" adds %Q0, %Q0, %Q4\n"
 " adc %R0, %R0, %R4\n"
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
@@ -325,6 +641,37 @@ static inline void atomic64_sub(long long i, atomic64_t *v)
 __asm__ __volatile__("@ atomic64_sub\n"
 "1: ldrexd %0, %H0, [%3]\n"
 " subs %Q0, %Q0, %Q4\n"
+" sbcs %R0, %R0, %R4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strexd %1, %0, %H0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
+
+static inline void atomic64_sub_unchecked(long long i, atomic64_unchecked_t *v)
+{
+ long long result;
+ unsigned long tmp;
+
+ prefetchw(&v->counter);
+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
+"1: ldrexd %0, %H0, [%3]\n"
+" subs %Q0, %Q0, %Q4\n"
 " sbc %R0, %R0, %R4\n"
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
@@ -344,17 +691,28 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
 __asm__ __volatile__("@ atomic64_sub_return\n"
 "1: ldrexd %0, %H0, [%3]\n"
 " subs %Q0, %Q0, %Q4\n"
-" sbc %R0, %R0, %R4\n"
+" sbcs %R0, %R0, %R4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+" mov %H0, %H1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
 " bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "r" (i)
 : "cc");
-
- smp_mb();
-
- return result;
 }

 static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
@@ -382,6 +740,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
 return oldval;
 }

+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
+ long long new)
+{
+ long long oldval;
+ unsigned long res;
+
+ smp_mb();
+
+ do {
+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
+ "ldrexd %1, %H1, [%3]\n"
+ "mov %0, #0\n"
+ "teq %1, %4\n"
+ "teqeq %H1, %H4\n"
+ "strexdeq %0, %5, %H5, [%3]"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+ : "r" (&ptr->counter), "r" (old), "r" (new)
+ : "cc");
+ } while (res);
+
+ smp_mb();
+
+ return oldval;
+}
+
 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 {
 long long result;
@@ -406,20 +789,34 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
 long long result;
- unsigned long tmp;
+ u64 tmp;

 smp_mb();

 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
-"1: ldrexd %0, %H0, [%3]\n"
-" subs %Q0, %Q0, #1\n"
-" sbc %R0, %R0, #0\n"
+"1: ldrexd %1, %H1, [%3]\n"
+" subs %Q0, %Q1, #1\n"
+" sbcs %R0, %R1, #0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %Q0, %Q1\n"
+" mov %R0, %R1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " teq %R0, #0\n"
-" bmi 2f\n"
+" bmi 4f\n"
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
 " bne 1b\n"
-"2:"
+"4:\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter)
 : "cc");
@@ -442,13 +839,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 " teq %0, %5\n"
 " teqeq %H0, %H5\n"
 " moveq %1, #0\n"
-" beq 2f\n"
+" beq 4f\n"
 " adds %Q0, %Q0, %Q6\n"
-" adc %R0, %R0, %R6\n"
+" adcs %R0, %R0, %R6\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " strexd %2, %0, %H0, [%4]\n"
 " teq %2, #0\n"
 " bne 1b\n"
-"2:"
+"4:\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "r" (u), "r" (a)
 : "cc");
@@ -461,10 +870,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)

 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
 #define atomic64_inc(v) atomic64_add(1LL, (v))
+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
 #define atomic64_dec(v) atomic64_sub(1LL, (v))
+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
 #define atomic64_dec_return(v) (atomic64_sub_return(1LL, (v))
 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
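The PAX_REFCOUNT variants above work by turning add/adc into the flag-setting adds/adcs forms and testing the overflow flag: bvc skips the trap when no signed overflow occurred; otherwise bkpt 0xf103 fires, and the __ex_table entry added by _ASM_EXTABLE(2b, 4b) lets the trap handler resume after the store, so the counter is never written with the wrapped value. An equivalent check in portable C (illustrative only; __builtin_add_overflow needs gcc 5+ or clang, newer than the compilers this patch targets):

    /* checked add: trap instead of wrapping a reference count */
    static inline int checked_add(int a, int b)
    {
            int r;
            if (__builtin_add_overflow(a, b, &r))
                    __builtin_trap();       /* analogous to bkpt 0xf103 */
            return r;
    }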
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index 75fe66b..ba3dee4 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -4,8 +4,10 @@
 #ifndef __ASMARM_CACHE_H
 #define __ASMARM_CACHE_H

+#include <linux/const.h>
+
 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)

 /*
 * Memory returned by kmalloc() may be used for DMA, so we must make
@@ -24,5 +26,6 @@
 #endif

 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+#define __read_only __attribute__ ((__section__(".data..read_only")))

 #endif
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index ee753f1..c9c30a5 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -116,7 +116,7 @@ struct cpu_cache_fns {
 void (*dma_unmap_area)(const void *, size_t, int);

 void (*dma_flush_range)(const void *, const void *);
-};
+} __no_const;

 /*
 * Select the calling method
diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
index 6dcc164..b14d917 100644
--- a/arch/arm/include/asm/checksum.h
+++ b/arch/arm/include/asm/checksum.h
@@ -37,7 +37,19 @@ __wsum
 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);

 __wsum
-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+
+static inline __wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
+{
+ __wsum ret;
+ pax_open_userland();
+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
+ pax_close_userland();
+ return ret;
+}
+
+

 * Fold a partial checksum without adding pseudo headers
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index df2fbba..63fe3e1 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size

 #define xchg(ptr,x) \
 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg_unchecked(ptr,x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

 #include <asm-generic/cmpxchg-local.h>

diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 6ddbe44..b5e38b1 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -48,18 +48,37 @@
 * Domain types
 */
 #define DOMAIN_NOACCESS 0
-#define DOMAIN_CLIENT 1
 #ifdef CONFIG_CPU_USE_DOMAINS
+#define DOMAIN_USERCLIENT 1
+#define DOMAIN_KERNELCLIENT 1
 #define DOMAIN_MANAGER 3
+#define DOMAIN_VECTORS DOMAIN_USER
 #else
+
+#ifdef CONFIG_PAX_KERNEXEC
 #define DOMAIN_MANAGER 1
+#define DOMAIN_KERNEXEC 3
+#else
+#define DOMAIN_MANAGER 1
+#endif
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+#define DOMAIN_USERCLIENT 0
+#define DOMAIN_UDEREF 1
+#define DOMAIN_VECTORS DOMAIN_KERNEL
+#else
+#define DOMAIN_USERCLIENT 1
+#define DOMAIN_VECTORS DOMAIN_USER
+#endif
+#define DOMAIN_KERNELCLIENT 1
+
 #endif

 #define domain_val(dom,type) ((type) << (2*(dom)))

 #ifndef __ASSEMBLY__

-#ifdef CONFIG_CPU_USE_DOMAINS
+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
 static inline void set_domain(unsigned val)
 {
 asm volatile(
@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
 isb();
 }

-#define modify_domain(dom,type) \
- do { \
- struct thread_info *thread = current_thread_info(); \
- unsigned int domain = thread->cpu_domain; \
- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
- thread->cpu_domain = domain | domain_val(dom, type); \
- set_domain(thread->cpu_domain); \
- } while (0)
-
+extern void modify_domain(unsigned int dom, unsigned int type);
 #else
 static inline void set_domain(unsigned val) { }
 static inline void modify_domain(unsigned dom, unsigned type) { }
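domain_val(dom, type) places a 2-bit access-control field at bit position 2*dom of the DACR, where 00 = no access, 01 = client (page-table permissions enforced) and 11 = manager (permissions ignored), which is why KERNEXEC reserves the value 3 for DOMAIN_KERNEXEC. A hedged sketch of composing a register value from the constants above (the domain indices DOMAIN_KERNEL/DOMAIN_USER/DOMAIN_IO are defined earlier in this header, outside the hunk):

    /* kernel memory as client, user memory inaccessible from kernel
     * mode (the UDEREF idea), I/O mappings as client */
    unsigned long dacr =
            domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) |
            domain_val(DOMAIN_USER,   DOMAIN_NOACCESS)     |
            domain_val(DOMAIN_IO,     DOMAIN_KERNELCLIENT);
    set_domain(dacr);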
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index f4b46d3..abc9b2b 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -114,7 +114,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 the loader. We need to make sure that it is out of the way of the program
 that it will "exec", and that there is sufficient room for the brk. */

-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
+
+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
+#endif

 /* When the program starts, a1 contains a pointer to a function to be
 registered with atexit, as per the SVR4 ABI. A value of 0 means we
@@ -124,10 +131,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 extern void elf_set_personality(const struct elf32_hdr *);
 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))

-struct mm_struct;
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
 #ifdef CONFIG_MMU
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
 struct linux_binprm;
diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
index de53547..52b9a28 100644
--- a/arch/arm/include/asm/fncpy.h
+++ b/arch/arm/include/asm/fncpy.h
@@ -81,7 +81,9 @@
 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
 \
+ pax_open_kernel(); \
 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
+ pax_close_kernel(); \
 flush_icache_range((unsigned long)(dest_buf), \
 (unsigned long)(dest_buf) + (size)); \
 \
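fncpy() copies a function body into a caller-supplied buffer that may be mapped read-only/executable once KERNEXEC is active, so the memcpy() has to be bracketed by pax_open_kernel()/pax_close_kernel(), which temporarily lift the kernel write protection on the current CPU. The same pattern (hedged sketch) applies to any deliberate write into protected kernel memory:

    pax_open_kernel();
    memcpy(dst, src, size);          /* dst may live in RX memory */
    pax_close_kernel();
    flush_icache_range((unsigned long)dst, (unsigned long)dst + size);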
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index e42cf59..7b94b8f 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 return -EFAULT;

+ pax_open_userland();
+
 smp_mb();
 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 "1: ldrex %1, [%4]\n"
@@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 : "cc", "memory");
 smp_mb();

+ pax_close_userland();
+
 *uval = val;
 return ret;
 }
@@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 return -EFAULT;

+ pax_open_userland();
+
 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 "1: " TUSER(ldr) " %1, [%4]\n"
 " teq %1, %2\n"
@@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 : "cc", "memory");

+ pax_close_userland();
+
 *uval = val;
 return ret;
 }
@@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 return -EFAULT;

 pagefault_disable(); /* implies preempt_disable() */
+ pax_open_userland();

 switch (op) {
 case FUTEX_OP_SET:
@@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 ret = -ENOSYS;
 }

+ pax_close_userland();
 pagefault_enable(); /* subsumes preempt_enable() */

 if (!ret) {
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index 83eb2f7..ed77159 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -4,6 +4,6 @@
 /*
 * This is the "bare minimum". AIO seems to require this.
 */
-#define KM_TYPE_NR 16
+#define KM_TYPE_NR 17

 #endif
diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
index 9e614a1..3302cca 100644
--- a/arch/arm/include/asm/mach/dma.h
+++ b/arch/arm/include/asm/mach/dma.h
@@ -22,7 +22,7 @@ struct dma_ops {
 int (*residue)(unsigned int, dma_t *); /* optional */
 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
 const char *type;
-};
+} __do_const;

 struct dma_struct {
 void *addr; /* single DMA address */
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index 2fe141f..192dc01 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -27,13 +27,16 @@ struct map_desc {
 #define MT_MINICLEAN 6
 #define MT_LOW_VECTORS 7
 #define MT_HIGH_VECTORS 8
-#define MT_MEMORY 9
+#define MT_MEMORY_RWX 9
 #define MT_ROM 10
-#define MT_MEMORY_NONCACHED 11
+#define MT_MEMORY_NONCACHED_RX 11
 #define MT_MEMORY_DTCM 12
 #define MT_MEMORY_ITCM 13
 #define MT_MEMORY_SO 14
 #define MT_MEMORY_DMA_READY 15
+#define MT_MEMORY_RW 16
+#define MT_MEMORY_RX 17
+#define MT_MEMORY_NONCACHED_RW 18

 #ifdef CONFIG_MMU
 extern void iotable_init(struct map_desc *, int);
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index f94784f..9a09a4a 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -35,7 +35,7 @@ struct outer_cache_fns {
 #endif
 void (*set_debug)(unsigned long);
 void (*resume)(void);
-};
+} __no_const;

 extern struct outer_cache_fns outer_cache;

diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 4355f0e..c229913 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -114,7 +114,7 @@ struct cpu_user_fns {
 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
 unsigned long vaddr, struct vm_area_struct *vma);
-};
+} __no_const;

 #ifdef MULTI_USER
 extern struct cpu_user_fns cpu_user;
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 78a7793..e3dc06c 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -17,6 +17,7 @@
 #include <asm/processor.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
+#include <asm/system_info.h>

 #define check_pgt_cache() do { } while (0)

@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
 }

+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+ pud_populate(mm, pud, pmd);
+}
+
 #else /* !CONFIG_ARM_LPAE */

 /*
@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
 #define pmd_free(mm, pmd) do { } while (0)
 #define pud_populate(mm,pmd,pte) BUG()
+#define pud_populate_kernel(mm,pmd,pte) BUG()

 #endif /* CONFIG_ARM_LPAE */

@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
 __free_page(pte);
 }

+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
+{
+#ifdef CONFIG_ARM_LPAE
+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
+#else
+ if (addr & SECTION_SIZE)
+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
+ else
+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
+#endif
+ flush_pmd_entry(pmdp);
+}
+
 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
 pmdval_t prot)
 {
@@ -157,7 +177,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
 static inline void
 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
 {
- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
 }
 #define pmd_pgtable(pmd) pmd_page(pmd)

diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
index 5cfba15..f415e1a 100644
--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -20,12 +20,15 @@
 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
1931 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
1932+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
1933 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
1934 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
1935 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
1936+
1937 /*
1938 * - section
1939 */
1940+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1941 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1942 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1943 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1944@@ -37,6 +40,7 @@
1945 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1946 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1947 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1948+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1949
1950 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1951 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1952@@ -66,6 +70,7 @@
1953 * - extended small page/tiny page
1954 */
1955 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1956+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1957 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1958 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1959 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1960diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1961index 86a659a..70e0120 100644
1962--- a/arch/arm/include/asm/pgtable-2level.h
1963+++ b/arch/arm/include/asm/pgtable-2level.h
1964@@ -126,6 +126,9 @@
1965 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1966 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1967
1968+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1969+#define L_PTE_PXN (_AT(pteval_t, 0))
1970+
1971 /*
1972 * These are the memory types, defined to be compatible with
1973 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
1974diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1975index 626989f..9d67a33 100644
1976--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1977+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1978@@ -75,6 +75,7 @@
1979 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1980 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
1981 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
1982+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1983 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
1984
1985 /*
1986diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1987index 4f95039..b2dd513 100644
1988--- a/arch/arm/include/asm/pgtable-3level.h
1989+++ b/arch/arm/include/asm/pgtable-3level.h
1990@@ -82,6 +82,7 @@
1991 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
1992 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1993 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1994+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1995 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1996 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
1997 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
1998@@ -95,6 +96,7 @@
1999 /*
2000 * To be used in assembly code with the upper page attributes.
2001 */
2002+#define L_PTE_PXN_HIGH (1 << (53 - 32))
2003 #define L_PTE_XN_HIGH (1 << (54 - 32))
2004 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
2005
2006diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
2007index 1571d12..b8a9b43 100644
2008--- a/arch/arm/include/asm/pgtable.h
2009+++ b/arch/arm/include/asm/pgtable.h
2010@@ -33,6 +33,9 @@
2011 #include <asm/pgtable-2level.h>
2012 #endif
2013
2014+#define ktla_ktva(addr) (addr)
2015+#define ktva_ktla(addr) (addr)
2016+
2017 /*
2018 * Just any arbitrary offset to the start of the vmalloc VM area: the
2019 * current 8MB value just means that there will be a 8MB "hole" after the
2020@@ -48,6 +51,9 @@
2021 #define LIBRARY_TEXT_START 0x0c000000
2022
2023 #ifndef __ASSEMBLY__
2024+extern pteval_t __supported_pte_mask;
2025+extern pmdval_t __supported_pmd_mask;
2026+
2027 extern void __pte_error(const char *file, int line, pte_t);
2028 extern void __pmd_error(const char *file, int line, pmd_t);
2029 extern void __pgd_error(const char *file, int line, pgd_t);
2030@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2031 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2032 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2033
2034+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2035+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2036+
2037+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2038+#include <asm/domain.h>
2039+#include <linux/thread_info.h>
2040+#include <linux/preempt.h>
2041+
2042+static inline int test_domain(int domain, int domaintype)
2043+{
2044+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2045+}
2046+#endif
2047+
2048+#ifdef CONFIG_PAX_KERNEXEC
2049+static inline unsigned long pax_open_kernel(void) {
2050+#ifdef CONFIG_ARM_LPAE
2051+ /* TODO */
2052+#else
2053+ preempt_disable();
2054+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2055+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2056+#endif
2057+ return 0;
2058+}
2059+
2060+static inline unsigned long pax_close_kernel(void) {
2061+#ifdef CONFIG_ARM_LPAE
2062+ /* TODO */
2063+#else
2064+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2065+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2066+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2067+ preempt_enable_no_resched();
2068+#endif
2069+ return 0;
2070+}
2071+#else
2072+static inline unsigned long pax_open_kernel(void) { return 0; }
2073+static inline unsigned long pax_close_kernel(void) { return 0; }
2074+#endif
2075+
2076 /*
2077 * This is the lowest virtual address we can permit any user space
2078 * mapping to be mapped at. This is particularly important for
2079@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2080 /*
2081 * The pgprot_* and protection_map entries will be fixed up in runtime
2082 * to include the cachable and bufferable bits based on memory policy,
2083- * as well as any architecture dependent bits like global/ASID and SMP
2084- * shared mapping bits.
2085+ * as well as any architecture dependent bits like global/ASID, PXN,
2086+ * and SMP shared mapping bits.
2087 */
2088 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2089
2090@@ -260,7 +308,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2091 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2092 {
2093 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2094- L_PTE_NONE | L_PTE_VALID;
2095+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2096 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2097 return pte;
2098 }
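
pax_open_kernel()/pax_close_kernel() above are the KERNEXEC counterpart: DOMAIN_KERNEL is switched to the manager-type DOMAIN_KERNEXEC with preemption disabled, so a write to otherwise read-only kernel memory can go through, then switched back. The later hunks in fiq.c, patch.c, and traps.c all follow the same shape; a sketch of the pattern (the function name is hypothetical):

	static void write_ro_kernel_word(unsigned long *ro_addr, unsigned long val)
	{
		pax_open_kernel();	/* DOMAIN_KERNEL -> DOMAIN_KERNEXEC, preempt off */
		*ro_addr = val;		/* write that would otherwise fault */
		pax_close_kernel();	/* back to the client type, preempt on */
	}
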
2099diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
2100index 5324c11..bcae5f0 100644
2101--- a/arch/arm/include/asm/proc-fns.h
2102+++ b/arch/arm/include/asm/proc-fns.h
2103@@ -75,7 +75,7 @@ extern struct processor {
2104 unsigned int suspend_size;
2105 void (*do_suspend)(void *);
2106 void (*do_resume)(void *);
2107-} processor;
2108+} __do_const processor;
2109
2110 #ifndef MULTI_CPU
2111 extern void cpu_proc_init(void);
2112diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2113index c4ae171..ea0c0c2 100644
2114--- a/arch/arm/include/asm/psci.h
2115+++ b/arch/arm/include/asm/psci.h
2116@@ -29,7 +29,7 @@ struct psci_operations {
2117 int (*cpu_off)(struct psci_power_state state);
2118 int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
2119 int (*migrate)(unsigned long cpuid);
2120-};
2121+} __no_const;
2122
2123 extern struct psci_operations psci_ops;
2124 extern struct smp_operations psci_smp_ops;
2125diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2126index 22a3b9b..7f214ee 100644
2127--- a/arch/arm/include/asm/smp.h
2128+++ b/arch/arm/include/asm/smp.h
2129@@ -112,7 +112,7 @@ struct smp_operations {
2130 int (*cpu_disable)(unsigned int cpu);
2131 #endif
2132 #endif
2133-};
2134+} __no_const;
2135
2136 /*
2137 * set platform specific SMP operations
2138diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2139index 71a06b2..8bb9ae1 100644
2140--- a/arch/arm/include/asm/thread_info.h
2141+++ b/arch/arm/include/asm/thread_info.h
2142@@ -88,9 +88,9 @@ struct thread_info {
2143 .flags = 0, \
2144 .preempt_count = INIT_PREEMPT_COUNT, \
2145 .addr_limit = KERNEL_DS, \
2146- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2147- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2148- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2149+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2150+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2151+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2152 .restart_block = { \
2153 .fn = do_no_restart_syscall, \
2154 }, \
2155@@ -157,7 +157,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2156 #define TIF_SYSCALL_AUDIT 9
2157 #define TIF_SYSCALL_TRACEPOINT 10
2158 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2159-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2160+/* within 8 bits of TIF_SYSCALL_TRACE
2161+ * to meet flexible second operand requirements
2162+ */
2163+#define TIF_GRSEC_SETXID 12
2164+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2165 #define TIF_USING_IWMMXT 17
2166 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2167 #define TIF_RESTORE_SIGMASK 20
2168@@ -170,10 +174,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2169 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2170 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2171 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2172+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2173
2174 /* Checks for any syscall work in entry-common.S */
2175 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2176- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2177+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2178
2179 /*
2180 * Change these and you break ASM code in entry-common.S
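
The comment on TIF_GRSEC_SETXID in the hunk above refers to ARM's immediate encoding: a data-processing operand must be an 8-bit value rotated right by an even amount, so _TIF_SYSCALL_WORK can only be tested with a single tst if all of its bits fit one such window. With TIF_SYSCALL_TRACE at bit 8 and the new flag at bit 12, the mask still qualifies; a quick standalone check:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		/* bits 8..12: TRACE, AUDIT, TRACEPOINT, SECCOMP, GRSEC_SETXID */
		uint32_t work = (1u << 8) | (1u << 9) | (1u << 10) | (1u << 11) | (1u << 12);
		/* 0x1f00 == 0x1f << 8: an 8-bit constant rotated into place,
		 * hence a valid ARM flexible second operand */
		assert(work == 0x1f00u);
		return 0;
	}
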
2181diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2182index 72abdc5..9eba222 100644
2183--- a/arch/arm/include/asm/uaccess.h
2184+++ b/arch/arm/include/asm/uaccess.h
2185@@ -18,6 +18,7 @@
2186 #include <asm/domain.h>
2187 #include <asm/unified.h>
2188 #include <asm/compiler.h>
2189+#include <asm/pgtable.h>
2190
2191 #if __LINUX_ARM_ARCH__ < 6
2192 #include <asm-generic/uaccess-unaligned.h>
2193@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2194 static inline void set_fs(mm_segment_t fs)
2195 {
2196 current_thread_info()->addr_limit = fs;
2197- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2198+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2199 }
2200
2201 #define segment_eq(a,b) ((a) == (b))
2202
2203+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2204+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2205+
2206+static inline void pax_open_userland(void)
2207+{
2208+
2209+#ifdef CONFIG_PAX_MEMORY_UDEREF
2210+ if (segment_eq(get_fs(), USER_DS)) {
2211+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2212+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2213+ }
2214+#endif
2215+
2216+}
2217+
2218+static inline void pax_close_userland(void)
2219+{
2220+
2221+#ifdef CONFIG_PAX_MEMORY_UDEREF
2222+ if (segment_eq(get_fs(), USER_DS)) {
2223+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2224+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2225+ }
2226+#endif
2227+
2228+}
2229+
2230 #define __addr_ok(addr) ({ \
2231 unsigned long flag; \
2232 __asm__("cmp %2, %0; movlo %0, #0" \
2233@@ -150,8 +178,12 @@ extern int __get_user_4(void *);
2234
2235 #define get_user(x,p) \
2236 ({ \
2237+ int __e; \
2238 might_fault(); \
2239- __get_user_check(x,p); \
2240+ pax_open_userland(); \
2241+ __e = __get_user_check(x,p); \
2242+ pax_close_userland(); \
2243+ __e; \
2244 })
2245
2246 extern int __put_user_1(void *, unsigned int);
2247@@ -195,8 +227,12 @@ extern int __put_user_8(void *, unsigned long long);
2248
2249 #define put_user(x,p) \
2250 ({ \
2251+ int __e; \
2252 might_fault(); \
2253- __put_user_check(x,p); \
2254+ pax_open_userland(); \
2255+ __e = __put_user_check(x,p); \
2256+ pax_close_userland(); \
2257+ __e; \
2258 })
2259
2260 #else /* CONFIG_MMU */
2261@@ -237,13 +273,17 @@ static inline void set_fs(mm_segment_t fs)
2262 #define __get_user(x,ptr) \
2263 ({ \
2264 long __gu_err = 0; \
2265+ pax_open_userland(); \
2266 __get_user_err((x),(ptr),__gu_err); \
2267+ pax_close_userland(); \
2268 __gu_err; \
2269 })
2270
2271 #define __get_user_error(x,ptr,err) \
2272 ({ \
2273+ pax_open_userland(); \
2274 __get_user_err((x),(ptr),err); \
2275+ pax_close_userland(); \
2276 (void) 0; \
2277 })
2278
2279@@ -319,13 +359,17 @@ do { \
2280 #define __put_user(x,ptr) \
2281 ({ \
2282 long __pu_err = 0; \
2283+ pax_open_userland(); \
2284 __put_user_err((x),(ptr),__pu_err); \
2285+ pax_close_userland(); \
2286 __pu_err; \
2287 })
2288
2289 #define __put_user_error(x,ptr,err) \
2290 ({ \
2291+ pax_open_userland(); \
2292 __put_user_err((x),(ptr),err); \
2293+ pax_close_userland(); \
2294 (void) 0; \
2295 })
2296
2297@@ -425,11 +469,44 @@ do { \
2298
2299
2300 #ifdef CONFIG_MMU
2301-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2302-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2303+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2304+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2305+
2306+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2307+{
2308+ unsigned long ret;
2309+
2310+ check_object_size(to, n, false);
2311+ pax_open_userland();
2312+ ret = ___copy_from_user(to, from, n);
2313+ pax_close_userland();
2314+ return ret;
2315+}
2316+
2317+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2318+{
2319+ unsigned long ret;
2320+
2321+ check_object_size(from, n, true);
2322+ pax_open_userland();
2323+ ret = ___copy_to_user(to, from, n);
2324+ pax_close_userland();
2325+ return ret;
2326+}
2327+
2328 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2329-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2330+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2331 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2332+
2333+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2334+{
2335+ unsigned long ret;
2336+ pax_open_userland();
2337+ ret = ___clear_user(addr, n);
2338+ pax_close_userland();
2339+ return ret;
2340+}
2341+
2342 #else
2343 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2344 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2345@@ -438,6 +515,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2346
2347 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2348 {
2349+ if ((long)n < 0)
2350+ return n;
2351+
2352 if (access_ok(VERIFY_READ, from, n))
2353 n = __copy_from_user(to, from, n);
2354 else /* security hole - plug it */
2355@@ -447,6 +527,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2356
2357 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2358 {
2359+ if ((long)n < 0)
2360+ return n;
2361+
2362 if (access_ok(VERIFY_WRITE, to, n))
2363 n = __copy_to_user(to, from, n);
2364 return n;
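
The (long)n < 0 guard added to copy_from_user()/copy_to_user() rejects sizes with the sign bit set: such a length can only arise from an integer underflow or overflow in the caller, and without the check it would reach the copy routine as a multi-gigabyte request. Returning n, i.e. all bytes "not copied", is the conventional failure mode. A standalone illustration of the bug class it catches:

	#include <stdio.h>

	int main(void)
	{
		unsigned long len = 16, hdr = 32;
		unsigned long n = len - hdr;	/* underflows to a huge value */

		if ((long)n < 0)
			printf("refusing bogus %lu-byte copy\n", n);
		return 0;
	}
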
2365diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2366index 5af0ed1..cea83883 100644
2367--- a/arch/arm/include/uapi/asm/ptrace.h
2368+++ b/arch/arm/include/uapi/asm/ptrace.h
2369@@ -92,7 +92,7 @@
2370 * ARMv7 groups of PSR bits
2371 */
2372 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2373-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2374+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2375 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2376 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2377
2378diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2379index 1f031dd..d9b5e4a 100644
2380--- a/arch/arm/kernel/armksyms.c
2381+++ b/arch/arm/kernel/armksyms.c
2382@@ -53,7 +53,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2383
2384 /* networking */
2385 EXPORT_SYMBOL(csum_partial);
2386-EXPORT_SYMBOL(csum_partial_copy_from_user);
2387+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2388 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2389 EXPORT_SYMBOL(__csum_ipv6_magic);
2390
2391@@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero);
2392 #ifdef CONFIG_MMU
2393 EXPORT_SYMBOL(copy_page);
2394
2395-EXPORT_SYMBOL(__copy_from_user);
2396-EXPORT_SYMBOL(__copy_to_user);
2397-EXPORT_SYMBOL(__clear_user);
2398+EXPORT_SYMBOL(___copy_from_user);
2399+EXPORT_SYMBOL(___copy_to_user);
2400+EXPORT_SYMBOL(___clear_user);
2401
2402 EXPORT_SYMBOL(__get_user_1);
2403 EXPORT_SYMBOL(__get_user_2);
2404diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2405index b3fb8c9..59cfab2 100644
2406--- a/arch/arm/kernel/entry-armv.S
2407+++ b/arch/arm/kernel/entry-armv.S
2408@@ -47,6 +47,87 @@
2409 9997:
2410 .endm
2411
2412+ .macro pax_enter_kernel
2413+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2414+ @ make aligned space for saved DACR
2415+ sub sp, sp, #8
2416+ @ save regs
2417+ stmdb sp!, {r1, r2}
2418+ @ read DACR from cpu_domain into r1
2419+ mov r2, sp
2420+ @ assume 8K pages, since we have to split the immediate in two
2421+ bic r2, r2, #(0x1fc0)
2422+ bic r2, r2, #(0x3f)
2423+ ldr r1, [r2, #TI_CPU_DOMAIN]
2424+ @ store old DACR on stack
2425+ str r1, [sp, #8]
2426+#ifdef CONFIG_PAX_KERNEXEC
2427+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2428+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2429+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2430+#endif
2431+#ifdef CONFIG_PAX_MEMORY_UDEREF
2432+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2433+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2434+#endif
2435+ @ write r1 to current_thread_info()->cpu_domain
2436+ str r1, [r2, #TI_CPU_DOMAIN]
2437+ @ write r1 to DACR
2438+ mcr p15, 0, r1, c3, c0, 0
2439+ @ instruction sync
2440+ instr_sync
2441+ @ restore regs
2442+ ldmia sp!, {r1, r2}
2443+#endif
2444+ .endm
2445+
2446+ .macro pax_open_userland
2447+#ifdef CONFIG_PAX_MEMORY_UDEREF
2448+ @ save regs
2449+ stmdb sp!, {r0, r1}
2450+ @ read DACR from cpu_domain into r1
2451+ mov r0, sp
2452+ @ assume 8K pages, since we have to split the immediate in two
2453+ bic r0, r0, #(0x1fc0)
2454+ bic r0, r0, #(0x3f)
2455+ ldr r1, [r0, #TI_CPU_DOMAIN]
2456+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2457+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2458+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2459+ @ write r1 to current_thread_info()->cpu_domain
2460+ str r1, [r0, #TI_CPU_DOMAIN]
2461+ @ write r1 to DACR
2462+ mcr p15, 0, r1, c3, c0, 0
2463+ @ instruction sync
2464+ instr_sync
2465+ @ restore regs
2466+ ldmia sp!, {r0, r1}
2467+#endif
2468+ .endm
2469+
2470+ .macro pax_close_userland
2471+#ifdef CONFIG_PAX_MEMORY_UDEREF
2472+ @ save regs
2473+ stmdb sp!, {r0, r1}
2474+ @ read DACR from cpu_domain into r1
2475+ mov r0, sp
2476+ @ assume 8K pages, since we have to split the immediate in two
2477+ bic r0, r0, #(0x1fc0)
2478+ bic r0, r0, #(0x3f)
2479+ ldr r1, [r0, #TI_CPU_DOMAIN]
2480+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2481+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2482+ @ write r1 to current_thread_info()->cpu_domain
2483+ str r1, [r0, #TI_CPU_DOMAIN]
2484+ @ write r1 to DACR
2485+ mcr p15, 0, r1, c3, c0, 0
2486+ @ instruction sync
2487+ instr_sync
2488+ @ restore regs
2489+ ldmia sp!, {r0, r1}
2490+#endif
2491+ .endm
2492+
2493 .macro pabt_helper
2494 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2495 #ifdef MULTI_PABORT
2496@@ -89,11 +170,15 @@
2497 * Invalid mode handlers
2498 */
2499 .macro inv_entry, reason
2500+
2501+ pax_enter_kernel
2502+
2503 sub sp, sp, #S_FRAME_SIZE
2504 ARM( stmib sp, {r1 - lr} )
2505 THUMB( stmia sp, {r0 - r12} )
2506 THUMB( str sp, [sp, #S_SP] )
2507 THUMB( str lr, [sp, #S_LR] )
2508+
2509 mov r1, #\reason
2510 .endm
2511
2512@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2513 .macro svc_entry, stack_hole=0
2514 UNWIND(.fnstart )
2515 UNWIND(.save {r0 - pc} )
2516+
2517+ pax_enter_kernel
2518+
2519 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2520+
2521 #ifdef CONFIG_THUMB2_KERNEL
2522 SPFIX( str r0, [sp] ) @ temporarily saved
2523 SPFIX( mov r0, sp )
2524@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2525 ldmia r0, {r3 - r5}
2526 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2527 mov r6, #-1 @ "" "" "" ""
2528+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2529+ @ offset sp by 8 as done in pax_enter_kernel
2530+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2531+#else
2532 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2533+#endif
2534 SPFIX( addeq r2, r2, #4 )
2535 str r3, [sp, #-4]! @ save the "real" r0 copied
2536 @ from the exception stack
2537@@ -317,6 +411,9 @@ ENDPROC(__pabt_svc)
2538 .macro usr_entry
2539 UNWIND(.fnstart )
2540 UNWIND(.cantunwind ) @ don't unwind the user space
2541+
2542+ pax_enter_kernel_user
2543+
2544 sub sp, sp, #S_FRAME_SIZE
2545 ARM( stmib sp, {r1 - r12} )
2546 THUMB( stmia sp, {r0 - r12} )
2547@@ -416,7 +513,9 @@ __und_usr:
2548 tst r3, #PSR_T_BIT @ Thumb mode?
2549 bne __und_usr_thumb
2550 sub r4, r2, #4 @ ARM instr at LR - 4
2551+ pax_open_userland
2552 1: ldrt r0, [r4]
2553+ pax_close_userland
2554 ARM_BE8(rev r0, r0) @ little endian instruction
2555
2556 @ r0 = 32-bit ARM instruction which caused the exception
2557@@ -450,10 +549,14 @@ __und_usr_thumb:
2558 */
2559 .arch armv6t2
2560 #endif
2561+ pax_open_userland
2562 2: ldrht r5, [r4]
2563+ pax_close_userland
2564 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2565 blo __und_usr_fault_16 @ 16bit undefined instruction
2566+ pax_open_userland
2567 3: ldrht r0, [r2]
2568+ pax_close_userland
2569 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2570 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2571 orr r0, r0, r5, lsl #16
2572@@ -482,7 +585,8 @@ ENDPROC(__und_usr)
2573 */
2574 .pushsection .fixup, "ax"
2575 .align 2
2576-4: mov pc, r9
2577+4: pax_close_userland
2578+ mov pc, r9
2579 .popsection
2580 .pushsection __ex_table,"a"
2581 .long 1b, 4b
2582@@ -692,7 +796,7 @@ ENTRY(__switch_to)
2583 THUMB( str lr, [ip], #4 )
2584 ldr r4, [r2, #TI_TP_VALUE]
2585 ldr r5, [r2, #TI_TP_VALUE + 4]
2586-#ifdef CONFIG_CPU_USE_DOMAINS
2587+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2588 ldr r6, [r2, #TI_CPU_DOMAIN]
2589 #endif
2590 switch_tls r1, r4, r5, r3, r7
2591@@ -701,7 +805,7 @@ ENTRY(__switch_to)
2592 ldr r8, =__stack_chk_guard
2593 ldr r7, [r7, #TSK_STACK_CANARY]
2594 #endif
2595-#ifdef CONFIG_CPU_USE_DOMAINS
2596+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2597 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2598 #endif
2599 mov r5, r0
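
The repeated bic pair in these macros locates the current thread_info from sp: THREAD_SIZE on ARM is 8 KiB, so clearing the low 13 bits of the stack pointer yields the thread_info base, and because an ARM immediate is limited to 8 rotated bits the 0x1fff mask has to be split into 0x1fc0 and 0x3f. The equivalence, checked in plain C:

	#include <assert.h>

	int main(void)
	{
		unsigned long sp = 0xc1234567UL;	/* any kernel stack address */
		unsigned long a = sp & ~0x1fffUL;	/* single 13-bit mask */
		unsigned long b = (sp & ~0x1fc0UL) & ~0x3fUL;	/* the two-bic form */

		assert((0x1fc0UL | 0x3fUL) == 0x1fffUL);
		assert(a == b);
		return 0;
	}
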
2600diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2601index a2dcafd..1048b5a 100644
2602--- a/arch/arm/kernel/entry-common.S
2603+++ b/arch/arm/kernel/entry-common.S
2604@@ -10,18 +10,46 @@
2605
2606 #include <asm/unistd.h>
2607 #include <asm/ftrace.h>
2608+#include <asm/domain.h>
2609 #include <asm/unwind.h>
2610
2611+#include "entry-header.S"
2612+
2613 #ifdef CONFIG_NEED_RET_TO_USER
2614 #include <mach/entry-macro.S>
2615 #else
2616 .macro arch_ret_to_user, tmp1, tmp2
2617+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2618+ @ save regs
2619+ stmdb sp!, {r1, r2}
2620+ @ read DACR from cpu_domain into r1
2621+ mov r2, sp
2622+ @ assume 8K pages, since we have to split the immediate in two
2623+ bic r2, r2, #(0x1fc0)
2624+ bic r2, r2, #(0x3f)
2625+ ldr r1, [r2, #TI_CPU_DOMAIN]
2626+#ifdef CONFIG_PAX_KERNEXEC
2627+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2628+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2629+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2630+#endif
2631+#ifdef CONFIG_PAX_MEMORY_UDEREF
2632+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2633+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2634+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2635+#endif
2636+ @ write r1 to current_thread_info()->cpu_domain
2637+ str r1, [r2, #TI_CPU_DOMAIN]
2638+ @ write r1 to DACR
2639+ mcr p15, 0, r1, c3, c0, 0
2640+ @ instruction sync
2641+ instr_sync
2642+ @ restore regs
2643+ ldmia sp!, {r1, r2}
2644+#endif
2645 .endm
2646 #endif
2647
2648-#include "entry-header.S"
2649-
2650-
2651 .align 5
2652 /*
2653 * This is the fast syscall return path. We do as little as
2654@@ -411,6 +439,12 @@ ENTRY(vector_swi)
2655 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2656 #endif
2657
2658+ /*
2659+	 * do this here to avoid the performance hit of wrapping the code above,
2660+	 * which directly dereferences userland to parse the SWI instruction

2661+ */
2662+ pax_enter_kernel_user
2663+
2664 adr tbl, sys_call_table @ load syscall table pointer
2665
2666 #if defined(CONFIG_OABI_COMPAT)
2667diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2668index 39f89fb..d612bd9 100644
2669--- a/arch/arm/kernel/entry-header.S
2670+++ b/arch/arm/kernel/entry-header.S
2671@@ -184,6 +184,60 @@
2672 msr cpsr_c, \rtemp @ switch back to the SVC mode
2673 .endm
2674
2675+ .macro pax_enter_kernel_user
2676+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2677+ @ save regs
2678+ stmdb sp!, {r0, r1}
2679+ @ read DACR from cpu_domain into r1
2680+ mov r0, sp
2681+ @ assume 8K pages, since we have to split the immediate in two
2682+ bic r0, r0, #(0x1fc0)
2683+ bic r0, r0, #(0x3f)
2684+ ldr r1, [r0, #TI_CPU_DOMAIN]
2685+#ifdef CONFIG_PAX_MEMORY_UDEREF
2686+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2687+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2688+#endif
2689+#ifdef CONFIG_PAX_KERNEXEC
2690+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2691+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2692+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2693+#endif
2694+ @ write r1 to current_thread_info()->cpu_domain
2695+ str r1, [r0, #TI_CPU_DOMAIN]
2696+ @ write r1 to DACR
2697+ mcr p15, 0, r1, c3, c0, 0
2698+ @ instruction sync
2699+ instr_sync
2700+ @ restore regs
2701+ ldmia sp!, {r0, r1}
2702+#endif
2703+ .endm
2704+
2705+ .macro pax_exit_kernel
2706+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2707+ @ save regs
2708+ stmdb sp!, {r0, r1}
2709+ @ read old DACR from stack into r1
2710+ ldr r1, [sp, #(8 + S_SP)]
2711+ sub r1, r1, #8
2712+ ldr r1, [r1]
2713+
2714+ @ write r1 to current_thread_info()->cpu_domain
2715+ mov r0, sp
2716+ @ assume 8K pages, since we have to split the immediate in two
2717+ bic r0, r0, #(0x1fc0)
2718+ bic r0, r0, #(0x3f)
2719+ str r1, [r0, #TI_CPU_DOMAIN]
2720+ @ write r1 to DACR
2721+ mcr p15, 0, r1, c3, c0, 0
2722+ @ instruction sync
2723+ instr_sync
2724+ @ restore regs
2725+ ldmia sp!, {r0, r1}
2726+#endif
2727+ .endm
2728+
2729 #ifndef CONFIG_THUMB2_KERNEL
2730 .macro svc_exit, rpsr, irq = 0
2731 .if \irq != 0
2732@@ -203,6 +257,9 @@
2733 blne trace_hardirqs_off
2734 #endif
2735 .endif
2736+
2737+ pax_exit_kernel
2738+
2739 msr spsr_cxsf, \rpsr
2740 #if defined(CONFIG_CPU_V6)
2741 ldr r0, [sp]
2742@@ -266,6 +323,9 @@
2743 blne trace_hardirqs_off
2744 #endif
2745 .endif
2746+
2747+ pax_exit_kernel
2748+
2749 ldr lr, [sp, #S_SP] @ top of the stack
2750 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2751 clrex @ clear the exclusive monitor
2752diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2753index 918875d..cd5fa27 100644
2754--- a/arch/arm/kernel/fiq.c
2755+++ b/arch/arm/kernel/fiq.c
2756@@ -87,7 +87,10 @@ void set_fiq_handler(void *start, unsigned int length)
2757 void *base = vectors_page;
2758 unsigned offset = FIQ_OFFSET;
2759
2760+ pax_open_kernel();
2761 memcpy(base + offset, start, length);
2762+ pax_close_kernel();
2763+
2764 if (!cache_is_vipt_nonaliasing())
2765 flush_icache_range((unsigned long)base + offset, offset +
2766 length);
2767diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2768index 32f317e..710ae07 100644
2769--- a/arch/arm/kernel/head.S
2770+++ b/arch/arm/kernel/head.S
2771@@ -52,7 +52,9 @@
2772 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
2773
2774 .macro pgtbl, rd, phys
2775- add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
2776+ mov \rd, #TEXT_OFFSET
2777+ sub \rd, #PG_DIR_SIZE
2778+ add \rd, \rd, \phys
2779 .endm
2780
2781 /*
2782@@ -436,7 +438,7 @@ __enable_mmu:
2783 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2784 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2785 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2786- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2787+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2788 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2789 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2790 #endif
2791diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2792index 45e4781..8eac93d 100644
2793--- a/arch/arm/kernel/module.c
2794+++ b/arch/arm/kernel/module.c
2795@@ -38,12 +38,39 @@
2796 #endif
2797
2798 #ifdef CONFIG_MMU
2799-void *module_alloc(unsigned long size)
2800+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2801 {
2802+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2803+ return NULL;
2804 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2805- GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
2806+ GFP_KERNEL, prot, NUMA_NO_NODE,
2807 __builtin_return_address(0));
2808 }
2809+
2810+void *module_alloc(unsigned long size)
2811+{
2812+
2813+#ifdef CONFIG_PAX_KERNEXEC
2814+ return __module_alloc(size, PAGE_KERNEL);
2815+#else
2816+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2817+#endif
2818+
2819+}
2820+
2821+#ifdef CONFIG_PAX_KERNEXEC
2822+void module_free_exec(struct module *mod, void *module_region)
2823+{
2824+ module_free(mod, module_region);
2825+}
2826+EXPORT_SYMBOL(module_free_exec);
2827+
2828+void *module_alloc_exec(unsigned long size)
2829+{
2830+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2831+}
2832+EXPORT_SYMBOL(module_alloc_exec);
2833+#endif
2834 #endif
2835
2836 int
2837diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2838index 07314af..c46655c 100644
2839--- a/arch/arm/kernel/patch.c
2840+++ b/arch/arm/kernel/patch.c
2841@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2842 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
2843 int size;
2844
2845+ pax_open_kernel();
2846 if (thumb2 && __opcode_is_thumb16(insn)) {
2847 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
2848 size = sizeof(u16);
2849@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2850 *(u32 *)addr = insn;
2851 size = sizeof(u32);
2852 }
2853+ pax_close_kernel();
2854
2855 flush_icache_range((uintptr_t)(addr),
2856 (uintptr_t)(addr) + size);
2857diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2858index 92f7b15..7048500 100644
2859--- a/arch/arm/kernel/process.c
2860+++ b/arch/arm/kernel/process.c
2861@@ -217,6 +217,7 @@ void machine_power_off(void)
2862
2863 if (pm_power_off)
2864 pm_power_off();
2865+ BUG();
2866 }
2867
2868 /*
2869@@ -230,7 +231,7 @@ void machine_power_off(void)
2870 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2871 * to use. Implementing such co-ordination would be essentially impossible.
2872 */
2873-void machine_restart(char *cmd)
2874+__noreturn void machine_restart(char *cmd)
2875 {
2876 local_irq_disable();
2877 smp_send_stop();
2878@@ -253,8 +254,8 @@ void __show_regs(struct pt_regs *regs)
2879
2880 show_regs_print_info(KERN_DEFAULT);
2881
2882- print_symbol("PC is at %s\n", instruction_pointer(regs));
2883- print_symbol("LR is at %s\n", regs->ARM_lr);
2884+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2885+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
2886 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2887 "sp : %08lx ip : %08lx fp : %08lx\n",
2888 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2889@@ -425,12 +426,6 @@ unsigned long get_wchan(struct task_struct *p)
2890 return 0;
2891 }
2892
2893-unsigned long arch_randomize_brk(struct mm_struct *mm)
2894-{
2895- unsigned long range_end = mm->brk + 0x02000000;
2896- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2897-}
2898-
2899 #ifdef CONFIG_MMU
2900 #ifdef CONFIG_KUSER_HELPERS
2901 /*
2902@@ -446,7 +441,7 @@ static struct vm_area_struct gate_vma = {
2903
2904 static int __init gate_vma_init(void)
2905 {
2906- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2907+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2908 return 0;
2909 }
2910 arch_initcall(gate_vma_init);
2911@@ -472,41 +467,16 @@ int in_gate_area_no_mm(unsigned long addr)
2912
2913 const char *arch_vma_name(struct vm_area_struct *vma)
2914 {
2915- return is_gate_vma(vma) ? "[vectors]" :
2916- (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
2917- "[sigpage]" : NULL;
2918+ return is_gate_vma(vma) ? "[vectors]" : NULL;
2919 }
2920
2921-static struct page *signal_page;
2922-extern struct page *get_signal_page(void);
2923-
2924 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2925 {
2926 struct mm_struct *mm = current->mm;
2927- unsigned long addr;
2928- int ret;
2929-
2930- if (!signal_page)
2931- signal_page = get_signal_page();
2932- if (!signal_page)
2933- return -ENOMEM;
2934
2935 down_write(&mm->mmap_sem);
2936- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
2937- if (IS_ERR_VALUE(addr)) {
2938- ret = addr;
2939- goto up_fail;
2940- }
2941-
2942- ret = install_special_mapping(mm, addr, PAGE_SIZE,
2943- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
2944- &signal_page);
2945-
2946- if (ret == 0)
2947- mm->context.sigpage = addr;
2948-
2949- up_fail:
2950+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
2951 up_write(&mm->mmap_sem);
2952- return ret;
2953+ return 0;
2954 }
2955 #endif
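
The replacement arch_setup_additional_pages() no longer installs a signal page at all; it only records a randomized, word-aligned sigpage value. As pure arithmetic, the expression yields a 4-byte-aligned address in [PAGE_OFFSET, PAGE_OFFSET + 0x3FFEFFDC]; a standalone range check (assuming the usual ARM PAGE_OFFSET of 0xC0000000):

	#include <assert.h>
	#include <stdint.h>

	#define PAGE_OFFSET 0xC0000000UL

	int main(void)
	{
		for (uint32_t r = 0; r < 0x80000000u; r += 0x10001u) {
			uint32_t addr = (PAGE_OFFSET + (r % 0x3FFEFFE0UL)) & 0xFFFFFFFCUL;
			assert(addr >= PAGE_OFFSET);
			assert((addr & 3) == 0);
		}
		return 0;
	}
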
2956diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2957index 4693188..4596c5e 100644
2958--- a/arch/arm/kernel/psci.c
2959+++ b/arch/arm/kernel/psci.c
2960@@ -24,7 +24,7 @@
2961 #include <asm/opcodes-virt.h>
2962 #include <asm/psci.h>
2963
2964-struct psci_operations psci_ops;
2965+struct psci_operations psci_ops __read_only;
2966
2967 static int (*invoke_psci_fn)(u32, u32, u32, u32);
2968
2969diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
2970index 0dd3b79..e018f64 100644
2971--- a/arch/arm/kernel/ptrace.c
2972+++ b/arch/arm/kernel/ptrace.c
2973@@ -929,10 +929,19 @@ static int tracehook_report_syscall(struct pt_regs *regs,
2974 return current_thread_info()->syscall;
2975 }
2976
2977+#ifdef CONFIG_GRKERNSEC_SETXID
2978+extern void gr_delayed_cred_worker(void);
2979+#endif
2980+
2981 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
2982 {
2983 current_thread_info()->syscall = scno;
2984
2985+#ifdef CONFIG_GRKERNSEC_SETXID
2986+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
2987+ gr_delayed_cred_worker();
2988+#endif
2989+
2990 /* Do the secure computing check first; failures should be fast. */
2991 if (secure_computing(scno) == -1)
2992 return -1;
2993diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
2994index 987a7f5..d9d6071 100644
2995--- a/arch/arm/kernel/setup.c
2996+++ b/arch/arm/kernel/setup.c
2997@@ -100,21 +100,23 @@ EXPORT_SYMBOL(system_serial_high);
2998 unsigned int elf_hwcap __read_mostly;
2999 EXPORT_SYMBOL(elf_hwcap);
3000
3001+pteval_t __supported_pte_mask __read_only;
3002+pmdval_t __supported_pmd_mask __read_only;
3003
3004 #ifdef MULTI_CPU
3005-struct processor processor __read_mostly;
3006+struct processor processor;
3007 #endif
3008 #ifdef MULTI_TLB
3009-struct cpu_tlb_fns cpu_tlb __read_mostly;
3010+struct cpu_tlb_fns cpu_tlb __read_only;
3011 #endif
3012 #ifdef MULTI_USER
3013-struct cpu_user_fns cpu_user __read_mostly;
3014+struct cpu_user_fns cpu_user __read_only;
3015 #endif
3016 #ifdef MULTI_CACHE
3017-struct cpu_cache_fns cpu_cache __read_mostly;
3018+struct cpu_cache_fns cpu_cache __read_only;
3019 #endif
3020 #ifdef CONFIG_OUTER_CACHE
3021-struct outer_cache_fns outer_cache __read_mostly;
3022+struct outer_cache_fns outer_cache __read_only;
3023 EXPORT_SYMBOL(outer_cache);
3024 #endif
3025
3026@@ -247,9 +249,13 @@ static int __get_cpu_architecture(void)
3027 asm("mrc p15, 0, %0, c0, c1, 4"
3028 : "=r" (mmfr0));
3029 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3030- (mmfr0 & 0x000000f0) >= 0x00000030)
3031+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3032 cpu_arch = CPU_ARCH_ARMv7;
3033- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3034+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3035+ __supported_pte_mask |= L_PTE_PXN;
3036+ __supported_pmd_mask |= PMD_PXNTABLE;
3037+ }
3038+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3039 (mmfr0 & 0x000000f0) == 0x00000020)
3040 cpu_arch = CPU_ARCH_ARMv6;
3041 else
3042@@ -573,7 +579,7 @@ static void __init setup_processor(void)
3043 __cpu_architecture = __get_cpu_architecture();
3044
3045 #ifdef MULTI_CPU
3046- processor = *list->proc;
3047+ memcpy((void *)&processor, list->proc, sizeof processor);
3048 #endif
3049 #ifdef MULTI_TLB
3050 cpu_tlb = *list->tlb;
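
The extended __get_cpu_architecture() logic keys PXN support off the VMSA field of ID_MMFR0 (bits [3:0]): on ARMv7 parts reporting 4 or 5 there, the PXN page and section bits are added to the supported masks that pte_modify() and pmd_populate() mix in. The test, extracted as a small helper for clarity (the helper itself is not in the patch):

	/* ID_MMFR0[3:0] is the VMSA support field; values 4 and 5 are taken
	 * here, as in the hunk above, to denote the PXN-capable variants. */
	static int vmsa_has_pxn(unsigned int mmfr0)
	{
		unsigned int vmsa = mmfr0 & 0x0000000f;
		return vmsa == 0x00000004 || vmsa == 0x00000005;
	}
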
3051diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3052index 04d6388..5115238 100644
3053--- a/arch/arm/kernel/signal.c
3054+++ b/arch/arm/kernel/signal.c
3055@@ -23,8 +23,6 @@
3056
3057 extern const unsigned long sigreturn_codes[7];
3058
3059-static unsigned long signal_return_offset;
3060-
3061 #ifdef CONFIG_CRUNCH
3062 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3063 {
3064@@ -395,8 +393,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3065 * except when the MPU has protected the vectors
3066 * page from PL0
3067 */
3068- retcode = mm->context.sigpage + signal_return_offset +
3069- (idx << 2) + thumb;
3070+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3071 } else
3072 #endif
3073 {
3074@@ -600,33 +597,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3075 } while (thread_flags & _TIF_WORK_MASK);
3076 return 0;
3077 }
3078-
3079-struct page *get_signal_page(void)
3080-{
3081- unsigned long ptr;
3082- unsigned offset;
3083- struct page *page;
3084- void *addr;
3085-
3086- page = alloc_pages(GFP_KERNEL, 0);
3087-
3088- if (!page)
3089- return NULL;
3090-
3091- addr = page_address(page);
3092-
3093- /* Give the signal return code some randomness */
3094- offset = 0x200 + (get_random_int() & 0x7fc);
3095- signal_return_offset = offset;
3096-
3097- /*
3098- * Copy signal return handlers into the vector page, and
3099- * set sigreturn to be a pointer to these.
3100- */
3101- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3102-
3103- ptr = (unsigned long)addr + offset;
3104- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3105-
3106- return page;
3107-}
3108diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3109index dc894ab..f929a0d 100644
3110--- a/arch/arm/kernel/smp.c
3111+++ b/arch/arm/kernel/smp.c
3112@@ -73,7 +73,7 @@ enum ipi_msg_type {
3113
3114 static DECLARE_COMPLETION(cpu_running);
3115
3116-static struct smp_operations smp_ops;
3117+static struct smp_operations smp_ops __read_only;
3118
3119 void __init smp_set_ops(struct smp_operations *ops)
3120 {
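
psci_ops, smp_ops, cpu_tlb, cpu_user, cpu_cache, outer_cache, and arm_delay_ops are all moved from __read_mostly to __read_only in this patch: each is written exactly once during early boot, before PaX write-protects the section, and is treated as immutable afterwards. On a stock modern kernel the closest equivalent annotation would be (a sketch, not part of this patch; __ro_after_init postdates 3.13):

	/* written once in smp_set_ops() at boot, read-only thereafter */
	static struct smp_operations smp_ops __ro_after_init;
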
3121diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3122index 4636d56..ce4ec3d 100644
3123--- a/arch/arm/kernel/traps.c
3124+++ b/arch/arm/kernel/traps.c
3125@@ -62,7 +62,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3126 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3127 {
3128 #ifdef CONFIG_KALLSYMS
3129- printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3130+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3131 #else
3132 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3133 #endif
3134@@ -264,6 +264,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3135 static int die_owner = -1;
3136 static unsigned int die_nest_count;
3137
3138+extern void gr_handle_kernel_exploit(void);
3139+
3140 static unsigned long oops_begin(void)
3141 {
3142 int cpu;
3143@@ -306,6 +308,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3144 panic("Fatal exception in interrupt");
3145 if (panic_on_oops)
3146 panic("Fatal exception");
3147+
3148+ gr_handle_kernel_exploit();
3149+
3150 if (signr)
3151 do_exit(signr);
3152 }
3153@@ -642,7 +647,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
3154 * The user helper at 0xffff0fe0 must be used instead.
3155 * (see entry-armv.S for details)
3156 */
3157+ pax_open_kernel();
3158 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
3159+ pax_close_kernel();
3160 }
3161 return 0;
3162
3163@@ -899,7 +906,11 @@ void __init early_trap_init(void *vectors_base)
3164 kuser_init(vectors_base);
3165
3166 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3167- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3168+
3169+#ifndef CONFIG_PAX_MEMORY_UDEREF
3170+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3171+#endif
3172+
3173 #else /* ifndef CONFIG_CPU_V7M */
3174 /*
3175 * on V7-M there is no need to copy the vector table to a dedicated
3176diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3177index 7bcee5c..e2f3249 100644
3178--- a/arch/arm/kernel/vmlinux.lds.S
3179+++ b/arch/arm/kernel/vmlinux.lds.S
3180@@ -8,7 +8,11 @@
3181 #include <asm/thread_info.h>
3182 #include <asm/memory.h>
3183 #include <asm/page.h>
3184-
3185+
3186+#ifdef CONFIG_PAX_KERNEXEC
3187+#include <asm/pgtable.h>
3188+#endif
3189+
3190 #define PROC_INFO \
3191 . = ALIGN(4); \
3192 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3193@@ -34,7 +38,7 @@
3194 #endif
3195
3196 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3197- defined(CONFIG_GENERIC_BUG)
3198+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3199 #define ARM_EXIT_KEEP(x) x
3200 #define ARM_EXIT_DISCARD(x)
3201 #else
3202@@ -90,6 +94,11 @@ SECTIONS
3203 _text = .;
3204 HEAD_TEXT
3205 }
3206+
3207+#ifdef CONFIG_PAX_KERNEXEC
3208+ . = ALIGN(1<<SECTION_SHIFT);
3209+#endif
3210+
3211 .text : { /* Real text segment */
3212 _stext = .; /* Text and read-only data */
3213 __exception_text_start = .;
3214@@ -112,6 +121,8 @@ SECTIONS
3215 ARM_CPU_KEEP(PROC_INFO)
3216 }
3217
3218+ _etext = .; /* End of text section */
3219+
3220 RO_DATA(PAGE_SIZE)
3221
3222 . = ALIGN(4);
3223@@ -142,7 +153,9 @@ SECTIONS
3224
3225 NOTES
3226
3227- _etext = .; /* End of text and rodata section */
3228+#ifdef CONFIG_PAX_KERNEXEC
3229+ . = ALIGN(1<<SECTION_SHIFT);
3230+#endif
3231
3232 #ifndef CONFIG_XIP_KERNEL
3233 . = ALIGN(PAGE_SIZE);
3234@@ -220,6 +233,11 @@ SECTIONS
3235 . = PAGE_OFFSET + TEXT_OFFSET;
3236 #else
3237 __init_end = .;
3238+
3239+#ifdef CONFIG_PAX_KERNEXEC
3240+ . = ALIGN(1<<SECTION_SHIFT);
3241+#endif
3242+
3243 . = ALIGN(THREAD_SIZE);
3244 __data_loc = .;
3245 #endif
3246diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3247index 2a700e0..745b980 100644
3248--- a/arch/arm/kvm/arm.c
3249+++ b/arch/arm/kvm/arm.c
3250@@ -56,7 +56,7 @@ static unsigned long hyp_default_vectors;
3251 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3252
3253 /* The VMID used in the VTTBR */
3254-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3255+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3256 static u8 kvm_next_vmid;
3257 static DEFINE_SPINLOCK(kvm_vmid_lock);
3258
3259@@ -397,7 +397,7 @@ void force_vm_exit(const cpumask_t *mask)
3260 */
3261 static bool need_new_vmid_gen(struct kvm *kvm)
3262 {
3263- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3264+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3265 }
3266
3267 /**
3268@@ -430,7 +430,7 @@ static void update_vttbr(struct kvm *kvm)
3269
3270 /* First user of a new VMID generation? */
3271 if (unlikely(kvm_next_vmid == 0)) {
3272- atomic64_inc(&kvm_vmid_gen);
3273+ atomic64_inc_unchecked(&kvm_vmid_gen);
3274 kvm_next_vmid = 1;
3275
3276 /*
3277@@ -447,7 +447,7 @@ static void update_vttbr(struct kvm *kvm)
3278 kvm_call_hyp(__kvm_flush_vm_context);
3279 }
3280
3281- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3282+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3283 kvm->arch.vmid = kvm_next_vmid;
3284 kvm_next_vmid++;
3285
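
kvm_vmid_gen becomes atomic64_unchecked_t because PAX_REFCOUNT instruments the plain atomic ops to trap on overflow; counters that are allowed to wrap, like this generation number, must opt out via the *_unchecked variants. The API shape as used above (sketch only, in kernel context):

	static atomic64_unchecked_t gen = ATOMIC64_INIT(1);

	static u64 bump_generation(void)
	{
		atomic64_inc_unchecked(&gen);		/* no overflow trap */
		return atomic64_read_unchecked(&gen);
	}
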
3286diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3287index 14a0d98..7771a7d 100644
3288--- a/arch/arm/lib/clear_user.S
3289+++ b/arch/arm/lib/clear_user.S
3290@@ -12,14 +12,14 @@
3291
3292 .text
3293
3294-/* Prototype: int __clear_user(void *addr, size_t sz)
3295+/* Prototype: int ___clear_user(void *addr, size_t sz)
3296 * Purpose : clear some user memory
3297 * Params : addr - user memory address to clear
3298 * : sz - number of bytes to clear
3299 * Returns : number of bytes NOT cleared
3300 */
3301 ENTRY(__clear_user_std)
3302-WEAK(__clear_user)
3303+WEAK(___clear_user)
3304 stmfd sp!, {r1, lr}
3305 mov r2, #0
3306 cmp r1, #4
3307@@ -44,7 +44,7 @@ WEAK(__clear_user)
3308 USER( strnebt r2, [r0])
3309 mov r0, #0
3310 ldmfd sp!, {r1, pc}
3311-ENDPROC(__clear_user)
3312+ENDPROC(___clear_user)
3313 ENDPROC(__clear_user_std)
3314
3315 .pushsection .fixup,"ax"
3316diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3317index 66a477a..bee61d3 100644
3318--- a/arch/arm/lib/copy_from_user.S
3319+++ b/arch/arm/lib/copy_from_user.S
3320@@ -16,7 +16,7 @@
3321 /*
3322 * Prototype:
3323 *
3324- * size_t __copy_from_user(void *to, const void *from, size_t n)
3325+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3326 *
3327 * Purpose:
3328 *
3329@@ -84,11 +84,11 @@
3330
3331 .text
3332
3333-ENTRY(__copy_from_user)
3334+ENTRY(___copy_from_user)
3335
3336 #include "copy_template.S"
3337
3338-ENDPROC(__copy_from_user)
3339+ENDPROC(___copy_from_user)
3340
3341 .pushsection .fixup,"ax"
3342 .align 0
3343diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3344index 6ee2f67..d1cce76 100644
3345--- a/arch/arm/lib/copy_page.S
3346+++ b/arch/arm/lib/copy_page.S
3347@@ -10,6 +10,7 @@
3348 * ASM optimised string functions
3349 */
3350 #include <linux/linkage.h>
3351+#include <linux/const.h>
3352 #include <asm/assembler.h>
3353 #include <asm/asm-offsets.h>
3354 #include <asm/cache.h>
3355diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3356index d066df6..df28194 100644
3357--- a/arch/arm/lib/copy_to_user.S
3358+++ b/arch/arm/lib/copy_to_user.S
3359@@ -16,7 +16,7 @@
3360 /*
3361 * Prototype:
3362 *
3363- * size_t __copy_to_user(void *to, const void *from, size_t n)
3364+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3365 *
3366 * Purpose:
3367 *
3368@@ -88,11 +88,11 @@
3369 .text
3370
3371 ENTRY(__copy_to_user_std)
3372-WEAK(__copy_to_user)
3373+WEAK(___copy_to_user)
3374
3375 #include "copy_template.S"
3376
3377-ENDPROC(__copy_to_user)
3378+ENDPROC(___copy_to_user)
3379 ENDPROC(__copy_to_user_std)
3380
3381 .pushsection .fixup,"ax"
3382diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3383index 7d08b43..f7ca7ea 100644
3384--- a/arch/arm/lib/csumpartialcopyuser.S
3385+++ b/arch/arm/lib/csumpartialcopyuser.S
3386@@ -57,8 +57,8 @@
3387 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3388 */
3389
3390-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3391-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3392+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3393+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3394
3395 #include "csumpartialcopygeneric.S"
3396
3397diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3398index 5306de3..aed6d03 100644
3399--- a/arch/arm/lib/delay.c
3400+++ b/arch/arm/lib/delay.c
3401@@ -28,7 +28,7 @@
3402 /*
3403 * Default to the loop-based delay implementation.
3404 */
3405-struct arm_delay_ops arm_delay_ops = {
3406+struct arm_delay_ops arm_delay_ops __read_only = {
3407 .delay = __loop_delay,
3408 .const_udelay = __loop_const_udelay,
3409 .udelay = __loop_udelay,
3410diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3411index 3e58d71..029817c 100644
3412--- a/arch/arm/lib/uaccess_with_memcpy.c
3413+++ b/arch/arm/lib/uaccess_with_memcpy.c
3414@@ -136,7 +136,7 @@ out:
3415 }
3416
3417 unsigned long
3418-__copy_to_user(void __user *to, const void *from, unsigned long n)
3419+___copy_to_user(void __user *to, const void *from, unsigned long n)
3420 {
3421 /*
3422 * This test is stubbed out of the main function above to keep
3423@@ -190,7 +190,7 @@ out:
3424 return n;
3425 }
3426
3427-unsigned long __clear_user(void __user *addr, unsigned long n)
3428+unsigned long ___clear_user(void __user *addr, unsigned long n)
3429 {
3430 	/* See rationale for this in __copy_to_user() above. */

3431 if (n < 64)
3432diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3433index f3407a5..bd4256f 100644
3434--- a/arch/arm/mach-kirkwood/common.c
3435+++ b/arch/arm/mach-kirkwood/common.c
3436@@ -156,7 +156,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3437 clk_gate_ops.disable(hw);
3438 }
3439
3440-static struct clk_ops clk_gate_fn_ops;
3441+static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3442+{
3443+ return clk_gate_ops.is_enabled(hw);
3444+}
3445+
3446+static struct clk_ops clk_gate_fn_ops = {
3447+ .enable = clk_gate_fn_enable,
3448+ .disable = clk_gate_fn_disable,
3449+ .is_enabled = clk_gate_fn_is_enabled,
3450+};
3451
3452 static struct clk __init *clk_register_gate_fn(struct device *dev,
3453 const char *name,
3454@@ -190,14 +199,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
3455 gate_fn->fn_en = fn_en;
3456 gate_fn->fn_dis = fn_dis;
3457
3458- /* ops is the gate ops, but with our enable/disable functions */
3459- if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
3460- clk_gate_fn_ops.disable != clk_gate_fn_disable) {
3461- clk_gate_fn_ops = clk_gate_ops;
3462- clk_gate_fn_ops.enable = clk_gate_fn_enable;
3463- clk_gate_fn_ops.disable = clk_gate_fn_disable;
3464- }
3465-
3466 clk = clk_register(dev, &gate_fn->gate.hw);
3467
3468 if (IS_ERR(clk))
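
The kirkwood change replaces the init-time memcpy of clk_gate_ops into a writable template, patched lazily on first registration, with a fully static table plus a trivial is_enabled() forwarder for the one member the old copy picked up implicitly. Binding every member statically is what lets the ops structure be constified like the other function-pointer tables in this patch; the general shape of the conversion (clk_gate_fn_* names are from the hunk, the const qualifier is the point of the exercise):

	static int clk_gate_fn_is_enabled(struct clk_hw *hw)
	{
		return clk_gate_ops.is_enabled(hw);	/* delegate to the template */
	}

	static const struct clk_ops good_ops = {
		.enable	    = clk_gate_fn_enable,
		.disable    = clk_gate_fn_disable,
		.is_enabled = clk_gate_fn_is_enabled,
	};
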
3469diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3470index 827d1500..2885dc6 100644
3471--- a/arch/arm/mach-omap2/board-n8x0.c
3472+++ b/arch/arm/mach-omap2/board-n8x0.c
3473@@ -627,7 +627,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3474 }
3475 #endif
3476
3477-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3478+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3479 .late_init = n8x0_menelaus_late_init,
3480 };
3481
3482diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
3483index d24926e..a7645a6 100644
3484--- a/arch/arm/mach-omap2/gpmc.c
3485+++ b/arch/arm/mach-omap2/gpmc.c
3486@@ -148,7 +148,6 @@ struct omap3_gpmc_regs {
3487 };
3488
3489 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
3490-static struct irq_chip gpmc_irq_chip;
3491 static int gpmc_irq_start;
3492
3493 static struct resource gpmc_mem_root;
3494@@ -716,6 +715,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
3495
3496 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
3497
3498+static struct irq_chip gpmc_irq_chip = {
3499+ .name = "gpmc",
3500+ .irq_startup = gpmc_irq_noop_ret,
3501+ .irq_enable = gpmc_irq_enable,
3502+ .irq_disable = gpmc_irq_disable,
3503+ .irq_shutdown = gpmc_irq_noop,
3504+ .irq_ack = gpmc_irq_noop,
3505+ .irq_mask = gpmc_irq_noop,
3506+ .irq_unmask = gpmc_irq_noop,
3507+
3508+};
3509+
3510 static int gpmc_setup_irq(void)
3511 {
3512 int i;
3513@@ -730,15 +741,6 @@ static int gpmc_setup_irq(void)
3514 return gpmc_irq_start;
3515 }
3516
3517- gpmc_irq_chip.name = "gpmc";
3518- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
3519- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
3520- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
3521- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
3522- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
3523- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
3524- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
3525-
3526 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
3527 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
3528
3529diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3530index f991016..145ebeb 100644
3531--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3532+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3533@@ -84,7 +84,7 @@ struct cpu_pm_ops {
3534 int (*finish_suspend)(unsigned long cpu_state);
3535 void (*resume)(void);
3536 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3537-};
3538+} __no_const;
3539
3540 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3541 static struct powerdomain *mpuss_pd;
3542@@ -102,7 +102,7 @@ static void dummy_cpu_resume(void)
3543 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3544 {}
3545
3546-struct cpu_pm_ops omap_pm_ops = {
3547+static struct cpu_pm_ops omap_pm_ops __read_only = {
3548 .finish_suspend = default_finish_suspend,
3549 .resume = dummy_cpu_resume,
3550 .scu_prepare = dummy_scu_prepare,
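
The __no_const and __read_only annotations above are grsecurity/PaX markers: __no_const exempts a function-pointer struct from the constify GCC plugin (its callbacks are legitimately chosen per-SoC at boot), while __read_only requests placement in a section that is mapped read-only once init completes. A hedged sketch of the section-placement half using a stock GCC attribute; the section name ".data..read_only" and the struct are illustrative, not the real PaX definitions:

#include <stdio.h>

#define __my_read_only __attribute__((__section__(".data..read_only")))

struct pm_ops {
	int  (*finish_suspend)(unsigned long state);
	void (*resume)(void);
};

static int  default_finish_suspend(unsigned long s) { (void)s; return 0; }
static void dummy_resume(void) { }

/* written once (here, statically); the kernel's boot code would remap
 * the backing pages read-only afterwards */
static struct pm_ops ops __my_read_only = {
	.finish_suspend = default_finish_suspend,
	.resume         = dummy_resume,
};

int main(void)
{
	printf("suspend rc: %d\n", ops.finish_suspend(0));
	return 0;
}
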
3551diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3552index 3664562..72f85c6 100644
3553--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3554+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3555@@ -343,7 +343,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3556 return NOTIFY_OK;
3557 }
3558
3559-static struct notifier_block __refdata irq_hotplug_notifier = {
3560+static struct notifier_block irq_hotplug_notifier = {
3561 .notifier_call = irq_cpu_hotplug_notify,
3562 };
3563
3564diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3565index e0a398c..a470fa5 100644
3566--- a/arch/arm/mach-omap2/omap_device.c
3567+++ b/arch/arm/mach-omap2/omap_device.c
3568@@ -508,7 +508,7 @@ void omap_device_delete(struct omap_device *od)
3569 struct platform_device __init *omap_device_build(const char *pdev_name,
3570 int pdev_id,
3571 struct omap_hwmod *oh,
3572- void *pdata, int pdata_len)
3573+ const void *pdata, int pdata_len)
3574 {
3575 struct omap_hwmod *ohs[] = { oh };
3576
3577@@ -536,7 +536,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3578 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3579 int pdev_id,
3580 struct omap_hwmod **ohs,
3581- int oh_cnt, void *pdata,
3582+ int oh_cnt, const void *pdata,
3583 int pdata_len)
3584 {
3585 int ret = -ENOMEM;
3586diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3587index 78c02b3..c94109a 100644
3588--- a/arch/arm/mach-omap2/omap_device.h
3589+++ b/arch/arm/mach-omap2/omap_device.h
3590@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3591 /* Core code interface */
3592
3593 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3594- struct omap_hwmod *oh, void *pdata,
3595+ struct omap_hwmod *oh, const void *pdata,
3596 int pdata_len);
3597
3598 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3599 struct omap_hwmod **oh, int oh_cnt,
3600- void *pdata, int pdata_len);
3601+ const void *pdata, int pdata_len);
3602
3603 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3604 struct omap_hwmod **ohs, int oh_cnt);
3605diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3606index 8a1b5e0..5f30074 100644
3607--- a/arch/arm/mach-omap2/omap_hwmod.c
3608+++ b/arch/arm/mach-omap2/omap_hwmod.c
3609@@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops {
3610 int (*init_clkdm)(struct omap_hwmod *oh);
3611 void (*update_context_lost)(struct omap_hwmod *oh);
3612 int (*get_context_lost)(struct omap_hwmod *oh);
3613-};
3614+} __no_const;
3615
3616 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3617-static struct omap_hwmod_soc_ops soc_ops;
3618+static struct omap_hwmod_soc_ops soc_ops __read_only;
3619
3620 /* omap_hwmod_list contains all registered struct omap_hwmods */
3621 static LIST_HEAD(omap_hwmod_list);
3622diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
3623index 95fee54..cfa9cf1 100644
3624--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
3625+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
3626@@ -10,6 +10,7 @@
3627
3628 #include <linux/kernel.h>
3629 #include <linux/init.h>
3630+#include <asm/pgtable.h>
3631
3632 #include "powerdomain.h"
3633
3634@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
3635
3636 void __init am43xx_powerdomains_init(void)
3637 {
3638- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3639+ pax_open_kernel();
3640+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3641+ pax_close_kernel();
3642 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
3643 pwrdm_register_pwrdms(powerdomains_am43xx);
3644 pwrdm_complete_init();
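
The pax_open_kernel()/pax_close_kernel() pair above is the sanctioned way to write to a constified ops structure: its pages are normally mapped read-only, and the helpers open a brief writable window for the single boot-time assignment (the *(void **)& cast defeats the const qualifier the plugin added). A runnable userspace analogue of the idea, using mprotect() in place of the PaX helpers; error handling is trimmed for brevity:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct pwrdm_ops { int (*pwrdm_has_voltdm)(void); };

static int am43xx_check_vcvp(void) { return 1; }

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	/* page-aligned table so its protection can be flipped */
	struct pwrdm_ops *ops = mmap(NULL, pg, PROT_READ | PROT_WRITE,
				     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	mprotect(ops, pg, PROT_READ);               /* normal state: RO */

	mprotect(ops, pg, PROT_READ | PROT_WRITE);  /* "pax_open_kernel()" */
	ops->pwrdm_has_voltdm = am43xx_check_vcvp;  /* one-shot patch */
	mprotect(ops, pg, PROT_READ);               /* "pax_close_kernel()" */

	printf("has voltdm: %d\n", ops->pwrdm_has_voltdm());
	return 0;
}
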
3645diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3646index d15c7bb..b2d1f0c 100644
3647--- a/arch/arm/mach-omap2/wd_timer.c
3648+++ b/arch/arm/mach-omap2/wd_timer.c
3649@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3650 struct omap_hwmod *oh;
3651 char *oh_name = "wd_timer2";
3652 char *dev_name = "omap_wdt";
3653- struct omap_wd_timer_platform_data pdata;
3654+ static struct omap_wd_timer_platform_data pdata = {
3655+ .read_reset_sources = prm_read_reset_sources,
3656+ };
3657
3658 if (!cpu_class_is_omap2() || of_have_populated_dt())
3659 return 0;
3660@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3661 return -EINVAL;
3662 }
3663
3664- pdata.read_reset_sources = prm_read_reset_sources;
3665-
3666 pdev = omap_device_build(dev_name, id, oh, &pdata,
3667 sizeof(struct omap_wd_timer_platform_data));
3668 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
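
The wd_timer change replaces a stack-allocated platform-data struct filled in at runtime with a static designated initializer, pairing with the const void *pdata parameters introduced in omap_device.c above: the callee copies the data, so the caller's copy never needs to be written at runtime. A small sketch of the shape of the change, with illustrative names:

#include <stdio.h>
#include <string.h>

struct wd_pdata { unsigned (*read_reset_sources)(void); };

static unsigned prm_read_reset_sources(void) { return 0x2; }

/* stands in for omap_device_build(): takes const data, makes its own copy */
static void build_device(const void *pdata, size_t len)
{
	struct wd_pdata copy;
	memcpy(&copy, pdata, len);
	printf("reset sources: %#x\n", copy.read_reset_sources());
}

int main(void)
{
	static const struct wd_pdata pdata = {
		.read_reset_sources = prm_read_reset_sources,
	};
	build_device(&pdata, sizeof(pdata));
	return 0;
}
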
3669diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3670index b82dcae..44ee5b6 100644
3671--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3672+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3673@@ -180,7 +180,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3674 bool entered_lp2 = false;
3675
3676 if (tegra_pending_sgi())
3677- ACCESS_ONCE(abort_flag) = true;
3678+ ACCESS_ONCE_RW(abort_flag) = true;
3679
3680 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3681
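
ACCESS_ONCE() forces exactly one, untorn access through a volatile-qualified lvalue; under PaX the plain macro is made const-qualified so stray writes through it fail to compile, and ACCESS_ONCE_RW() is the explicitly-writable variant used in hunks like the one above. A compilable sketch of that split (the const-by-default definition mirrors the PaX idea, not the exact kernel macro text):

#include <stdio.h>

#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

static int abort_flag;

int main(void)
{
	ACCESS_ONCE_RW(abort_flag) = 1;           /* volatile write, allowed */
	/* ACCESS_ONCE(abort_flag) = 1; would not compile: const lvalue */
	printf("%d\n", ACCESS_ONCE(abort_flag));  /* volatile read */
	return 0;
}
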
3682diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3683index bdb3564..cebb96f 100644
3684--- a/arch/arm/mach-ux500/setup.h
3685+++ b/arch/arm/mach-ux500/setup.h
3686@@ -39,13 +39,6 @@ extern void ux500_timer_init(void);
3687 .type = MT_DEVICE, \
3688 }
3689
3690-#define __MEM_DEV_DESC(x, sz) { \
3691- .virtual = IO_ADDRESS(x), \
3692- .pfn = __phys_to_pfn(x), \
3693- .length = sz, \
3694- .type = MT_MEMORY, \
3695-}
3696-
3697 extern struct smp_operations ux500_smp_ops;
3698 extern void ux500_cpu_die(unsigned int cpu);
3699
3700diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3701index 1f8fed9..14d7823 100644
3702--- a/arch/arm/mm/Kconfig
3703+++ b/arch/arm/mm/Kconfig
3704@@ -446,7 +446,7 @@ config CPU_32v5
3705
3706 config CPU_32v6
3707 bool
3708- select CPU_USE_DOMAINS if CPU_V6 && MMU
3709+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3710 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3711
3712 config CPU_32v6K
3713@@ -601,6 +601,7 @@ config CPU_CP15_MPU
3714
3715 config CPU_USE_DOMAINS
3716 bool
3717+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3718 help
3719 This option enables or disables the use of domain switching
3720 via the set_fs() function.
3721@@ -800,6 +801,7 @@ config NEED_KUSER_HELPERS
3722 config KUSER_HELPERS
3723 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3724 default y
3725+ depends on !(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND
3726 help
3727 Warning: disabling this option may break user programs.
3728
3729@@ -812,7 +814,7 @@ config KUSER_HELPERS
3730 See Documentation/arm/kernel_user_helpers.txt for details.
3731
3732 However, the fixed address nature of these helpers can be used
3733- by ROP (return orientated programming) authors when creating
3734+ by ROP (Return Oriented Programming) authors when creating
3735 exploits.
3736
3737 If all of the binaries and libraries which run on your platform
3738diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3739index 9240364..a2b8cf3 100644
3740--- a/arch/arm/mm/alignment.c
3741+++ b/arch/arm/mm/alignment.c
3742@@ -212,10 +212,12 @@ union offset_union {
3743 #define __get16_unaligned_check(ins,val,addr) \
3744 do { \
3745 unsigned int err = 0, v, a = addr; \
3746+ pax_open_userland(); \
3747 __get8_unaligned_check(ins,v,a,err); \
3748 val = v << ((BE) ? 8 : 0); \
3749 __get8_unaligned_check(ins,v,a,err); \
3750 val |= v << ((BE) ? 0 : 8); \
3751+ pax_close_userland(); \
3752 if (err) \
3753 goto fault; \
3754 } while (0)
3755@@ -229,6 +231,7 @@ union offset_union {
3756 #define __get32_unaligned_check(ins,val,addr) \
3757 do { \
3758 unsigned int err = 0, v, a = addr; \
3759+ pax_open_userland(); \
3760 __get8_unaligned_check(ins,v,a,err); \
3761 val = v << ((BE) ? 24 : 0); \
3762 __get8_unaligned_check(ins,v,a,err); \
3763@@ -237,6 +240,7 @@ union offset_union {
3764 val |= v << ((BE) ? 8 : 16); \
3765 __get8_unaligned_check(ins,v,a,err); \
3766 val |= v << ((BE) ? 0 : 24); \
3767+ pax_close_userland(); \
3768 if (err) \
3769 goto fault; \
3770 } while (0)
3771@@ -250,6 +254,7 @@ union offset_union {
3772 #define __put16_unaligned_check(ins,val,addr) \
3773 do { \
3774 unsigned int err = 0, v = val, a = addr; \
3775+ pax_open_userland(); \
3776 __asm__( FIRST_BYTE_16 \
3777 ARM( "1: "ins" %1, [%2], #1\n" ) \
3778 THUMB( "1: "ins" %1, [%2]\n" ) \
3779@@ -269,6 +274,7 @@ union offset_union {
3780 " .popsection\n" \
3781 : "=r" (err), "=&r" (v), "=&r" (a) \
3782 : "0" (err), "1" (v), "2" (a)); \
3783+ pax_close_userland(); \
3784 if (err) \
3785 goto fault; \
3786 } while (0)
3787@@ -282,6 +288,7 @@ union offset_union {
3788 #define __put32_unaligned_check(ins,val,addr) \
3789 do { \
3790 unsigned int err = 0, v = val, a = addr; \
3791+ pax_open_userland(); \
3792 __asm__( FIRST_BYTE_32 \
3793 ARM( "1: "ins" %1, [%2], #1\n" ) \
3794 THUMB( "1: "ins" %1, [%2]\n" ) \
3795@@ -311,6 +318,7 @@ union offset_union {
3796 " .popsection\n" \
3797 : "=r" (err), "=&r" (v), "=&r" (a) \
3798 : "0" (err), "1" (v), "2" (a)); \
3799+ pax_close_userland(); \
3800 if (err) \
3801 goto fault; \
3802 } while (0)
3803diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3804index 447da6f..77a5057 100644
3805--- a/arch/arm/mm/cache-l2x0.c
3806+++ b/arch/arm/mm/cache-l2x0.c
3807@@ -45,7 +45,7 @@ struct l2x0_of_data {
3808 void (*setup)(const struct device_node *, u32 *, u32 *);
3809 void (*save)(void);
3810 struct outer_cache_fns outer_cache;
3811-};
3812+} __do_const;
3813
3814 static bool of_init = false;
3815
3816diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3817index 84e6f77..0b52f31 100644
3818--- a/arch/arm/mm/context.c
3819+++ b/arch/arm/mm/context.c
3820@@ -43,7 +43,7 @@
3821 #define NUM_USER_ASIDS ASID_FIRST_VERSION
3822
3823 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3824-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3825+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3826 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
3827
3828 static DEFINE_PER_CPU(atomic64_t, active_asids);
3829@@ -180,7 +180,7 @@ static int is_reserved_asid(u64 asid)
3830 static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3831 {
3832 u64 asid = atomic64_read(&mm->context.id);
3833- u64 generation = atomic64_read(&asid_generation);
3834+ u64 generation = atomic64_read_unchecked(&asid_generation);
3835
3836 if (asid != 0 && is_reserved_asid(asid)) {
3837 /*
3838@@ -198,7 +198,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3839 */
3840 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3841 if (asid == NUM_USER_ASIDS) {
3842- generation = atomic64_add_return(ASID_FIRST_VERSION,
3843+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
3844 &asid_generation);
3845 flush_context(cpu);
3846 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3847@@ -227,14 +227,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
3848 cpu_set_reserved_ttbr0();
3849
3850 asid = atomic64_read(&mm->context.id);
3851- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
3852+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
3853 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
3854 goto switch_mm_fastpath;
3855
3856 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
3857 /* Check that our ASID belongs to the current generation. */
3858 asid = atomic64_read(&mm->context.id);
3859- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
3860+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
3861 asid = new_context(mm, cpu);
3862 atomic64_set(&mm->context.id, asid);
3863 }
3864diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3865index eb8830a..5360ce7 100644
3866--- a/arch/arm/mm/fault.c
3867+++ b/arch/arm/mm/fault.c
3868@@ -25,6 +25,7 @@
3869 #include <asm/system_misc.h>
3870 #include <asm/system_info.h>
3871 #include <asm/tlbflush.h>
3872+#include <asm/sections.h>
3873
3874 #include "fault.h"
3875
3876@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3877 if (fixup_exception(regs))
3878 return;
3879
3880+#ifdef CONFIG_PAX_MEMORY_UDEREF
3881+ if (addr < TASK_SIZE) {
3882+ if (current->signal->curr_ip)
3883+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3884+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3885+ else
3886+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3887+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3888+ }
3889+#endif
3890+
3891+#ifdef CONFIG_PAX_KERNEXEC
3892+ if ((fsr & FSR_WRITE) &&
3893+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3894+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3895+ {
3896+ if (current->signal->curr_ip)
3897+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3898+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3899+ else
3900+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3901+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3902+ }
3903+#endif
3904+
3905 /*
3906 * No handler, we'll have to terminate things with extreme prejudice.
3907 */
3908@@ -174,6 +200,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3909 }
3910 #endif
3911
3912+#ifdef CONFIG_PAX_PAGEEXEC
3913+ if (fsr & FSR_LNX_PF) {
3914+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3915+ do_group_exit(SIGKILL);
3916+ }
3917+#endif
3918+
3919 tsk->thread.address = addr;
3920 tsk->thread.error_code = fsr;
3921 tsk->thread.trap_no = 14;
3922@@ -401,6 +434,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3923 }
3924 #endif /* CONFIG_MMU */
3925
3926+#ifdef CONFIG_PAX_PAGEEXEC
3927+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3928+{
3929+ long i;
3930+
3931+ printk(KERN_ERR "PAX: bytes at PC: ");
3932+ for (i = 0; i < 20; i++) {
3933+ unsigned char c;
3934+ if (get_user(c, (__force unsigned char __user *)pc+i))
3935+ printk(KERN_CONT "?? ");
3936+ else
3937+ printk(KERN_CONT "%02x ", c);
3938+ }
3939+ printk("\n");
3940+
3941+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3942+ for (i = -1; i < 20; i++) {
3943+ unsigned long c;
3944+ if (get_user(c, (__force unsigned long __user *)sp+i))
3945+ printk(KERN_CONT "???????? ");
3946+ else
3947+ printk(KERN_CONT "%08lx ", c);
3948+ }
3949+ printk("\n");
3950+}
3951+#endif
3952+
3953 /*
3954 * First Level Translation Fault Handler
3955 *
3956@@ -548,9 +608,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3957 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3958 struct siginfo info;
3959
3960+#ifdef CONFIG_PAX_MEMORY_UDEREF
3961+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3962+ if (current->signal->curr_ip)
3963+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3964+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3965+ else
3966+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3967+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3968+ goto die;
3969+ }
3970+#endif
3971+
3972 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
3973 return;
3974
3975+die:
3976 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
3977 inf->name, fsr, addr);
3978
3979@@ -574,15 +647,98 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
3980 ifsr_info[nr].name = name;
3981 }
3982
3983+asmlinkage int sys_sigreturn(struct pt_regs *regs);
3984+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
3985+
3986 asmlinkage void __exception
3987 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
3988 {
3989 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
3990 struct siginfo info;
3991+ unsigned long pc = instruction_pointer(regs);
3992+
3993+ if (user_mode(regs)) {
3994+ unsigned long sigpage = current->mm->context.sigpage;
3995+
3996+ if (sigpage <= pc && pc < sigpage + 7*4) {
3997+ if (pc < sigpage + 3*4)
3998+ sys_sigreturn(regs);
3999+ else
4000+ sys_rt_sigreturn(regs);
4001+ return;
4002+ }
4003+ if (pc == 0xffff0f60UL) {
4004+ /*
4005+ * PaX: __kuser_cmpxchg64 emulation
4006+ */
4007+ // TODO
4008+ //regs->ARM_pc = regs->ARM_lr;
4009+ //return;
4010+ }
4011+ if (pc == 0xffff0fa0UL) {
4012+ /*
4013+ * PaX: __kuser_memory_barrier emulation
4014+ */
4015+ // dmb(); implied by the exception
4016+ regs->ARM_pc = regs->ARM_lr;
4017+ return;
4018+ }
4019+ if (pc == 0xffff0fc0UL) {
4020+ /*
4021+ * PaX: __kuser_cmpxchg emulation
4022+ */
4023+ // TODO
4024+ //long new;
4025+ //int op;
4026+
4027+ //op = FUTEX_OP_SET << 28;
4028+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4029+ //regs->ARM_r0 = old != new;
4030+ //regs->ARM_pc = regs->ARM_lr;
4031+ //return;
4032+ }
4033+ if (pc == 0xffff0fe0UL) {
4034+ /*
4035+ * PaX: __kuser_get_tls emulation
4036+ */
4037+ regs->ARM_r0 = current_thread_info()->tp_value[0];
4038+ regs->ARM_pc = regs->ARM_lr;
4039+ return;
4040+ }
4041+ }
4042+
4043+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4044+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4045+ if (current->signal->curr_ip)
4046+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4047+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4048+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4049+ else
4050+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4051+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4052+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4053+ goto die;
4054+ }
4055+#endif
4056+
4057+#ifdef CONFIG_PAX_REFCOUNT
4058+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4059+ unsigned int bkpt;
4060+
4061+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4062+ current->thread.error_code = ifsr;
4063+ current->thread.trap_no = 0;
4064+ pax_report_refcount_overflow(regs);
4065+ fixup_exception(regs);
4066+ return;
4067+ }
4068+ }
4069+#endif
4070
4071 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4072 return;
4073
4074+die:
4075 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4076 inf->name, ifsr, addr);
4077
4078diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4079index cf08bdf..772656c 100644
4080--- a/arch/arm/mm/fault.h
4081+++ b/arch/arm/mm/fault.h
4082@@ -3,6 +3,7 @@
4083
4084 /*
4085 * Fault status register encodings. We steal bit 31 for our own purposes.
4086+ * Set when the FSR value is from an instruction fault.
4087 */
4088 #define FSR_LNX_PF (1 << 31)
4089 #define FSR_WRITE (1 << 11)
4090@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4091 }
4092 #endif
4093
4094+/* valid for LPAE and !LPAE */
4095+static inline int is_xn_fault(unsigned int fsr)
4096+{
4097+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4098+}
4099+
4100+static inline int is_domain_fault(unsigned int fsr)
4101+{
4102+ return ((fsr_fs(fsr) & 0xd) == 0x9);
4103+}
4104+
4105 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4106 unsigned long search_exception_table(unsigned long addr);
4107
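
The two helpers added to fault.h classify fault-status codes with masks chosen, per the comment, to work for both LPAE and classic short-descriptor encodings. A quick self-check against the classic encodings (0x0d permission/section, 0x0f permission/page, 0x09 domain/section, 0x0b domain/page); treat the exact coverage across all FS codes as the patch author's claim, not this sketch's:

#include <stdio.h>

static int is_xn_fault(unsigned int fs)     { return (fs & 0x3c) == 0xc; }
static int is_domain_fault(unsigned int fs) { return (fs & 0xd) == 0x9;  }

int main(void)
{
	printf("0x0d perm/section:   xn=%d dom=%d\n", is_xn_fault(0x0d), is_domain_fault(0x0d));
	printf("0x0f perm/page:      xn=%d dom=%d\n", is_xn_fault(0x0f), is_domain_fault(0x0f));
	printf("0x09 domain/section: xn=%d dom=%d\n", is_xn_fault(0x09), is_domain_fault(0x09));
	printf("0x0b domain/page:    xn=%d dom=%d\n", is_xn_fault(0x0b), is_domain_fault(0x0b));
	return 0;
}
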
4108diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4109index 3e8f106..a0a1fe4 100644
4110--- a/arch/arm/mm/init.c
4111+++ b/arch/arm/mm/init.c
4112@@ -30,6 +30,8 @@
4113 #include <asm/setup.h>
4114 #include <asm/tlb.h>
4115 #include <asm/fixmap.h>
4116+#include <asm/system_info.h>
4117+#include <asm/cp15.h>
4118
4119 #include <asm/mach/arch.h>
4120 #include <asm/mach/map.h>
4121@@ -681,7 +683,46 @@ void free_initmem(void)
4122 {
4123 #ifdef CONFIG_HAVE_TCM
4124 extern char __tcm_start, __tcm_end;
4125+#endif
4126
4127+#ifdef CONFIG_PAX_KERNEXEC
4128+ unsigned long addr;
4129+ pgd_t *pgd;
4130+ pud_t *pud;
4131+ pmd_t *pmd;
4132+ int cpu_arch = cpu_architecture();
4133+ unsigned int cr = get_cr();
4134+
4135+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4136+ /* make pages tables, etc before .text NX */
4137+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4138+ pgd = pgd_offset_k(addr);
4139+ pud = pud_offset(pgd, addr);
4140+ pmd = pmd_offset(pud, addr);
4141+ __section_update(pmd, addr, PMD_SECT_XN);
4142+ }
4143+ /* make init NX */
4144+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4145+ pgd = pgd_offset_k(addr);
4146+ pud = pud_offset(pgd, addr);
4147+ pmd = pmd_offset(pud, addr);
4148+ __section_update(pmd, addr, PMD_SECT_XN);
4149+ }
4150+ /* make kernel code/rodata RX */
4151+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4152+ pgd = pgd_offset_k(addr);
4153+ pud = pud_offset(pgd, addr);
4154+ pmd = pmd_offset(pud, addr);
4155+#ifdef CONFIG_ARM_LPAE
4156+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4157+#else
4158+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4159+#endif
4160+ }
4161+ }
4162+#endif
4163+
4164+#ifdef CONFIG_HAVE_TCM
4165 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4166 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4167 #endif
4168diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4169index f123d6e..04bf569 100644
4170--- a/arch/arm/mm/ioremap.c
4171+++ b/arch/arm/mm/ioremap.c
4172@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4173 unsigned int mtype;
4174
4175 if (cached)
4176- mtype = MT_MEMORY;
4177+ mtype = MT_MEMORY_RX;
4178 else
4179- mtype = MT_MEMORY_NONCACHED;
4180+ mtype = MT_MEMORY_NONCACHED_RX;
4181
4182 return __arm_ioremap_caller(phys_addr, size, mtype,
4183 __builtin_return_address(0));
4184diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4185index 5e85ed3..b10a7ed 100644
4186--- a/arch/arm/mm/mmap.c
4187+++ b/arch/arm/mm/mmap.c
4188@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4189 struct vm_area_struct *vma;
4190 int do_align = 0;
4191 int aliasing = cache_is_vipt_aliasing();
4192+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4193 struct vm_unmapped_area_info info;
4194
4195 /*
4196@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4197 if (len > TASK_SIZE)
4198 return -ENOMEM;
4199
4200+#ifdef CONFIG_PAX_RANDMMAP
4201+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4202+#endif
4203+
4204 if (addr) {
4205 if (do_align)
4206 addr = COLOUR_ALIGN(addr, pgoff);
4207@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4208 addr = PAGE_ALIGN(addr);
4209
4210 vma = find_vma(mm, addr);
4211- if (TASK_SIZE - len >= addr &&
4212- (!vma || addr + len <= vma->vm_start))
4213+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4214 return addr;
4215 }
4216
4217@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4218 info.high_limit = TASK_SIZE;
4219 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4220 info.align_offset = pgoff << PAGE_SHIFT;
4221+ info.threadstack_offset = offset;
4222 return vm_unmapped_area(&info);
4223 }
4224
4225@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4226 unsigned long addr = addr0;
4227 int do_align = 0;
4228 int aliasing = cache_is_vipt_aliasing();
4229+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4230 struct vm_unmapped_area_info info;
4231
4232 /*
4233@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4234 return addr;
4235 }
4236
4237+#ifdef CONFIG_PAX_RANDMMAP
4238+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4239+#endif
4240+
4241 /* requesting a specific address */
4242 if (addr) {
4243 if (do_align)
4244@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4245 else
4246 addr = PAGE_ALIGN(addr);
4247 vma = find_vma(mm, addr);
4248- if (TASK_SIZE - len >= addr &&
4249- (!vma || addr + len <= vma->vm_start))
4250+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4251 return addr;
4252 }
4253
4254@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4255 info.high_limit = mm->mmap_base;
4256 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4257 info.align_offset = pgoff << PAGE_SHIFT;
4258+ info.threadstack_offset = offset;
4259 addr = vm_unmapped_area(&info);
4260
4261 /*
4262@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4263 {
4264 unsigned long random_factor = 0UL;
4265
4266+#ifdef CONFIG_PAX_RANDMMAP
4267+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4268+#endif
4269+
4270 /* 8 bits of randomness in 20 address space bits */
4271 if ((current->flags & PF_RANDOMIZE) &&
4272 !(current->personality & ADDR_NO_RANDOMIZE))
4273@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4274
4275 if (mmap_is_legacy()) {
4276 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4277+
4278+#ifdef CONFIG_PAX_RANDMMAP
4279+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4280+ mm->mmap_base += mm->delta_mmap;
4281+#endif
4282+
4283 mm->get_unmapped_area = arch_get_unmapped_area;
4284 } else {
4285 mm->mmap_base = mmap_base(random_factor);
4286+
4287+#ifdef CONFIG_PAX_RANDMMAP
4288+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4289+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4290+#endif
4291+
4292 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4293 }
4294 }
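
Both mmap paths above swap the stock "(!vma || addr + len <= vma->vm_start)" acceptance test for check_heap_stack_gap(), which additionally demands a (randomizable) guard gap between the new mapping and the following VMA, and thread the gr_rand_threadstack_offset() value through vm_unmapped_area(). A hedged paraphrase of what such a gap test adds; the real helper also handles stack-growth direction and lives in grsecurity common code, not in this file:

#include <stdio.h>

struct vm_area_struct { unsigned long vm_start; };

static int check_gap(const struct vm_area_struct *vma,
		     unsigned long addr, unsigned long len,
		     unsigned long offset /* required guard size */)
{
	if (!vma)
		return 1;                        /* no mapping above us */
	if (addr + len > vma->vm_start)
		return 0;                        /* would overlap next vma */
	return vma->vm_start - (addr + len) >= offset;  /* keep the gap */
}

int main(void)
{
	struct vm_area_struct stack = { .vm_start = 0x7f0000000000UL };
	printf("%d\n", check_gap(&stack, 0x7e0000000000UL, 0x1000, 0x10000));
	printf("%d\n", check_gap(&stack, 0x7effffffff00UL, 0x100,  0x10000));
	return 0;
}
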
4295diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4296index 580ef2d..2da06ca 100644
4297--- a/arch/arm/mm/mmu.c
4298+++ b/arch/arm/mm/mmu.c
4299@@ -38,6 +38,22 @@
4300 #include "mm.h"
4301 #include "tcm.h"
4302
4303+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4304+void modify_domain(unsigned int dom, unsigned int type)
4305+{
4306+ struct thread_info *thread = current_thread_info();
4307+ unsigned int domain = thread->cpu_domain;
4308+ /*
4309+ * DOMAIN_MANAGER might be defined to some other value,
4310+ * use the arch-defined constant
4311+ */
4312+ domain &= ~domain_val(dom, 3);
4313+ thread->cpu_domain = domain | domain_val(dom, type);
4314+ set_domain(thread->cpu_domain);
4315+}
4316+EXPORT_SYMBOL(modify_domain);
4317+#endif
4318+
4319 /*
4320 * empty_zero_page is a special page that is used for
4321 * zero-initialized data and COW.
4322@@ -230,10 +246,18 @@ __setup("noalign", noalign_setup);
4323
4324 #endif /* ifdef CONFIG_CPU_CP15 / else */
4325
4326-#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
4327+#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY
4328 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4329
4330-static struct mem_type mem_types[] = {
4331+#ifdef CONFIG_PAX_KERNEXEC
4332+#define L_PTE_KERNEXEC L_PTE_RDONLY
4333+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4334+#else
4335+#define L_PTE_KERNEXEC L_PTE_DIRTY
4336+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4337+#endif
4338+
4339+static struct mem_type mem_types[] __read_only = {
4340 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4341 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4342 L_PTE_SHARED,
4343@@ -262,16 +286,16 @@ static struct mem_type mem_types[] = {
4344 [MT_UNCACHED] = {
4345 .prot_pte = PROT_PTE_DEVICE,
4346 .prot_l1 = PMD_TYPE_TABLE,
4347- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4348+ .prot_sect = PROT_SECT_DEVICE,
4349 .domain = DOMAIN_IO,
4350 },
4351 [MT_CACHECLEAN] = {
4352- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4353+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4354 .domain = DOMAIN_KERNEL,
4355 },
4356 #ifndef CONFIG_ARM_LPAE
4357 [MT_MINICLEAN] = {
4358- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4359+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_RDONLY,
4360 .domain = DOMAIN_KERNEL,
4361 },
4362 #endif
4363@@ -279,36 +303,54 @@ static struct mem_type mem_types[] = {
4364 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4365 L_PTE_RDONLY,
4366 .prot_l1 = PMD_TYPE_TABLE,
4367- .domain = DOMAIN_USER,
4368+ .domain = DOMAIN_VECTORS,
4369 },
4370 [MT_HIGH_VECTORS] = {
4371 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4372 L_PTE_USER | L_PTE_RDONLY,
4373 .prot_l1 = PMD_TYPE_TABLE,
4374- .domain = DOMAIN_USER,
4375+ .domain = DOMAIN_VECTORS,
4376 },
4377- [MT_MEMORY] = {
4378+ [MT_MEMORY_RWX] = {
4379 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4380 .prot_l1 = PMD_TYPE_TABLE,
4381 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4382 .domain = DOMAIN_KERNEL,
4383 },
4384+ [MT_MEMORY_RW] = {
4385+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4386+ .prot_l1 = PMD_TYPE_TABLE,
4387+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4388+ .domain = DOMAIN_KERNEL,
4389+ },
4390+ [MT_MEMORY_RX] = {
4391+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4392+ .prot_l1 = PMD_TYPE_TABLE,
4393+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4394+ .domain = DOMAIN_KERNEL,
4395+ },
4396 [MT_ROM] = {
4397- .prot_sect = PMD_TYPE_SECT,
4398+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4399 .domain = DOMAIN_KERNEL,
4400 },
4401- [MT_MEMORY_NONCACHED] = {
4402+ [MT_MEMORY_NONCACHED_RW] = {
4403 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4404 L_PTE_MT_BUFFERABLE,
4405 .prot_l1 = PMD_TYPE_TABLE,
4406 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4407 .domain = DOMAIN_KERNEL,
4408 },
4409+ [MT_MEMORY_NONCACHED_RX] = {
4410+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4411+ L_PTE_MT_BUFFERABLE,
4412+ .prot_l1 = PMD_TYPE_TABLE,
4413+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4414+ .domain = DOMAIN_KERNEL,
4415+ },
4416 [MT_MEMORY_DTCM] = {
4417- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4418- L_PTE_XN,
4419+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4420 .prot_l1 = PMD_TYPE_TABLE,
4421- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4422+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4423 .domain = DOMAIN_KERNEL,
4424 },
4425 [MT_MEMORY_ITCM] = {
4426@@ -318,10 +360,10 @@ static struct mem_type mem_types[] = {
4427 },
4428 [MT_MEMORY_SO] = {
4429 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4430- L_PTE_MT_UNCACHED | L_PTE_XN,
4431+ L_PTE_MT_UNCACHED,
4432 .prot_l1 = PMD_TYPE_TABLE,
4433 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
4434- PMD_SECT_UNCACHED | PMD_SECT_XN,
4435+ PMD_SECT_UNCACHED,
4436 .domain = DOMAIN_KERNEL,
4437 },
4438 [MT_MEMORY_DMA_READY] = {
4439@@ -407,9 +449,35 @@ static void __init build_mem_type_table(void)
4440 * to prevent speculative instruction fetches.
4441 */
4442 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
4443+ mem_types[MT_DEVICE].prot_pte |= L_PTE_XN;
4444 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
4445+ mem_types[MT_DEVICE_NONSHARED].prot_pte |= L_PTE_XN;
4446 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
4447+ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_XN;
4448 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
4449+ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_XN;
4450+
4451+ /* Mark other regions on ARMv6+ as execute-never */
4452+
4453+#ifdef CONFIG_PAX_KERNEXEC
4454+ mem_types[MT_UNCACHED].prot_sect |= PMD_SECT_XN;
4455+ mem_types[MT_UNCACHED].prot_pte |= L_PTE_XN;
4456+ mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_XN;
4457+ mem_types[MT_CACHECLEAN].prot_pte |= L_PTE_XN;
4458+#ifndef CONFIG_ARM_LPAE
4459+ mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_XN;
4460+ mem_types[MT_MINICLEAN].prot_pte |= L_PTE_XN;
4461+#endif
4462+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
4463+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_XN;
4464+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_XN;
4465+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_XN;
4466+ mem_types[MT_MEMORY_DTCM].prot_sect |= PMD_SECT_XN;
4467+ mem_types[MT_MEMORY_DTCM].prot_pte |= L_PTE_XN;
4468+#endif
4469+
4470+ mem_types[MT_MEMORY_SO].prot_sect |= PMD_SECT_XN;
4471+ mem_types[MT_MEMORY_SO].prot_pte |= L_PTE_XN;
4472 }
4473 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4474 /*
4475@@ -470,6 +538,9 @@ static void __init build_mem_type_table(void)
4476 * from SVC mode and no access from userspace.
4477 */
4478 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4479+#ifdef CONFIG_PAX_KERNEXEC
4480+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4481+#endif
4482 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4483 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4484 #endif
4485@@ -487,11 +558,17 @@ static void __init build_mem_type_table(void)
4486 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4487 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4488 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4489- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
4490- mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
4491+ mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4492+ mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4493+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4494+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4495+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4496+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4497 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4498- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
4499- mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
4500+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_S;
4501+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_SHARED;
4502+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_S;
4503+ mem_types[MT_MEMORY_NONCACHED_RX].prot_pte |= L_PTE_SHARED;
4504 }
4505 }
4506
4507@@ -502,15 +579,20 @@ static void __init build_mem_type_table(void)
4508 if (cpu_arch >= CPU_ARCH_ARMv6) {
4509 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4510 /* Non-cacheable Normal is XCB = 001 */
4511- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4512+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4513+ PMD_SECT_BUFFERED;
4514+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4515 PMD_SECT_BUFFERED;
4516 } else {
4517 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4518- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4519+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4520+ PMD_SECT_TEX(1);
4521+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4522 PMD_SECT_TEX(1);
4523 }
4524 } else {
4525- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4526+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_BUFFERABLE;
4527+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_BUFFERABLE;
4528 }
4529
4530 #ifdef CONFIG_ARM_LPAE
4531@@ -526,6 +608,8 @@ static void __init build_mem_type_table(void)
4532 vecs_pgprot |= PTE_EXT_AF;
4533 #endif
4534
4535+ user_pgprot |= __supported_pte_mask;
4536+
4537 for (i = 0; i < 16; i++) {
4538 pteval_t v = pgprot_val(protection_map[i]);
4539 protection_map[i] = __pgprot(v | user_pgprot);
4540@@ -543,10 +627,15 @@ static void __init build_mem_type_table(void)
4541
4542 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4543 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4544- mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
4545- mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
4546+ mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4547+ mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4548+ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4549+ mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4550+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4551+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4552 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4553- mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
4554+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ecc_mask;
4555+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= ecc_mask;
4556 mem_types[MT_ROM].prot_sect |= cp->pmd;
4557
4558 switch (cp->pmd) {
4559@@ -1188,18 +1277,15 @@ void __init arm_mm_memblock_reserve(void)
4560 * called function. This means you can't use any function or debugging
4561 * method which may touch any device, otherwise the kernel _will_ crash.
4562 */
4563+
4564+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4565+
4566 static void __init devicemaps_init(const struct machine_desc *mdesc)
4567 {
4568 struct map_desc map;
4569 unsigned long addr;
4570- void *vectors;
4571
4572- /*
4573- * Allocate the vector page early.
4574- */
4575- vectors = early_alloc(PAGE_SIZE * 2);
4576-
4577- early_trap_init(vectors);
4578+ early_trap_init(&vectors);
4579
4580 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4581 pmd_clear(pmd_off_k(addr));
4582@@ -1239,7 +1325,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4583 * location (0xffff0000). If we aren't using high-vectors, also
4584 * create a mapping at the low-vectors virtual address.
4585 */
4586- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4587+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4588 map.virtual = 0xffff0000;
4589 map.length = PAGE_SIZE;
4590 #ifdef CONFIG_KUSER_HELPERS
4591@@ -1311,8 +1397,39 @@ static void __init map_lowmem(void)
4592 map.pfn = __phys_to_pfn(start);
4593 map.virtual = __phys_to_virt(start);
4594 map.length = end - start;
4595- map.type = MT_MEMORY;
4596
4597+#ifdef CONFIG_PAX_KERNEXEC
4598+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4599+ struct map_desc kernel;
4600+ struct map_desc initmap;
4601+
4602+ /* when freeing initmem we will make this RW */
4603+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4604+ initmap.virtual = (unsigned long)__init_begin;
4605+ initmap.length = _sdata - __init_begin;
4606+ initmap.type = MT_MEMORY_RWX;
4607+ create_mapping(&initmap);
4608+
4609+ /* when freeing initmem we will make this RX */
4610+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4611+ kernel.virtual = (unsigned long)_stext;
4612+ kernel.length = __init_begin - _stext;
4613+ kernel.type = MT_MEMORY_RWX;
4614+ create_mapping(&kernel);
4615+
4616+ if (map.virtual < (unsigned long)_stext) {
4617+ map.length = (unsigned long)_stext - map.virtual;
4618+ map.type = MT_MEMORY_RWX;
4619+ create_mapping(&map);
4620+ }
4621+
4622+ map.pfn = __phys_to_pfn(__pa(_sdata));
4623+ map.virtual = (unsigned long)_sdata;
4624+ map.length = end - __pa(_sdata);
4625+ }
4626+#endif
4627+
4628+ map.type = MT_MEMORY_RW;
4629 create_mapping(&map);
4630 }
4631 }
4632diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4633index a5bc92d..0bb4730 100644
4634--- a/arch/arm/plat-omap/sram.c
4635+++ b/arch/arm/plat-omap/sram.c
4636@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4637 * Looks like we need to preserve some bootloader code at the
4638 * beginning of SRAM for jumping to flash for reboot to work...
4639 */
4640+ pax_open_kernel();
4641 memset_io(omap_sram_base + omap_sram_skip, 0,
4642 omap_sram_size - omap_sram_skip);
4643+ pax_close_kernel();
4644 }
4645diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4646index ce6d763..cfea917 100644
4647--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4648+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4649@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4650 int (*started)(unsigned ch);
4651 int (*flush)(unsigned ch);
4652 int (*stop)(unsigned ch);
4653-};
4654+} __no_const;
4655
4656 extern void *samsung_dmadev_get_ops(void);
4657 extern void *s3c_dma_get_ops(void);
4658diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4659index c3a58a1..78fbf54 100644
4660--- a/arch/avr32/include/asm/cache.h
4661+++ b/arch/avr32/include/asm/cache.h
4662@@ -1,8 +1,10 @@
4663 #ifndef __ASM_AVR32_CACHE_H
4664 #define __ASM_AVR32_CACHE_H
4665
4666+#include <linux/const.h>
4667+
4668 #define L1_CACHE_SHIFT 5
4669-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4670+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4671
4672 /*
4673 * Memory returned by kmalloc() may be used for DMA, so we must make
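
The recurring cache.h change (here and in the blackfin, cris, frv, hexagon and ia64 hunks below) rewrites L1_CACHE_BYTES as (_AC(1,UL) << L1_CACHE_SHIFT): the _AC() macro pastes the UL suffix only when compiling C, so the header still works from assembly, and the C-side constant becomes unsigned long instead of a signed int, which matters once it feeds size comparisons and mask arithmetic. A self-contained demonstration, with the _AC definition copied in simplified form:

#include <stdio.h>

#ifdef __ASSEMBLY__
#define _AC(X, Y)  X
#else
#define __AC(X, Y) (X##Y)
#define _AC(X, Y)  __AC(X, Y)
#endif

#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)

int main(void)
{
	/* unsigned long arithmetic: the alignment mask is full-width */
	unsigned long mask = ~(L1_CACHE_BYTES - 1);
	printf("bytes=%lu mask=%#lx\n", L1_CACHE_BYTES, mask);
	return 0;
}
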
4674diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4675index d232888..87c8df1 100644
4676--- a/arch/avr32/include/asm/elf.h
4677+++ b/arch/avr32/include/asm/elf.h
4678@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4679 the loader. We need to make sure that it is out of the way of the program
4680 that it will "exec", and that there is sufficient room for the brk. */
4681
4682-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4683+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4684
4685+#ifdef CONFIG_PAX_ASLR
4686+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4687+
4688+#define PAX_DELTA_MMAP_LEN 15
4689+#define PAX_DELTA_STACK_LEN 15
4690+#endif
4691
4692 /* This yields a mask that user programs can use to figure out what
4693 instruction set this CPU supports. This could be done in user space,
4694diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4695index 479330b..53717a8 100644
4696--- a/arch/avr32/include/asm/kmap_types.h
4697+++ b/arch/avr32/include/asm/kmap_types.h
4698@@ -2,9 +2,9 @@
4699 #define __ASM_AVR32_KMAP_TYPES_H
4700
4701 #ifdef CONFIG_DEBUG_HIGHMEM
4702-# define KM_TYPE_NR 29
4703+# define KM_TYPE_NR 30
4704 #else
4705-# define KM_TYPE_NR 14
4706+# define KM_TYPE_NR 15
4707 #endif
4708
4709 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4710diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4711index 0eca933..eb78c7b 100644
4712--- a/arch/avr32/mm/fault.c
4713+++ b/arch/avr32/mm/fault.c
4714@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4715
4716 int exception_trace = 1;
4717
4718+#ifdef CONFIG_PAX_PAGEEXEC
4719+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4720+{
4721+ unsigned long i;
4722+
4723+ printk(KERN_ERR "PAX: bytes at PC: ");
4724+ for (i = 0; i < 20; i++) {
4725+ unsigned char c;
4726+ if (get_user(c, (unsigned char *)pc+i))
4727+ printk(KERN_CONT "?? ");
4728+ else
4729+ printk(KERN_CONT "%02x ", c);
4730+ }
4731+ printk("\n");
4732+}
4733+#endif
4734+
4735 /*
4736 * This routine handles page faults. It determines the address and the
4737 * problem, and then passes it off to one of the appropriate routines.
4738@@ -176,6 +193,16 @@ bad_area:
4739 up_read(&mm->mmap_sem);
4740
4741 if (user_mode(regs)) {
4742+
4743+#ifdef CONFIG_PAX_PAGEEXEC
4744+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4745+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4746+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4747+ do_group_exit(SIGKILL);
4748+ }
4749+ }
4750+#endif
4751+
4752 if (exception_trace && printk_ratelimit())
4753 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4754 "sp %08lx ecr %lu\n",
4755diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4756index 568885a..f8008df 100644
4757--- a/arch/blackfin/include/asm/cache.h
4758+++ b/arch/blackfin/include/asm/cache.h
4759@@ -7,6 +7,7 @@
4760 #ifndef __ARCH_BLACKFIN_CACHE_H
4761 #define __ARCH_BLACKFIN_CACHE_H
4762
4763+#include <linux/const.h>
4764 #include <linux/linkage.h> /* for asmlinkage */
4765
4766 /*
4767@@ -14,7 +15,7 @@
4768 * Blackfin loads 32 bytes for cache
4769 */
4770 #define L1_CACHE_SHIFT 5
4771-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4772+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4773 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4774
4775 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4776diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4777index aea2718..3639a60 100644
4778--- a/arch/cris/include/arch-v10/arch/cache.h
4779+++ b/arch/cris/include/arch-v10/arch/cache.h
4780@@ -1,8 +1,9 @@
4781 #ifndef _ASM_ARCH_CACHE_H
4782 #define _ASM_ARCH_CACHE_H
4783
4784+#include <linux/const.h>
4785 /* Etrax 100LX have 32-byte cache-lines. */
4786-#define L1_CACHE_BYTES 32
4787 #define L1_CACHE_SHIFT 5
4788+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4789
4790 #endif /* _ASM_ARCH_CACHE_H */
4791diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4792index 7caf25d..ee65ac5 100644
4793--- a/arch/cris/include/arch-v32/arch/cache.h
4794+++ b/arch/cris/include/arch-v32/arch/cache.h
4795@@ -1,11 +1,12 @@
4796 #ifndef _ASM_CRIS_ARCH_CACHE_H
4797 #define _ASM_CRIS_ARCH_CACHE_H
4798
4799+#include <linux/const.h>
4800 #include <arch/hwregs/dma.h>
4801
4802 /* A cache-line is 32 bytes. */
4803-#define L1_CACHE_BYTES 32
4804 #define L1_CACHE_SHIFT 5
4805+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4806
4807 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4808
4809diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
4810index b86329d..6709906 100644
4811--- a/arch/frv/include/asm/atomic.h
4812+++ b/arch/frv/include/asm/atomic.h
4813@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
4814 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
4815 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
4816
4817+#define atomic64_read_unchecked(v) atomic64_read(v)
4818+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4819+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4820+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4821+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4822+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4823+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4824+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4825+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4826+
4827 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
4828 {
4829 int c, old;
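
The atomic64_*_unchecked defines added for frv (and, below, ia64) exist so generic code can mark counters that are allowed to wrap, such as the ARM ASID generation converted earlier in this patch: on architectures with PAX_REFCOUNT instrumentation the plain ops trap on overflow, while the _unchecked family never does; on frv and ia64 the two families are simply aliases. A small model of the split, with report-instead-of-trap standing in for the real PaX handler:

#include <limits.h>
#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;

/* "checked" op: polices overflow, as PAX_REFCOUNT would */
static int atomic_add_return(int i, atomic_t *v)
{
	long old = __atomic_fetch_add(&v->counter, i, __ATOMIC_SEQ_CST);
	long sum = old + (long)i;   /* widened so the check cannot overflow */
	if (sum > INT_MAX || sum < INT_MIN)
		fprintf(stderr, "refcount overflow detected\n");
	return (int)sum;
}

/* "unchecked" op: wrap-around is expected and fine */
static int atomic_add_return_unchecked(int i, atomic_t *v)
{
	return __atomic_add_fetch(&v->counter, i, __ATOMIC_SEQ_CST);
}

int main(void)
{
	atomic_t refs = { INT_MAX }, gen = { INT_MAX };
	atomic_add_return(1, &refs);                          /* reports */
	printf("%d\n", atomic_add_return_unchecked(1, &gen)); /* wraps */
	return 0;
}
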
4830diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
4831index 2797163..c2a401d 100644
4832--- a/arch/frv/include/asm/cache.h
4833+++ b/arch/frv/include/asm/cache.h
4834@@ -12,10 +12,11 @@
4835 #ifndef __ASM_CACHE_H
4836 #define __ASM_CACHE_H
4837
4838+#include <linux/const.h>
4839
4840 /* bytes per L1 cache line */
4841 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
4842-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4843+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4844
4845 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4846 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4847diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
4848index 43901f2..0d8b865 100644
4849--- a/arch/frv/include/asm/kmap_types.h
4850+++ b/arch/frv/include/asm/kmap_types.h
4851@@ -2,6 +2,6 @@
4852 #ifndef _ASM_KMAP_TYPES_H
4853 #define _ASM_KMAP_TYPES_H
4854
4855-#define KM_TYPE_NR 17
4856+#define KM_TYPE_NR 18
4857
4858 #endif
4859diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
4860index 836f147..4cf23f5 100644
4861--- a/arch/frv/mm/elf-fdpic.c
4862+++ b/arch/frv/mm/elf-fdpic.c
4863@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4864 {
4865 struct vm_area_struct *vma;
4866 struct vm_unmapped_area_info info;
4867+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4868
4869 if (len > TASK_SIZE)
4870 return -ENOMEM;
4871@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4872 if (addr) {
4873 addr = PAGE_ALIGN(addr);
4874 vma = find_vma(current->mm, addr);
4875- if (TASK_SIZE - len >= addr &&
4876- (!vma || addr + len <= vma->vm_start))
4877+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4878 goto success;
4879 }
4880
4881@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4882 info.high_limit = (current->mm->start_stack - 0x00200000);
4883 info.align_mask = 0;
4884 info.align_offset = 0;
4885+ info.threadstack_offset = offset;
4886 addr = vm_unmapped_area(&info);
4887 if (!(addr & ~PAGE_MASK))
4888 goto success;
4889diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
4890index f4ca594..adc72fd6 100644
4891--- a/arch/hexagon/include/asm/cache.h
4892+++ b/arch/hexagon/include/asm/cache.h
4893@@ -21,9 +21,11 @@
4894 #ifndef __ASM_CACHE_H
4895 #define __ASM_CACHE_H
4896
4897+#include <linux/const.h>
4898+
4899 /* Bytes per L1 cache line */
4900-#define L1_CACHE_SHIFT (5)
4901-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4902+#define L1_CACHE_SHIFT 5
4903+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4904
4905 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
4906 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
4907diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
4908index 4e4119b..dd7de0a 100644
4909--- a/arch/ia64/Kconfig
4910+++ b/arch/ia64/Kconfig
4911@@ -554,6 +554,7 @@ source "drivers/sn/Kconfig"
4912 config KEXEC
4913 bool "kexec system call"
4914 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
4915+ depends on !GRKERNSEC_KMEM
4916 help
4917 kexec is a system call that implements the ability to shutdown your
4918 current kernel, and to start another kernel. It is like a reboot
4919diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
4920index 6e6fe18..a6ae668 100644
4921--- a/arch/ia64/include/asm/atomic.h
4922+++ b/arch/ia64/include/asm/atomic.h
4923@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
4924 #define atomic64_inc(v) atomic64_add(1, (v))
4925 #define atomic64_dec(v) atomic64_sub(1, (v))
4926
4927+#define atomic64_read_unchecked(v) atomic64_read(v)
4928+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4929+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4930+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4931+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4932+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4933+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4934+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4935+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4936+
4937 /* Atomic operations are already serializing */
4938 #define smp_mb__before_atomic_dec() barrier()
4939 #define smp_mb__after_atomic_dec() barrier()
4940diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
4941index 988254a..e1ee885 100644
4942--- a/arch/ia64/include/asm/cache.h
4943+++ b/arch/ia64/include/asm/cache.h
4944@@ -1,6 +1,7 @@
4945 #ifndef _ASM_IA64_CACHE_H
4946 #define _ASM_IA64_CACHE_H
4947
4948+#include <linux/const.h>
4949
4950 /*
4951 * Copyright (C) 1998-2000 Hewlett-Packard Co
4952@@ -9,7 +10,7 @@
4953
4954 /* Bytes per L1 (data) cache line. */
4955 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
4956-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4957+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4958
4959 #ifdef CONFIG_SMP
4960 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
4961diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
4962index 5a83c5c..4d7f553 100644
4963--- a/arch/ia64/include/asm/elf.h
4964+++ b/arch/ia64/include/asm/elf.h
4965@@ -42,6 +42,13 @@
4966 */
4967 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
4968
4969+#ifdef CONFIG_PAX_ASLR
4970+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
4971+
4972+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4973+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4974+#endif
4975+
4976 #define PT_IA_64_UNWIND 0x70000001
4977
4978 /* IA-64 relocations: */
4979diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
4980index 5767cdf..7462574 100644
4981--- a/arch/ia64/include/asm/pgalloc.h
4982+++ b/arch/ia64/include/asm/pgalloc.h
4983@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4984 pgd_val(*pgd_entry) = __pa(pud);
4985 }
4986
4987+static inline void
4988+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4989+{
4990+ pgd_populate(mm, pgd_entry, pud);
4991+}
4992+
4993 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
4994 {
4995 return quicklist_alloc(0, GFP_KERNEL, NULL);
4996@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4997 pud_val(*pud_entry) = __pa(pmd);
4998 }
4999
5000+static inline void
5001+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5002+{
5003+ pud_populate(mm, pud_entry, pmd);
5004+}
5005+
5006 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5007 {
5008 return quicklist_alloc(0, GFP_KERNEL, NULL);
5009diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5010index 7935115..c0eca6a 100644
5011--- a/arch/ia64/include/asm/pgtable.h
5012+++ b/arch/ia64/include/asm/pgtable.h
5013@@ -12,7 +12,7 @@
5014 * David Mosberger-Tang <davidm@hpl.hp.com>
5015 */
5016
5017-
5018+#include <linux/const.h>
5019 #include <asm/mman.h>
5020 #include <asm/page.h>
5021 #include <asm/processor.h>
5022@@ -142,6 +142,17 @@
5023 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5024 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5025 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5026+
5027+#ifdef CONFIG_PAX_PAGEEXEC
5028+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5029+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5030+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5031+#else
5032+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5033+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5034+# define PAGE_COPY_NOEXEC PAGE_COPY
5035+#endif
5036+
5037 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5038 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5039 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5040diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5041index 45698cd..e8e2dbc 100644
5042--- a/arch/ia64/include/asm/spinlock.h
5043+++ b/arch/ia64/include/asm/spinlock.h
5044@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5045 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5046
5047 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5048- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5049+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5050 }
5051
5052 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
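
ACCESS_ONCE_RW exists because this patch series const-qualifies ACCESS_ONCE, turning accidental writes through it into compile errors; stores must opt in explicitly. Roughly (a sketch; the exact definitions live elsewhere in the patch):

#define ACCESS_ONCE(x)		(*(volatile const typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x)	(*(volatile typeof(x) *)&(x))

unsigned short ticket;

void unlock_tail(void)
{
	unsigned short tmp = ACCESS_ONCE(ticket);	/* read: const is fine */
	ACCESS_ONCE_RW(ticket) = (tmp + 2) & ~1;	/* write needs _RW */
}
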
5053diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5054index 449c8c0..18965fb 100644
5055--- a/arch/ia64/include/asm/uaccess.h
5056+++ b/arch/ia64/include/asm/uaccess.h
5057@@ -240,12 +240,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5058 static inline unsigned long
5059 __copy_to_user (void __user *to, const void *from, unsigned long count)
5060 {
5061+ if (count > INT_MAX)
5062+ return count;
5063+
5064+ if (!__builtin_constant_p(count))
5065+ check_object_size(from, count, true);
5066+
5067 return __copy_user(to, (__force void __user *) from, count);
5068 }
5069
5070 static inline unsigned long
5071 __copy_from_user (void *to, const void __user *from, unsigned long count)
5072 {
5073+ if (count > INT_MAX)
5074+ return count;
5075+
5076+ if (!__builtin_constant_p(count))
5077+ check_object_size(to, count, false);
5078+
5079 return __copy_user((__force void __user *) to, from, count);
5080 }
5081
5082@@ -255,10 +267,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5083 ({ \
5084 void __user *__cu_to = (to); \
5085 const void *__cu_from = (from); \
5086- long __cu_len = (n); \
5087+ unsigned long __cu_len = (n); \
5088 \
5089- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5090+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5091+ if (!__builtin_constant_p(n)) \
5092+ check_object_size(__cu_from, __cu_len, true); \
5093 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5094+ } \
5095 __cu_len; \
5096 })
5097
5098@@ -266,11 +281,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5099 ({ \
5100 void *__cu_to = (to); \
5101 const void __user *__cu_from = (from); \
5102- long __cu_len = (n); \
5103+ unsigned long __cu_len = (n); \
5104 \
5105 __chk_user_ptr(__cu_from); \
5106- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5107+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5108+ if (!__builtin_constant_p(n)) \
5109+ check_object_size(__cu_to, __cu_len, false); \
5110 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5111+ } \
5112 __cu_len; \
5113 })
5114
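
The INT_MAX guard above defends against sign errors: a caller that computes a negative length and passes it through an unsigned long parameter would otherwise request an enormous copy. Returning the full count signals "nothing copied", which existing callers already treat as failure. A user-space analog, with memcpy standing in for __copy_user():

#include <limits.h>
#include <string.h>

unsigned long copy_checked(void *to, const void *from, unsigned long count)
{
	if (count > INT_MAX)		/* a negative size in disguise */
		return count;		/* 0 bytes copied */
	memcpy(to, from, count);	/* stand-in for __copy_user() */
	return 0;			/* all bytes copied */
}
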
5115diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5116index 24603be..948052d 100644
5117--- a/arch/ia64/kernel/module.c
5118+++ b/arch/ia64/kernel/module.c
5119@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
5120 void
5121 module_free (struct module *mod, void *module_region)
5122 {
5123- if (mod && mod->arch.init_unw_table &&
5124- module_region == mod->module_init) {
5125+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
5126 unw_remove_unwind_table(mod->arch.init_unw_table);
5127 mod->arch.init_unw_table = NULL;
5128 }
5129@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5130 }
5131
5132 static inline int
5133+in_init_rx (const struct module *mod, uint64_t addr)
5134+{
5135+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5136+}
5137+
5138+static inline int
5139+in_init_rw (const struct module *mod, uint64_t addr)
5140+{
5141+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5142+}
5143+
5144+static inline int
5145 in_init (const struct module *mod, uint64_t addr)
5146 {
5147- return addr - (uint64_t) mod->module_init < mod->init_size;
5148+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5149+}
5150+
5151+static inline int
5152+in_core_rx (const struct module *mod, uint64_t addr)
5153+{
5154+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5155+}
5156+
5157+static inline int
5158+in_core_rw (const struct module *mod, uint64_t addr)
5159+{
5160+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5161 }
5162
5163 static inline int
5164 in_core (const struct module *mod, uint64_t addr)
5165 {
5166- return addr - (uint64_t) mod->module_core < mod->core_size;
5167+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5168 }
5169
5170 static inline int
5171@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5172 break;
5173
5174 case RV_BDREL:
5175- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5176+ if (in_init_rx(mod, val))
5177+ val -= (uint64_t) mod->module_init_rx;
5178+ else if (in_init_rw(mod, val))
5179+ val -= (uint64_t) mod->module_init_rw;
5180+ else if (in_core_rx(mod, val))
5181+ val -= (uint64_t) mod->module_core_rx;
5182+ else if (in_core_rw(mod, val))
5183+ val -= (uint64_t) mod->module_core_rw;
5184 break;
5185
5186 case RV_LTV:
5187@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5188 * addresses have been selected...
5189 */
5190 uint64_t gp;
5191- if (mod->core_size > MAX_LTOFF)
5192+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5193 /*
5194 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5195 * at the end of the module.
5196 */
5197- gp = mod->core_size - MAX_LTOFF / 2;
5198+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5199 else
5200- gp = mod->core_size / 2;
5201- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5202+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5203+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5204 mod->arch.gp = gp;
5205 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5206 }
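
The in_init_rx()/in_core_rw() helpers above all rely on one unsigned-wraparound idiom: `addr - base < size` is a single comparison that covers both `addr < base` (the subtraction wraps to a huge value) and `addr >= base + size`. Isolated:

#include <stdint.h>

/* One compare checks base <= addr < base + size: if addr < base the
 * unsigned subtraction wraps and the result exceeds any sane size. */
int in_range(uint64_t addr, uint64_t base, uint64_t size)
{
	return addr - base < size;
}
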
5207diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5208index ab33328..f39506c 100644
5209--- a/arch/ia64/kernel/palinfo.c
5210+++ b/arch/ia64/kernel/palinfo.c
5211@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5212 return NOTIFY_OK;
5213 }
5214
5215-static struct notifier_block __refdata palinfo_cpu_notifier =
5216+static struct notifier_block palinfo_cpu_notifier =
5217 {
5218 .notifier_call = palinfo_cpu_callback,
5219 .priority = 0,
5220diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5221index 41e33f8..65180b2a 100644
5222--- a/arch/ia64/kernel/sys_ia64.c
5223+++ b/arch/ia64/kernel/sys_ia64.c
5224@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5225 unsigned long align_mask = 0;
5226 struct mm_struct *mm = current->mm;
5227 struct vm_unmapped_area_info info;
5228+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5229
5230 if (len > RGN_MAP_LIMIT)
5231 return -ENOMEM;
5232@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5233 if (REGION_NUMBER(addr) == RGN_HPAGE)
5234 addr = 0;
5235 #endif
5236+
5237+#ifdef CONFIG_PAX_RANDMMAP
5238+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5239+ addr = mm->free_area_cache;
5240+ else
5241+#endif
5242+
5243 if (!addr)
5244 addr = TASK_UNMAPPED_BASE;
5245
5246@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5247 info.high_limit = TASK_SIZE;
5248 info.align_mask = align_mask;
5249 info.align_offset = 0;
5250+ info.threadstack_offset = offset;
5251 return vm_unmapped_area(&info);
5252 }
5253
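
gr_rand_threadstack_offset() and the threadstack_offset field are grsecurity additions defined elsewhere in this patch; together they keep a randomized gap between thread stacks and new mappings so a heap overflow cannot land directly on a stack. A simplified sketch of the gap test the allocator ends up performing, with the signature inferred from these call sites:

/* Does [addr, addr+len) leave at least `offset` bytes before the next
 * mapping? The real code also accounts for guard pages and direction. */
int gap_ok(unsigned long addr, unsigned long len,
	   unsigned long next_vma_start, unsigned long offset)
{
	return addr + len + offset <= next_vma_start;
}
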
5254diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5255index 0ccb28f..8992469 100644
5256--- a/arch/ia64/kernel/vmlinux.lds.S
5257+++ b/arch/ia64/kernel/vmlinux.lds.S
5258@@ -198,7 +198,7 @@ SECTIONS {
5259 /* Per-cpu data: */
5260 . = ALIGN(PERCPU_PAGE_SIZE);
5261 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5262- __phys_per_cpu_start = __per_cpu_load;
5263+ __phys_per_cpu_start = per_cpu_load;
5264 /*
5265 * ensure percpu data fits
5266 * into percpu page size
5267diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5268index 7225dad..2a7c8256 100644
5269--- a/arch/ia64/mm/fault.c
5270+++ b/arch/ia64/mm/fault.c
5271@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5272 return pte_present(pte);
5273 }
5274
5275+#ifdef CONFIG_PAX_PAGEEXEC
5276+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5277+{
5278+ unsigned long i;
5279+
5280+ printk(KERN_ERR "PAX: bytes at PC: ");
5281+ for (i = 0; i < 8; i++) {
5282+ unsigned int c;
5283+ if (get_user(c, (unsigned int *)pc+i))
5284+ printk(KERN_CONT "???????? ");
5285+ else
5286+ printk(KERN_CONT "%08x ", c);
5287+ }
5288+ printk("\n");
5289+}
5290+#endif
5291+
5292 # define VM_READ_BIT 0
5293 # define VM_WRITE_BIT 1
5294 # define VM_EXEC_BIT 2
5295@@ -151,8 +168,21 @@ retry:
5296 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5297 goto bad_area;
5298
5299- if ((vma->vm_flags & mask) != mask)
5300+ if ((vma->vm_flags & mask) != mask) {
5301+
5302+#ifdef CONFIG_PAX_PAGEEXEC
5303+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5304+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5305+ goto bad_area;
5306+
5307+ up_read(&mm->mmap_sem);
5308+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5309+ do_group_exit(SIGKILL);
5310+ }
5311+#endif
5312+
5313 goto bad_area;
5314+ }
5315
5316 /*
5317 * If for any reason at all we couldn't handle the fault, make
5318diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5319index 68232db..6ca80af 100644
5320--- a/arch/ia64/mm/hugetlbpage.c
5321+++ b/arch/ia64/mm/hugetlbpage.c
5322@@ -154,6 +154,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5323 unsigned long pgoff, unsigned long flags)
5324 {
5325 struct vm_unmapped_area_info info;
5326+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5327
5328 if (len > RGN_MAP_LIMIT)
5329 return -ENOMEM;
5330@@ -177,6 +178,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5331 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5332 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5333 info.align_offset = 0;
5334+ info.threadstack_offset = offset;
5335 return vm_unmapped_area(&info);
5336 }
5337
5338diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5339index 88504ab..cbb6c9f 100644
5340--- a/arch/ia64/mm/init.c
5341+++ b/arch/ia64/mm/init.c
5342@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5343 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5344 vma->vm_end = vma->vm_start + PAGE_SIZE;
5345 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5346+
5347+#ifdef CONFIG_PAX_PAGEEXEC
5348+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5349+ vma->vm_flags &= ~VM_EXEC;
5350+
5351+#ifdef CONFIG_PAX_MPROTECT
5352+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5353+ vma->vm_flags &= ~VM_MAYEXEC;
5354+#endif
5355+
5356+ }
5357+#endif
5358+
5359 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5360 down_write(&current->mm->mmap_sem);
5361 if (insert_vm_struct(current->mm, vma)) {
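
The flag dance above is the standard PaX pattern for the ia64 register backing store: clearing VM_EXEC makes the vma non-executable now, and clearing VM_MAYEXEC (under MPROTECT) makes any later mprotect(PROT_EXEC) on it fail, since mprotect only grants rights present in the VM_MAY* mask. Using the kernel's actual flag values:

#define VM_EXEC		0x00000004UL
#define VM_MAYEXEC	0x00000040UL

/* mprotect(PROT_EXEC) is refused once VM_MAYEXEC is gone */
int may_add_exec(unsigned long vm_flags)
{
	return (vm_flags & VM_MAYEXEC) != 0;
}
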
5362diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5363index 40b3ee9..8c2c112 100644
5364--- a/arch/m32r/include/asm/cache.h
5365+++ b/arch/m32r/include/asm/cache.h
5366@@ -1,8 +1,10 @@
5367 #ifndef _ASM_M32R_CACHE_H
5368 #define _ASM_M32R_CACHE_H
5369
5370+#include <linux/const.h>
5371+
5372 /* L1 cache line size */
5373 #define L1_CACHE_SHIFT 4
5374-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5375+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5376
5377 #endif /* _ASM_M32R_CACHE_H */
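
The _AC() macro (from include/uapi/linux/const.h) is what lets L1_CACHE_BYTES carry a UL suffix in C while remaining usable in assembly, where "1UL" is a syntax error:

/* Mirrors include/uapi/linux/const.h */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* asm: plain 1 */
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)	/* C: token-pasted 1UL */
#endif

#define L1_CACHE_SHIFT	4
#define L1_CACHE_BYTES	(_AC(1, UL) << L1_CACHE_SHIFT)	/* 16UL in C */
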
5378diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5379index 82abd15..d95ae5d 100644
5380--- a/arch/m32r/lib/usercopy.c
5381+++ b/arch/m32r/lib/usercopy.c
5382@@ -14,6 +14,9 @@
5383 unsigned long
5384 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5385 {
5386+ if ((long)n < 0)
5387+ return n;
5388+
5389 prefetch(from);
5390 if (access_ok(VERIFY_WRITE, to, n))
5391 __copy_user(to,from,n);
5392@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5393 unsigned long
5394 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5395 {
5396+ if ((long)n < 0)
5397+ return n;
5398+
5399 prefetchw(to);
5400 if (access_ok(VERIFY_READ, from, n))
5401 __copy_user_zeroing(to,from,n);
5402diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5403index 0395c51..5f26031 100644
5404--- a/arch/m68k/include/asm/cache.h
5405+++ b/arch/m68k/include/asm/cache.h
5406@@ -4,9 +4,11 @@
5407 #ifndef __ARCH_M68K_CACHE_H
5408 #define __ARCH_M68K_CACHE_H
5409
5410+#include <linux/const.h>
5411+
5412 /* bytes per L1 cache line */
5413 #define L1_CACHE_SHIFT 4
5414-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5415+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5416
5417 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5418
5419diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5420index 0424315..defcca9 100644
5421--- a/arch/metag/mm/hugetlbpage.c
5422+++ b/arch/metag/mm/hugetlbpage.c
5423@@ -205,6 +205,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5424 info.high_limit = TASK_SIZE;
5425 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5426 info.align_offset = 0;
5427+ info.threadstack_offset = 0;
5428 return vm_unmapped_area(&info);
5429 }
5430
5431diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5432index 4efe96a..60e8699 100644
5433--- a/arch/microblaze/include/asm/cache.h
5434+++ b/arch/microblaze/include/asm/cache.h
5435@@ -13,11 +13,12 @@
5436 #ifndef _ASM_MICROBLAZE_CACHE_H
5437 #define _ASM_MICROBLAZE_CACHE_H
5438
5439+#include <linux/const.h>
5440 #include <asm/registers.h>
5441
5442 #define L1_CACHE_SHIFT 5
5443 /* word-granular cache in microblaze */
5444-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5445+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5446
5447 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5448
5449diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5450index 650de39..6982b02 100644
5451--- a/arch/mips/Kconfig
5452+++ b/arch/mips/Kconfig
5453@@ -2268,6 +2268,7 @@ source "kernel/Kconfig.preempt"
5454
5455 config KEXEC
5456 bool "Kexec system call"
5457+ depends on !GRKERNSEC_KMEM
5458 help
5459 kexec is a system call that implements the ability to shutdown your
5460 current kernel, and to start another kernel. It is like a reboot
5461diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5462index 7eed2f2..c4e385d 100644
5463--- a/arch/mips/include/asm/atomic.h
5464+++ b/arch/mips/include/asm/atomic.h
5465@@ -21,15 +21,39 @@
5466 #include <asm/cmpxchg.h>
5467 #include <asm/war.h>
5468
5469+#ifdef CONFIG_GENERIC_ATOMIC64
5470+#include <asm-generic/atomic64.h>
5471+#endif
5472+
5473 #define ATOMIC_INIT(i) { (i) }
5474
5475+#ifdef CONFIG_64BIT
5476+#define _ASM_EXTABLE(from, to) \
5477+" .section __ex_table,\"a\"\n" \
5478+" .dword " #from ", " #to"\n" \
5479+" .previous\n"
5480+#else
5481+#define _ASM_EXTABLE(from, to) \
5482+" .section __ex_table,\"a\"\n" \
5483+" .word " #from ", " #to"\n" \
5484+" .previous\n"
5485+#endif
5486+
5487 /*
5488 * atomic_read - read atomic variable
5489 * @v: pointer of type atomic_t
5490 *
5491 * Atomically reads the value of @v.
5492 */
5493-#define atomic_read(v) (*(volatile int *)&(v)->counter)
5494+static inline int atomic_read(const atomic_t *v)
5495+{
5496+ return (*(volatile const int *) &v->counter);
5497+}
5498+
5499+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5500+{
5501+ return (*(volatile const int *) &v->counter);
5502+}
5503
5504 /*
5505 * atomic_set - set atomic variable
5506@@ -38,7 +62,15 @@
5507 *
5508 * Atomically sets the value of @v to @i.
5509 */
5510-#define atomic_set(v, i) ((v)->counter = (i))
5511+static inline void atomic_set(atomic_t *v, int i)
5512+{
5513+ v->counter = i;
5514+}
5515+
5516+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5517+{
5518+ v->counter = i;
5519+}
5520
5521 /*
5522 * atomic_add - add integer to atomic variable
5523@@ -47,7 +79,67 @@
5524 *
5525 * Atomically adds @i to @v.
5526 */
5527-static __inline__ void atomic_add(int i, atomic_t * v)
5528+static __inline__ void atomic_add(int i, atomic_t *v)
5529+{
5530+ int temp;
5531+
5532+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5533+ __asm__ __volatile__(
5534+ " .set mips3 \n"
5535+ "1: ll %0, %1 # atomic_add \n"
5536+#ifdef CONFIG_PAX_REFCOUNT
5537+ /* Exception on overflow. */
5538+ "2: add %0, %2 \n"
5539+#else
5540+ " addu %0, %2 \n"
5541+#endif
5542+ " sc %0, %1 \n"
5543+ " beqzl %0, 1b \n"
5544+#ifdef CONFIG_PAX_REFCOUNT
5545+ "3: \n"
5546+ _ASM_EXTABLE(2b, 3b)
5547+#endif
5548+ " .set mips0 \n"
5549+ : "=&r" (temp), "+m" (v->counter)
5550+ : "Ir" (i));
5551+ } else if (kernel_uses_llsc) {
5552+ __asm__ __volatile__(
5553+ " .set mips3 \n"
5554+ "1: ll %0, %1 # atomic_add \n"
5555+#ifdef CONFIG_PAX_REFCOUNT
5556+ /* Exception on overflow. */
5557+ "2: add %0, %2 \n"
5558+#else
5559+ " addu %0, %2 \n"
5560+#endif
5561+ " sc %0, %1 \n"
5562+ " beqz %0, 1b \n"
5563+#ifdef CONFIG_PAX_REFCOUNT
5564+ "3: \n"
5565+ _ASM_EXTABLE(2b, 3b)
5566+#endif
5567+ " .set mips0 \n"
5568+ : "=&r" (temp), "+m" (v->counter)
5569+ : "Ir" (i));
5570+ } else {
5571+ unsigned long flags;
5572+
5573+ raw_local_irq_save(flags);
5574+ __asm__ __volatile__(
5575+#ifdef CONFIG_PAX_REFCOUNT
5576+ /* Exception on overflow. */
5577+ "1: add %0, %1 \n"
5578+ "2: \n"
5579+ _ASM_EXTABLE(1b, 2b)
5580+#else
5581+ " addu %0, %1 \n"
5582+#endif
5583+ : "+r" (v->counter) : "Ir" (i));
5584+ raw_local_irq_restore(flags);
5585+ }
5586+}
5587+
5588+static __inline__ void atomic_add_unchecked(int i, atomic_unchecked_t *v)
5589 {
5590 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5591 int temp;
5592@@ -90,7 +182,67 @@ static __inline__ void atomic_add(int i, atomic_t * v)
5593 *
5594 * Atomically subtracts @i from @v.
5595 */
5596-static __inline__ void atomic_sub(int i, atomic_t * v)
5597+static __inline__ void atomic_sub(int i, atomic_t *v)
5598+{
5599+ int temp;
5600+
5601+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5602+ __asm__ __volatile__(
5603+ " .set mips3 \n"
5604+ "1: ll %0, %1 # atomic64_sub \n"
5605+#ifdef CONFIG_PAX_REFCOUNT
5606+ /* Exception on overflow. */
5607+ "2: sub %0, %2 \n"
5608+#else
5609+ " subu %0, %2 \n"
5610+#endif
5611+ " sc %0, %1 \n"
5612+ " beqzl %0, 1b \n"
5613+#ifdef CONFIG_PAX_REFCOUNT
5614+ "3: \n"
5615+ _ASM_EXTABLE(2b, 3b)
5616+#endif
5617+ " .set mips0 \n"
5618+ : "=&r" (temp), "+m" (v->counter)
5619+ : "Ir" (i));
5620+ } else if (kernel_uses_llsc) {
5621+ __asm__ __volatile__(
5622+ " .set mips3 \n"
5623+ "1: ll %0, %1 # atomic64_sub \n"
5624+#ifdef CONFIG_PAX_REFCOUNT
5625+ /* Exception on overflow. */
5626+ "2: sub %0, %2 \n"
5627+#else
5628+ " subu %0, %2 \n"
5629+#endif
5630+ " sc %0, %1 \n"
5631+ " beqz %0, 1b \n"
5632+#ifdef CONFIG_PAX_REFCOUNT
5633+ "3: \n"
5634+ _ASM_EXTABLE(2b, 3b)
5635+#endif
5636+ " .set mips0 \n"
5637+ : "=&r" (temp), "+m" (v->counter)
5638+ : "Ir" (i));
5639+ } else {
5640+ unsigned long flags;
5641+
5642+ raw_local_irq_save(flags);
5643+ __asm__ __volatile__(
5644+#ifdef CONFIG_PAX_REFCOUNT
5645+ /* Exception on overflow. */
5646+ "1: sub %0, %1 \n"
5647+ "2: \n"
5648+ _ASM_EXTABLE(1b, 2b)
5649+#else
5650+ " subu %0, %1 \n"
5651+#endif
5652+ : "+r" (v->counter) : "Ir" (i));
5653+ raw_local_irq_restore(flags);
5654+ }
5655+}
5656+
5657+static __inline__ void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
5658 {
5659 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5660 int temp;
5661@@ -129,7 +281,93 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
5662 /*
5663 * Same as above, but return the result value
5664 */
5665-static __inline__ int atomic_add_return(int i, atomic_t * v)
5666+static __inline__ int atomic_add_return(int i, atomic_t *v)
5667+{
5668+ int result;
5669+ int temp;
5670+
5671+ smp_mb__before_llsc();
5672+
5673+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5674+ __asm__ __volatile__(
5675+ " .set mips3 \n"
5676+ "1: ll %1, %2 # atomic_add_return \n"
5677+#ifdef CONFIG_PAX_REFCOUNT
5678+ "2: add %0, %1, %3 \n"
5679+#else
5680+ " addu %0, %1, %3 \n"
5681+#endif
5682+ " sc %0, %2 \n"
5683+ " beqzl %0, 1b \n"
5684+#ifdef CONFIG_PAX_REFCOUNT
5685+ " b 4f \n"
5686+ " .set noreorder \n"
5687+ "3: b 5f \n"
5688+ " move %0, %1 \n"
5689+ " .set reorder \n"
5690+ _ASM_EXTABLE(2b, 3b)
5691+#endif
5692+ "4: addu %0, %1, %3 \n"
5693+#ifdef CONFIG_PAX_REFCOUNT
5694+ "5: \n"
5695+#endif
5696+ " .set mips0 \n"
5697+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5698+ : "Ir" (i));
5699+ } else if (kernel_uses_llsc) {
5700+ __asm__ __volatile__(
5701+ " .set mips3 \n"
5702+ "1: ll %1, %2 # atomic_add_return \n"
5703+#ifdef CONFIG_PAX_REFCOUNT
5704+ "2: add %0, %1, %3 \n"
5705+#else
5706+ " addu %0, %1, %3 \n"
5707+#endif
5708+ " sc %0, %2 \n"
5709+ " bnez %0, 4f \n"
5710+ " b 1b \n"
5711+#ifdef CONFIG_PAX_REFCOUNT
5712+ " .set noreorder \n"
5713+ "3: b 5f \n"
5714+ " move %0, %1 \n"
5715+ " .set reorder \n"
5716+ _ASM_EXTABLE(2b, 3b)
5717+#endif
5718+ "4: addu %0, %1, %3 \n"
5719+#ifdef CONFIG_PAX_REFCOUNT
5720+ "5: \n"
5721+#endif
5722+ " .set mips0 \n"
5723+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5724+ : "Ir" (i));
5725+ } else {
5726+ unsigned long flags;
5727+
5728+ raw_local_irq_save(flags);
5729+ __asm__ __volatile__(
5730+ " lw %0, %1 \n"
5731+#ifdef CONFIG_PAX_REFCOUNT
5732+ /* Exception on overflow. */
5733+ "1: add %0, %2 \n"
5734+#else
5735+ " addu %0, %2 \n"
5736+#endif
5737+ " sw %0, %1 \n"
5738+#ifdef CONFIG_PAX_REFCOUNT
5739+ /* Note: Dest reg is not modified on overflow */
5740+ "2: \n"
5741+ _ASM_EXTABLE(1b, 2b)
5742+#endif
5743+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
5744+ raw_local_irq_restore(flags);
5745+ }
5746+
5747+ smp_llsc_mb();
5748+
5749+ return result;
5750+}
5751+
5752+static __inline__ int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5753 {
5754 int result;
5755
5756@@ -178,7 +416,93 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
5757 return result;
5758 }
5759
5760-static __inline__ int atomic_sub_return(int i, atomic_t * v)
5761+static __inline__ int atomic_sub_return(int i, atomic_t *v)
5762+{
5763+ int result;
5764+ int temp;
5765+
5766+ smp_mb__before_llsc();
5767+
5768+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5769+ __asm__ __volatile__(
5770+ " .set mips3 \n"
5771+ "1: ll %1, %2 # atomic_sub_return \n"
5772+#ifdef CONFIG_PAX_REFCOUNT
5773+ "2: sub %0, %1, %3 \n"
5774+#else
5775+ " subu %0, %1, %3 \n"
5776+#endif
5777+ " sc %0, %2 \n"
5778+ " beqzl %0, 1b \n"
5779+#ifdef CONFIG_PAX_REFCOUNT
5780+ " b 4f \n"
5781+ " .set noreorder \n"
5782+ "3: b 5f \n"
5783+ " move %0, %1 \n"
5784+ " .set reorder \n"
5785+ _ASM_EXTABLE(2b, 3b)
5786+#endif
5787+ "4: subu %0, %1, %3 \n"
5788+#ifdef CONFIG_PAX_REFCOUNT
5789+ "5: \n"
5790+#endif
5791+ " .set mips0 \n"
5792+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
5793+ : "Ir" (i), "m" (v->counter)
5794+ : "memory");
5795+ } else if (kernel_uses_llsc) {
5796+ __asm__ __volatile__(
5797+ " .set mips3 \n"
5798+ "1: ll %1, %2 # atomic_sub_return \n"
5799+#ifdef CONFIG_PAX_REFCOUNT
5800+ "2: sub %0, %1, %3 \n"
5801+#else
5802+ " subu %0, %1, %3 \n"
5803+#endif
5804+ " sc %0, %2 \n"
5805+ " bnez %0, 4f \n"
5806+ " b 1b \n"
5807+#ifdef CONFIG_PAX_REFCOUNT
5808+ " .set noreorder \n"
5809+ "3: b 5f \n"
5810+ " move %0, %1 \n"
5811+ " .set reorder \n"
5812+ _ASM_EXTABLE(2b, 3b)
5813+#endif
5814+ "4: subu %0, %1, %3 \n"
5815+#ifdef CONFIG_PAX_REFCOUNT
5816+ "5: \n"
5817+#endif
5818+ " .set mips0 \n"
5819+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5820+ : "Ir" (i));
5821+ } else {
5822+ unsigned long flags;
5823+
5824+ raw_local_irq_save(flags);
5825+ __asm__ __volatile__(
5826+ " lw %0, %1 \n"
5827+#ifdef CONFIG_PAX_REFCOUNT
5828+ /* Exception on overflow. */
5829+ "1: sub %0, %2 \n"
5830+#else
5831+ " subu %0, %2 \n"
5832+#endif
5833+ " sw %0, %1 \n"
5834+#ifdef CONFIG_PAX_REFCOUNT
5835+ /* Note: Dest reg is not modified on overflow */
5836+ "2: \n"
5837+ _ASM_EXTABLE(1b, 2b)
5838+#endif
5839+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
5840+ raw_local_irq_restore(flags);
5841+ }
5842+
5843+ smp_llsc_mb();
5844+
5845+ return result;
5846+}
5847+static __inline__ int atomic_sub_return_unchecked(int i, atomic_unchecked_t *v)
5848 {
5849 int result;
5850
5851@@ -238,7 +562,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
5852 * Atomically test @v and subtract @i if @v is greater or equal than @i.
5853 * The function returns the old value of @v minus @i.
5854 */
5855-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5856+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
5857 {
5858 int result;
5859
5860@@ -295,8 +619,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5861 return result;
5862 }
5863
5864-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5865-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
5866+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
5867+{
5868+ return cmpxchg(&v->counter, old, new);
5869+}
5870+
5871+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
5872+ int new)
5873+{
5874+ return cmpxchg(&(v->counter), old, new);
5875+}
5876+
5877+static inline int atomic_xchg(atomic_t *v, int new)
5878+{
5879+ return xchg(&v->counter, new);
5880+}
5881+
5882+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5883+{
5884+ return xchg(&(v->counter), new);
5885+}
5886
5887 /**
5888 * __atomic_add_unless - add unless the number is a given value
5889@@ -324,6 +666,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5890
5891 #define atomic_dec_return(v) atomic_sub_return(1, (v))
5892 #define atomic_inc_return(v) atomic_add_return(1, (v))
5893+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5894+{
5895+ return atomic_add_return_unchecked(1, v);
5896+}
5897
5898 /*
5899 * atomic_sub_and_test - subtract value from variable and test result
5900@@ -345,6 +691,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5901 * other cases.
5902 */
5903 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5904+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5905+{
5906+ return atomic_add_return_unchecked(1, v) == 0;
5907+}
5908
5909 /*
5910 * atomic_dec_and_test - decrement by 1 and test
5911@@ -369,6 +719,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5912 * Atomically increments @v by 1.
5913 */
5914 #define atomic_inc(v) atomic_add(1, (v))
5915+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
5916+{
5917+ atomic_add_unchecked(1, v);
5918+}
5919
5920 /*
5921 * atomic_dec - decrement and test
5922@@ -377,6 +731,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5923 * Atomically decrements @v by 1.
5924 */
5925 #define atomic_dec(v) atomic_sub(1, (v))
5926+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
5927+{
5928+ atomic_sub_unchecked(1, v);
5929+}
5930
5931 /*
5932 * atomic_add_negative - add and test if negative
5933@@ -398,14 +756,30 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5934 * @v: pointer of type atomic64_t
5935 *
5936 */
5937-#define atomic64_read(v) (*(volatile long *)&(v)->counter)
5938+static inline long atomic64_read(const atomic64_t *v)
5939+{
5940+ return (*(volatile const long *) &v->counter);
5941+}
5942+
5943+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5944+{
5945+ return (*(volatile const long *) &v->counter);
5946+}
5947
5948 /*
5949 * atomic64_set - set atomic variable
5950 * @v: pointer of type atomic64_t
5951 * @i: required value
5952 */
5953-#define atomic64_set(v, i) ((v)->counter = (i))
5954+static inline void atomic64_set(atomic64_t *v, long i)
5955+{
5956+ v->counter = i;
5957+}
5958+
5959+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5960+{
5961+ v->counter = i;
5962+}
5963
5964 /*
5965 * atomic64_add - add integer to atomic variable
5966@@ -414,7 +788,66 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5967 *
5968 * Atomically adds @i to @v.
5969 */
5970-static __inline__ void atomic64_add(long i, atomic64_t * v)
5971+static __inline__ void atomic64_add(long i, atomic64_t *v)
5972+{
5973+ long temp;
5974+
5975+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5976+ __asm__ __volatile__(
5977+ " .set mips3 \n"
5978+ "1: lld %0, %1 # atomic64_add \n"
5979+#ifdef CONFIG_PAX_REFCOUNT
5980+ /* Exception on overflow. */
5981+ "2: dadd %0, %2 \n"
5982+#else
5983+ " daddu %0, %2 \n"
5984+#endif
5985+ " scd %0, %1 \n"
5986+ " beqzl %0, 1b \n"
5987+#ifdef CONFIG_PAX_REFCOUNT
5988+ "3: \n"
5989+ _ASM_EXTABLE(2b, 3b)
5990+#endif
5991+ " .set mips0 \n"
5992+ : "=&r" (temp), "+m" (v->counter)
5993+ : "Ir" (i));
5994+ } else if (kernel_uses_llsc) {
5995+ __asm__ __volatile__(
5996+ " .set mips3 \n"
5997+ "1: lld %0, %1 # atomic64_add \n"
5998+#ifdef CONFIG_PAX_REFCOUNT
5999+ /* Exception on overflow. */
6000+ "2: dadd %0, %2 \n"
6001+#else
6002+ " daddu %0, %2 \n"
6003+#endif
6004+ " scd %0, %1 \n"
6005+ " beqz %0, 1b \n"
6006+#ifdef CONFIG_PAX_REFCOUNT
6007+ "3: \n"
6008+ _ASM_EXTABLE(2b, 3b)
6009+#endif
6010+ " .set mips0 \n"
6011+ : "=&r" (temp), "+m" (v->counter)
6012+ : "Ir" (i));
6013+ } else {
6014+ unsigned long flags;
6015+
6016+ raw_local_irq_save(flags);
6017+ __asm__ __volatile__(
6018+#ifdef CONFIG_PAX_REFCOUNT
6019+ /* Exception on overflow. */
6020+ "1: dadd %0, %1 \n"
6021+ "2: \n"
6022+ _ASM_EXTABLE(1b, 2b)
6023+#else
6024+ " daddu %0, %1 \n"
6025+#endif
6026+ : "+r" (v->counter) : "Ir" (i));
6027+ raw_local_irq_restore(flags);
6028+ }
6029+}
6030+static __inline__ void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6031 {
6032 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6033 long temp;
6034@@ -457,7 +890,67 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
6035 *
6036 * Atomically subtracts @i from @v.
6037 */
6038-static __inline__ void atomic64_sub(long i, atomic64_t * v)
6039+static __inline__ void atomic64_sub(long i, atomic64_t *v)
6040+{
6041+ long temp;
6042+
6043+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6044+ __asm__ __volatile__(
6045+ " .set mips3 \n"
6046+ "1: lld %0, %1 # atomic64_sub \n"
6047+#ifdef CONFIG_PAX_REFCOUNT
6048+ /* Exception on overflow. */
6049+ "2: dsub %0, %2 \n"
6050+#else
6051+ " dsubu %0, %2 \n"
6052+#endif
6053+ " scd %0, %1 \n"
6054+ " beqzl %0, 1b \n"
6055+#ifdef CONFIG_PAX_REFCOUNT
6056+ "3: \n"
6057+ _ASM_EXTABLE(2b, 3b)
6058+#endif
6059+ " .set mips0 \n"
6060+ : "=&r" (temp), "+m" (v->counter)
6061+ : "Ir" (i));
6062+ } else if (kernel_uses_llsc) {
6063+ __asm__ __volatile__(
6064+ " .set mips3 \n"
6065+ "1: lld %0, %1 # atomic64_sub \n"
6066+#ifdef CONFIG_PAX_REFCOUNT
6067+ /* Exception on overflow. */
6068+ "2: dsub %0, %2 \n"
6069+#else
6070+ " dsubu %0, %2 \n"
6071+#endif
6072+ " scd %0, %1 \n"
6073+ " beqz %0, 1b \n"
6074+#ifdef CONFIG_PAX_REFCOUNT
6075+ "3: \n"
6076+ _ASM_EXTABLE(2b, 3b)
6077+#endif
6078+ " .set mips0 \n"
6079+ : "=&r" (temp), "+m" (v->counter)
6080+ : "Ir" (i));
6081+ } else {
6082+ unsigned long flags;
6083+
6084+ raw_local_irq_save(flags);
6085+ __asm__ __volatile__(
6086+#ifdef CONFIG_PAX_REFCOUNT
6087+ /* Exception on overflow. */
6088+ "1: dsub %0, %1 \n"
6089+ "2: \n"
6090+ _ASM_EXTABLE(1b, 2b)
6091+#else
6092+ " dsubu %0, %1 \n"
6093+#endif
6094+ : "+r" (v->counter) : "Ir" (i));
6095+ raw_local_irq_restore(flags);
6096+ }
6097+}
6098+
6099+static __inline__ void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6100 {
6101 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6102 long temp;
6103@@ -496,7 +989,93 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
6104 /*
6105 * Same as above, but return the result value
6106 */
6107-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6108+static __inline__ long atomic64_add_return(long i, atomic64_t *v)
6109+{
6110+ long result;
6111+ long temp;
6112+
6113+ smp_mb__before_llsc();
6114+
6115+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6116+ __asm__ __volatile__(
6117+ " .set mips3 \n"
6118+ "1: lld %1, %2 # atomic64_add_return \n"
6119+#ifdef CONFIG_PAX_REFCOUNT
6120+ "2: dadd %0, %1, %3 \n"
6121+#else
6122+ " daddu %0, %1, %3 \n"
6123+#endif
6124+ " scd %0, %2 \n"
6125+ " beqzl %0, 1b \n"
6126+#ifdef CONFIG_PAX_REFCOUNT
6127+ " b 4f \n"
6128+ " .set noreorder \n"
6129+ "3: b 5f \n"
6130+ " move %0, %1 \n"
6131+ " .set reorder \n"
6132+ _ASM_EXTABLE(2b, 3b)
6133+#endif
6134+ "4: daddu %0, %1, %3 \n"
6135+#ifdef CONFIG_PAX_REFCOUNT
6136+ "5: \n"
6137+#endif
6138+ " .set mips0 \n"
6139+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6140+ : "Ir" (i));
6141+ } else if (kernel_uses_llsc) {
6142+ __asm__ __volatile__(
6143+ " .set mips3 \n"
6144+ "1: lld %1, %2 # atomic64_add_return \n"
6145+#ifdef CONFIG_PAX_REFCOUNT
6146+ "2: dadd %0, %1, %3 \n"
6147+#else
6148+ " daddu %0, %1, %3 \n"
6149+#endif
6150+ " scd %0, %2 \n"
6151+ " bnez %0, 4f \n"
6152+ " b 1b \n"
6153+#ifdef CONFIG_PAX_REFCOUNT
6154+ " .set noreorder \n"
6155+ "3: b 5f \n"
6156+ " move %0, %1 \n"
6157+ " .set reorder \n"
6158+ _ASM_EXTABLE(2b, 3b)
6159+#endif
6160+ "4: daddu %0, %1, %3 \n"
6161+#ifdef CONFIG_PAX_REFCOUNT
6162+ "5: \n"
6163+#endif
6164+ " .set mips0 \n"
6165+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6166+ : "Ir" (i), "m" (v->counter)
6167+ : "memory");
6168+ } else {
6169+ unsigned long flags;
6170+
6171+ raw_local_irq_save(flags);
6172+ __asm__ __volatile__(
6173+ " ld %0, %1 \n"
6174+#ifdef CONFIG_PAX_REFCOUNT
6175+ /* Exception on overflow. */
6176+ "1: dadd %0, %2 \n"
6177+#else
6178+ " daddu %0, %2 \n"
6179+#endif
6180+ " sd %0, %1 \n"
6181+#ifdef CONFIG_PAX_REFCOUNT
6182+ /* Note: Dest reg is not modified on overflow */
6183+ "2: \n"
6184+ _ASM_EXTABLE(1b, 2b)
6185+#endif
6186+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6187+ raw_local_irq_restore(flags);
6188+ }
6189+
6190+ smp_llsc_mb();
6191+
6192+ return result;
6193+}
6194+static __inline__ long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6195 {
6196 long result;
6197
6198@@ -546,7 +1125,97 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6199 return result;
6200 }
6201
6202-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6203+static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
6204+{
6205+ long result;
6206+ long temp;
6207+
6208+ smp_mb__before_llsc();
6209+
6210+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6211+ long temp;
6212+
6213+ __asm__ __volatile__(
6214+ " .set mips3 \n"
6215+ "1: lld %1, %2 # atomic64_sub_return \n"
6216+#ifdef CONFIG_PAX_REFCOUNT
6217+ "2: dsub %0, %1, %3 \n"
6218+#else
6219+ " dsubu %0, %1, %3 \n"
6220+#endif
6221+ " scd %0, %2 \n"
6222+ " beqzl %0, 1b \n"
6223+#ifdef CONFIG_PAX_REFCOUNT
6224+ " b 4f \n"
6225+ " .set noreorder \n"
6226+ "3: b 5f \n"
6227+ " move %0, %1 \n"
6228+ " .set reorder \n"
6229+ _ASM_EXTABLE(2b, 3b)
6230+#endif
6231+ "4: dsubu %0, %1, %3 \n"
6232+#ifdef CONFIG_PAX_REFCOUNT
6233+ "5: \n"
6234+#endif
6235+ " .set mips0 \n"
6236+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6237+ : "Ir" (i), "m" (v->counter)
6238+ : "memory");
6239+ } else if (kernel_uses_llsc) {
6240+ __asm__ __volatile__(
6241+ " .set mips3 \n"
6242+ "1: lld %1, %2 # atomic64_sub_return \n"
6243+#ifdef CONFIG_PAX_REFCOUNT
6244+ "2: dsub %0, %1, %3 \n"
6245+#else
6246+ " dsubu %0, %1, %3 \n"
6247+#endif
6248+ " scd %0, %2 \n"
6249+ " bnez %0, 4f \n"
6250+ " b 1b \n"
6251+#ifdef CONFIG_PAX_REFCOUNT
6252+ " .set noreorder \n"
6253+ "3: b 5f \n"
6254+ " move %0, %1 \n"
6255+ " .set reorder \n"
6256+ _ASM_EXTABLE(2b, 3b)
6257+#endif
6258+ "4: dsubu %0, %1, %3 \n"
6259+#ifdef CONFIG_PAX_REFCOUNT
6260+ "5: \n"
6261+#endif
6262+ " .set mips0 \n"
6263+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6264+ : "Ir" (i), "m" (v->counter)
6265+ : "memory");
6266+ } else {
6267+ unsigned long flags;
6268+
6269+ raw_local_irq_save(flags);
6270+ __asm__ __volatile__(
6271+ " ld %0, %1 \n"
6272+#ifdef CONFIG_PAX_REFCOUNT
6273+ /* Exception on overflow. */
6274+ "1: dsub %0, %2 \n"
6275+#else
6276+ " dsubu %0, %2 \n"
6277+#endif
6278+ " sd %0, %1 \n"
6279+#ifdef CONFIG_PAX_REFCOUNT
6280+ /* Note: Dest reg is not modified on overflow */
6281+ "2: \n"
6282+ _ASM_EXTABLE(1b, 2b)
6283+#endif
6284+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6285+ raw_local_irq_restore(flags);
6286+ }
6287+
6288+ smp_llsc_mb();
6289+
6290+ return result;
6291+}
6292+
6293+static __inline__ long atomic64_sub_return_unchecked(long i, atomic64_unchecked_t *v)
6294 {
6295 long result;
6296
6297@@ -605,7 +1274,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6298 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6299 * The function returns the old value of @v minus @i.
6300 */
6301-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6302+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6303 {
6304 long result;
6305
6306@@ -662,9 +1331,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6307 return result;
6308 }
6309
6310-#define atomic64_cmpxchg(v, o, n) \
6311- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6312-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6313+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6314+{
6315+ return cmpxchg(&v->counter, old, new);
6316+}
6317+
6318+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6319+ long new)
6320+{
6321+ return cmpxchg(&(v->counter), old, new);
6322+}
6323+
6324+static inline long atomic64_xchg(atomic64_t *v, long new)
6325+{
6326+ return xchg(&v->counter, new);
6327+}
6328+
6329+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6330+{
6331+ return xchg(&(v->counter), new);
6332+}
6333
6334 /**
6335 * atomic64_add_unless - add unless the number is a given value
6336@@ -694,6 +1380,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6337
6338 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6339 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6340+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6341
6342 /*
6343 * atomic64_sub_and_test - subtract value from variable and test result
6344@@ -715,6 +1402,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6345 * other cases.
6346 */
6347 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6348+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
6349
6350 /*
6351 * atomic64_dec_and_test - decrement by 1 and test
6352@@ -739,6 +1427,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6353 * Atomically increments @v by 1.
6354 */
6355 #define atomic64_inc(v) atomic64_add(1, (v))
6356+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6357
6358 /*
6359 * atomic64_dec - decrement and test
6360@@ -747,6 +1436,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6361 * Atomically decrements @v by 1.
6362 */
6363 #define atomic64_dec(v) atomic64_sub(1, (v))
6364+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6365
6366 /*
6367 * atomic64_add_negative - add and test if negative
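
The instrumented atomics above lean on a MIPS detail: add/dadd trap on signed overflow where addu/daddu wrap silently, so PAX_REFCOUNT gets hardware overflow detection for free, with _ASM_EXTABLE wiring each trap site to a fixup. The _unchecked variants keep wrapping semantics for counters where overflow is benign. A portable sketch of the same policy, with __builtin_add_overflow (GCC 5+) standing in for the trapping instruction:

#include <stdio.h>

typedef struct { int counter; } atomic_t;		/* overflow-checked */
typedef struct { int counter; } atomic_unchecked_t;	/* wraps silently */

void checked_add(int i, atomic_t *v)
{
	int res;

	if (__builtin_add_overflow(v->counter, i, &res)) {
		/* kernel: trap -> __ex_table fixup -> pax_report_refcount_overflow() */
		fprintf(stderr, "refcount overflow caught\n");
		return;		/* counter left unmodified, as in the asm */
	}
	v->counter = res;
}

void unchecked_add(int i, atomic_unchecked_t *v)
{
	/* the kernel builds with -fno-strict-overflow, so this wraps */
	v->counter += i;
}
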
6368diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6369index b4db69f..8f3b093 100644
6370--- a/arch/mips/include/asm/cache.h
6371+++ b/arch/mips/include/asm/cache.h
6372@@ -9,10 +9,11 @@
6373 #ifndef _ASM_CACHE_H
6374 #define _ASM_CACHE_H
6375
6376+#include <linux/const.h>
6377 #include <kmalloc.h>
6378
6379 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6380-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6381+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6382
6383 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6384 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6385diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6386index a66359e..d3d474a 100644
6387--- a/arch/mips/include/asm/elf.h
6388+++ b/arch/mips/include/asm/elf.h
6389@@ -373,13 +373,16 @@ extern const char *__elf_platform;
6390 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6391 #endif
6392
6393+#ifdef CONFIG_PAX_ASLR
6394+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6395+
6396+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6397+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6398+#endif
6399+
6400 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6401 struct linux_binprm;
6402 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6403 int uses_interp);
6404
6405-struct mm_struct;
6406-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6407-#define arch_randomize_brk arch_randomize_brk
6408-
6409 #endif /* _ASM_ELF_H */
6410diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6411index c1f6afa..38cc6e9 100644
6412--- a/arch/mips/include/asm/exec.h
6413+++ b/arch/mips/include/asm/exec.h
6414@@ -12,6 +12,6 @@
6415 #ifndef _ASM_EXEC_H
6416 #define _ASM_EXEC_H
6417
6418-extern unsigned long arch_align_stack(unsigned long sp);
6419+#define arch_align_stack(x) ((x) & ~0xfUL)
6420
6421 #endif /* _ASM_EXEC_H */
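
arch_align_stack() shrinks to a pure mask because PaX performs its own stack-base randomization; the old per-exec random jitter would only fight it. The macro rounds down to a 16-byte boundary:

#include <assert.h>

#define arch_align_stack(x) ((x) & ~0xfUL)

int main(void)
{
	assert(arch_align_stack(0x7ffffff3UL) == 0x7ffffff0UL);
	return 0;
}
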
6422diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6423index d44622c..64990d2 100644
6424--- a/arch/mips/include/asm/local.h
6425+++ b/arch/mips/include/asm/local.h
6426@@ -12,15 +12,25 @@ typedef struct
6427 atomic_long_t a;
6428 } local_t;
6429
6430+typedef struct {
6431+ atomic_long_unchecked_t a;
6432+} local_unchecked_t;
6433+
6434 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6435
6436 #define local_read(l) atomic_long_read(&(l)->a)
6437+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6438 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6439+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6440
6441 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6442+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6443 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6444+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6445 #define local_inc(l) atomic_long_inc(&(l)->a)
6446+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6447 #define local_dec(l) atomic_long_dec(&(l)->a)
6448+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6449
6450 /*
6451 * Same as above, but return the result value
6452@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6453 return result;
6454 }
6455
6456+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6457+{
6458+ unsigned long result;
6459+
6460+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6461+ unsigned long temp;
6462+
6463+ __asm__ __volatile__(
6464+ " .set mips3 \n"
6465+ "1:" __LL "%1, %2 # local_add_return \n"
6466+ " addu %0, %1, %3 \n"
6467+ __SC "%0, %2 \n"
6468+ " beqzl %0, 1b \n"
6469+ " addu %0, %1, %3 \n"
6470+ " .set mips0 \n"
6471+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6472+ : "Ir" (i), "m" (l->a.counter)
6473+ : "memory");
6474+ } else if (kernel_uses_llsc) {
6475+ unsigned long temp;
6476+
6477+ __asm__ __volatile__(
6478+ " .set mips3 \n"
6479+ "1:" __LL "%1, %2 # local_add_return \n"
6480+ " addu %0, %1, %3 \n"
6481+ __SC "%0, %2 \n"
6482+ " beqz %0, 1b \n"
6483+ " addu %0, %1, %3 \n"
6484+ " .set mips0 \n"
6485+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6486+ : "Ir" (i), "m" (l->a.counter)
6487+ : "memory");
6488+ } else {
6489+ unsigned long flags;
6490+
6491+ local_irq_save(flags);
6492+ result = l->a.counter;
6493+ result += i;
6494+ l->a.counter = result;
6495+ local_irq_restore(flags);
6496+ }
6497+
6498+ return result;
6499+}
6500+
6501 static __inline__ long local_sub_return(long i, local_t * l)
6502 {
6503 unsigned long result;
6504@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6505
6506 #define local_cmpxchg(l, o, n) \
6507 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6508+#define local_cmpxchg_unchecked(l, o, n) \
6509+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6510 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6511
6512 /**
6513diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6514index f6be474..12ad554 100644
6515--- a/arch/mips/include/asm/page.h
6516+++ b/arch/mips/include/asm/page.h
6517@@ -95,7 +95,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6518 #ifdef CONFIG_CPU_MIPS32
6519 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6520 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6521- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6522+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6523 #else
6524 typedef struct { unsigned long long pte; } pte_t;
6525 #define pte_val(x) ((x).pte)
6526diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6527index b336037..5b874cc 100644
6528--- a/arch/mips/include/asm/pgalloc.h
6529+++ b/arch/mips/include/asm/pgalloc.h
6530@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6531 {
6532 set_pud(pud, __pud((unsigned long)pmd));
6533 }
6534+
6535+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6536+{
6537+ pud_populate(mm, pud, pmd);
6538+}
6539 #endif
6540
6541 /*
6542diff --git a/arch/mips/include/asm/smtc_proc.h b/arch/mips/include/asm/smtc_proc.h
6543index 25da651..ae2a259 100644
6544--- a/arch/mips/include/asm/smtc_proc.h
6545+++ b/arch/mips/include/asm/smtc_proc.h
6546@@ -18,6 +18,6 @@ extern struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
6547
6548 /* Count of number of recoveries of "stolen" FPU access rights on 34K */
6549
6550-extern atomic_t smtc_fpu_recoveries;
6551+extern atomic_unchecked_t smtc_fpu_recoveries;
6552
6553 #endif /* __ASM_SMTC_PROC_H */
6554diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6555index 4f58ef6..5e7081b 100644
6556--- a/arch/mips/include/asm/thread_info.h
6557+++ b/arch/mips/include/asm/thread_info.h
6558@@ -115,6 +115,8 @@ static inline struct thread_info *current_thread_info(void)
6559 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
6560 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
6561 #define TIF_SYSCALL_TRACEPOINT 26 /* syscall tracepoint instrumentation */
6562+/* li takes a 32bit immediate */
6563+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
6564 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
6565
6566 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
6567@@ -132,13 +134,14 @@ static inline struct thread_info *current_thread_info(void)
6568 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
6569 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
6570 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6571+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6572
6573 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6574- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6575+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6576
6577 /* work to do in syscall_trace_leave() */
6578 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6579- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6580+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6581
6582 /* work to do on interrupt/exception return */
6583 #define _TIF_WORK_MASK \
6584@@ -146,7 +149,7 @@ static inline struct thread_info *current_thread_info(void)
6585 /* work to do on any return to u-space */
6586 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6587 _TIF_WORK_SYSCALL_EXIT | \
6588- _TIF_SYSCALL_TRACEPOINT)
6589+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6590
6591 /*
6592 * We stash processor id into a COP0 register to retrieve it fast
6593diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6594index 1188e00..41cf144 100644
6595--- a/arch/mips/kernel/binfmt_elfn32.c
6596+++ b/arch/mips/kernel/binfmt_elfn32.c
6597@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6598 #undef ELF_ET_DYN_BASE
6599 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6600
6601+#ifdef CONFIG_PAX_ASLR
6602+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6603+
6604+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6605+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6606+#endif
6607+
6608 #include <asm/processor.h>
6609 #include <linux/module.h>
6610 #include <linux/elfcore.h>
6611diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
6612index 202e581..689ca79 100644
6613--- a/arch/mips/kernel/binfmt_elfo32.c
6614+++ b/arch/mips/kernel/binfmt_elfo32.c
6615@@ -56,6 +56,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6616 #undef ELF_ET_DYN_BASE
6617 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6618
6619+#ifdef CONFIG_PAX_ASLR
6620+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6621+
6622+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6623+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6624+#endif
6625+
6626 #include <asm/processor.h>
6627
6628 /*
6629diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
6630index d1fea7a..45602ea 100644
6631--- a/arch/mips/kernel/irq.c
6632+++ b/arch/mips/kernel/irq.c
6633@@ -77,17 +77,17 @@ void ack_bad_irq(unsigned int irq)
6634 printk("unexpected IRQ # %d\n", irq);
6635 }
6636
6637-atomic_t irq_err_count;
6638+atomic_unchecked_t irq_err_count;
6639
6640 int arch_show_interrupts(struct seq_file *p, int prec)
6641 {
6642- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
6643+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
6644 return 0;
6645 }
6646
6647 asmlinkage void spurious_interrupt(void)
6648 {
6649- atomic_inc(&irq_err_count);
6650+ atomic_inc_unchecked(&irq_err_count);
6651 }
6652
6653 void __init init_IRQ(void)
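
irq_err_count is a pure statistic, so it moves to atomic_unchecked_t: under PAX_REFCOUNT a plain atomic_inc() would trap if the counter ever wrapped, and wrapping is harmless here. The unchecked API deliberately opts out. Roughly, in user-space terms:

typedef struct { int counter; } atomic_unchecked_t;

atomic_unchecked_t irq_err_count;

void spurious_interrupt(void)
{
	/* wrap-tolerant increment; no overflow trap wanted here */
	__atomic_fetch_add(&irq_err_count.counter, 1, __ATOMIC_RELAXED);
}
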
6654diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
6655index ddc7610..8c58f17 100644
6656--- a/arch/mips/kernel/process.c
6657+++ b/arch/mips/kernel/process.c
6658@@ -566,15 +566,3 @@ unsigned long get_wchan(struct task_struct *task)
6659 out:
6660 return pc;
6661 }
6662-
6663-/*
6664- * Don't forget that the stack pointer must be aligned on a 8 bytes
6665- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
6666- */
6667-unsigned long arch_align_stack(unsigned long sp)
6668-{
6669- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6670- sp -= get_random_int() & ~PAGE_MASK;
6671-
6672- return sp & ALMASK;
6673-}
6674diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
6675index b52e1d2..1a3ca09 100644
6676--- a/arch/mips/kernel/ptrace.c
6677+++ b/arch/mips/kernel/ptrace.c
6678@@ -652,6 +652,10 @@ long arch_ptrace(struct task_struct *child, long request,
6679 return ret;
6680 }
6681
6682+#ifdef CONFIG_GRKERNSEC_SETXID
6683+extern void gr_delayed_cred_worker(void);
6684+#endif
6685+
6686 /*
6687 * Notification of system call entry/exit
6688 * - triggered by current->work.syscall_trace
6689@@ -668,6 +672,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
6690 tracehook_report_syscall_entry(regs))
6691 ret = -1;
6692
6693+#ifdef CONFIG_GRKERNSEC_SETXID
6694+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6695+ gr_delayed_cred_worker();
6696+#endif
6697+
6698 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
6699 trace_sys_enter(regs, regs->regs[2]);
6700
6701diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c
6702index c10aa84..9ec2e60 100644
6703--- a/arch/mips/kernel/smtc-proc.c
6704+++ b/arch/mips/kernel/smtc-proc.c
6705@@ -31,7 +31,7 @@ unsigned long selfipis[NR_CPUS];
6706
6707 struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
6708
6709-atomic_t smtc_fpu_recoveries;
6710+atomic_unchecked_t smtc_fpu_recoveries;
6711
6712 static int smtc_proc_show(struct seq_file *m, void *v)
6713 {
6714@@ -48,7 +48,7 @@ static int smtc_proc_show(struct seq_file *m, void *v)
6715 for(i = 0; i < NR_CPUS; i++)
6716 seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
6717 seq_printf(m, "%d Recoveries of \"stolen\" FPU\n",
6718- atomic_read(&smtc_fpu_recoveries));
6719+ atomic_read_unchecked(&smtc_fpu_recoveries));
6720 return 0;
6721 }
6722
6723@@ -73,7 +73,7 @@ void init_smtc_stats(void)
6724 smtc_cpu_stats[i].selfipis = 0;
6725 }
6726
6727- atomic_set(&smtc_fpu_recoveries, 0);
6728+ atomic_set_unchecked(&smtc_fpu_recoveries, 0);
6729
6730 proc_create("smtc", 0444, NULL, &smtc_proc_fops);
6731 }
6732diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
6733index dfc1b91..11a2c07 100644
6734--- a/arch/mips/kernel/smtc.c
6735+++ b/arch/mips/kernel/smtc.c
6736@@ -1359,7 +1359,7 @@ void smtc_soft_dump(void)
6737 }
6738 smtc_ipi_qdump();
6739 printk("%d Recoveries of \"stolen\" FPU\n",
6740- atomic_read(&smtc_fpu_recoveries));
6741+ atomic_read_unchecked(&smtc_fpu_recoveries));
6742 }
6743
6744
6745diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
6746index 84536bf..79caa4d 100644
6747--- a/arch/mips/kernel/sync-r4k.c
6748+++ b/arch/mips/kernel/sync-r4k.c
6749@@ -21,8 +21,8 @@
6750 #include <asm/mipsregs.h>
6751
6752 static atomic_t count_start_flag = ATOMIC_INIT(0);
6753-static atomic_t count_count_start = ATOMIC_INIT(0);
6754-static atomic_t count_count_stop = ATOMIC_INIT(0);
6755+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
6756+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
6757 static atomic_t count_reference = ATOMIC_INIT(0);
6758
6759 #define COUNTON 100
6760@@ -69,13 +69,13 @@ void synchronise_count_master(int cpu)
6761
6762 for (i = 0; i < NR_LOOPS; i++) {
6763 /* slaves loop on '!= 2' */
6764- while (atomic_read(&count_count_start) != 1)
6765+ while (atomic_read_unchecked(&count_count_start) != 1)
6766 mb();
6767- atomic_set(&count_count_stop, 0);
6768+ atomic_set_unchecked(&count_count_stop, 0);
6769 smp_wmb();
6770
6771 /* this lets the slaves write their count register */
6772- atomic_inc(&count_count_start);
6773+ atomic_inc_unchecked(&count_count_start);
6774
6775 /*
6776 * Everyone initialises count in the last loop:
6777@@ -86,11 +86,11 @@ void synchronise_count_master(int cpu)
6778 /*
6779 * Wait for all slaves to leave the synchronization point:
6780 */
6781- while (atomic_read(&count_count_stop) != 1)
6782+ while (atomic_read_unchecked(&count_count_stop) != 1)
6783 mb();
6784- atomic_set(&count_count_start, 0);
6785+ atomic_set_unchecked(&count_count_start, 0);
6786 smp_wmb();
6787- atomic_inc(&count_count_stop);
6788+ atomic_inc_unchecked(&count_count_stop);
6789 }
6790 /* Arrange for an interrupt in a short while */
6791 write_c0_compare(read_c0_count() + COUNTON);
6792@@ -131,8 +131,8 @@ void synchronise_count_slave(int cpu)
6793 initcount = atomic_read(&count_reference);
6794
6795 for (i = 0; i < NR_LOOPS; i++) {
6796- atomic_inc(&count_count_start);
6797- while (atomic_read(&count_count_start) != 2)
6798+ atomic_inc_unchecked(&count_count_start);
6799+ while (atomic_read_unchecked(&count_count_start) != 2)
6800 mb();
6801
6802 /*
6803@@ -141,8 +141,8 @@ void synchronise_count_slave(int cpu)
6804 if (i == NR_LOOPS-1)
6805 write_c0_count(initcount);
6806
6807- atomic_inc(&count_count_stop);
6808- while (atomic_read(&count_count_stop) != 2)
6809+ atomic_inc_unchecked(&count_count_stop);
6810+ while (atomic_read_unchecked(&count_count_stop) != 2)
6811 mb();
6812 }
6813 /* Arrange for an interrupt in a short while */
6814diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
6815index f9c8746..78b64e3 100644
6816--- a/arch/mips/kernel/traps.c
6817+++ b/arch/mips/kernel/traps.c
6818@@ -690,7 +690,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
6819 siginfo_t info;
6820
6821 prev_state = exception_enter();
6822- die_if_kernel("Integer overflow", regs);
6823+ if (unlikely(!user_mode(regs))) {
6824+
6825+#ifdef CONFIG_PAX_REFCOUNT
6826+ if (fixup_exception(regs)) {
6827+ pax_report_refcount_overflow(regs);
6828+ exception_exit(prev_state);
6829+ return;
6830+ }
6831+#endif
6832+
6833+ die("Integer overflow", regs);
6834+ }
6835
6836 info.si_code = FPE_INTOVF;
6837 info.si_signo = SIGFPE;
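
This is where the trapping adds from atomic.h land: an integer-overflow exception enters do_ov(), fixup_exception() searches the __ex_table entries emitted by _ASM_EXTABLE for the faulting PC, and on a match the PC is redirected and the overflow is reported instead of being fatal. A simplified linear-search sketch of that lookup (the real kernel keeps the table sorted and bisects):

struct exception_table_entry {
	unsigned long insn;	/* address of the trapping instruction */
	unsigned long fixup;	/* address to resume execution at */
};

unsigned long search_extable(const struct exception_table_entry *tbl,
			     int n, unsigned long pc)
{
	for (int i = 0; i < n; i++)
		if (tbl[i].insn == pc)
			return tbl[i].fixup;
	return 0;	/* no fixup: the overflow is genuinely fatal */
}
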
6838diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
6839index becc42b..9e43d4b 100644
6840--- a/arch/mips/mm/fault.c
6841+++ b/arch/mips/mm/fault.c
6842@@ -28,6 +28,23 @@
6843 #include <asm/highmem.h> /* For VMALLOC_END */
6844 #include <linux/kdebug.h>
6845
6846+#ifdef CONFIG_PAX_PAGEEXEC
6847+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6848+{
6849+ unsigned long i;
6850+
6851+ printk(KERN_ERR "PAX: bytes at PC: ");
6852+ for (i = 0; i < 5; i++) {
6853+ unsigned int c;
6854+ if (get_user(c, (unsigned int *)pc+i))
6855+ printk(KERN_CONT "???????? ");
6856+ else
6857+ printk(KERN_CONT "%08x ", c);
6858+ }
6859+ printk("\n");
6860+}
6861+#endif
6862+
6863 /*
6864 * This routine handles page faults. It determines the address,
6865 * and the problem, and then passes it off to one of the appropriate
6866@@ -199,6 +216,14 @@ bad_area:
6867 bad_area_nosemaphore:
6868 /* User mode accesses just cause a SIGSEGV */
6869 if (user_mode(regs)) {
6870+
6871+#ifdef CONFIG_PAX_PAGEEXEC
6872+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
6873+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
6874+ do_group_exit(SIGKILL);
6875+ }
6876+#endif
6877+
6878 tsk->thread.cp0_badvaddr = address;
6879 tsk->thread.error_code = write;
6880 #if 0
6881diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
6882index f1baadd..8537544 100644
6883--- a/arch/mips/mm/mmap.c
6884+++ b/arch/mips/mm/mmap.c
6885@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6886 struct vm_area_struct *vma;
6887 unsigned long addr = addr0;
6888 int do_color_align;
6889+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6890 struct vm_unmapped_area_info info;
6891
6892 if (unlikely(len > TASK_SIZE))
6893@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6894 do_color_align = 1;
6895
6896 /* requesting a specific address */
6897+
6898+#ifdef CONFIG_PAX_RANDMMAP
6899+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
6900+#endif
6901+
6902 if (addr) {
6903 if (do_color_align)
6904 addr = COLOUR_ALIGN(addr, pgoff);
6905@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6906 addr = PAGE_ALIGN(addr);
6907
6908 vma = find_vma(mm, addr);
6909- if (TASK_SIZE - len >= addr &&
6910- (!vma || addr + len <= vma->vm_start))
6911+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
6912 return addr;
6913 }
6914
6915 info.length = len;
6916 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
6917 info.align_offset = pgoff << PAGE_SHIFT;
6918+ info.threadstack_offset = offset;
6919
6920 if (dir == DOWN) {
6921 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
6922@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6923 {
6924 unsigned long random_factor = 0UL;
6925
6926+#ifdef CONFIG_PAX_RANDMMAP
6927+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6928+#endif
6929+
6930 if (current->flags & PF_RANDOMIZE) {
6931 random_factor = get_random_int();
6932 random_factor = random_factor << PAGE_SHIFT;
6933@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6934
6935 if (mmap_is_legacy()) {
6936 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
6937+
6938+#ifdef CONFIG_PAX_RANDMMAP
6939+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6940+ mm->mmap_base += mm->delta_mmap;
6941+#endif
6942+
6943 mm->get_unmapped_area = arch_get_unmapped_area;
6944 } else {
6945 mm->mmap_base = mmap_base(random_factor);
6946+
6947+#ifdef CONFIG_PAX_RANDMMAP
6948+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6949+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6950+#endif
6951+
6952 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6953 }
6954 }
6955
6956-static inline unsigned long brk_rnd(void)
6957-{
6958- unsigned long rnd = get_random_int();
6959-
6960- rnd = rnd << PAGE_SHIFT;
6961- /* 8MB for 32bit, 256MB for 64bit */
6962- if (TASK_IS_32BIT_ADDR)
6963- rnd = rnd & 0x7ffffful;
6964- else
6965- rnd = rnd & 0xffffffful;
6966-
6967- return rnd;
6968-}
6969-
6970-unsigned long arch_randomize_brk(struct mm_struct *mm)
6971-{
6972- unsigned long base = mm->brk;
6973- unsigned long ret;
6974-
6975- ret = PAGE_ALIGN(base + brk_rnd());
6976-
6977- if (ret < mm->brk)
6978- return mm->brk;
6979-
6980- return ret;
6981-}
6982-
6983 int __virt_addr_valid(const volatile void *kaddr)
6984 {
6985 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
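check_heap_stack_gap() and the threadstack_offset field are grsecurity additions: instead of only testing that the request fits below the next vma, the allocator also demands a randomized gap between new mappings and thread stacks. A simplified sketch of the shape of that predicate (not the patch's implementation, which also honors a configurable gap and guards against wraparound):

static int check_heap_stack_gap_sketch(const struct vm_area_struct *vma,
				       unsigned long addr, unsigned long len,
				       unsigned long offset)
{
	if (!vma)
		return 1;			/* nothing above us: fits     */
	if (addr + len > vma->vm_start)
		return 0;			/* overlaps the next mapping  */
	if (vma->vm_flags & VM_GROWSDOWN)	/* keep slack below a stack   */
		return addr + len + offset <= vma->vm_start;
	return 1;
}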
6986diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
6987index a2358b4..7cead4f 100644
6988--- a/arch/mips/sgi-ip27/ip27-nmi.c
6989+++ b/arch/mips/sgi-ip27/ip27-nmi.c
6990@@ -187,9 +187,9 @@ void
6991 cont_nmi_dump(void)
6992 {
6993 #ifndef REAL_NMI_SIGNAL
6994- static atomic_t nmied_cpus = ATOMIC_INIT(0);
6995+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
6996
6997- atomic_inc(&nmied_cpus);
6998+ atomic_inc_unchecked(&nmied_cpus);
6999 #endif
7000 /*
7001 * Only allow 1 cpu to proceed
7002@@ -233,7 +233,7 @@ cont_nmi_dump(void)
7003 udelay(10000);
7004 }
7005 #else
7006- while (atomic_read(&nmied_cpus) != num_online_cpus());
7007+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7008 #endif
7009
7010 /*
7011diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7012index 967d144..db12197 100644
7013--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7014+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7015@@ -11,12 +11,14 @@
7016 #ifndef _ASM_PROC_CACHE_H
7017 #define _ASM_PROC_CACHE_H
7018
7019+#include <linux/const.h>
7020+
7021 /* L1 cache */
7022
7023 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7024 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7025-#define L1_CACHE_BYTES 16 /* bytes per entry */
7026 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7027+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7028 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7029
7030 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7031diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7032index bcb5df2..84fabd2 100644
7033--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7034+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7035@@ -16,13 +16,15 @@
7036 #ifndef _ASM_PROC_CACHE_H
7037 #define _ASM_PROC_CACHE_H
7038
7039+#include <linux/const.h>
7040+
7041 /*
7042 * L1 cache
7043 */
7044 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7045 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7046-#define L1_CACHE_BYTES 32 /* bytes per entry */
7047 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7048+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7049 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7050
7051 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7052diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7053index 4ce7a01..449202a 100644
7054--- a/arch/openrisc/include/asm/cache.h
7055+++ b/arch/openrisc/include/asm/cache.h
7056@@ -19,11 +19,13 @@
7057 #ifndef __ASM_OPENRISC_CACHE_H
7058 #define __ASM_OPENRISC_CACHE_H
7059
7060+#include <linux/const.h>
7061+
7062 /* FIXME: How can we replace these with values from the CPU...
7063 * they shouldn't be hard-coded!
7064 */
7065
7066-#define L1_CACHE_BYTES 16
7067 #define L1_CACHE_SHIFT 4
7068+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7069
7070 #endif /* __ASM_OPENRISC_CACHE_H */
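The cache.h hunks above all make the same change: L1_CACHE_BYTES is derived from L1_CACHE_SHIFT rather than kept as a second literal that can drift out of sync, and the derivation uses _AC() from <linux/const.h> so the constant works in both C and assembly — a bare 1UL suffix would not assemble in .S files. In brief:

#include <linux/const.h>

/* _AC(X,Y) pastes to X##Y in C but expands to plain X in assembly,
 * so the same header line yields 1UL << 5 in C and 1 << 5 in .S. */
#define DEMO_SHIFT	5
#define DEMO_BYTES	(_AC(1,UL) << DEMO_SHIFT)	/* 32UL in C */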
7071diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7072index 472886c..00e7df9 100644
7073--- a/arch/parisc/include/asm/atomic.h
7074+++ b/arch/parisc/include/asm/atomic.h
7075@@ -252,6 +252,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7076 return dec;
7077 }
7078
7079+#define atomic64_read_unchecked(v) atomic64_read(v)
7080+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7081+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7082+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7083+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7084+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7085+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7086+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7087+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7088+
7089 #endif /* !CONFIG_64BIT */
7090
7091
7092diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7093index 47f11c7..3420df2 100644
7094--- a/arch/parisc/include/asm/cache.h
7095+++ b/arch/parisc/include/asm/cache.h
7096@@ -5,6 +5,7 @@
7097 #ifndef __ARCH_PARISC_CACHE_H
7098 #define __ARCH_PARISC_CACHE_H
7099
7100+#include <linux/const.h>
7101
7102 /*
7103 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
7104@@ -15,13 +16,13 @@
7105 * just ruin performance.
7106 */
7107 #ifdef CONFIG_PA20
7108-#define L1_CACHE_BYTES 64
7109 #define L1_CACHE_SHIFT 6
7110 #else
7111-#define L1_CACHE_BYTES 32
7112 #define L1_CACHE_SHIFT 5
7113 #endif
7114
7115+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7116+
7117 #ifndef __ASSEMBLY__
7118
7119 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7120diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
7121index ad2b503..bdf1651 100644
7122--- a/arch/parisc/include/asm/elf.h
7123+++ b/arch/parisc/include/asm/elf.h
7124@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
7125
7126 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
7127
7128+#ifdef CONFIG_PAX_ASLR
7129+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7130+
7131+#define PAX_DELTA_MMAP_LEN 16
7132+#define PAX_DELTA_STACK_LEN 16
7133+#endif
7134+
7135 /* This yields a mask that user programs can use to figure out what
7136 instruction set this CPU supports. This could be done in user space,
7137 but it's not easy, and we've already done it here. */
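PAX_ELF_ET_DYN_BASE is the load base used for PIE executables under PaX ASLR, while PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN give the number of random page-granular bits folded into mm->delta_mmap and mm->delta_stack, which the mmap hunks later in this section add to the mapping bases. Illustrative arithmetic only, assuming 4 KiB pages and the 16-bit parisc values:

#define EXAMPLE_PAGE_SHIFT	12	/* 4 KiB pages (assumption) */
#define EXAMPLE_DELTA_LEN	16	/* bits of randomness       */

/* Maximum shift of the mmap base: (2^16 - 1) pages, just under 256 MiB. */
static unsigned long example_max_mmap_shift(void)
{
	return ((1UL << EXAMPLE_DELTA_LEN) - 1) << EXAMPLE_PAGE_SHIFT;
}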
7138diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
7139index f213f5b..0af3e8e 100644
7140--- a/arch/parisc/include/asm/pgalloc.h
7141+++ b/arch/parisc/include/asm/pgalloc.h
7142@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7143 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
7144 }
7145
7146+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7147+{
7148+ pgd_populate(mm, pgd, pmd);
7149+}
7150+
7151 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
7152 {
7153 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
7154@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7155 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
7156 #define pmd_free(mm, x) do { } while (0)
7157 #define pgd_populate(mm, pmd, pte) BUG()
7158+#define pgd_populate_kernel(mm, pmd, pte) BUG()
7159
7160 #endif
7161
7162diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
7163index 34899b5..02dd060 100644
7164--- a/arch/parisc/include/asm/pgtable.h
7165+++ b/arch/parisc/include/asm/pgtable.h
7166@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
7167 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
7168 #define PAGE_COPY PAGE_EXECREAD
7169 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
7170+
7171+#ifdef CONFIG_PAX_PAGEEXEC
7172+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
7173+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7174+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7175+#else
7176+# define PAGE_SHARED_NOEXEC PAGE_SHARED
7177+# define PAGE_COPY_NOEXEC PAGE_COPY
7178+# define PAGE_READONLY_NOEXEC PAGE_READONLY
7179+#endif
7180+
7181 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
7182 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
7183 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
7184diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
7185index 4006964..fcb3cc2 100644
7186--- a/arch/parisc/include/asm/uaccess.h
7187+++ b/arch/parisc/include/asm/uaccess.h
7188@@ -246,10 +246,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
7189 const void __user *from,
7190 unsigned long n)
7191 {
7192- int sz = __compiletime_object_size(to);
7193+ size_t sz = __compiletime_object_size(to);
7194 int ret = -EFAULT;
7195
7196- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
7197+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
7198 ret = __copy_from_user(to, from, n);
7199 else
7200 copy_from_user_overflow();
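Widening sz from int to size_t matters because __compiletime_object_size() reports "unknown" as (size_t)-1, and the sentinel test plus the sz >= n comparison against an unsigned long otherwise lean on implicit conversions and break for objects larger than INT_MAX. What the check buys, as a hypothetical caller (names are illustrative):

/* With sz == 16 known at compile time and a constant n == 64, the
 * 'sz >= n' test in the hunk above fails, so the request is routed to
 * copy_from_user_overflow() instead of overrunning buf. */
static long example_ioctl(void __user *argp)
{
	char buf[16];

	if (copy_from_user(buf, argp, 64))	/* caught: 64 > sizeof(buf) */
		return -EFAULT;
	return 0;
}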
7201diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
7202index 50dfafc..b9fc230 100644
7203--- a/arch/parisc/kernel/module.c
7204+++ b/arch/parisc/kernel/module.c
7205@@ -98,16 +98,38 @@
7206
7207 /* three functions to determine where in the module core
7208 * or init pieces the location is */
7209+static inline int in_init_rx(struct module *me, void *loc)
7210+{
7211+ return (loc >= me->module_init_rx &&
7212+ loc < (me->module_init_rx + me->init_size_rx));
7213+}
7214+
7215+static inline int in_init_rw(struct module *me, void *loc)
7216+{
7217+ return (loc >= me->module_init_rw &&
7218+ loc < (me->module_init_rw + me->init_size_rw));
7219+}
7220+
7221 static inline int in_init(struct module *me, void *loc)
7222 {
7223- return (loc >= me->module_init &&
7224- loc <= (me->module_init + me->init_size));
7225+ return in_init_rx(me, loc) || in_init_rw(me, loc);
7226+}
7227+
7228+static inline int in_core_rx(struct module *me, void *loc)
7229+{
7230+ return (loc >= me->module_core_rx &&
7231+ loc < (me->module_core_rx + me->core_size_rx));
7232+}
7233+
7234+static inline int in_core_rw(struct module *me, void *loc)
7235+{
7236+ return (loc >= me->module_core_rw &&
7237+ loc < (me->module_core_rw + me->core_size_rw));
7238 }
7239
7240 static inline int in_core(struct module *me, void *loc)
7241 {
7242- return (loc >= me->module_core &&
7243- loc <= (me->module_core + me->core_size));
7244+ return in_core_rx(me, loc) || in_core_rw(me, loc);
7245 }
7246
7247 static inline int in_local(struct module *me, void *loc)
7248@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
7249 }
7250
7251 /* align things a bit */
7252- me->core_size = ALIGN(me->core_size, 16);
7253- me->arch.got_offset = me->core_size;
7254- me->core_size += gots * sizeof(struct got_entry);
7255+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7256+ me->arch.got_offset = me->core_size_rw;
7257+ me->core_size_rw += gots * sizeof(struct got_entry);
7258
7259- me->core_size = ALIGN(me->core_size, 16);
7260- me->arch.fdesc_offset = me->core_size;
7261- me->core_size += fdescs * sizeof(Elf_Fdesc);
7262+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7263+ me->arch.fdesc_offset = me->core_size_rw;
7264+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7265
7266 me->arch.got_max = gots;
7267 me->arch.fdesc_max = fdescs;
7268@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7269
7270 BUG_ON(value == 0);
7271
7272- got = me->module_core + me->arch.got_offset;
7273+ got = me->module_core_rw + me->arch.got_offset;
7274 for (i = 0; got[i].addr; i++)
7275 if (got[i].addr == value)
7276 goto out;
7277@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7278 #ifdef CONFIG_64BIT
7279 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7280 {
7281- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7282+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7283
7284 if (!value) {
7285 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7286@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7287
7288 /* Create new one */
7289 fdesc->addr = value;
7290- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7291+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7292 return (Elf_Addr)fdesc;
7293 }
7294 #endif /* CONFIG_64BIT */
7295@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
7296
7297 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7298 end = table + sechdrs[me->arch.unwind_section].sh_size;
7299- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7300+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7301
7302 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7303 me->arch.unwind_section, table, end, gp);
7304diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7305index 0d3a9d4..20a99b0 100644
7306--- a/arch/parisc/kernel/sys_parisc.c
7307+++ b/arch/parisc/kernel/sys_parisc.c
7308@@ -33,9 +33,11 @@
7309 #include <linux/utsname.h>
7310 #include <linux/personality.h>
7311
7312-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
7313+static unsigned long get_unshared_area(unsigned long addr, unsigned long len,
7314+ unsigned long flags)
7315 {
7316 struct vm_unmapped_area_info info;
7317+ unsigned long offset = gr_rand_threadstack_offset(current->mm, NULL, flags);
7318
7319 info.flags = 0;
7320 info.length = len;
7321@@ -43,6 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
7322 info.high_limit = TASK_SIZE;
7323 info.align_mask = 0;
7324 info.align_offset = 0;
7325+ info.threadstack_offset = offset;
7326 return vm_unmapped_area(&info);
7327 }
7328
7329@@ -69,9 +72,10 @@ static unsigned long shared_align_offset(struct file *filp, unsigned long pgoff)
7330 }
7331
7332 static unsigned long get_shared_area(struct file *filp, unsigned long addr,
7333- unsigned long len, unsigned long pgoff)
7334+ unsigned long len, unsigned long pgoff, unsigned long flags)
7335 {
7336 struct vm_unmapped_area_info info;
7337+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7338
7339 info.flags = 0;
7340 info.length = len;
7341@@ -79,6 +83,7 @@ static unsigned long get_shared_area(struct file *filp, unsigned long addr,
7342 info.high_limit = TASK_SIZE;
7343 info.align_mask = PAGE_MASK & (SHMLBA - 1);
7344 info.align_offset = shared_align_offset(filp, pgoff);
7345+ info.threadstack_offset = offset;
7346 return vm_unmapped_area(&info);
7347 }
7348
7349@@ -93,13 +98,20 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7350 return -EINVAL;
7351 return addr;
7352 }
7353- if (!addr)
7354+ if (!addr) {
7355 addr = TASK_UNMAPPED_BASE;
7356
7357+#ifdef CONFIG_PAX_RANDMMAP
7358+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
7359+ addr += current->mm->delta_mmap;
7360+#endif
7361+
7362+ }
7363+
7364 if (filp || (flags & MAP_SHARED))
7365- addr = get_shared_area(filp, addr, len, pgoff);
7366+ addr = get_shared_area(filp, addr, len, pgoff, flags);
7367 else
7368- addr = get_unshared_area(addr, len);
7369+ addr = get_unshared_area(addr, len, flags);
7370
7371 return addr;
7372 }
7373diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7374index 1cd1d0c..44ec918 100644
7375--- a/arch/parisc/kernel/traps.c
7376+++ b/arch/parisc/kernel/traps.c
7377@@ -722,9 +722,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7378
7379 down_read(&current->mm->mmap_sem);
7380 vma = find_vma(current->mm,regs->iaoq[0]);
7381- if (vma && (regs->iaoq[0] >= vma->vm_start)
7382- && (vma->vm_flags & VM_EXEC)) {
7383-
7384+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7385 fault_address = regs->iaoq[0];
7386 fault_space = regs->iasq[0];
7387
7388diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7389index 9d08c71..e2b4d20 100644
7390--- a/arch/parisc/mm/fault.c
7391+++ b/arch/parisc/mm/fault.c
7392@@ -15,6 +15,7 @@
7393 #include <linux/sched.h>
7394 #include <linux/interrupt.h>
7395 #include <linux/module.h>
7396+#include <linux/unistd.h>
7397
7398 #include <asm/uaccess.h>
7399 #include <asm/traps.h>
7400@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
7401 static unsigned long
7402 parisc_acctyp(unsigned long code, unsigned int inst)
7403 {
7404- if (code == 6 || code == 16)
7405+ if (code == 6 || code == 7 || code == 16)
7406 return VM_EXEC;
7407
7408 switch (inst & 0xf0000000) {
7409@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
7410 }
7411 #endif
7412
7413+#ifdef CONFIG_PAX_PAGEEXEC
7414+/*
7415+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
7416+ *
7417+ * returns 1 when task should be killed
7418+ * 2 when rt_sigreturn trampoline was detected
7419+ * 3 when unpatched PLT trampoline was detected
7420+ */
7421+static int pax_handle_fetch_fault(struct pt_regs *regs)
7422+{
7423+
7424+#ifdef CONFIG_PAX_EMUPLT
7425+ int err;
7426+
7427+ do { /* PaX: unpatched PLT emulation */
7428+ unsigned int bl, depwi;
7429+
7430+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
7431+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
7432+
7433+ if (err)
7434+ break;
7435+
7436+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
7437+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
7438+
7439+ err = get_user(ldw, (unsigned int *)addr);
7440+ err |= get_user(bv, (unsigned int *)(addr+4));
7441+ err |= get_user(ldw2, (unsigned int *)(addr+8));
7442+
7443+ if (err)
7444+ break;
7445+
7446+ if (ldw == 0x0E801096U &&
7447+ bv == 0xEAC0C000U &&
7448+ ldw2 == 0x0E881095U)
7449+ {
7450+ unsigned int resolver, map;
7451+
7452+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
7453+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
7454+ if (err)
7455+ break;
7456+
7457+ regs->gr[20] = instruction_pointer(regs)+8;
7458+ regs->gr[21] = map;
7459+ regs->gr[22] = resolver;
7460+ regs->iaoq[0] = resolver | 3UL;
7461+ regs->iaoq[1] = regs->iaoq[0] + 4;
7462+ return 3;
7463+ }
7464+ }
7465+ } while (0);
7466+#endif
7467+
7468+#ifdef CONFIG_PAX_EMUTRAMP
7469+
7470+#ifndef CONFIG_PAX_EMUSIGRT
7471+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
7472+ return 1;
7473+#endif
7474+
7475+ do { /* PaX: rt_sigreturn emulation */
7476+ int err; unsigned int ldi1, ldi2, bel, nop;
7477+
7478+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
7479+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
7480+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
7481+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
7482+
7483+ if (err)
7484+ break;
7485+
7486+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
7487+ ldi2 == 0x3414015AU &&
7488+ bel == 0xE4008200U &&
7489+ nop == 0x08000240U)
7490+ {
7491+ regs->gr[25] = (ldi1 & 2) >> 1;
7492+ regs->gr[20] = __NR_rt_sigreturn;
7493+ regs->gr[31] = regs->iaoq[1] + 16;
7494+ regs->sr[0] = regs->iasq[1];
7495+ regs->iaoq[0] = 0x100UL;
7496+ regs->iaoq[1] = regs->iaoq[0] + 4;
7497+ regs->iasq[0] = regs->sr[2];
7498+ regs->iasq[1] = regs->sr[2];
7499+ return 2;
7500+ }
7501+ } while (0);
7502+#endif
7503+
7504+ return 1;
7505+}
7506+
7507+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7508+{
7509+ unsigned long i;
7510+
7511+ printk(KERN_ERR "PAX: bytes at PC: ");
7512+ for (i = 0; i < 5; i++) {
7513+ unsigned int c;
7514+ if (get_user(c, (unsigned int *)pc+i))
7515+ printk(KERN_CONT "???????? ");
7516+ else
7517+ printk(KERN_CONT "%08x ", c);
7518+ }
7519+ printk("\n");
7520+}
7521+#endif
7522+
7523 int fixup_exception(struct pt_regs *regs)
7524 {
7525 const struct exception_table_entry *fix;
7526@@ -210,8 +321,33 @@ retry:
7527
7528 good_area:
7529
7530- if ((vma->vm_flags & acc_type) != acc_type)
7531+ if ((vma->vm_flags & acc_type) != acc_type) {
7532+
7533+#ifdef CONFIG_PAX_PAGEEXEC
7534+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
7535+ (address & ~3UL) == instruction_pointer(regs))
7536+ {
7537+ up_read(&mm->mmap_sem);
7538+ switch (pax_handle_fetch_fault(regs)) {
7539+
7540+#ifdef CONFIG_PAX_EMUPLT
7541+ case 3:
7542+ return;
7543+#endif
7544+
7545+#ifdef CONFIG_PAX_EMUTRAMP
7546+ case 2:
7547+ return;
7548+#endif
7549+
7550+ }
7551+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
7552+ do_group_exit(SIGKILL);
7553+ }
7554+#endif
7555+
7556 goto bad_area;
7557+ }
7558
7559 /*
7560 * If for any reason at all we couldn't handle the fault, make
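Both emulations above follow the same recipe: fetch the instruction words at the faulting PC with get_user(), compare them against the fixed encodings of the trampoline being emulated (with a couple of wildcarded operand bits, such as the two accepted ldi1 values), and on a match perform the trampoline's effect in-kernel (return 2 or 3) or otherwise fall through to the kill path (return 1). The matching core, reduced to a sketch:

/* Illustrative only: the real matchers above wildcard some operand
 * bits and decode register fields out of the matched words. */
static int matches_trampoline(struct pt_regs *regs,
			      const unsigned int *pattern, int nwords)
{
	int i;

	for (i = 0; i < nwords; i++) {
		unsigned int insn;

		if (get_user(insn, (unsigned int *)instruction_pointer(regs) + i))
			return 0;	/* unreadable: no match */
		if (insn != pattern[i])
			return 0;
	}
	return 1;
}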
7561diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
7562index b44b52c..4cd253c 100644
7563--- a/arch/powerpc/Kconfig
7564+++ b/arch/powerpc/Kconfig
7565@@ -382,6 +382,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
7566 config KEXEC
7567 bool "kexec system call"
7568 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
7569+ depends on !GRKERNSEC_KMEM
7570 help
7571 kexec is a system call that implements the ability to shutdown your
7572 current kernel, and to start another kernel. It is like a reboot
7573diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
7574index e3b1d41..8e81edf 100644
7575--- a/arch/powerpc/include/asm/atomic.h
7576+++ b/arch/powerpc/include/asm/atomic.h
7577@@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
7578 return t1;
7579 }
7580
7581+#define atomic64_read_unchecked(v) atomic64_read(v)
7582+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7583+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7584+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7585+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7586+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7587+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7588+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7589+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7590+
7591 #endif /* __powerpc64__ */
7592
7593 #endif /* __KERNEL__ */
7594diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
7595index 9e495c9..b6878e5 100644
7596--- a/arch/powerpc/include/asm/cache.h
7597+++ b/arch/powerpc/include/asm/cache.h
7598@@ -3,6 +3,7 @@
7599
7600 #ifdef __KERNEL__
7601
7602+#include <linux/const.h>
7603
7604 /* bytes per L1 cache line */
7605 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
7606@@ -22,7 +23,7 @@
7607 #define L1_CACHE_SHIFT 7
7608 #endif
7609
7610-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7611+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7612
7613 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7614
7615diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
7616index 935b5e7..7001d2d 100644
7617--- a/arch/powerpc/include/asm/elf.h
7618+++ b/arch/powerpc/include/asm/elf.h
7619@@ -28,8 +28,19 @@
7620 the loader. We need to make sure that it is out of the way of the program
7621 that it will "exec", and that there is sufficient room for the brk. */
7622
7623-extern unsigned long randomize_et_dyn(unsigned long base);
7624-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
7625+#define ELF_ET_DYN_BASE (0x20000000)
7626+
7627+#ifdef CONFIG_PAX_ASLR
7628+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
7629+
7630+#ifdef __powerpc64__
7631+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
7632+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
7633+#else
7634+#define PAX_DELTA_MMAP_LEN 15
7635+#define PAX_DELTA_STACK_LEN 15
7636+#endif
7637+#endif
7638
7639 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
7640
7641@@ -127,10 +138,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
7642 (0x7ff >> (PAGE_SHIFT - 12)) : \
7643 (0x3ffff >> (PAGE_SHIFT - 12)))
7644
7645-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7646-#define arch_randomize_brk arch_randomize_brk
7647-
7648-
7649 #ifdef CONFIG_SPU_BASE
7650 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
7651 #define NT_SPU 1
7652diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
7653index 8196e9c..d83a9f3 100644
7654--- a/arch/powerpc/include/asm/exec.h
7655+++ b/arch/powerpc/include/asm/exec.h
7656@@ -4,6 +4,6 @@
7657 #ifndef _ASM_POWERPC_EXEC_H
7658 #define _ASM_POWERPC_EXEC_H
7659
7660-extern unsigned long arch_align_stack(unsigned long sp);
7661+#define arch_align_stack(x) ((x) & ~0xfUL)
7662
7663 #endif /* _ASM_POWERPC_EXEC_H */
7664diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
7665index 5acabbd..7ea14fa 100644
7666--- a/arch/powerpc/include/asm/kmap_types.h
7667+++ b/arch/powerpc/include/asm/kmap_types.h
7668@@ -10,7 +10,7 @@
7669 * 2 of the License, or (at your option) any later version.
7670 */
7671
7672-#define KM_TYPE_NR 16
7673+#define KM_TYPE_NR 17
7674
7675 #endif /* __KERNEL__ */
7676 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
7677diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
7678index 8565c25..2865190 100644
7679--- a/arch/powerpc/include/asm/mman.h
7680+++ b/arch/powerpc/include/asm/mman.h
7681@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
7682 }
7683 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
7684
7685-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
7686+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
7687 {
7688 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
7689 }
7690diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
7691index 32e4e21..62afb12 100644
7692--- a/arch/powerpc/include/asm/page.h
7693+++ b/arch/powerpc/include/asm/page.h
7694@@ -230,8 +230,9 @@ extern long long virt_phys_offset;
7695 * and needs to be executable. This means the whole heap ends
7696 * up being executable.
7697 */
7698-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
7699- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7700+#define VM_DATA_DEFAULT_FLAGS32 \
7701+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
7702+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7703
7704 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
7705 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7706@@ -259,6 +260,9 @@ extern long long virt_phys_offset;
7707 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
7708 #endif
7709
7710+#define ktla_ktva(addr) (addr)
7711+#define ktva_ktla(addr) (addr)
7712+
7713 #ifndef CONFIG_PPC_BOOK3S_64
7714 /*
7715 * Use the top bit of the higher-level page table entries to indicate whether
7716diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
7717index 88693ce..ac6f9ab 100644
7718--- a/arch/powerpc/include/asm/page_64.h
7719+++ b/arch/powerpc/include/asm/page_64.h
7720@@ -153,15 +153,18 @@ do { \
7721 * stack by default, so in the absence of a PT_GNU_STACK program header
7722 * we turn execute permission off.
7723 */
7724-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
7725- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7726+#define VM_STACK_DEFAULT_FLAGS32 \
7727+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
7728+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7729
7730 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
7731 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7732
7733+#ifndef CONFIG_PAX_PAGEEXEC
7734 #define VM_STACK_DEFAULT_FLAGS \
7735 (is_32bit_task() ? \
7736 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
7737+#endif
7738
7739 #include <asm-generic/getorder.h>
7740
7741diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
7742index 4b0be20..c15a27d 100644
7743--- a/arch/powerpc/include/asm/pgalloc-64.h
7744+++ b/arch/powerpc/include/asm/pgalloc-64.h
7745@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
7746 #ifndef CONFIG_PPC_64K_PAGES
7747
7748 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
7749+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
7750
7751 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
7752 {
7753@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
7754 pud_set(pud, (unsigned long)pmd);
7755 }
7756
7757+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
7758+{
7759+ pud_populate(mm, pud, pmd);
7760+}
7761+
7762 #define pmd_populate(mm, pmd, pte_page) \
7763 pmd_populate_kernel(mm, pmd, page_address(pte_page))
7764 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
7765@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
7766 #endif
7767
7768 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
7769+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
7770
7771 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
7772 pte_t *pte)
7773diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
7774index 7d6eacf..14c0240 100644
7775--- a/arch/powerpc/include/asm/pgtable.h
7776+++ b/arch/powerpc/include/asm/pgtable.h
7777@@ -2,6 +2,7 @@
7778 #define _ASM_POWERPC_PGTABLE_H
7779 #ifdef __KERNEL__
7780
7781+#include <linux/const.h>
7782 #ifndef __ASSEMBLY__
7783 #include <asm/processor.h> /* For TASK_SIZE */
7784 #include <asm/mmu.h>
7785diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
7786index 4aad413..85d86bf 100644
7787--- a/arch/powerpc/include/asm/pte-hash32.h
7788+++ b/arch/powerpc/include/asm/pte-hash32.h
7789@@ -21,6 +21,7 @@
7790 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
7791 #define _PAGE_USER 0x004 /* usermode access allowed */
7792 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
7793+#define _PAGE_EXEC _PAGE_GUARDED
7794 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
7795 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
7796 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
7797diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
7798index fa8388e..f985549 100644
7799--- a/arch/powerpc/include/asm/reg.h
7800+++ b/arch/powerpc/include/asm/reg.h
7801@@ -239,6 +239,7 @@
7802 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
7803 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
7804 #define DSISR_NOHPTE 0x40000000 /* no translation found */
7805+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
7806 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
7807 #define DSISR_ISSTORE 0x02000000 /* access was a store */
7808 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
7809diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
7810index 084e080..9415a3d 100644
7811--- a/arch/powerpc/include/asm/smp.h
7812+++ b/arch/powerpc/include/asm/smp.h
7813@@ -51,7 +51,7 @@ struct smp_ops_t {
7814 int (*cpu_disable)(void);
7815 void (*cpu_die)(unsigned int nr);
7816 int (*cpu_bootable)(unsigned int nr);
7817-};
7818+} __no_const;
7819
7820 extern void smp_send_debugger_break(void);
7821 extern void start_secondary_resume(void);
7822diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
7823index 9854c56..7517190 100644
7824--- a/arch/powerpc/include/asm/thread_info.h
7825+++ b/arch/powerpc/include/asm/thread_info.h
7826@@ -91,7 +91,6 @@ static inline struct thread_info *current_thread_info(void)
7827 #define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
7828 TIF_NEED_RESCHED */
7829 #define TIF_32BIT 4 /* 32 bit binary */
7830-#define TIF_PERFMON_WORK 5 /* work for pfm_handle_work() */
7831 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
7832 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
7833 #define TIF_SINGLESTEP 8 /* singlestepping active */
7834@@ -108,6 +107,9 @@ static inline struct thread_info *current_thread_info(void)
7835 #if defined(CONFIG_PPC64)
7836 #define TIF_ELF2ABI 18 /* function descriptors must die! */
7837 #endif
7838+#define TIF_PERFMON_WORK 19 /* work for pfm_handle_work() */
7839+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
7840+#define TIF_GRSEC_SETXID 5 /* update credentials on syscall entry/exit */
7841
7842 /* as above, but as bit values */
7843 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
7844@@ -127,9 +129,10 @@ static inline struct thread_info *current_thread_info(void)
7845 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
7846 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
7847 #define _TIF_NOHZ (1<<TIF_NOHZ)
7848+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
7849 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
7850 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
7851- _TIF_NOHZ)
7852+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
7853
7854 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
7855 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
7856diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
7857index 9485b43..4718d50 100644
7858--- a/arch/powerpc/include/asm/uaccess.h
7859+++ b/arch/powerpc/include/asm/uaccess.h
7860@@ -318,52 +318,6 @@ do { \
7861 extern unsigned long __copy_tofrom_user(void __user *to,
7862 const void __user *from, unsigned long size);
7863
7864-#ifndef __powerpc64__
7865-
7866-static inline unsigned long copy_from_user(void *to,
7867- const void __user *from, unsigned long n)
7868-{
7869- unsigned long over;
7870-
7871- if (access_ok(VERIFY_READ, from, n))
7872- return __copy_tofrom_user((__force void __user *)to, from, n);
7873- if ((unsigned long)from < TASK_SIZE) {
7874- over = (unsigned long)from + n - TASK_SIZE;
7875- return __copy_tofrom_user((__force void __user *)to, from,
7876- n - over) + over;
7877- }
7878- return n;
7879-}
7880-
7881-static inline unsigned long copy_to_user(void __user *to,
7882- const void *from, unsigned long n)
7883-{
7884- unsigned long over;
7885-
7886- if (access_ok(VERIFY_WRITE, to, n))
7887- return __copy_tofrom_user(to, (__force void __user *)from, n);
7888- if ((unsigned long)to < TASK_SIZE) {
7889- over = (unsigned long)to + n - TASK_SIZE;
7890- return __copy_tofrom_user(to, (__force void __user *)from,
7891- n - over) + over;
7892- }
7893- return n;
7894-}
7895-
7896-#else /* __powerpc64__ */
7897-
7898-#define __copy_in_user(to, from, size) \
7899- __copy_tofrom_user((to), (from), (size))
7900-
7901-extern unsigned long copy_from_user(void *to, const void __user *from,
7902- unsigned long n);
7903-extern unsigned long copy_to_user(void __user *to, const void *from,
7904- unsigned long n);
7905-extern unsigned long copy_in_user(void __user *to, const void __user *from,
7906- unsigned long n);
7907-
7908-#endif /* __powerpc64__ */
7909-
7910 static inline unsigned long __copy_from_user_inatomic(void *to,
7911 const void __user *from, unsigned long n)
7912 {
7913@@ -387,6 +341,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
7914 if (ret == 0)
7915 return 0;
7916 }
7917+
7918+ if (!__builtin_constant_p(n))
7919+ check_object_size(to, n, false);
7920+
7921 return __copy_tofrom_user((__force void __user *)to, from, n);
7922 }
7923
7924@@ -413,6 +371,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
7925 if (ret == 0)
7926 return 0;
7927 }
7928+
7929+ if (!__builtin_constant_p(n))
7930+ check_object_size(from, n, true);
7931+
7932 return __copy_tofrom_user(to, (__force const void __user *)from, n);
7933 }
7934
7935@@ -430,6 +392,92 @@ static inline unsigned long __copy_to_user(void __user *to,
7936 return __copy_to_user_inatomic(to, from, size);
7937 }
7938
7939+#ifndef __powerpc64__
7940+
7941+static inline unsigned long __must_check copy_from_user(void *to,
7942+ const void __user *from, unsigned long n)
7943+{
7944+ unsigned long over;
7945+
7946+ if ((long)n < 0)
7947+ return n;
7948+
7949+ if (access_ok(VERIFY_READ, from, n)) {
7950+ if (!__builtin_constant_p(n))
7951+ check_object_size(to, n, false);
7952+ return __copy_tofrom_user((__force void __user *)to, from, n);
7953+ }
7954+ if ((unsigned long)from < TASK_SIZE) {
7955+ over = (unsigned long)from + n - TASK_SIZE;
7956+ if (!__builtin_constant_p(n - over))
7957+ check_object_size(to, n - over, false);
7958+ return __copy_tofrom_user((__force void __user *)to, from,
7959+ n - over) + over;
7960+ }
7961+ return n;
7962+}
7963+
7964+static inline unsigned long __must_check copy_to_user(void __user *to,
7965+ const void *from, unsigned long n)
7966+{
7967+ unsigned long over;
7968+
7969+ if ((long)n < 0)
7970+ return n;
7971+
7972+ if (access_ok(VERIFY_WRITE, to, n)) {
7973+ if (!__builtin_constant_p(n))
7974+ check_object_size(from, n, true);
7975+ return __copy_tofrom_user(to, (__force void __user *)from, n);
7976+ }
7977+ if ((unsigned long)to < TASK_SIZE) {
7978+ over = (unsigned long)to + n - TASK_SIZE;
7979+ if (!__builtin_constant_p(n - over))
7980+ check_object_size(from, n - over, true);
7981+ return __copy_tofrom_user(to, (__force void __user *)from,
7982+ n - over) + over;
7983+ }
7984+ return n;
7985+}
7986+
7987+#else /* __powerpc64__ */
7988+
7989+#define __copy_in_user(to, from, size) \
7990+ __copy_tofrom_user((to), (from), (size))
7991+
7992+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
7993+{
7994+ if ((long)n < 0 || n > INT_MAX)
7995+ return n;
7996+
7997+ if (!__builtin_constant_p(n))
7998+ check_object_size(to, n, false);
7999+
8000+ if (likely(access_ok(VERIFY_READ, from, n)))
8001+ n = __copy_from_user(to, from, n);
8002+ else
8003+ memset(to, 0, n);
8004+ return n;
8005+}
8006+
8007+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
8008+{
8009+ if ((long)n < 0 || n > INT_MAX)
8010+ return n;
8011+
8012+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
8013+ if (!__builtin_constant_p(n))
8014+ check_object_size(from, n, true);
8015+ n = __copy_to_user(to, from, n);
8016+ }
8017+ return n;
8018+}
8019+
8020+extern unsigned long copy_in_user(void __user *to, const void __user *from,
8021+ unsigned long n);
8022+
8023+#endif /* __powerpc64__ */
8024+
8025 extern unsigned long __clear_user(void __user *addr, unsigned long size);
8026
8027 static inline unsigned long clear_user(void __user *addr, unsigned long size)
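The added '(long)n < 0' (and, on 64-bit, 'n > INT_MAX') guards catch unsigned length underflows before any copying starts: a huge n is returned untouched, which every caller already treats as "n bytes not copied", i.e. failure. A hypothetical caller showing the bug class (names illustrative):

static long example_read(void *kbuf, void __user *ubuf, size_t have)
{
	size_t want = have - 8;	/* bug: underflows to ~2^64 when have < 8 */

	/* with the guard, copy_to_user() refuses the multi-gigabyte
	 * request and returns 'want' unchanged, so we fail cleanly */
	if (copy_to_user(ubuf, kbuf, want))
		return -EFAULT;
	return 0;
}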
8028diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
8029index e775156..af2d1c0 100644
8030--- a/arch/powerpc/kernel/exceptions-64e.S
8031+++ b/arch/powerpc/kernel/exceptions-64e.S
8032@@ -759,6 +759,7 @@ storage_fault_common:
8033 std r14,_DAR(r1)
8034 std r15,_DSISR(r1)
8035 addi r3,r1,STACK_FRAME_OVERHEAD
8036+ bl .save_nvgprs
8037 mr r4,r14
8038 mr r5,r15
8039 ld r14,PACA_EXGEN+EX_R14(r13)
8040@@ -767,8 +768,7 @@ storage_fault_common:
8041 cmpdi r3,0
8042 bne- 1f
8043 b .ret_from_except_lite
8044-1: bl .save_nvgprs
8045- mr r5,r3
8046+1: mr r5,r3
8047 addi r3,r1,STACK_FRAME_OVERHEAD
8048 ld r4,_DAR(r1)
8049 bl .bad_page_fault
8050diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
8051index 9f905e4..1d6b3d2 100644
8052--- a/arch/powerpc/kernel/exceptions-64s.S
8053+++ b/arch/powerpc/kernel/exceptions-64s.S
8054@@ -1390,10 +1390,10 @@ handle_page_fault:
8055 11: ld r4,_DAR(r1)
8056 ld r5,_DSISR(r1)
8057 addi r3,r1,STACK_FRAME_OVERHEAD
8058+ bl .save_nvgprs
8059 bl .do_page_fault
8060 cmpdi r3,0
8061 beq+ 12f
8062- bl .save_nvgprs
8063 mr r5,r3
8064 addi r3,r1,STACK_FRAME_OVERHEAD
8065 lwz r4,_DAR(r1)
8066diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
8067index 6cff040..74ac5d1 100644
8068--- a/arch/powerpc/kernel/module_32.c
8069+++ b/arch/powerpc/kernel/module_32.c
8070@@ -161,7 +161,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
8071 me->arch.core_plt_section = i;
8072 }
8073 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
8074- printk("Module doesn't contain .plt or .init.plt sections.\n");
8075+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
8076 return -ENOEXEC;
8077 }
8078
8079@@ -191,11 +191,16 @@ static uint32_t do_plt_call(void *location,
8080
8081 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
8082 /* Init, or core PLT? */
8083- if (location >= mod->module_core
8084- && location < mod->module_core + mod->core_size)
8085+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
8086+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
8087 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
8088- else
8089+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
8090+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
8091 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
8092+ else {
8093+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
8094+ return ~0UL;
8095+ }
8096
8097 /* Find this entry, or if that fails, the next avail. entry */
8098 while (entry->jump[0]) {
8099@@ -299,7 +304,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
8100 }
8101 #ifdef CONFIG_DYNAMIC_FTRACE
8102 module->arch.tramp =
8103- do_plt_call(module->module_core,
8104+ do_plt_call(module->module_core_rx,
8105 (unsigned long)ftrace_caller,
8106 sechdrs, module);
8107 #endif
8108diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
8109index 4a96556..dd95f6c 100644
8110--- a/arch/powerpc/kernel/process.c
8111+++ b/arch/powerpc/kernel/process.c
8112@@ -888,8 +888,8 @@ void show_regs(struct pt_regs * regs)
8113 * Lookup NIP late so we have the best change of getting the
8114 * above info out without failing
8115 */
8116- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
8117- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
8118+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
8119+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
8120 #endif
8121 show_stack(current, (unsigned long *) regs->gpr[1]);
8122 if (!user_mode(regs))
8123@@ -1376,10 +1376,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8124 newsp = stack[0];
8125 ip = stack[STACK_FRAME_LR_SAVE];
8126 if (!firstframe || ip != lr) {
8127- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
8128+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
8129 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8130 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
8131- printk(" (%pS)",
8132+ printk(" (%pA)",
8133 (void *)current->ret_stack[curr_frame].ret);
8134 curr_frame--;
8135 }
8136@@ -1399,7 +1399,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8137 struct pt_regs *regs = (struct pt_regs *)
8138 (sp + STACK_FRAME_OVERHEAD);
8139 lr = regs->link;
8140- printk("--- Exception: %lx at %pS\n LR = %pS\n",
8141+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
8142 regs->trap, (void *)regs->nip, (void *)lr);
8143 firstframe = 1;
8144 }
8145@@ -1435,58 +1435,3 @@ void notrace __ppc64_runlatch_off(void)
8146 mtspr(SPRN_CTRLT, ctrl);
8147 }
8148 #endif /* CONFIG_PPC64 */
8149-
8150-unsigned long arch_align_stack(unsigned long sp)
8151-{
8152- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8153- sp -= get_random_int() & ~PAGE_MASK;
8154- return sp & ~0xf;
8155-}
8156-
8157-static inline unsigned long brk_rnd(void)
8158-{
8159- unsigned long rnd = 0;
8160-
8161- /* 8MB for 32bit, 1GB for 64bit */
8162- if (is_32bit_task())
8163- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
8164- else
8165- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
8166-
8167- return rnd << PAGE_SHIFT;
8168-}
8169-
8170-unsigned long arch_randomize_brk(struct mm_struct *mm)
8171-{
8172- unsigned long base = mm->brk;
8173- unsigned long ret;
8174-
8175-#ifdef CONFIG_PPC_STD_MMU_64
8176- /*
8177- * If we are using 1TB segments and we are allowed to randomise
8178- * the heap, we can put it above 1TB so it is backed by a 1TB
8179- * segment. Otherwise the heap will be in the bottom 1TB
8180- * which always uses 256MB segments and this may result in a
8181- * performance penalty.
8182- */
8183- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
8184- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
8185-#endif
8186-
8187- ret = PAGE_ALIGN(base + brk_rnd());
8188-
8189- if (ret < mm->brk)
8190- return mm->brk;
8191-
8192- return ret;
8193-}
8194-
8195-unsigned long randomize_et_dyn(unsigned long base)
8196-{
8197- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
8198-
8199- if (ret < base)
8200- return base;
8201-
8202- return ret;
8203-}
8204diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
8205index 2e3d2bf..35df241 100644
8206--- a/arch/powerpc/kernel/ptrace.c
8207+++ b/arch/powerpc/kernel/ptrace.c
8208@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
8209 return ret;
8210 }
8211
8212+#ifdef CONFIG_GRKERNSEC_SETXID
8213+extern void gr_delayed_cred_worker(void);
8214+#endif
8215+
8216 /*
8217 * We must return the syscall number to actually look up in the table.
8218 * This can be -1L to skip running any syscall at all.
8219@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
8220
8221 secure_computing_strict(regs->gpr[0]);
8222
8223+#ifdef CONFIG_GRKERNSEC_SETXID
8224+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8225+ gr_delayed_cred_worker();
8226+#endif
8227+
8228 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
8229 tracehook_report_syscall_entry(regs))
8230 /*
8231@@ -1808,6 +1817,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
8232 {
8233 int step;
8234
8235+#ifdef CONFIG_GRKERNSEC_SETXID
8236+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8237+ gr_delayed_cred_worker();
8238+#endif
8239+
8240 audit_syscall_exit(regs);
8241
8242 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
8243diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
8244index 68027bf..b26fd31 100644
8245--- a/arch/powerpc/kernel/signal_32.c
8246+++ b/arch/powerpc/kernel/signal_32.c
8247@@ -1004,7 +1004,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
8248 /* Save user registers on the stack */
8249 frame = &rt_sf->uc.uc_mcontext;
8250 addr = frame;
8251- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
8252+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
8253 sigret = 0;
8254 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
8255 } else {
8256diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
8257index 4299104..29e2c51 100644
8258--- a/arch/powerpc/kernel/signal_64.c
8259+++ b/arch/powerpc/kernel/signal_64.c
8260@@ -758,7 +758,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
8261 #endif
8262
8263 /* Set up to return from userspace. */
8264- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
8265+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
8266 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
8267 } else {
8268 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
8269diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
8270index 907a472..4ba206f 100644
8271--- a/arch/powerpc/kernel/traps.c
8272+++ b/arch/powerpc/kernel/traps.c
8273@@ -142,6 +142,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
8274 return flags;
8275 }
8276
8277+extern void gr_handle_kernel_exploit(void);
8278+
8279 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
8280 int signr)
8281 {
8282@@ -191,6 +193,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
8283 panic("Fatal exception in interrupt");
8284 if (panic_on_oops)
8285 panic("Fatal exception");
8286+
8287+ gr_handle_kernel_exploit();
8288+
8289 do_exit(signr);
8290 }
8291
8292diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
8293index 094e45c..d82b848 100644
8294--- a/arch/powerpc/kernel/vdso.c
8295+++ b/arch/powerpc/kernel/vdso.c
8296@@ -35,6 +35,7 @@
8297 #include <asm/vdso.h>
8298 #include <asm/vdso_datapage.h>
8299 #include <asm/setup.h>
8300+#include <asm/mman.h>
8301
8302 #undef DEBUG
8303
8304@@ -221,7 +222,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
8305 vdso_base = VDSO32_MBASE;
8306 #endif
8307
8308- current->mm->context.vdso_base = 0;
8309+ current->mm->context.vdso_base = ~0UL;
8310
8311 /* vDSO has a problem and was disabled, just don't "enable" it for the
8312 * process
8313@@ -241,7 +242,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
8314 vdso_base = get_unmapped_area(NULL, vdso_base,
8315 (vdso_pages << PAGE_SHIFT) +
8316 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
8317- 0, 0);
8318+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
8319 if (IS_ERR_VALUE(vdso_base)) {
8320 rc = vdso_base;
8321 goto fail_mmapsem;
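vdso_base switches its "no vDSO" sentinel from 0 to ~0UL: 0 is a weak sentinel since the NULL page can, in principle, be mapped, whereas ~0UL can never be a valid base. The signal_32.c and signal_64.c hunks above test the same sentinel before trusting the vDSO trampolines. The convention, spelled out as a sketch:

/* Illustrative helper, not from the patch: ~0UL means "no vDSO mapped";
 * any other value, including 0, is a real mapping base. */
static inline int vdso_mapped_example(struct mm_struct *mm)
{
	return mm->context.vdso_base != ~0UL;
}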
8322diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
8323index 5eea6f3..5d10396 100644
8324--- a/arch/powerpc/lib/usercopy_64.c
8325+++ b/arch/powerpc/lib/usercopy_64.c
8326@@ -9,22 +9,6 @@
8327 #include <linux/module.h>
8328 #include <asm/uaccess.h>
8329
8330-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
8331-{
8332- if (likely(access_ok(VERIFY_READ, from, n)))
8333- n = __copy_from_user(to, from, n);
8334- else
8335- memset(to, 0, n);
8336- return n;
8337-}
8338-
8339-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
8340-{
8341- if (likely(access_ok(VERIFY_WRITE, to, n)))
8342- n = __copy_to_user(to, from, n);
8343- return n;
8344-}
8345-
8346 unsigned long copy_in_user(void __user *to, const void __user *from,
8347 unsigned long n)
8348 {
8349@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
8350 return n;
8351 }
8352
8353-EXPORT_SYMBOL(copy_from_user);
8354-EXPORT_SYMBOL(copy_to_user);
8355 EXPORT_SYMBOL(copy_in_user);
8356
8357diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
8358index 51ab9e7..7d3c78b 100644
8359--- a/arch/powerpc/mm/fault.c
8360+++ b/arch/powerpc/mm/fault.c
8361@@ -33,6 +33,10 @@
8362 #include <linux/magic.h>
8363 #include <linux/ratelimit.h>
8364 #include <linux/context_tracking.h>
8365+#include <linux/slab.h>
8366+#include <linux/pagemap.h>
8367+#include <linux/compiler.h>
8368+#include <linux/unistd.h>
8369
8370 #include <asm/firmware.h>
8371 #include <asm/page.h>
8372@@ -69,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
8373 }
8374 #endif
8375
8376+#ifdef CONFIG_PAX_PAGEEXEC
8377+/*
8378+ * PaX: decide what to do with offenders (regs->nip = fault address)
8379+ *
8380+ * returns 1 when task should be killed
8381+ */
8382+static int pax_handle_fetch_fault(struct pt_regs *regs)
8383+{
8384+ return 1;
8385+}
8386+
8387+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
8388+{
8389+ unsigned long i;
8390+
8391+ printk(KERN_ERR "PAX: bytes at PC: ");
8392+ for (i = 0; i < 5; i++) {
8393+ unsigned int c;
8394+ if (get_user(c, (unsigned int __user *)pc+i))
8395+ printk(KERN_CONT "???????? ");
8396+ else
8397+ printk(KERN_CONT "%08x ", c);
8398+ }
8399+ printk("\n");
8400+}
8401+#endif
8402+
8403 /*
8404 * Check whether the instruction at regs->nip is a store using
8405 * an update addressing form which will update r1.
8406@@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
8407 * indicate errors in DSISR but can validly be set in SRR1.
8408 */
8409 if (trap == 0x400)
8410- error_code &= 0x48200000;
8411+ error_code &= 0x58200000;
8412 else
8413 is_write = error_code & DSISR_ISSTORE;
8414 #else
8415@@ -378,7 +409,7 @@ good_area:
8416 * "undefined". Of those that can be set, this is the only
8417 * one which seems bad.
8418 */
8419- if (error_code & 0x10000000)
8420+ if (error_code & DSISR_GUARDED)
8421 /* Guarded storage error. */
8422 goto bad_area;
8423 #endif /* CONFIG_8xx */
8424@@ -393,7 +424,7 @@ good_area:
8425 * processors use the same I/D cache coherency mechanism
8426 * as embedded.
8427 */
8428- if (error_code & DSISR_PROTFAULT)
8429+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
8430 goto bad_area;
8431 #endif /* CONFIG_PPC_STD_MMU */
8432
8433@@ -483,6 +514,23 @@ bad_area:
8434 bad_area_nosemaphore:
8435 /* User mode accesses cause a SIGSEGV */
8436 if (user_mode(regs)) {
8437+
8438+#ifdef CONFIG_PAX_PAGEEXEC
8439+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
8440+#ifdef CONFIG_PPC_STD_MMU
8441+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
8442+#else
8443+ if (is_exec && regs->nip == address) {
8444+#endif
8445+ switch (pax_handle_fetch_fault(regs)) {
8446+ }
8447+
8448+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
8449+ do_group_exit(SIGKILL);
8450+ }
8451+ }
8452+#endif
8453+
8454 _exception(SIGSEGV, regs, code, address);
8455 goto bail;
8456 }
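The PAGEEXEC hunk above wires instruction-fetch faults into a reporting path that dumps the five instruction words at the faulting PC before the task is killed. A minimal userspace sketch of that reporting loop follows; read_ok() is an invented stand-in for get_user(), and the fault boundary is simulated rather than real.

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>

	/* Pretend reads at or past 'lim' fault, like get_user() on a bad pointer. */
	static int read_ok(uint32_t *dst, const uint32_t *src, const uint32_t *lim)
	{
		if (src >= lim)
			return -1;
		memcpy(dst, src, sizeof(*dst));
		return 0;
	}

	/* Mirrors pax_report_insns(): five words, "????????" where a read faults. */
	static void report_insns(const uint32_t *pc, const uint32_t *lim)
	{
		printf("PAX: bytes at PC: ");
		for (unsigned int i = 0; i < 5; i++) {
			uint32_t insn;
			if (read_ok(&insn, pc + i, lim))
				printf("???????? ");
			else
				printf("%08x ", insn);
		}
		printf("\n");
	}

	int main(void)
	{
		uint32_t text[3] = { 0x7c0802a6, 0x38210010, 0x4e800020 };
		report_insns(text, text + 3);	/* last two words "fault" */
		return 0;
	}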
8457diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
8458index cb8bdbe..d770680 100644
8459--- a/arch/powerpc/mm/mmap.c
8460+++ b/arch/powerpc/mm/mmap.c
8461@@ -57,6 +57,10 @@ static unsigned long mmap_rnd(void)
8462 {
8463 unsigned long rnd = 0;
8464
8465+#ifdef CONFIG_PAX_RANDMMAP
8466+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8467+#endif
8468+
8469 if (current->flags & PF_RANDOMIZE) {
8470 /* 8MB for 32bit, 1GB for 64bit */
8471 if (is_32bit_task())
8472@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8473 */
8474 if (mmap_is_legacy()) {
8475 mm->mmap_base = TASK_UNMAPPED_BASE;
8476+
8477+#ifdef CONFIG_PAX_RANDMMAP
8478+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8479+ mm->mmap_base += mm->delta_mmap;
8480+#endif
8481+
8482 mm->get_unmapped_area = arch_get_unmapped_area;
8483 } else {
8484 mm->mmap_base = mmap_base();
8485+
8486+#ifdef CONFIG_PAX_RANDMMAP
8487+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8488+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8489+#endif
8490+
8491 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8492 }
8493 }
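The RANDMMAP hunks above bias the mmap base in opposite directions depending on layout: a bottom-up (legacy) layout moves the base up by delta_mmap, while a top-down layout moves it down by delta_mmap + delta_stack so the randomized stack region keeps its gap. A toy calculation; the base addresses and delta values below are invented for illustration.

	#include <stdio.h>

	int main(void)
	{
		unsigned long task_unmapped_base = 0x40000000UL;	/* illustrative */
		unsigned long mmap_base_topdown  = 0xc0000000UL;	/* illustrative */
		unsigned long delta_mmap  = 0x0345000UL;	/* picked at exec time */
		unsigned long delta_stack = 0x0012000UL;

		printf("legacy (bottom-up) base: %#lx\n",
		       task_unmapped_base + delta_mmap);
		printf("top-down base:           %#lx\n",
		       mmap_base_topdown - (delta_mmap + delta_stack));
		return 0;
	}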
8494diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
8495index 7ce9cf3..a964087 100644
8496--- a/arch/powerpc/mm/slice.c
8497+++ b/arch/powerpc/mm/slice.c
8498@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
8499 if ((mm->task_size - len) < addr)
8500 return 0;
8501 vma = find_vma(mm, addr);
8502- return (!vma || (addr + len) <= vma->vm_start);
8503+ return check_heap_stack_gap(vma, addr, len, 0);
8504 }
8505
8506 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
8507@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
8508 info.align_offset = 0;
8509
8510 addr = TASK_UNMAPPED_BASE;
8511+
8512+#ifdef CONFIG_PAX_RANDMMAP
8513+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8514+ addr += mm->delta_mmap;
8515+#endif
8516+
8517 while (addr < TASK_SIZE) {
8518 info.low_limit = addr;
8519 if (!slice_scan_available(addr, available, 1, &addr))
8520@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
8521 if (fixed && addr > (mm->task_size - len))
8522 return -EINVAL;
8523
8524+#ifdef CONFIG_PAX_RANDMMAP
8525+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
8526+ addr = 0;
8527+#endif
8528+
8529 /* If hint, make sure it matches our alignment restrictions */
8530 if (!fixed && addr) {
8531 addr = _ALIGN_UP(addr, 1ul << pshift);
8532diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
8533index 9098692..3d54cd1 100644
8534--- a/arch/powerpc/platforms/cell/spufs/file.c
8535+++ b/arch/powerpc/platforms/cell/spufs/file.c
8536@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
8537 return VM_FAULT_NOPAGE;
8538 }
8539
8540-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
8541+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
8542 unsigned long address,
8543- void *buf, int len, int write)
8544+ void *buf, size_t len, int write)
8545 {
8546 struct spu_context *ctx = vma->vm_file->private_data;
8547 unsigned long offset = address - vma->vm_start;
8548diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
8549index fa9aaf7..3f5d836 100644
8550--- a/arch/s390/include/asm/atomic.h
8551+++ b/arch/s390/include/asm/atomic.h
8552@@ -398,6 +398,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
8553 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
8554 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8555
8556+#define atomic64_read_unchecked(v) atomic64_read(v)
8557+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
8558+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
8559+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
8560+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
8561+#define atomic64_inc_unchecked(v) atomic64_inc(v)
8562+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
8563+#define atomic64_dec_unchecked(v) atomic64_dec(v)
8564+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
8565+
8566 #define smp_mb__before_atomic_dec() smp_mb()
8567 #define smp_mb__after_atomic_dec() smp_mb()
8568 #define smp_mb__before_atomic_inc() smp_mb()
8569diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
8570index 4d7ccac..d03d0ad 100644
8571--- a/arch/s390/include/asm/cache.h
8572+++ b/arch/s390/include/asm/cache.h
8573@@ -9,8 +9,10 @@
8574 #ifndef __ARCH_S390_CACHE_H
8575 #define __ARCH_S390_CACHE_H
8576
8577-#define L1_CACHE_BYTES 256
8578+#include <linux/const.h>
8579+
8580 #define L1_CACHE_SHIFT 8
8581+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8582 #define NET_SKB_PAD 32
8583
8584 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
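The recurring L1_CACHE_BYTES change here (repeated below for score, sh, and sparc) swaps a bare integer for _AC(1,UL) << L1_CACHE_SHIFT so the constant is typed unsigned long in C while the same header still works from assembly. Reduced sketch of the <linux/const.h> machinery it relies on:

	/* From <linux/const.h>, reduced: in assembly the type suffix is dropped. */
	#ifdef __ASSEMBLY__
	#define _AC(X, Y)	X
	#else
	#define __AC(X, Y)	(X##Y)
	#define _AC(X, Y)	__AC(X, Y)
	#endif

	#define L1_CACHE_SHIFT	8
	#define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)	/* 256UL in C, 256 in asm */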
8585diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
8586index 78f4f87..598ce39 100644
8587--- a/arch/s390/include/asm/elf.h
8588+++ b/arch/s390/include/asm/elf.h
8589@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
8590 the loader. We need to make sure that it is out of the way of the program
8591 that it will "exec", and that there is sufficient room for the brk. */
8592
8593-extern unsigned long randomize_et_dyn(unsigned long base);
8594-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
8595+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
8596+
8597+#ifdef CONFIG_PAX_ASLR
8598+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
8599+
8600+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
8601+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
8602+#endif
8603
8604 /* This yields a mask that user programs can use to figure out what
8605 instruction set this CPU supports. */
8606@@ -222,9 +228,6 @@ struct linux_binprm;
8607 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
8608 int arch_setup_additional_pages(struct linux_binprm *, int);
8609
8610-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8611-#define arch_randomize_brk arch_randomize_brk
8612-
8613 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
8614
8615 #endif
8616diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
8617index c4a93d6..4d2a9b4 100644
8618--- a/arch/s390/include/asm/exec.h
8619+++ b/arch/s390/include/asm/exec.h
8620@@ -7,6 +7,6 @@
8621 #ifndef __ASM_EXEC_H
8622 #define __ASM_EXEC_H
8623
8624-extern unsigned long arch_align_stack(unsigned long sp);
8625+#define arch_align_stack(x) ((x) & ~0xfUL)
8626
8627 #endif /* __ASM_EXEC_H */
8628diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
8629index 79330af..a3a7b06 100644
8630--- a/arch/s390/include/asm/uaccess.h
8631+++ b/arch/s390/include/asm/uaccess.h
8632@@ -245,6 +245,10 @@ static inline unsigned long __must_check
8633 copy_to_user(void __user *to, const void *from, unsigned long n)
8634 {
8635 might_fault();
8636+
8637+ if ((long)n < 0)
8638+ return n;
8639+
8640 return __copy_to_user(to, from, n);
8641 }
8642
8643@@ -268,6 +272,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
8644 static inline unsigned long __must_check
8645 __copy_from_user(void *to, const void __user *from, unsigned long n)
8646 {
8647+ if ((long)n < 0)
8648+ return n;
8649+
8650 return uaccess.copy_from_user(n, from, to);
8651 }
8652
8653@@ -296,10 +303,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
8654 static inline unsigned long __must_check
8655 copy_from_user(void *to, const void __user *from, unsigned long n)
8656 {
8657- unsigned int sz = __compiletime_object_size(to);
8658+ size_t sz = __compiletime_object_size(to);
8659
8660 might_fault();
8661- if (unlikely(sz != -1 && sz < n)) {
8662+
8663+ if ((long)n < 0)
8664+ return n;
8665+
8666+ if (unlikely(sz != (size_t)-1 && sz < n)) {
8667 copy_from_user_overflow();
8668 return n;
8669 }
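The (long)n < 0 checks added above treat a huge unsigned length (typically the result of an underflowed size calculation) as an error: returning n means "n bytes not copied", so the caller sees total failure instead of a multi-gigabyte copy. Sketch of the guard in isolation:

	#include <stdio.h>

	static unsigned long copy_guard(unsigned long n)
	{
		if ((long)n < 0)	/* n >= 2^63 on 64-bit: refuse outright */
			return n;	/* caller sees "n bytes left uncopied" */
		return 0;		/* pretend the copy succeeded */
	}

	int main(void)
	{
		unsigned long bad = 0UL - 8UL;	/* underflowed length */
		printf("left uncopied: %lu\n", copy_guard(bad));
		return 0;
	}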
8670diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
8671index b89b591..fd9609d 100644
8672--- a/arch/s390/kernel/module.c
8673+++ b/arch/s390/kernel/module.c
8674@@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
8675
8676 /* Increase core size by size of got & plt and set start
8677 offsets for got and plt. */
8678- me->core_size = ALIGN(me->core_size, 4);
8679- me->arch.got_offset = me->core_size;
8680- me->core_size += me->arch.got_size;
8681- me->arch.plt_offset = me->core_size;
8682- me->core_size += me->arch.plt_size;
8683+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
8684+ me->arch.got_offset = me->core_size_rw;
8685+ me->core_size_rw += me->arch.got_size;
8686+ me->arch.plt_offset = me->core_size_rx;
8687+ me->core_size_rx += me->arch.plt_size;
8688 return 0;
8689 }
8690
8691@@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8692 if (info->got_initialized == 0) {
8693 Elf_Addr *gotent;
8694
8695- gotent = me->module_core + me->arch.got_offset +
8696+ gotent = me->module_core_rw + me->arch.got_offset +
8697 info->got_offset;
8698 *gotent = val;
8699 info->got_initialized = 1;
8700@@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8701 rc = apply_rela_bits(loc, val, 0, 64, 0);
8702 else if (r_type == R_390_GOTENT ||
8703 r_type == R_390_GOTPLTENT) {
8704- val += (Elf_Addr) me->module_core - loc;
8705+ val += (Elf_Addr) me->module_core_rw - loc;
8706 rc = apply_rela_bits(loc, val, 1, 32, 1);
8707 }
8708 break;
8709@@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8710 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
8711 if (info->plt_initialized == 0) {
8712 unsigned int *ip;
8713- ip = me->module_core + me->arch.plt_offset +
8714+ ip = me->module_core_rx + me->arch.plt_offset +
8715 info->plt_offset;
8716 #ifndef CONFIG_64BIT
8717 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
8718@@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8719 val - loc + 0xffffUL < 0x1ffffeUL) ||
8720 (r_type == R_390_PLT32DBL &&
8721 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
8722- val = (Elf_Addr) me->module_core +
8723+ val = (Elf_Addr) me->module_core_rx +
8724 me->arch.plt_offset +
8725 info->plt_offset;
8726 val += rela->r_addend - loc;
8727@@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8728 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
8729 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
8730 val = val + rela->r_addend -
8731- ((Elf_Addr) me->module_core + me->arch.got_offset);
8732+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
8733 if (r_type == R_390_GOTOFF16)
8734 rc = apply_rela_bits(loc, val, 0, 16, 0);
8735 else if (r_type == R_390_GOTOFF32)
8736@@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8737 break;
8738 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
8739 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
8740- val = (Elf_Addr) me->module_core + me->arch.got_offset +
8741+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
8742 rela->r_addend - loc;
8743 if (r_type == R_390_GOTPC)
8744 rc = apply_rela_bits(loc, val, 1, 32, 0);
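The module.c hunks above retarget GOT fixups at a writable core region and PLT stubs at an executable one, which presupposes the RW/RX module-layout split the wider patch adds to struct module. The struct below is illustrative shape only: the field names follow the patch, but this is not the patch's definition.

	struct module_layout_sketch {
		void *module_core_rx;		/* code, mapped r-x: PLT stubs live here */
		unsigned long core_size_rx;
		void *module_core_rw;		/* data, mapped rw-: GOT lives here */
		unsigned long core_size_rw;
		unsigned long got_offset;	/* offset into the RW region */
		unsigned long plt_offset;	/* offset into the RX region */
	};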
8745diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
8746index 7ed0d4e..1dfc145 100644
8747--- a/arch/s390/kernel/process.c
8748+++ b/arch/s390/kernel/process.c
8749@@ -242,39 +242,3 @@ unsigned long get_wchan(struct task_struct *p)
8750 }
8751 return 0;
8752 }
8753-
8754-unsigned long arch_align_stack(unsigned long sp)
8755-{
8756- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8757- sp -= get_random_int() & ~PAGE_MASK;
8758- return sp & ~0xf;
8759-}
8760-
8761-static inline unsigned long brk_rnd(void)
8762-{
8763- /* 8MB for 32bit, 1GB for 64bit */
8764- if (is_32bit_task())
8765- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
8766- else
8767- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
8768-}
8769-
8770-unsigned long arch_randomize_brk(struct mm_struct *mm)
8771-{
8772- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
8773-
8774- if (ret < mm->brk)
8775- return mm->brk;
8776- return ret;
8777-}
8778-
8779-unsigned long randomize_et_dyn(unsigned long base)
8780-{
8781- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
8782-
8783- if (!(current->flags & PF_RANDOMIZE))
8784- return base;
8785- if (ret < base)
8786- return base;
8787- return ret;
8788-}
8789diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
8790index 9b436c2..54fbf0a 100644
8791--- a/arch/s390/mm/mmap.c
8792+++ b/arch/s390/mm/mmap.c
8793@@ -95,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8794 */
8795 if (mmap_is_legacy()) {
8796 mm->mmap_base = mmap_base_legacy();
8797+
8798+#ifdef CONFIG_PAX_RANDMMAP
8799+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8800+ mm->mmap_base += mm->delta_mmap;
8801+#endif
8802+
8803 mm->get_unmapped_area = arch_get_unmapped_area;
8804 } else {
8805 mm->mmap_base = mmap_base();
8806+
8807+#ifdef CONFIG_PAX_RANDMMAP
8808+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8809+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8810+#endif
8811+
8812 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8813 }
8814 }
8815@@ -170,9 +182,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8816 */
8817 if (mmap_is_legacy()) {
8818 mm->mmap_base = mmap_base_legacy();
8819+
8820+#ifdef CONFIG_PAX_RANDMMAP
8821+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8822+ mm->mmap_base += mm->delta_mmap;
8823+#endif
8824+
8825 mm->get_unmapped_area = s390_get_unmapped_area;
8826 } else {
8827 mm->mmap_base = mmap_base();
8828+
8829+#ifdef CONFIG_PAX_RANDMMAP
8830+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8831+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8832+#endif
8833+
8834 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
8835 }
8836 }
8837diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
8838index ae3d59f..f65f075 100644
8839--- a/arch/score/include/asm/cache.h
8840+++ b/arch/score/include/asm/cache.h
8841@@ -1,7 +1,9 @@
8842 #ifndef _ASM_SCORE_CACHE_H
8843 #define _ASM_SCORE_CACHE_H
8844
8845+#include <linux/const.h>
8846+
8847 #define L1_CACHE_SHIFT 4
8848-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8849+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8850
8851 #endif /* _ASM_SCORE_CACHE_H */
8852diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
8853index f9f3cd5..58ff438 100644
8854--- a/arch/score/include/asm/exec.h
8855+++ b/arch/score/include/asm/exec.h
8856@@ -1,6 +1,6 @@
8857 #ifndef _ASM_SCORE_EXEC_H
8858 #define _ASM_SCORE_EXEC_H
8859
8860-extern unsigned long arch_align_stack(unsigned long sp);
8861+#define arch_align_stack(x) (x)
8862
8863 #endif /* _ASM_SCORE_EXEC_H */
8864diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
8865index a1519ad3..e8ac1ff 100644
8866--- a/arch/score/kernel/process.c
8867+++ b/arch/score/kernel/process.c
8868@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
8869
8870 return task_pt_regs(task)->cp0_epc;
8871 }
8872-
8873-unsigned long arch_align_stack(unsigned long sp)
8874-{
8875- return sp;
8876-}
8877diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
8878index ef9e555..331bd29 100644
8879--- a/arch/sh/include/asm/cache.h
8880+++ b/arch/sh/include/asm/cache.h
8881@@ -9,10 +9,11 @@
8882 #define __ASM_SH_CACHE_H
8883 #ifdef __KERNEL__
8884
8885+#include <linux/const.h>
8886 #include <linux/init.h>
8887 #include <cpu/cache.h>
8888
8889-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8890+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8891
8892 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8893
8894diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
8895index 6777177..cb5e44f 100644
8896--- a/arch/sh/mm/mmap.c
8897+++ b/arch/sh/mm/mmap.c
8898@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8899 struct mm_struct *mm = current->mm;
8900 struct vm_area_struct *vma;
8901 int do_colour_align;
8902+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8903 struct vm_unmapped_area_info info;
8904
8905 if (flags & MAP_FIXED) {
8906@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8907 if (filp || (flags & MAP_SHARED))
8908 do_colour_align = 1;
8909
8910+#ifdef CONFIG_PAX_RANDMMAP
8911+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8912+#endif
8913+
8914 if (addr) {
8915 if (do_colour_align)
8916 addr = COLOUR_ALIGN(addr, pgoff);
8917@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8918 addr = PAGE_ALIGN(addr);
8919
8920 vma = find_vma(mm, addr);
8921- if (TASK_SIZE - len >= addr &&
8922- (!vma || addr + len <= vma->vm_start))
8923+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8924 return addr;
8925 }
8926
8927 info.flags = 0;
8928 info.length = len;
8929- info.low_limit = TASK_UNMAPPED_BASE;
8930+ info.low_limit = mm->mmap_base;
8931 info.high_limit = TASK_SIZE;
8932 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
8933 info.align_offset = pgoff << PAGE_SHIFT;
8934@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8935 struct mm_struct *mm = current->mm;
8936 unsigned long addr = addr0;
8937 int do_colour_align;
8938+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8939 struct vm_unmapped_area_info info;
8940
8941 if (flags & MAP_FIXED) {
8942@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8943 if (filp || (flags & MAP_SHARED))
8944 do_colour_align = 1;
8945
8946+#ifdef CONFIG_PAX_RANDMMAP
8947+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8948+#endif
8949+
8950 /* requesting a specific address */
8951 if (addr) {
8952 if (do_colour_align)
8953@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8954 addr = PAGE_ALIGN(addr);
8955
8956 vma = find_vma(mm, addr);
8957- if (TASK_SIZE - len >= addr &&
8958- (!vma || addr + len <= vma->vm_start))
8959+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8960 return addr;
8961 }
8962
8963@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8964 VM_BUG_ON(addr != -ENOMEM);
8965 info.flags = 0;
8966 info.low_limit = TASK_UNMAPPED_BASE;
8967+
8968+#ifdef CONFIG_PAX_RANDMMAP
8969+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8970+ info.low_limit += mm->delta_mmap;
8971+#endif
8972+
8973 info.high_limit = TASK_SIZE;
8974 addr = vm_unmapped_area(&info);
8975 }
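check_heap_stack_gap() replaces the open-coded "!vma || addr + len <= vma->vm_start" test throughout these hunks. The helper is grsecurity-specific; the reduced sketch below shows only the general shape (keep a guard gap, plus a per-thread offset, below stack-like VMAs) and is not the patch's implementation.

	#include <stdbool.h>

	struct vma_sketch {
		unsigned long vm_start;
		bool grows_down;	/* stack-like VMA */
	};

	#define GUARD_GAP (64UL << 10)	/* illustrative gap size */

	static bool gap_ok(const struct vma_sketch *vma, unsigned long addr,
			   unsigned long len, unsigned long offset)
	{
		if (!vma)
			return true;	/* nothing above us: the original "!vma" case */
		if (!vma->grows_down)
			return addr + len <= vma->vm_start;
		/* leave room for the stack to grow, plus the thread-stack offset */
		return addr + len + GUARD_GAP + offset <= vma->vm_start;
	}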
8976diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
8977index be56a24..443328f 100644
8978--- a/arch/sparc/include/asm/atomic_64.h
8979+++ b/arch/sparc/include/asm/atomic_64.h
8980@@ -14,18 +14,40 @@
8981 #define ATOMIC64_INIT(i) { (i) }
8982
8983 #define atomic_read(v) (*(volatile int *)&(v)->counter)
8984+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8985+{
8986+ return v->counter;
8987+}
8988 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
8989+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
8990+{
8991+ return v->counter;
8992+}
8993
8994 #define atomic_set(v, i) (((v)->counter) = i)
8995+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8996+{
8997+ v->counter = i;
8998+}
8999 #define atomic64_set(v, i) (((v)->counter) = i)
9000+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9001+{
9002+ v->counter = i;
9003+}
9004
9005 extern void atomic_add(int, atomic_t *);
9006+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
9007 extern void atomic64_add(long, atomic64_t *);
9008+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
9009 extern void atomic_sub(int, atomic_t *);
9010+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
9011 extern void atomic64_sub(long, atomic64_t *);
9012+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
9013
9014 extern int atomic_add_ret(int, atomic_t *);
9015+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
9016 extern long atomic64_add_ret(long, atomic64_t *);
9017+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
9018 extern int atomic_sub_ret(int, atomic_t *);
9019 extern long atomic64_sub_ret(long, atomic64_t *);
9020
9021@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
9022 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
9023
9024 #define atomic_inc_return(v) atomic_add_ret(1, v)
9025+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9026+{
9027+ return atomic_add_ret_unchecked(1, v);
9028+}
9029 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
9030+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9031+{
9032+ return atomic64_add_ret_unchecked(1, v);
9033+}
9034
9035 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
9036 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
9037
9038 #define atomic_add_return(i, v) atomic_add_ret(i, v)
9039+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9040+{
9041+ return atomic_add_ret_unchecked(i, v);
9042+}
9043 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
9044+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9045+{
9046+ return atomic64_add_ret_unchecked(i, v);
9047+}
9048
9049 /*
9050 * atomic_inc_and_test - increment and test
9051@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
9052 * other cases.
9053 */
9054 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
9055+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9056+{
9057+ return atomic_inc_return_unchecked(v) == 0;
9058+}
9059 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
9060
9061 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
9062@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
9063 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
9064
9065 #define atomic_inc(v) atomic_add(1, v)
9066+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9067+{
9068+ atomic_add_unchecked(1, v);
9069+}
9070 #define atomic64_inc(v) atomic64_add(1, v)
9071+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9072+{
9073+ atomic64_add_unchecked(1, v);
9074+}
9075
9076 #define atomic_dec(v) atomic_sub(1, v)
9077+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9078+{
9079+ atomic_sub_unchecked(1, v);
9080+}
9081 #define atomic64_dec(v) atomic64_sub(1, v)
9082+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9083+{
9084+ atomic64_sub_unchecked(1, v);
9085+}
9086
9087 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
9088 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
9089
9090 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
9091+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9092+{
9093+ return cmpxchg(&v->counter, old, new);
9094+}
9095 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
9096+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9097+{
9098+ return xchg(&v->counter, new);
9099+}
9100
9101 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9102 {
9103- int c, old;
9104+ int c, old, new;
9105 c = atomic_read(v);
9106 for (;;) {
9107- if (unlikely(c == (u)))
9108+ if (unlikely(c == u))
9109 break;
9110- old = atomic_cmpxchg((v), c, c + (a));
9111+
9112+ asm volatile("addcc %2, %0, %0\n"
9113+
9114+#ifdef CONFIG_PAX_REFCOUNT
9115+ "tvs %%icc, 6\n"
9116+#endif
9117+
9118+ : "=r" (new)
9119+ : "0" (c), "ir" (a)
9120+ : "cc");
9121+
9122+ old = atomic_cmpxchg(v, c, new);
9123 if (likely(old == c))
9124 break;
9125 c = old;
9126@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9127 #define atomic64_cmpxchg(v, o, n) \
9128 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
9129 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
9130+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
9131+{
9132+ return xchg(&v->counter, new);
9133+}
9134
9135 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
9136 {
9137- long c, old;
9138+ long c, old, new;
9139 c = atomic64_read(v);
9140 for (;;) {
9141- if (unlikely(c == (u)))
9142+ if (unlikely(c == u))
9143 break;
9144- old = atomic64_cmpxchg((v), c, c + (a));
9145+
9146+ asm volatile("addcc %2, %0, %0\n"
9147+
9148+#ifdef CONFIG_PAX_REFCOUNT
9149+ "tvs %%xcc, 6\n"
9150+#endif
9151+
9152+ : "=r" (new)
9153+ : "0" (c), "ir" (a)
9154+ : "cc");
9155+
9156+ old = atomic64_cmpxchg(v, c, new);
9157 if (likely(old == c))
9158 break;
9159 c = old;
9160 }
9161- return c != (u);
9162+ return c != u;
9163 }
9164
9165 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
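The addcc/tvs pairs above are the PAX_REFCOUNT pattern: perform the add with condition codes set and trap into software trap 6 on signed overflow, which bad_trap() in traps_64.c (further down in this patch) routes to pax_report_refcount_overflow(). A portable sketch of the same idea, with __builtin_add_overflow standing in for the condition-code test:

	#include <stdio.h>
	#include <limits.h>

	static void refcount_overflow_handler(void)
	{
		fprintf(stderr, "refcount overflow detected\n");
	}

	static int checked_add(int *counter, int a)
	{
		int new;
		if (__builtin_add_overflow(*counter, a, &new)) {
			refcount_overflow_handler();	/* the tvs trap, in effect */
			return *counter;		/* counter left unwrapped */
		}
		*counter = new;
		return new;
	}

	int main(void)
	{
		int c = INT_MAX;
		checked_add(&c, 1);	/* triggers the handler instead of wrapping */
		return 0;
	}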
9166diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
9167index 5bb6991..5c2132e 100644
9168--- a/arch/sparc/include/asm/cache.h
9169+++ b/arch/sparc/include/asm/cache.h
9170@@ -7,10 +7,12 @@
9171 #ifndef _SPARC_CACHE_H
9172 #define _SPARC_CACHE_H
9173
9174+#include <linux/const.h>
9175+
9176 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
9177
9178 #define L1_CACHE_SHIFT 5
9179-#define L1_CACHE_BYTES 32
9180+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9181
9182 #ifdef CONFIG_SPARC32
9183 #define SMP_CACHE_BYTES_SHIFT 5
9184diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
9185index a24e41f..47677ff 100644
9186--- a/arch/sparc/include/asm/elf_32.h
9187+++ b/arch/sparc/include/asm/elf_32.h
9188@@ -114,6 +114,13 @@ typedef struct {
9189
9190 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
9191
9192+#ifdef CONFIG_PAX_ASLR
9193+#define PAX_ELF_ET_DYN_BASE 0x10000UL
9194+
9195+#define PAX_DELTA_MMAP_LEN 16
9196+#define PAX_DELTA_STACK_LEN 16
9197+#endif
9198+
9199 /* This yields a mask that user programs can use to figure out what
9200 instruction set this cpu supports. This can NOT be done in userspace
9201 on Sparc. */
9202diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
9203index 370ca1e..d4f4a98 100644
9204--- a/arch/sparc/include/asm/elf_64.h
9205+++ b/arch/sparc/include/asm/elf_64.h
9206@@ -189,6 +189,13 @@ typedef struct {
9207 #define ELF_ET_DYN_BASE 0x0000010000000000UL
9208 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
9209
9210+#ifdef CONFIG_PAX_ASLR
9211+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
9212+
9213+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
9214+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
9215+#endif
9216+
9217 extern unsigned long sparc64_elf_hwcap;
9218 #define ELF_HWCAP sparc64_elf_hwcap
9219
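The *_LEN values above size the randomization: assuming the PaX convention that the delta is LEN random bits shifted left by PAGE_SHIFT, a 64-bit task with PAX_DELTA_MMAP_LEN of 28 on sparc64's 8K pages gets a 2 TiB span of possible bases. Quick arithmetic check (64-bit host assumed):

	#include <stdio.h>

	int main(void)
	{
		unsigned int page_shift = 13;	/* 8K pages on sparc64 */
		unsigned int len = 28;		/* PAX_DELTA_MMAP_LEN, 64-bit task */
		printf("randomisation span: %#lx bytes\n",
		       (1UL << len) << page_shift);	/* 2^41 = 2 TiB */
		return 0;
	}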
9220diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
9221index 9b1c36d..209298b 100644
9222--- a/arch/sparc/include/asm/pgalloc_32.h
9223+++ b/arch/sparc/include/asm/pgalloc_32.h
9224@@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
9225 }
9226
9227 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
9228+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
9229
9230 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
9231 unsigned long address)
9232diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
9233index bcfe063..b333142 100644
9234--- a/arch/sparc/include/asm/pgalloc_64.h
9235+++ b/arch/sparc/include/asm/pgalloc_64.h
9236@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
9237 }
9238
9239 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
9240+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
9241
9242 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
9243 {
9244diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
9245index 502f632..da1917f 100644
9246--- a/arch/sparc/include/asm/pgtable_32.h
9247+++ b/arch/sparc/include/asm/pgtable_32.h
9248@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
9249 #define PAGE_SHARED SRMMU_PAGE_SHARED
9250 #define PAGE_COPY SRMMU_PAGE_COPY
9251 #define PAGE_READONLY SRMMU_PAGE_RDONLY
9252+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
9253+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
9254+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
9255 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
9256
9257 /* Top-level page directory - dummy used by init-mm.
9258@@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
9259
9260 /* xwr */
9261 #define __P000 PAGE_NONE
9262-#define __P001 PAGE_READONLY
9263-#define __P010 PAGE_COPY
9264-#define __P011 PAGE_COPY
9265+#define __P001 PAGE_READONLY_NOEXEC
9266+#define __P010 PAGE_COPY_NOEXEC
9267+#define __P011 PAGE_COPY_NOEXEC
9268 #define __P100 PAGE_READONLY
9269 #define __P101 PAGE_READONLY
9270 #define __P110 PAGE_COPY
9271 #define __P111 PAGE_COPY
9272
9273 #define __S000 PAGE_NONE
9274-#define __S001 PAGE_READONLY
9275-#define __S010 PAGE_SHARED
9276-#define __S011 PAGE_SHARED
9277+#define __S001 PAGE_READONLY_NOEXEC
9278+#define __S010 PAGE_SHARED_NOEXEC
9279+#define __S011 PAGE_SHARED_NOEXEC
9280 #define __S100 PAGE_READONLY
9281 #define __S101 PAGE_READONLY
9282 #define __S110 PAGE_SHARED
9283diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
9284index 79da178..c2eede8 100644
9285--- a/arch/sparc/include/asm/pgtsrmmu.h
9286+++ b/arch/sparc/include/asm/pgtsrmmu.h
9287@@ -115,6 +115,11 @@
9288 SRMMU_EXEC | SRMMU_REF)
9289 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
9290 SRMMU_EXEC | SRMMU_REF)
9291+
9292+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
9293+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
9294+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
9295+
9296 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
9297 SRMMU_DIRTY | SRMMU_REF)
9298
9299diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
9300index 9689176..63c18ea 100644
9301--- a/arch/sparc/include/asm/spinlock_64.h
9302+++ b/arch/sparc/include/asm/spinlock_64.h
9303@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
9304
9305 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
9306
9307-static void inline arch_read_lock(arch_rwlock_t *lock)
9308+static inline void arch_read_lock(arch_rwlock_t *lock)
9309 {
9310 unsigned long tmp1, tmp2;
9311
9312 __asm__ __volatile__ (
9313 "1: ldsw [%2], %0\n"
9314 " brlz,pn %0, 2f\n"
9315-"4: add %0, 1, %1\n"
9316+"4: addcc %0, 1, %1\n"
9317+
9318+#ifdef CONFIG_PAX_REFCOUNT
9319+" tvs %%icc, 6\n"
9320+#endif
9321+
9322 " cas [%2], %0, %1\n"
9323 " cmp %0, %1\n"
9324 " bne,pn %%icc, 1b\n"
9325@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
9326 " .previous"
9327 : "=&r" (tmp1), "=&r" (tmp2)
9328 : "r" (lock)
9329- : "memory");
9330+ : "memory", "cc");
9331 }
9332
9333-static int inline arch_read_trylock(arch_rwlock_t *lock)
9334+static inline int arch_read_trylock(arch_rwlock_t *lock)
9335 {
9336 int tmp1, tmp2;
9337
9338@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
9339 "1: ldsw [%2], %0\n"
9340 " brlz,a,pn %0, 2f\n"
9341 " mov 0, %0\n"
9342-" add %0, 1, %1\n"
9343+" addcc %0, 1, %1\n"
9344+
9345+#ifdef CONFIG_PAX_REFCOUNT
9346+" tvs %%icc, 6\n"
9347+#endif
9348+
9349 " cas [%2], %0, %1\n"
9350 " cmp %0, %1\n"
9351 " bne,pn %%icc, 1b\n"
9352@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
9353 return tmp1;
9354 }
9355
9356-static void inline arch_read_unlock(arch_rwlock_t *lock)
9357+static inline void arch_read_unlock(arch_rwlock_t *lock)
9358 {
9359 unsigned long tmp1, tmp2;
9360
9361 __asm__ __volatile__(
9362 "1: lduw [%2], %0\n"
9363-" sub %0, 1, %1\n"
9364+" subcc %0, 1, %1\n"
9365+
9366+#ifdef CONFIG_PAX_REFCOUNT
9367+" tvs %%icc, 6\n"
9368+#endif
9369+
9370 " cas [%2], %0, %1\n"
9371 " cmp %0, %1\n"
9372 " bne,pn %%xcc, 1b\n"
9373@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
9374 : "memory");
9375 }
9376
9377-static void inline arch_write_lock(arch_rwlock_t *lock)
9378+static inline void arch_write_lock(arch_rwlock_t *lock)
9379 {
9380 unsigned long mask, tmp1, tmp2;
9381
9382@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
9383 : "memory");
9384 }
9385
9386-static void inline arch_write_unlock(arch_rwlock_t *lock)
9387+static inline void arch_write_unlock(arch_rwlock_t *lock)
9388 {
9389 __asm__ __volatile__(
9390 " stw %%g0, [%0]"
9391@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
9392 : "memory");
9393 }
9394
9395-static int inline arch_write_trylock(arch_rwlock_t *lock)
9396+static inline int arch_write_trylock(arch_rwlock_t *lock)
9397 {
9398 unsigned long mask, tmp1, tmp2, result;
9399
9400diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
9401index 96efa7a..16858bf 100644
9402--- a/arch/sparc/include/asm/thread_info_32.h
9403+++ b/arch/sparc/include/asm/thread_info_32.h
9404@@ -49,6 +49,8 @@ struct thread_info {
9405 unsigned long w_saved;
9406
9407 struct restart_block restart_block;
9408+
9409+ unsigned long lowest_stack;
9410 };
9411
9412 /*
9413diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
9414index a5f01ac..703b554 100644
9415--- a/arch/sparc/include/asm/thread_info_64.h
9416+++ b/arch/sparc/include/asm/thread_info_64.h
9417@@ -63,6 +63,8 @@ struct thread_info {
9418 struct pt_regs *kern_una_regs;
9419 unsigned int kern_una_insn;
9420
9421+ unsigned long lowest_stack;
9422+
9423 unsigned long fpregs[0] __attribute__ ((aligned(64)));
9424 };
9425
9426@@ -188,12 +190,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
9427 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
9428 /* flag bit 4 is available */
9429 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
9430-/* flag bit 6 is available */
9431+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
9432 #define TIF_32BIT 7 /* 32-bit binary */
9433 #define TIF_NOHZ 8 /* in adaptive nohz mode */
9434 #define TIF_SECCOMP 9 /* secure computing */
9435 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
9436 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
9437+
9438 /* NOTE: Thread flags >= 12 should be ones we have no interest
9439 * in using in assembly, else we can't use the mask as
9440 * an immediate value in instructions such as andcc.
9441@@ -213,12 +216,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
9442 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
9443 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
9444 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
9445+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
9446
9447 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
9448 _TIF_DO_NOTIFY_RESUME_MASK | \
9449 _TIF_NEED_RESCHED)
9450 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
9451
9452+#define _TIF_WORK_SYSCALL \
9453+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
9454+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
9455+
9456+
9457 /*
9458 * Thread-synchronous status.
9459 *
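The new TIF_GRSEC_SETXID flag deliberately lands at bit 6, below the assembly cutoff the header comment describes: SPARC's andcc takes a 13-bit sign-extended immediate (simm13, positive range 0..4095), so a mask built from flag bits 0..11, such as the _TIF_WORK_SYSCALL used from syscalls.S below, stays encodable as an immediate. Sanity check of the mask value, with bit numbers as in this header:

	#include <stdio.h>

	int main(void)
	{
		unsigned long mask = (1UL << 0)		/* TIF_SYSCALL_TRACE */
				   | (1UL << 6)		/* TIF_GRSEC_SETXID */
				   | (1UL << 8)		/* TIF_NOHZ */
				   | (1UL << 9)		/* TIF_SECCOMP */
				   | (1UL << 10)	/* TIF_SYSCALL_AUDIT */
				   | (1UL << 11);	/* TIF_SYSCALL_TRACEPOINT */
		printf("mask=%#lx, fits simm13: %s\n", mask,
		       mask <= 0xfffUL ? "yes" : "no");
		return 0;
	}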
9460diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
9461index 0167d26..767bb0c 100644
9462--- a/arch/sparc/include/asm/uaccess.h
9463+++ b/arch/sparc/include/asm/uaccess.h
9464@@ -1,5 +1,6 @@
9465 #ifndef ___ASM_SPARC_UACCESS_H
9466 #define ___ASM_SPARC_UACCESS_H
9467+
9468 #if defined(__sparc__) && defined(__arch64__)
9469 #include <asm/uaccess_64.h>
9470 #else
9471diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
9472index 53a28dd..50c38c3 100644
9473--- a/arch/sparc/include/asm/uaccess_32.h
9474+++ b/arch/sparc/include/asm/uaccess_32.h
9475@@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
9476
9477 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9478 {
9479- if (n && __access_ok((unsigned long) to, n))
9480+ if ((long)n < 0)
9481+ return n;
9482+
9483+ if (n && __access_ok((unsigned long) to, n)) {
9484+ if (!__builtin_constant_p(n))
9485+ check_object_size(from, n, true);
9486 return __copy_user(to, (__force void __user *) from, n);
9487- else
9488+ } else
9489 return n;
9490 }
9491
9492 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
9493 {
9494+ if ((long)n < 0)
9495+ return n;
9496+
9497+ if (!__builtin_constant_p(n))
9498+ check_object_size(from, n, true);
9499+
9500 return __copy_user(to, (__force void __user *) from, n);
9501 }
9502
9503 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9504 {
9505- if (n && __access_ok((unsigned long) from, n))
9506+ if ((long)n < 0)
9507+ return n;
9508+
9509+ if (n && __access_ok((unsigned long) from, n)) {
9510+ if (!__builtin_constant_p(n))
9511+ check_object_size(to, n, false);
9512 return __copy_user((__force void __user *) to, from, n);
9513- else
9514+ } else
9515 return n;
9516 }
9517
9518 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
9519 {
9520+ if ((long)n < 0)
9521+ return n;
9522+
9523 return __copy_user((__force void __user *) to, from, n);
9524 }
9525
9526diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
9527index ad7e178..c9e7423 100644
9528--- a/arch/sparc/include/asm/uaccess_64.h
9529+++ b/arch/sparc/include/asm/uaccess_64.h
9530@@ -10,6 +10,7 @@
9531 #include <linux/compiler.h>
9532 #include <linux/string.h>
9533 #include <linux/thread_info.h>
9534+#include <linux/kernel.h>
9535 #include <asm/asi.h>
9536 #include <asm/spitfire.h>
9537 #include <asm-generic/uaccess-unaligned.h>
9538@@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
9539 static inline unsigned long __must_check
9540 copy_from_user(void *to, const void __user *from, unsigned long size)
9541 {
9542- unsigned long ret = ___copy_from_user(to, from, size);
9543+ unsigned long ret;
9544
9545+ if ((long)size < 0 || size > INT_MAX)
9546+ return size;
9547+
9548+ if (!__builtin_constant_p(size))
9549+ check_object_size(to, size, false);
9550+
9551+ ret = ___copy_from_user(to, from, size);
9552 if (unlikely(ret))
9553 ret = copy_from_user_fixup(to, from, size);
9554
9555@@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
9556 static inline unsigned long __must_check
9557 copy_to_user(void __user *to, const void *from, unsigned long size)
9558 {
9559- unsigned long ret = ___copy_to_user(to, from, size);
9560+ unsigned long ret;
9561
9562+ if ((long)size < 0 || size > INT_MAX)
9563+ return size;
9564+
9565+ if (!__builtin_constant_p(size))
9566+ check_object_size(from, size, true);
9567+
9568+ ret = ___copy_to_user(to, from, size);
9569 if (unlikely(ret))
9570 ret = copy_to_user_fixup(to, from, size);
9571 return ret;
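Beyond the negative-length guard seen earlier, the sparc64 hunks cap sizes at INT_MAX and, for lengths the compiler cannot prove constant, call check_object_size() so the hardened-usercopy machinery can verify that the kernel buffer really spans the copy. Stubbed sketch of the control flow; check_object_size() here is an empty stand-in, not the patch's implementation.

	#include <limits.h>
	#include <string.h>

	static void check_object_size(const void *ptr, unsigned long n, int to_user)
	{
		(void)ptr; (void)n; (void)to_user;	/* stub: real checks live elsewhere */
	}

	static unsigned long copy_from_user_sketch(void *to, const void *from,
						   unsigned long size)
	{
		if ((long)size < 0 || size > INT_MAX)	/* underflowed or absurd length */
			return size;			/* report nothing copied */
		if (!__builtin_constant_p(size))
			check_object_size(to, size, 0);
		memcpy(to, from, size);	/* stands in for ___copy_from_user() */
		return 0;
	}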
9572diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
9573index d15cc17..d0ae796 100644
9574--- a/arch/sparc/kernel/Makefile
9575+++ b/arch/sparc/kernel/Makefile
9576@@ -4,7 +4,7 @@
9577 #
9578
9579 asflags-y := -ansi
9580-ccflags-y := -Werror
9581+#ccflags-y := -Werror
9582
9583 extra-y := head_$(BITS).o
9584
9585diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
9586index fdd819d..5af08c8 100644
9587--- a/arch/sparc/kernel/process_32.c
9588+++ b/arch/sparc/kernel/process_32.c
9589@@ -116,14 +116,14 @@ void show_regs(struct pt_regs *r)
9590
9591 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
9592 r->psr, r->pc, r->npc, r->y, print_tainted());
9593- printk("PC: <%pS>\n", (void *) r->pc);
9594+ printk("PC: <%pA>\n", (void *) r->pc);
9595 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
9596 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
9597 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
9598 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
9599 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
9600 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
9601- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
9602+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
9603
9604 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
9605 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
9606@@ -160,7 +160,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
9607 rw = (struct reg_window32 *) fp;
9608 pc = rw->ins[7];
9609 printk("[%08lx : ", pc);
9610- printk("%pS ] ", (void *) pc);
9611+ printk("%pA ] ", (void *) pc);
9612 fp = rw->ins[6];
9613 } while (++count < 16);
9614 printk("\n");
9615diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
9616index 32a280e..84fc6a9 100644
9617--- a/arch/sparc/kernel/process_64.c
9618+++ b/arch/sparc/kernel/process_64.c
9619@@ -159,7 +159,7 @@ static void show_regwindow(struct pt_regs *regs)
9620 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
9621 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
9622 if (regs->tstate & TSTATE_PRIV)
9623- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
9624+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
9625 }
9626
9627 void show_regs(struct pt_regs *regs)
9628@@ -168,7 +168,7 @@ void show_regs(struct pt_regs *regs)
9629
9630 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
9631 regs->tpc, regs->tnpc, regs->y, print_tainted());
9632- printk("TPC: <%pS>\n", (void *) regs->tpc);
9633+ printk("TPC: <%pA>\n", (void *) regs->tpc);
9634 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
9635 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
9636 regs->u_regs[3]);
9637@@ -181,7 +181,7 @@ void show_regs(struct pt_regs *regs)
9638 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
9639 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
9640 regs->u_regs[15]);
9641- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
9642+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
9643 show_regwindow(regs);
9644 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
9645 }
9646@@ -270,7 +270,7 @@ void arch_trigger_all_cpu_backtrace(void)
9647 ((tp && tp->task) ? tp->task->pid : -1));
9648
9649 if (gp->tstate & TSTATE_PRIV) {
9650- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
9651+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
9652 (void *) gp->tpc,
9653 (void *) gp->o7,
9654 (void *) gp->i7,
9655diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
9656index 79cc0d1..ec62734 100644
9657--- a/arch/sparc/kernel/prom_common.c
9658+++ b/arch/sparc/kernel/prom_common.c
9659@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
9660
9661 unsigned int prom_early_allocated __initdata;
9662
9663-static struct of_pdt_ops prom_sparc_ops __initdata = {
9664+static struct of_pdt_ops prom_sparc_ops __initconst = {
9665 .nextprop = prom_common_nextprop,
9666 .getproplen = prom_getproplen,
9667 .getproperty = prom_getproperty,
9668diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
9669index c13c9f2..d572c34 100644
9670--- a/arch/sparc/kernel/ptrace_64.c
9671+++ b/arch/sparc/kernel/ptrace_64.c
9672@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
9673 return ret;
9674 }
9675
9676+#ifdef CONFIG_GRKERNSEC_SETXID
9677+extern void gr_delayed_cred_worker(void);
9678+#endif
9679+
9680 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
9681 {
9682 int ret = 0;
9683@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
9684 if (test_thread_flag(TIF_NOHZ))
9685 user_exit();
9686
9687+#ifdef CONFIG_GRKERNSEC_SETXID
9688+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9689+ gr_delayed_cred_worker();
9690+#endif
9691+
9692 if (test_thread_flag(TIF_SYSCALL_TRACE))
9693 ret = tracehook_report_syscall_entry(regs);
9694
9695@@ -1093,6 +1102,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
9696 if (test_thread_flag(TIF_NOHZ))
9697 user_exit();
9698
9699+#ifdef CONFIG_GRKERNSEC_SETXID
9700+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9701+ gr_delayed_cred_worker();
9702+#endif
9703+
9704 audit_syscall_exit(regs);
9705
9706 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
9707diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
9708index b085311..6f885f7 100644
9709--- a/arch/sparc/kernel/smp_64.c
9710+++ b/arch/sparc/kernel/smp_64.c
9711@@ -870,8 +870,8 @@ extern unsigned long xcall_flush_dcache_page_cheetah;
9712 extern unsigned long xcall_flush_dcache_page_spitfire;
9713
9714 #ifdef CONFIG_DEBUG_DCFLUSH
9715-extern atomic_t dcpage_flushes;
9716-extern atomic_t dcpage_flushes_xcall;
9717+extern atomic_unchecked_t dcpage_flushes;
9718+extern atomic_unchecked_t dcpage_flushes_xcall;
9719 #endif
9720
9721 static inline void __local_flush_dcache_page(struct page *page)
9722@@ -895,7 +895,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
9723 return;
9724
9725 #ifdef CONFIG_DEBUG_DCFLUSH
9726- atomic_inc(&dcpage_flushes);
9727+ atomic_inc_unchecked(&dcpage_flushes);
9728 #endif
9729
9730 this_cpu = get_cpu();
9731@@ -919,7 +919,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
9732 xcall_deliver(data0, __pa(pg_addr),
9733 (u64) pg_addr, cpumask_of(cpu));
9734 #ifdef CONFIG_DEBUG_DCFLUSH
9735- atomic_inc(&dcpage_flushes_xcall);
9736+ atomic_inc_unchecked(&dcpage_flushes_xcall);
9737 #endif
9738 }
9739 }
9740@@ -938,7 +938,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
9741 preempt_disable();
9742
9743 #ifdef CONFIG_DEBUG_DCFLUSH
9744- atomic_inc(&dcpage_flushes);
9745+ atomic_inc_unchecked(&dcpage_flushes);
9746 #endif
9747 data0 = 0;
9748 pg_addr = page_address(page);
9749@@ -955,7 +955,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
9750 xcall_deliver(data0, __pa(pg_addr),
9751 (u64) pg_addr, cpu_online_mask);
9752 #ifdef CONFIG_DEBUG_DCFLUSH
9753- atomic_inc(&dcpage_flushes_xcall);
9754+ atomic_inc_unchecked(&dcpage_flushes_xcall);
9755 #endif
9756 }
9757 __local_flush_dcache_page(page);
9758diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
9759index 3a8d184..49498a8 100644
9760--- a/arch/sparc/kernel/sys_sparc_32.c
9761+++ b/arch/sparc/kernel/sys_sparc_32.c
9762@@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9763 if (len > TASK_SIZE - PAGE_SIZE)
9764 return -ENOMEM;
9765 if (!addr)
9766- addr = TASK_UNMAPPED_BASE;
9767+ addr = current->mm->mmap_base;
9768
9769 info.flags = 0;
9770 info.length = len;
9771diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
9772index beb0b5a..5a153f7 100644
9773--- a/arch/sparc/kernel/sys_sparc_64.c
9774+++ b/arch/sparc/kernel/sys_sparc_64.c
9775@@ -88,13 +88,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9776 struct vm_area_struct * vma;
9777 unsigned long task_size = TASK_SIZE;
9778 int do_color_align;
9779+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9780 struct vm_unmapped_area_info info;
9781
9782 if (flags & MAP_FIXED) {
9783 /* We do not accept a shared mapping if it would violate
9784 * cache aliasing constraints.
9785 */
9786- if ((flags & MAP_SHARED) &&
9787+ if ((filp || (flags & MAP_SHARED)) &&
9788 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
9789 return -EINVAL;
9790 return addr;
9791@@ -109,6 +110,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9792 if (filp || (flags & MAP_SHARED))
9793 do_color_align = 1;
9794
9795+#ifdef CONFIG_PAX_RANDMMAP
9796+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9797+#endif
9798+
9799 if (addr) {
9800 if (do_color_align)
9801 addr = COLOR_ALIGN(addr, pgoff);
9802@@ -116,22 +121,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9803 addr = PAGE_ALIGN(addr);
9804
9805 vma = find_vma(mm, addr);
9806- if (task_size - len >= addr &&
9807- (!vma || addr + len <= vma->vm_start))
9808+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9809 return addr;
9810 }
9811
9812 info.flags = 0;
9813 info.length = len;
9814- info.low_limit = TASK_UNMAPPED_BASE;
9815+ info.low_limit = mm->mmap_base;
9816 info.high_limit = min(task_size, VA_EXCLUDE_START);
9817 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
9818 info.align_offset = pgoff << PAGE_SHIFT;
9819+ info.threadstack_offset = offset;
9820 addr = vm_unmapped_area(&info);
9821
9822 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
9823 VM_BUG_ON(addr != -ENOMEM);
9824 info.low_limit = VA_EXCLUDE_END;
9825+
9826+#ifdef CONFIG_PAX_RANDMMAP
9827+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9828+ info.low_limit += mm->delta_mmap;
9829+#endif
9830+
9831 info.high_limit = task_size;
9832 addr = vm_unmapped_area(&info);
9833 }
9834@@ -149,6 +160,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9835 unsigned long task_size = STACK_TOP32;
9836 unsigned long addr = addr0;
9837 int do_color_align;
9838+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9839 struct vm_unmapped_area_info info;
9840
9841 /* This should only ever run for 32-bit processes. */
9842@@ -158,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9843 /* We do not accept a shared mapping if it would violate
9844 * cache aliasing constraints.
9845 */
9846- if ((flags & MAP_SHARED) &&
9847+ if ((filp || (flags & MAP_SHARED)) &&
9848 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
9849 return -EINVAL;
9850 return addr;
9851@@ -171,6 +183,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9852 if (filp || (flags & MAP_SHARED))
9853 do_color_align = 1;
9854
9855+#ifdef CONFIG_PAX_RANDMMAP
9856+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9857+#endif
9858+
9859 /* requesting a specific address */
9860 if (addr) {
9861 if (do_color_align)
9862@@ -179,8 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9863 addr = PAGE_ALIGN(addr);
9864
9865 vma = find_vma(mm, addr);
9866- if (task_size - len >= addr &&
9867- (!vma || addr + len <= vma->vm_start))
9868+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9869 return addr;
9870 }
9871
9872@@ -190,6 +205,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9873 info.high_limit = mm->mmap_base;
9874 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
9875 info.align_offset = pgoff << PAGE_SHIFT;
9876+ info.threadstack_offset = offset;
9877 addr = vm_unmapped_area(&info);
9878
9879 /*
9880@@ -202,6 +218,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9881 VM_BUG_ON(addr != -ENOMEM);
9882 info.flags = 0;
9883 info.low_limit = TASK_UNMAPPED_BASE;
9884+
9885+#ifdef CONFIG_PAX_RANDMMAP
9886+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9887+ info.low_limit += mm->delta_mmap;
9888+#endif
9889+
9890 info.high_limit = STACK_TOP32;
9891 addr = vm_unmapped_area(&info);
9892 }
9893@@ -258,10 +280,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
9894 EXPORT_SYMBOL(get_fb_unmapped_area);
9895
9896 /* Essentially the same as PowerPC. */
9897-static unsigned long mmap_rnd(void)
9898+static unsigned long mmap_rnd(struct mm_struct *mm)
9899 {
9900 unsigned long rnd = 0UL;
9901
9902+#ifdef CONFIG_PAX_RANDMMAP
9903+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9904+#endif
9905+
9906 if (current->flags & PF_RANDOMIZE) {
9907 unsigned long val = get_random_int();
9908 if (test_thread_flag(TIF_32BIT))
9909@@ -274,7 +300,7 @@ static unsigned long mmap_rnd(void)
9910
9911 void arch_pick_mmap_layout(struct mm_struct *mm)
9912 {
9913- unsigned long random_factor = mmap_rnd();
9914+ unsigned long random_factor = mmap_rnd(mm);
9915 unsigned long gap;
9916
9917 /*
9918@@ -287,6 +313,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9919 gap == RLIM_INFINITY ||
9920 sysctl_legacy_va_layout) {
9921 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
9922+
9923+#ifdef CONFIG_PAX_RANDMMAP
9924+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9925+ mm->mmap_base += mm->delta_mmap;
9926+#endif
9927+
9928 mm->get_unmapped_area = arch_get_unmapped_area;
9929 } else {
9930 /* We know it's 32-bit */
9931@@ -298,6 +330,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9932 gap = (task_size / 6 * 5);
9933
9934 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
9935+
9936+#ifdef CONFIG_PAX_RANDMMAP
9937+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9938+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9939+#endif
9940+
9941 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9942 }
9943 }
9944diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
9945index 87729ff..192f9d8 100644
9946--- a/arch/sparc/kernel/syscalls.S
9947+++ b/arch/sparc/kernel/syscalls.S
9948@@ -52,7 +52,7 @@ sys32_rt_sigreturn:
9949 #endif
9950 .align 32
9951 1: ldx [%g6 + TI_FLAGS], %l5
9952- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
9953+ andcc %l5, _TIF_WORK_SYSCALL, %g0
9954 be,pt %icc, rtrap
9955 nop
9956 call syscall_trace_leave
9957@@ -184,7 +184,7 @@ linux_sparc_syscall32:
9958
9959 srl %i3, 0, %o3 ! IEU0
9960 srl %i2, 0, %o2 ! IEU0 Group
9961- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
9962+ andcc %l0, _TIF_WORK_SYSCALL, %g0
9963 bne,pn %icc, linux_syscall_trace32 ! CTI
9964 mov %i0, %l5 ! IEU1
9965 5: call %l7 ! CTI Group brk forced
9966@@ -207,7 +207,7 @@ linux_sparc_syscall:
9967
9968 mov %i3, %o3 ! IEU1
9969 mov %i4, %o4 ! IEU0 Group
9970- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
9971+ andcc %l0, _TIF_WORK_SYSCALL, %g0
9972 bne,pn %icc, linux_syscall_trace ! CTI Group
9973 mov %i0, %l5 ! IEU0
9974 2: call %l7 ! CTI Group brk forced
9975@@ -223,7 +223,7 @@ ret_sys_call:
9976
9977 cmp %o0, -ERESTART_RESTARTBLOCK
9978 bgeu,pn %xcc, 1f
9979- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
9980+ andcc %l0, _TIF_WORK_SYSCALL, %g0
9981 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
9982
9983 2:
9984diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
9985index 6629829..036032d 100644
9986--- a/arch/sparc/kernel/traps_32.c
9987+++ b/arch/sparc/kernel/traps_32.c
9988@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
9989 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
9990 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
9991
9992+extern void gr_handle_kernel_exploit(void);
9993+
9994 void die_if_kernel(char *str, struct pt_regs *regs)
9995 {
9996 static int die_counter;
9997@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
9998 count++ < 30 &&
9999 (((unsigned long) rw) >= PAGE_OFFSET) &&
10000 !(((unsigned long) rw) & 0x7)) {
10001- printk("Caller[%08lx]: %pS\n", rw->ins[7],
10002+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
10003 (void *) rw->ins[7]);
10004 rw = (struct reg_window32 *)rw->ins[6];
10005 }
10006 }
10007 printk("Instruction DUMP:");
10008 instruction_dump ((unsigned long *) regs->pc);
10009- if(regs->psr & PSR_PS)
10010+ if(regs->psr & PSR_PS) {
10011+ gr_handle_kernel_exploit();
10012 do_exit(SIGKILL);
10013+ }
10014 do_exit(SIGSEGV);
10015 }
10016
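
[annotation] Both sparc die_if_kernel() implementations (here and in traps_64.c below) gain the same call: when the oops happened in kernel mode (PSR_PS set), grsecurity treats it as a possible exploit attempt before killing the task. The handler's real body lives in the grsecurity/ portion of this patch; the following is only a rough, illustrative sketch of its policy, with names and exact behaviour approximated rather than quoted:

/* Illustrative sketch -- see grsecurity/grsec_sig.c later in this patch
 * for the actual implementation. */
void gr_handle_kernel_exploit(void)
{
	const struct cred *cred = current_cred();

	printk(KERN_EMERG "grsec: kernel oops in a privileged context - "
			  "possible exploit attempt by uid %u\n",
	       from_kuid(&init_user_ns, cred->uid));

	if (uid_eq(cred->uid, GLOBAL_ROOT_UID))
		panic("grsec: halting the system due to suspicious kernel crash caused by root");

	/* non-root offender: approximately, kill the uid's processes and
	 * temporarily ban the uid from spawning new ones */
}
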
10017diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
10018index 4ced92f..965eeed 100644
10019--- a/arch/sparc/kernel/traps_64.c
10020+++ b/arch/sparc/kernel/traps_64.c
10021@@ -77,7 +77,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
10022 i + 1,
10023 p->trapstack[i].tstate, p->trapstack[i].tpc,
10024 p->trapstack[i].tnpc, p->trapstack[i].tt);
10025- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
10026+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
10027 }
10028 }
10029
10030@@ -97,6 +97,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
10031
10032 lvl -= 0x100;
10033 if (regs->tstate & TSTATE_PRIV) {
10034+
10035+#ifdef CONFIG_PAX_REFCOUNT
10036+ if (lvl == 6)
10037+ pax_report_refcount_overflow(regs);
10038+#endif
10039+
10040 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
10041 die_if_kernel(buffer, regs);
10042 }
10043@@ -115,11 +121,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
10044 void bad_trap_tl1(struct pt_regs *regs, long lvl)
10045 {
10046 char buffer[32];
10047-
10048+
10049 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
10050 0, lvl, SIGTRAP) == NOTIFY_STOP)
10051 return;
10052
10053+#ifdef CONFIG_PAX_REFCOUNT
10054+ if (lvl == 6)
10055+ pax_report_refcount_overflow(regs);
10056+#endif
10057+
10058 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
10059
10060 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
10061@@ -1149,7 +1160,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
10062 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
10063 printk("%s" "ERROR(%d): ",
10064 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
10065- printk("TPC<%pS>\n", (void *) regs->tpc);
10066+ printk("TPC<%pA>\n", (void *) regs->tpc);
10067 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
10068 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
10069 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
10070@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
10071 smp_processor_id(),
10072 (type & 0x1) ? 'I' : 'D',
10073 regs->tpc);
10074- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
10075+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
10076 panic("Irrecoverable Cheetah+ parity error.");
10077 }
10078
10079@@ -1764,7 +1775,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
10080 smp_processor_id(),
10081 (type & 0x1) ? 'I' : 'D',
10082 regs->tpc);
10083- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
10084+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
10085 }
10086
10087 struct sun4v_error_entry {
10088@@ -1837,8 +1848,8 @@ struct sun4v_error_entry {
10089 /*0x38*/u64 reserved_5;
10090 };
10091
10092-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
10093-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
10094+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
10095+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
10096
10097 static const char *sun4v_err_type_to_str(u8 type)
10098 {
10099@@ -1930,7 +1941,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
10100 }
10101
10102 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
10103- int cpu, const char *pfx, atomic_t *ocnt)
10104+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
10105 {
10106 u64 *raw_ptr = (u64 *) ent;
10107 u32 attrs;
10108@@ -1988,8 +1999,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
10109
10110 show_regs(regs);
10111
10112- if ((cnt = atomic_read(ocnt)) != 0) {
10113- atomic_set(ocnt, 0);
10114+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
10115+ atomic_set_unchecked(ocnt, 0);
10116 wmb();
10117 printk("%s: Queue overflowed %d times.\n",
10118 pfx, cnt);
10119@@ -2046,7 +2057,7 @@ out:
10120 */
10121 void sun4v_resum_overflow(struct pt_regs *regs)
10122 {
10123- atomic_inc(&sun4v_resum_oflow_cnt);
10124+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
10125 }
10126
10127 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
10128@@ -2099,7 +2110,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
10129 /* XXX Actually even this can make not that much sense. Perhaps
10130 * XXX we should just pull the plug and panic directly from here?
10131 */
10132- atomic_inc(&sun4v_nonresum_oflow_cnt);
10133+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
10134 }
10135
10136 unsigned long sun4v_err_itlb_vaddr;
10137@@ -2114,9 +2125,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
10138
10139 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
10140 regs->tpc, tl);
10141- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
10142+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
10143 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
10144- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
10145+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
10146 (void *) regs->u_regs[UREG_I7]);
10147 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
10148 "pte[%lx] error[%lx]\n",
10149@@ -2138,9 +2149,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
10150
10151 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
10152 regs->tpc, tl);
10153- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
10154+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
10155 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
10156- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
10157+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
10158 (void *) regs->u_regs[UREG_I7]);
10159 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
10160 "pte[%lx] error[%lx]\n",
10161@@ -2359,13 +2370,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10162 fp = (unsigned long)sf->fp + STACK_BIAS;
10163 }
10164
10165- printk(" [%016lx] %pS\n", pc, (void *) pc);
10166+ printk(" [%016lx] %pA\n", pc, (void *) pc);
10167 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
10168 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
10169 int index = tsk->curr_ret_stack;
10170 if (tsk->ret_stack && index >= graph) {
10171 pc = tsk->ret_stack[index - graph].ret;
10172- printk(" [%016lx] %pS\n", pc, (void *) pc);
10173+ printk(" [%016lx] %pA\n", pc, (void *) pc);
10174 graph++;
10175 }
10176 }
10177@@ -2383,6 +2394,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
10178 return (struct reg_window *) (fp + STACK_BIAS);
10179 }
10180
10181+extern void gr_handle_kernel_exploit(void);
10182+
10183 void die_if_kernel(char *str, struct pt_regs *regs)
10184 {
10185 static int die_counter;
10186@@ -2411,7 +2424,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
10187 while (rw &&
10188 count++ < 30 &&
10189 kstack_valid(tp, (unsigned long) rw)) {
10190- printk("Caller[%016lx]: %pS\n", rw->ins[7],
10191+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
10192 (void *) rw->ins[7]);
10193
10194 rw = kernel_stack_up(rw);
10195@@ -2424,8 +2437,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
10196 }
10197 user_instruction_dump ((unsigned int __user *) regs->tpc);
10198 }
10199- if (regs->tstate & TSTATE_PRIV)
10200+ if (regs->tstate & TSTATE_PRIV) {
10201+ gr_handle_kernel_exploit();
10202 do_exit(SIGKILL);
10203+ }
10204 do_exit(SIGSEGV);
10205 }
10206 EXPORT_SYMBOL(die_if_kernel);
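
[annotation] Two recurring substitutions run through this file. First, %pS becomes %pA, a printk extension this patch adds to lib/vsprintf.c so that symbolised oops output cooperates with GRKERNSEC_HIDESYM kernel-address hiding. Second, pure statistics counters (the queue-overflow counts above) become atomic_unchecked_t: under PAX_REFCOUNT the ordinary atomic_t operations trap on signed overflow (see atomic_64.S below), so counters whose wraparound is harmless need an uninstrumented type. A sketch of the unchecked type, mirroring the design; the real definitions live in the atomic-header hunks of this patch:

/* sketch of the PAX_REFCOUNT opt-out type */
typedef struct {
	int counter;
} atomic_unchecked_t;

extern void atomic_add_unchecked(int, atomic_unchecked_t *);

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return (*(volatile const int *)&v->counter);	/* same load, no trap */
}

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* on sparc64 this reaches atomic_add_unchecked in atomic_64.S below,
	 * i.e. a plain add that is deliberately allowed to wrap */
	atomic_add_unchecked(1, v);
}
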
10207diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
10208index 3c1a7cb..73e1923 100644
10209--- a/arch/sparc/kernel/unaligned_64.c
10210+++ b/arch/sparc/kernel/unaligned_64.c
10211@@ -289,7 +289,7 @@ static void log_unaligned(struct pt_regs *regs)
10212 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
10213
10214 if (__ratelimit(&ratelimit)) {
10215- printk("Kernel unaligned access at TPC[%lx] %pS\n",
10216+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
10217 regs->tpc, (void *) regs->tpc);
10218 }
10219 }
10220diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
10221index dbe119b..089c7c1 100644
10222--- a/arch/sparc/lib/Makefile
10223+++ b/arch/sparc/lib/Makefile
10224@@ -2,7 +2,7 @@
10225 #
10226
10227 asflags-y := -ansi -DST_DIV0=0x02
10228-ccflags-y := -Werror
10229+#ccflags-y := -Werror
10230
10231 lib-$(CONFIG_SPARC32) += ashrdi3.o
10232 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
10233diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
10234index 85c233d..68500e0 100644
10235--- a/arch/sparc/lib/atomic_64.S
10236+++ b/arch/sparc/lib/atomic_64.S
10237@@ -17,7 +17,12 @@
10238 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
10239 BACKOFF_SETUP(%o2)
10240 1: lduw [%o1], %g1
10241- add %g1, %o0, %g7
10242+ addcc %g1, %o0, %g7
10243+
10244+#ifdef CONFIG_PAX_REFCOUNT
10245+ tvs %icc, 6
10246+#endif
10247+
10248 cas [%o1], %g1, %g7
10249 cmp %g1, %g7
10250 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10251@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
10252 2: BACKOFF_SPIN(%o2, %o3, 1b)
10253 ENDPROC(atomic_add)
10254
10255+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10256+ BACKOFF_SETUP(%o2)
10257+1: lduw [%o1], %g1
10258+ add %g1, %o0, %g7
10259+ cas [%o1], %g1, %g7
10260+ cmp %g1, %g7
10261+ bne,pn %icc, 2f
10262+ nop
10263+ retl
10264+ nop
10265+2: BACKOFF_SPIN(%o2, %o3, 1b)
10266+ENDPROC(atomic_add_unchecked)
10267+
10268 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10269 BACKOFF_SETUP(%o2)
10270 1: lduw [%o1], %g1
10271- sub %g1, %o0, %g7
10272+ subcc %g1, %o0, %g7
10273+
10274+#ifdef CONFIG_PAX_REFCOUNT
10275+ tvs %icc, 6
10276+#endif
10277+
10278 cas [%o1], %g1, %g7
10279 cmp %g1, %g7
10280 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10281@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10282 2: BACKOFF_SPIN(%o2, %o3, 1b)
10283 ENDPROC(atomic_sub)
10284
10285+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
10286+ BACKOFF_SETUP(%o2)
10287+1: lduw [%o1], %g1
10288+ sub %g1, %o0, %g7
10289+ cas [%o1], %g1, %g7
10290+ cmp %g1, %g7
10291+ bne,pn %icc, 2f
10292+ nop
10293+ retl
10294+ nop
10295+2: BACKOFF_SPIN(%o2, %o3, 1b)
10296+ENDPROC(atomic_sub_unchecked)
10297+
10298 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10299 BACKOFF_SETUP(%o2)
10300 1: lduw [%o1], %g1
10301- add %g1, %o0, %g7
10302+ addcc %g1, %o0, %g7
10303+
10304+#ifdef CONFIG_PAX_REFCOUNT
10305+ tvs %icc, 6
10306+#endif
10307+
10308 cas [%o1], %g1, %g7
10309 cmp %g1, %g7
10310 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10311@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10312 2: BACKOFF_SPIN(%o2, %o3, 1b)
10313 ENDPROC(atomic_add_ret)
10314
10315+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10316+ BACKOFF_SETUP(%o2)
10317+1: lduw [%o1], %g1
10318+ addcc %g1, %o0, %g7
10319+ cas [%o1], %g1, %g7
10320+ cmp %g1, %g7
10321+ bne,pn %icc, 2f
10322+ add %g7, %o0, %g7
10323+ sra %g7, 0, %o0
10324+ retl
10325+ nop
10326+2: BACKOFF_SPIN(%o2, %o3, 1b)
10327+ENDPROC(atomic_add_ret_unchecked)
10328+
10329 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
10330 BACKOFF_SETUP(%o2)
10331 1: lduw [%o1], %g1
10332- sub %g1, %o0, %g7
10333+ subcc %g1, %o0, %g7
10334+
10335+#ifdef CONFIG_PAX_REFCOUNT
10336+ tvs %icc, 6
10337+#endif
10338+
10339 cas [%o1], %g1, %g7
10340 cmp %g1, %g7
10341 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10342@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
10343 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
10344 BACKOFF_SETUP(%o2)
10345 1: ldx [%o1], %g1
10346- add %g1, %o0, %g7
10347+ addcc %g1, %o0, %g7
10348+
10349+#ifdef CONFIG_PAX_REFCOUNT
10350+ tvs %xcc, 6
10351+#endif
10352+
10353 casx [%o1], %g1, %g7
10354 cmp %g1, %g7
10355 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10356@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
10357 2: BACKOFF_SPIN(%o2, %o3, 1b)
10358 ENDPROC(atomic64_add)
10359
10360+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10361+ BACKOFF_SETUP(%o2)
10362+1: ldx [%o1], %g1
10363+ addcc %g1, %o0, %g7
10364+ casx [%o1], %g1, %g7
10365+ cmp %g1, %g7
10366+ bne,pn %xcc, 2f
10367+ nop
10368+ retl
10369+ nop
10370+2: BACKOFF_SPIN(%o2, %o3, 1b)
10371+ENDPROC(atomic64_add_unchecked)
10372+
10373 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10374 BACKOFF_SETUP(%o2)
10375 1: ldx [%o1], %g1
10376- sub %g1, %o0, %g7
10377+ subcc %g1, %o0, %g7
10378+
10379+#ifdef CONFIG_PAX_REFCOUNT
10380+ tvs %xcc, 6
10381+#endif
10382+
10383 casx [%o1], %g1, %g7
10384 cmp %g1, %g7
10385 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10386@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10387 2: BACKOFF_SPIN(%o2, %o3, 1b)
10388 ENDPROC(atomic64_sub)
10389
10390+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
10391+ BACKOFF_SETUP(%o2)
10392+1: ldx [%o1], %g1
10393+ subcc %g1, %o0, %g7
10394+ casx [%o1], %g1, %g7
10395+ cmp %g1, %g7
10396+ bne,pn %xcc, 2f
10397+ nop
10398+ retl
10399+ nop
10400+2: BACKOFF_SPIN(%o2, %o3, 1b)
10401+ENDPROC(atomic64_sub_unchecked)
10402+
10403 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10404 BACKOFF_SETUP(%o2)
10405 1: ldx [%o1], %g1
10406- add %g1, %o0, %g7
10407+ addcc %g1, %o0, %g7
10408+
10409+#ifdef CONFIG_PAX_REFCOUNT
10410+ tvs %xcc, 6
10411+#endif
10412+
10413 casx [%o1], %g1, %g7
10414 cmp %g1, %g7
10415 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10416@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10417 2: BACKOFF_SPIN(%o2, %o3, 1b)
10418 ENDPROC(atomic64_add_ret)
10419
10420+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10421+ BACKOFF_SETUP(%o2)
10422+1: ldx [%o1], %g1
10423+ addcc %g1, %o0, %g7
10424+ casx [%o1], %g1, %g7
10425+ cmp %g1, %g7
10426+ bne,pn %xcc, 2f
10427+ add %g7, %o0, %g7
10428+ mov %g7, %o0
10429+ retl
10430+ nop
10431+2: BACKOFF_SPIN(%o2, %o3, 1b)
10432+ENDPROC(atomic64_add_ret_unchecked)
10433+
10434 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
10435 BACKOFF_SETUP(%o2)
10436 1: ldx [%o1], %g1
10437- sub %g1, %o0, %g7
10438+ subcc %g1, %o0, %g7
10439+
10440+#ifdef CONFIG_PAX_REFCOUNT
10441+ tvs %xcc, 6
10442+#endif
10443+
10444 casx [%o1], %g1, %g7
10445 cmp %g1, %g7
10446 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
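
[annotation] The instrumentation pattern in every checked routine above is the same: add becomes addcc, so the ALU sets the integer condition codes, and tvs %icc, 6 ("trap on overflow into software trap 6") fires if the signed result wrapped. That trap lands in bad_trap()/bad_trap_tl1() with lvl == 6 (see the traps_64.c hunks above), which calls pax_report_refcount_overflow() instead of letting a reference count wrap and become exploitable. The *_unchecked variants are byte-for-byte the old uninstrumented routines. In C terms, the checked atomic_add now behaves roughly like this userspace analogy (a sketch, not the kernel implementation):

#include <stdio.h>
#include <stdatomic.h>

/* Userspace analogy of PAX_REFCOUNT's checked atomic_add: detect signed
 * overflow on the add and report it instead of silently wrapping. */
static void checked_atomic_add(int inc, _Atomic int *v)
{
	int old = atomic_load(v);
	int new;

	do {
		if (__builtin_add_overflow(old, inc, &new)) {
			/* kernel: tvs %icc, 6 -> bad_trap(lvl 6)
			 *         -> pax_report_refcount_overflow() */
			fprintf(stderr, "refcount overflow detected\n");
			return;
		}
	} while (!atomic_compare_exchange_weak(v, &old, new));
}

int main(void)
{
	_Atomic int refs = 0x7FFFFFFF;	/* INT_MAX: next increment overflows */
	checked_atomic_add(1, &refs);
	return 0;
}
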
10447diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
10448index 323335b..ed85ea2 100644
10449--- a/arch/sparc/lib/ksyms.c
10450+++ b/arch/sparc/lib/ksyms.c
10451@@ -100,12 +100,18 @@ EXPORT_SYMBOL(__clear_user);
10452
10453 /* Atomic counter implementation. */
10454 EXPORT_SYMBOL(atomic_add);
10455+EXPORT_SYMBOL(atomic_add_unchecked);
10456 EXPORT_SYMBOL(atomic_add_ret);
10457+EXPORT_SYMBOL(atomic_add_ret_unchecked);
10458 EXPORT_SYMBOL(atomic_sub);
10459+EXPORT_SYMBOL(atomic_sub_unchecked);
10460 EXPORT_SYMBOL(atomic_sub_ret);
10461 EXPORT_SYMBOL(atomic64_add);
10462+EXPORT_SYMBOL(atomic64_add_unchecked);
10463 EXPORT_SYMBOL(atomic64_add_ret);
10464+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
10465 EXPORT_SYMBOL(atomic64_sub);
10466+EXPORT_SYMBOL(atomic64_sub_unchecked);
10467 EXPORT_SYMBOL(atomic64_sub_ret);
10468 EXPORT_SYMBOL(atomic64_dec_if_positive);
10469
10470diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
10471index 30c3ecc..736f015 100644
10472--- a/arch/sparc/mm/Makefile
10473+++ b/arch/sparc/mm/Makefile
10474@@ -2,7 +2,7 @@
10475 #
10476
10477 asflags-y := -ansi
10478-ccflags-y := -Werror
10479+#ccflags-y := -Werror
10480
10481 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
10482 obj-y += fault_$(BITS).o
10483diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
10484index 59dbd46..1dd7f5e 100644
10485--- a/arch/sparc/mm/fault_32.c
10486+++ b/arch/sparc/mm/fault_32.c
10487@@ -21,6 +21,9 @@
10488 #include <linux/perf_event.h>
10489 #include <linux/interrupt.h>
10490 #include <linux/kdebug.h>
10491+#include <linux/slab.h>
10492+#include <linux/pagemap.h>
10493+#include <linux/compiler.h>
10494
10495 #include <asm/page.h>
10496 #include <asm/pgtable.h>
10497@@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
10498 return safe_compute_effective_address(regs, insn);
10499 }
10500
10501+#ifdef CONFIG_PAX_PAGEEXEC
10502+#ifdef CONFIG_PAX_DLRESOLVE
10503+static void pax_emuplt_close(struct vm_area_struct *vma)
10504+{
10505+ vma->vm_mm->call_dl_resolve = 0UL;
10506+}
10507+
10508+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
10509+{
10510+ unsigned int *kaddr;
10511+
10512+ vmf->page = alloc_page(GFP_HIGHUSER);
10513+ if (!vmf->page)
10514+ return VM_FAULT_OOM;
10515+
10516+ kaddr = kmap(vmf->page);
10517+ memset(kaddr, 0, PAGE_SIZE);
10518+ kaddr[0] = 0x9DE3BFA8U; /* save */
10519+ flush_dcache_page(vmf->page);
10520+ kunmap(vmf->page);
10521+ return VM_FAULT_MAJOR;
10522+}
10523+
10524+static const struct vm_operations_struct pax_vm_ops = {
10525+ .close = pax_emuplt_close,
10526+ .fault = pax_emuplt_fault
10527+};
10528+
10529+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
10530+{
10531+ int ret;
10532+
10533+ INIT_LIST_HEAD(&vma->anon_vma_chain);
10534+ vma->vm_mm = current->mm;
10535+ vma->vm_start = addr;
10536+ vma->vm_end = addr + PAGE_SIZE;
10537+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
10538+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
10539+ vma->vm_ops = &pax_vm_ops;
10540+
10541+ ret = insert_vm_struct(current->mm, vma);
10542+ if (ret)
10543+ return ret;
10544+
10545+ ++current->mm->total_vm;
10546+ return 0;
10547+}
10548+#endif
10549+
10550+/*
10551+ * PaX: decide what to do with offenders (regs->pc = fault address)
10552+ *
10553+ * returns 1 when task should be killed
10554+ * 2 when patched PLT trampoline was detected
10555+ * 3 when unpatched PLT trampoline was detected
10556+ */
10557+static int pax_handle_fetch_fault(struct pt_regs *regs)
10558+{
10559+
10560+#ifdef CONFIG_PAX_EMUPLT
10561+ int err;
10562+
10563+ do { /* PaX: patched PLT emulation #1 */
10564+ unsigned int sethi1, sethi2, jmpl;
10565+
10566+ err = get_user(sethi1, (unsigned int *)regs->pc);
10567+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
10568+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
10569+
10570+ if (err)
10571+ break;
10572+
10573+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
10574+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
10575+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
10576+ {
10577+ unsigned int addr;
10578+
10579+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
10580+ addr = regs->u_regs[UREG_G1];
10581+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
10582+ regs->pc = addr;
10583+ regs->npc = addr+4;
10584+ return 2;
10585+ }
10586+ } while (0);
10587+
10588+ do { /* PaX: patched PLT emulation #2 */
10589+ unsigned int ba;
10590+
10591+ err = get_user(ba, (unsigned int *)regs->pc);
10592+
10593+ if (err)
10594+ break;
10595+
10596+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
10597+ unsigned int addr;
10598+
10599+ if ((ba & 0xFFC00000U) == 0x30800000U)
10600+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
10601+ else
10602+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
10603+ regs->pc = addr;
10604+ regs->npc = addr+4;
10605+ return 2;
10606+ }
10607+ } while (0);
10608+
10609+ do { /* PaX: patched PLT emulation #3 */
10610+ unsigned int sethi, bajmpl, nop;
10611+
10612+ err = get_user(sethi, (unsigned int *)regs->pc);
10613+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
10614+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
10615+
10616+ if (err)
10617+ break;
10618+
10619+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10620+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
10621+ nop == 0x01000000U)
10622+ {
10623+ unsigned int addr;
10624+
10625+ addr = (sethi & 0x003FFFFFU) << 10;
10626+ regs->u_regs[UREG_G1] = addr;
10627+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
10628+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
10629+ else
10630+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
10631+ regs->pc = addr;
10632+ regs->npc = addr+4;
10633+ return 2;
10634+ }
10635+ } while (0);
10636+
10637+ do { /* PaX: unpatched PLT emulation step 1 */
10638+ unsigned int sethi, ba, nop;
10639+
10640+ err = get_user(sethi, (unsigned int *)regs->pc);
10641+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
10642+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
10643+
10644+ if (err)
10645+ break;
10646+
10647+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10648+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
10649+ nop == 0x01000000U)
10650+ {
10651+ unsigned int addr, save, call;
10652+
10653+ if ((ba & 0xFFC00000U) == 0x30800000U)
10654+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
10655+ else
10656+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
10657+
10658+ err = get_user(save, (unsigned int *)addr);
10659+ err |= get_user(call, (unsigned int *)(addr+4));
10660+ err |= get_user(nop, (unsigned int *)(addr+8));
10661+ if (err)
10662+ break;
10663+
10664+#ifdef CONFIG_PAX_DLRESOLVE
10665+ if (save == 0x9DE3BFA8U &&
10666+ (call & 0xC0000000U) == 0x40000000U &&
10667+ nop == 0x01000000U)
10668+ {
10669+ struct vm_area_struct *vma;
10670+ unsigned long call_dl_resolve;
10671+
10672+ down_read(&current->mm->mmap_sem);
10673+ call_dl_resolve = current->mm->call_dl_resolve;
10674+ up_read(&current->mm->mmap_sem);
10675+ if (likely(call_dl_resolve))
10676+ goto emulate;
10677+
10678+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
10679+
10680+ down_write(&current->mm->mmap_sem);
10681+ if (current->mm->call_dl_resolve) {
10682+ call_dl_resolve = current->mm->call_dl_resolve;
10683+ up_write(&current->mm->mmap_sem);
10684+ if (vma)
10685+ kmem_cache_free(vm_area_cachep, vma);
10686+ goto emulate;
10687+ }
10688+
10689+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
10690+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
10691+ up_write(&current->mm->mmap_sem);
10692+ if (vma)
10693+ kmem_cache_free(vm_area_cachep, vma);
10694+ return 1;
10695+ }
10696+
10697+ if (pax_insert_vma(vma, call_dl_resolve)) {
10698+ up_write(&current->mm->mmap_sem);
10699+ kmem_cache_free(vm_area_cachep, vma);
10700+ return 1;
10701+ }
10702+
10703+ current->mm->call_dl_resolve = call_dl_resolve;
10704+ up_write(&current->mm->mmap_sem);
10705+
10706+emulate:
10707+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10708+ regs->pc = call_dl_resolve;
10709+ regs->npc = addr+4;
10710+ return 3;
10711+ }
10712+#endif
10713+
10714+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
10715+ if ((save & 0xFFC00000U) == 0x05000000U &&
10716+ (call & 0xFFFFE000U) == 0x85C0A000U &&
10717+ nop == 0x01000000U)
10718+ {
10719+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10720+ regs->u_regs[UREG_G2] = addr + 4;
10721+ addr = (save & 0x003FFFFFU) << 10;
10722+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
10723+ regs->pc = addr;
10724+ regs->npc = addr+4;
10725+ return 3;
10726+ }
10727+ }
10728+ } while (0);
10729+
10730+ do { /* PaX: unpatched PLT emulation step 2 */
10731+ unsigned int save, call, nop;
10732+
10733+ err = get_user(save, (unsigned int *)(regs->pc-4));
10734+ err |= get_user(call, (unsigned int *)regs->pc);
10735+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
10736+ if (err)
10737+ break;
10738+
10739+ if (save == 0x9DE3BFA8U &&
10740+ (call & 0xC0000000U) == 0x40000000U &&
10741+ nop == 0x01000000U)
10742+ {
10743+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
10744+
10745+ regs->u_regs[UREG_RETPC] = regs->pc;
10746+ regs->pc = dl_resolve;
10747+ regs->npc = dl_resolve+4;
10748+ return 3;
10749+ }
10750+ } while (0);
10751+#endif
10752+
10753+ return 1;
10754+}
10755+
10756+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
10757+{
10758+ unsigned long i;
10759+
10760+ printk(KERN_ERR "PAX: bytes at PC: ");
10761+ for (i = 0; i < 8; i++) {
10762+ unsigned int c;
10763+ if (get_user(c, (unsigned int *)pc+i))
10764+ printk(KERN_CONT "???????? ");
10765+ else
10766+ printk(KERN_CONT "%08x ", c);
10767+ }
10768+ printk("\n");
10769+}
10770+#endif
10771+
10772 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
10773 int text_fault)
10774 {
10775@@ -229,6 +503,24 @@ good_area:
10776 if (!(vma->vm_flags & VM_WRITE))
10777 goto bad_area;
10778 } else {
10779+
10780+#ifdef CONFIG_PAX_PAGEEXEC
10781+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
10782+ up_read(&mm->mmap_sem);
10783+ switch (pax_handle_fetch_fault(regs)) {
10784+
10785+#ifdef CONFIG_PAX_EMUPLT
10786+ case 2:
10787+ case 3:
10788+ return;
10789+#endif
10790+
10791+ }
10792+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
10793+ do_group_exit(SIGKILL);
10794+ }
10795+#endif
10796+
10797 /* Allow reads even for write-only mappings */
10798 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
10799 goto bad_area;
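
[annotation] The PLT-emulation code above leans on one recurring idiom: expressions like (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U) sign-extend the 13-bit immediate field (simm13) of a SPARC jmpl instruction without a signed shift. The OR forces the bits above the field high, then the XOR/ADD pair propagates the real sign bit (bit 12) through them, with any carry out of bit 31 discarded by unsigned wraparound. A self-contained check that the idiom matches the straightforward arithmetic-shift formulation:

#include <assert.h>
#include <stdint.h>

/* the idiom used throughout pax_handle_fetch_fault() above */
static uint32_t simm13_pax(uint32_t insn)
{
	return ((insn | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U;
}

/* conventional sign extension of bits 0-12 (arithmetic >> assumed, as on
 * gcc/clang); higher bits of insn fall off the left shift either way */
static uint32_t simm13_shift(uint32_t insn)
{
	return (uint32_t)((int32_t)(insn << 19) >> 19);
}

int main(void)
{
	uint32_t i;

	for (i = 0; i < 0x2000; i++) {		/* every 13-bit pattern */
		assert(simm13_pax(i) == simm13_shift(i));
		/* also with opcode bits set, as in a real jmpl word */
		assert(simm13_pax(0x81C06000U | i) == simm13_shift(0x81C06000U | i));
	}
	return 0;
}
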
10800diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
10801index 69bb818..6ca35c8 100644
10802--- a/arch/sparc/mm/fault_64.c
10803+++ b/arch/sparc/mm/fault_64.c
10804@@ -22,6 +22,9 @@
10805 #include <linux/kdebug.h>
10806 #include <linux/percpu.h>
10807 #include <linux/context_tracking.h>
10808+#include <linux/slab.h>
10809+#include <linux/pagemap.h>
10810+#include <linux/compiler.h>
10811
10812 #include <asm/page.h>
10813 #include <asm/pgtable.h>
10814@@ -75,7 +78,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
10815 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
10816 regs->tpc);
10817 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
10818- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
10819+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
10820 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
10821 dump_stack();
10822 unhandled_fault(regs->tpc, current, regs);
10823@@ -271,6 +274,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
10824 show_regs(regs);
10825 }
10826
10827+#ifdef CONFIG_PAX_PAGEEXEC
10828+#ifdef CONFIG_PAX_DLRESOLVE
10829+static void pax_emuplt_close(struct vm_area_struct *vma)
10830+{
10831+ vma->vm_mm->call_dl_resolve = 0UL;
10832+}
10833+
10834+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
10835+{
10836+ unsigned int *kaddr;
10837+
10838+ vmf->page = alloc_page(GFP_HIGHUSER);
10839+ if (!vmf->page)
10840+ return VM_FAULT_OOM;
10841+
10842+ kaddr = kmap(vmf->page);
10843+ memset(kaddr, 0, PAGE_SIZE);
10844+ kaddr[0] = 0x9DE3BFA8U; /* save */
10845+ flush_dcache_page(vmf->page);
10846+ kunmap(vmf->page);
10847+ return VM_FAULT_MAJOR;
10848+}
10849+
10850+static const struct vm_operations_struct pax_vm_ops = {
10851+ .close = pax_emuplt_close,
10852+ .fault = pax_emuplt_fault
10853+};
10854+
10855+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
10856+{
10857+ int ret;
10858+
10859+ INIT_LIST_HEAD(&vma->anon_vma_chain);
10860+ vma->vm_mm = current->mm;
10861+ vma->vm_start = addr;
10862+ vma->vm_end = addr + PAGE_SIZE;
10863+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
10864+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
10865+ vma->vm_ops = &pax_vm_ops;
10866+
10867+ ret = insert_vm_struct(current->mm, vma);
10868+ if (ret)
10869+ return ret;
10870+
10871+ ++current->mm->total_vm;
10872+ return 0;
10873+}
10874+#endif
10875+
10876+/*
10877+ * PaX: decide what to do with offenders (regs->tpc = fault address)
10878+ *
10879+ * returns 1 when task should be killed
10880+ * 2 when patched PLT trampoline was detected
10881+ * 3 when unpatched PLT trampoline was detected
10882+ */
10883+static int pax_handle_fetch_fault(struct pt_regs *regs)
10884+{
10885+
10886+#ifdef CONFIG_PAX_EMUPLT
10887+ int err;
10888+
10889+ do { /* PaX: patched PLT emulation #1 */
10890+ unsigned int sethi1, sethi2, jmpl;
10891+
10892+ err = get_user(sethi1, (unsigned int *)regs->tpc);
10893+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
10894+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
10895+
10896+ if (err)
10897+ break;
10898+
10899+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
10900+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
10901+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
10902+ {
10903+ unsigned long addr;
10904+
10905+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
10906+ addr = regs->u_regs[UREG_G1];
10907+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
10908+
10909+ if (test_thread_flag(TIF_32BIT))
10910+ addr &= 0xFFFFFFFFUL;
10911+
10912+ regs->tpc = addr;
10913+ regs->tnpc = addr+4;
10914+ return 2;
10915+ }
10916+ } while (0);
10917+
10918+ do { /* PaX: patched PLT emulation #2 */
10919+ unsigned int ba;
10920+
10921+ err = get_user(ba, (unsigned int *)regs->tpc);
10922+
10923+ if (err)
10924+ break;
10925+
10926+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
10927+ unsigned long addr;
10928+
10929+ if ((ba & 0xFFC00000U) == 0x30800000U)
10930+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
10931+ else
10932+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10933+
10934+ if (test_thread_flag(TIF_32BIT))
10935+ addr &= 0xFFFFFFFFUL;
10936+
10937+ regs->tpc = addr;
10938+ regs->tnpc = addr+4;
10939+ return 2;
10940+ }
10941+ } while (0);
10942+
10943+ do { /* PaX: patched PLT emulation #3 */
10944+ unsigned int sethi, bajmpl, nop;
10945+
10946+ err = get_user(sethi, (unsigned int *)regs->tpc);
10947+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
10948+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
10949+
10950+ if (err)
10951+ break;
10952+
10953+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10954+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
10955+ nop == 0x01000000U)
10956+ {
10957+ unsigned long addr;
10958+
10959+ addr = (sethi & 0x003FFFFFU) << 10;
10960+ regs->u_regs[UREG_G1] = addr;
10961+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
10962+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
10963+ else
10964+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10965+
10966+ if (test_thread_flag(TIF_32BIT))
10967+ addr &= 0xFFFFFFFFUL;
10968+
10969+ regs->tpc = addr;
10970+ regs->tnpc = addr+4;
10971+ return 2;
10972+ }
10973+ } while (0);
10974+
10975+ do { /* PaX: patched PLT emulation #4 */
10976+ unsigned int sethi, mov1, call, mov2;
10977+
10978+ err = get_user(sethi, (unsigned int *)regs->tpc);
10979+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
10980+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
10981+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
10982+
10983+ if (err)
10984+ break;
10985+
10986+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10987+ mov1 == 0x8210000FU &&
10988+ (call & 0xC0000000U) == 0x40000000U &&
10989+ mov2 == 0x9E100001U)
10990+ {
10991+ unsigned long addr;
10992+
10993+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
10994+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
10995+
10996+ if (test_thread_flag(TIF_32BIT))
10997+ addr &= 0xFFFFFFFFUL;
10998+
10999+ regs->tpc = addr;
11000+ regs->tnpc = addr+4;
11001+ return 2;
11002+ }
11003+ } while (0);
11004+
11005+ do { /* PaX: patched PLT emulation #5 */
11006+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
11007+
11008+ err = get_user(sethi, (unsigned int *)regs->tpc);
11009+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11010+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11011+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
11012+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
11013+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
11014+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
11015+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
11016+
11017+ if (err)
11018+ break;
11019+
11020+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11021+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11022+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11023+ (or1 & 0xFFFFE000U) == 0x82106000U &&
11024+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
11025+ sllx == 0x83287020U &&
11026+ jmpl == 0x81C04005U &&
11027+ nop == 0x01000000U)
11028+ {
11029+ unsigned long addr;
11030+
11031+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
11032+ regs->u_regs[UREG_G1] <<= 32;
11033+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
11034+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11035+ regs->tpc = addr;
11036+ regs->tnpc = addr+4;
11037+ return 2;
11038+ }
11039+ } while (0);
11040+
11041+ do { /* PaX: patched PLT emulation #6 */
11042+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
11043+
11044+ err = get_user(sethi, (unsigned int *)regs->tpc);
11045+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11046+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11047+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
11048+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
11049+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
11050+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
11051+
11052+ if (err)
11053+ break;
11054+
11055+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11056+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11057+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11058+ sllx == 0x83287020U &&
11059+ (or & 0xFFFFE000U) == 0x8A116000U &&
11060+ jmpl == 0x81C04005U &&
11061+ nop == 0x01000000U)
11062+ {
11063+ unsigned long addr;
11064+
11065+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
11066+ regs->u_regs[UREG_G1] <<= 32;
11067+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
11068+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11069+ regs->tpc = addr;
11070+ regs->tnpc = addr+4;
11071+ return 2;
11072+ }
11073+ } while (0);
11074+
11075+ do { /* PaX: unpatched PLT emulation step 1 */
11076+ unsigned int sethi, ba, nop;
11077+
11078+ err = get_user(sethi, (unsigned int *)regs->tpc);
11079+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11080+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11081+
11082+ if (err)
11083+ break;
11084+
11085+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11086+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11087+ nop == 0x01000000U)
11088+ {
11089+ unsigned long addr;
11090+ unsigned int save, call;
11091+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
11092+
11093+ if ((ba & 0xFFC00000U) == 0x30800000U)
11094+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11095+ else
11096+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11097+
11098+ if (test_thread_flag(TIF_32BIT))
11099+ addr &= 0xFFFFFFFFUL;
11100+
11101+ err = get_user(save, (unsigned int *)addr);
11102+ err |= get_user(call, (unsigned int *)(addr+4));
11103+ err |= get_user(nop, (unsigned int *)(addr+8));
11104+ if (err)
11105+ break;
11106+
11107+#ifdef CONFIG_PAX_DLRESOLVE
11108+ if (save == 0x9DE3BFA8U &&
11109+ (call & 0xC0000000U) == 0x40000000U &&
11110+ nop == 0x01000000U)
11111+ {
11112+ struct vm_area_struct *vma;
11113+ unsigned long call_dl_resolve;
11114+
11115+ down_read(&current->mm->mmap_sem);
11116+ call_dl_resolve = current->mm->call_dl_resolve;
11117+ up_read(&current->mm->mmap_sem);
11118+ if (likely(call_dl_resolve))
11119+ goto emulate;
11120+
11121+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11122+
11123+ down_write(&current->mm->mmap_sem);
11124+ if (current->mm->call_dl_resolve) {
11125+ call_dl_resolve = current->mm->call_dl_resolve;
11126+ up_write(&current->mm->mmap_sem);
11127+ if (vma)
11128+ kmem_cache_free(vm_area_cachep, vma);
11129+ goto emulate;
11130+ }
11131+
11132+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11133+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11134+ up_write(&current->mm->mmap_sem);
11135+ if (vma)
11136+ kmem_cache_free(vm_area_cachep, vma);
11137+ return 1;
11138+ }
11139+
11140+ if (pax_insert_vma(vma, call_dl_resolve)) {
11141+ up_write(&current->mm->mmap_sem);
11142+ kmem_cache_free(vm_area_cachep, vma);
11143+ return 1;
11144+ }
11145+
11146+ current->mm->call_dl_resolve = call_dl_resolve;
11147+ up_write(&current->mm->mmap_sem);
11148+
11149+emulate:
11150+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11151+ regs->tpc = call_dl_resolve;
11152+ regs->tnpc = addr+4;
11153+ return 3;
11154+ }
11155+#endif
11156+
11157+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11158+ if ((save & 0xFFC00000U) == 0x05000000U &&
11159+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11160+ nop == 0x01000000U)
11161+ {
11162+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11163+ regs->u_regs[UREG_G2] = addr + 4;
11164+ addr = (save & 0x003FFFFFU) << 10;
11165+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11166+
11167+ if (test_thread_flag(TIF_32BIT))
11168+ addr &= 0xFFFFFFFFUL;
11169+
11170+ regs->tpc = addr;
11171+ regs->tnpc = addr+4;
11172+ return 3;
11173+ }
11174+
11175+ /* PaX: 64-bit PLT stub */
11176+ err = get_user(sethi1, (unsigned int *)addr);
11177+ err |= get_user(sethi2, (unsigned int *)(addr+4));
11178+ err |= get_user(or1, (unsigned int *)(addr+8));
11179+ err |= get_user(or2, (unsigned int *)(addr+12));
11180+ err |= get_user(sllx, (unsigned int *)(addr+16));
11181+ err |= get_user(add, (unsigned int *)(addr+20));
11182+ err |= get_user(jmpl, (unsigned int *)(addr+24));
11183+ err |= get_user(nop, (unsigned int *)(addr+28));
11184+ if (err)
11185+ break;
11186+
11187+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
11188+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11189+ (or1 & 0xFFFFE000U) == 0x88112000U &&
11190+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
11191+ sllx == 0x89293020U &&
11192+ add == 0x8A010005U &&
11193+ jmpl == 0x89C14000U &&
11194+ nop == 0x01000000U)
11195+ {
11196+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11197+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
11198+ regs->u_regs[UREG_G4] <<= 32;
11199+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
11200+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
11201+ regs->u_regs[UREG_G4] = addr + 24;
11202+ addr = regs->u_regs[UREG_G5];
11203+ regs->tpc = addr;
11204+ regs->tnpc = addr+4;
11205+ return 3;
11206+ }
11207+ }
11208+ } while (0);
11209+
11210+#ifdef CONFIG_PAX_DLRESOLVE
11211+ do { /* PaX: unpatched PLT emulation step 2 */
11212+ unsigned int save, call, nop;
11213+
11214+ err = get_user(save, (unsigned int *)(regs->tpc-4));
11215+ err |= get_user(call, (unsigned int *)regs->tpc);
11216+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
11217+ if (err)
11218+ break;
11219+
11220+ if (save == 0x9DE3BFA8U &&
11221+ (call & 0xC0000000U) == 0x40000000U &&
11222+ nop == 0x01000000U)
11223+ {
11224+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
11225+
11226+ if (test_thread_flag(TIF_32BIT))
11227+ dl_resolve &= 0xFFFFFFFFUL;
11228+
11229+ regs->u_regs[UREG_RETPC] = regs->tpc;
11230+ regs->tpc = dl_resolve;
11231+ regs->tnpc = dl_resolve+4;
11232+ return 3;
11233+ }
11234+ } while (0);
11235+#endif
11236+
11237+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
11238+ unsigned int sethi, ba, nop;
11239+
11240+ err = get_user(sethi, (unsigned int *)regs->tpc);
11241+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11242+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11243+
11244+ if (err)
11245+ break;
11246+
11247+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11248+ (ba & 0xFFF00000U) == 0x30600000U &&
11249+ nop == 0x01000000U)
11250+ {
11251+ unsigned long addr;
11252+
11253+ addr = (sethi & 0x003FFFFFU) << 10;
11254+ regs->u_regs[UREG_G1] = addr;
11255+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11256+
11257+ if (test_thread_flag(TIF_32BIT))
11258+ addr &= 0xFFFFFFFFUL;
11259+
11260+ regs->tpc = addr;
11261+ regs->tnpc = addr+4;
11262+ return 2;
11263+ }
11264+ } while (0);
11265+
11266+#endif
11267+
11268+ return 1;
11269+}
11270+
11271+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11272+{
11273+ unsigned long i;
11274+
11275+ printk(KERN_ERR "PAX: bytes at PC: ");
11276+ for (i = 0; i < 8; i++) {
11277+ unsigned int c;
11278+ if (get_user(c, (unsigned int *)pc+i))
11279+ printk(KERN_CONT "???????? ");
11280+ else
11281+ printk(KERN_CONT "%08x ", c);
11282+ }
11283+ printk("\n");
11284+}
11285+#endif
11286+
11287 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
11288 {
11289 enum ctx_state prev_state = exception_enter();
11290@@ -344,6 +807,29 @@ retry:
11291 if (!vma)
11292 goto bad_area;
11293
11294+#ifdef CONFIG_PAX_PAGEEXEC
11295+ /* PaX: detect ITLB misses on non-exec pages */
11296+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
11297+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
11298+ {
11299+ if (address != regs->tpc)
11300+ goto good_area;
11301+
11302+ up_read(&mm->mmap_sem);
11303+ switch (pax_handle_fetch_fault(regs)) {
11304+
11305+#ifdef CONFIG_PAX_EMUPLT
11306+ case 2:
11307+ case 3:
11308+ return;
11309+#endif
11310+
11311+ }
11312+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
11313+ do_group_exit(SIGKILL);
11314+ }
11315+#endif
11316+
11317 /* Pure DTLB misses do not tell us whether the fault causing
11318 * load/store/atomic was a write or not, it only says that there
11319 * was no match. So in such a case we (carefully) read the
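
[annotation] The 64-bit emulator above is structurally a copy of the 32-bit one, with two systematic differences: the sign-extension constants widen to 64 bits (0xFFFFFFFFFFFFE000UL and friends), and every computed target is truncated with addr &= 0xFFFFFFFFUL for TIF_32BIT (compat) tasks so a 32-bit process can never be redirected above 4GB. The branch forms it recognises carry 22-bit (mask 0xFFC00000) and 19-bit (mask 0xFFF80000) word displacements; a self-contained check of the 22-bit decode, in the same spirit as the simm13 demo after fault_32.c:

#include <assert.h>
#include <stdint.h>

/* disp22 decode as written in the 64-bit pax_handle_fetch_fault():
 * sign-extend the 22-bit word displacement, then scale by 4 */
static uint64_t disp22_pax(uint32_t ba)
{
	return (((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2;
}

/* conventional form via arithmetic shift (gcc/clang behaviour assumed) */
static uint64_t disp22_shift(uint32_t ba)
{
	return (uint64_t)((int64_t)((uint64_t)ba << 42) >> 42) << 2;
}

int main(void)
{
	uint32_t d;

	for (d = 0; d < 0x400000; d++)		/* every 22-bit displacement */
		assert(disp22_pax(d) == disp22_shift(d));
	return 0;
}
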
11320diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
11321index 3096317..a7b7654 100644
11322--- a/arch/sparc/mm/hugetlbpage.c
11323+++ b/arch/sparc/mm/hugetlbpage.c
11324@@ -26,7 +26,8 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
11325 unsigned long addr,
11326 unsigned long len,
11327 unsigned long pgoff,
11328- unsigned long flags)
11329+ unsigned long flags,
11330+ unsigned long offset)
11331 {
11332 unsigned long task_size = TASK_SIZE;
11333 struct vm_unmapped_area_info info;
11334@@ -36,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
11335
11336 info.flags = 0;
11337 info.length = len;
11338- info.low_limit = TASK_UNMAPPED_BASE;
11339+ info.low_limit = mm->mmap_base;
11340 info.high_limit = min(task_size, VA_EXCLUDE_START);
11341 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
11342 info.align_offset = 0;
11343+ info.threadstack_offset = offset;
11344 addr = vm_unmapped_area(&info);
11345
11346 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
11347 VM_BUG_ON(addr != -ENOMEM);
11348 info.low_limit = VA_EXCLUDE_END;
11349+
11350+#ifdef CONFIG_PAX_RANDMMAP
11351+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11352+ info.low_limit += mm->delta_mmap;
11353+#endif
11354+
11355 info.high_limit = task_size;
11356 addr = vm_unmapped_area(&info);
11357 }
11358@@ -56,7 +64,8 @@ static unsigned long
11359 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11360 const unsigned long len,
11361 const unsigned long pgoff,
11362- const unsigned long flags)
11363+ const unsigned long flags,
11364+ const unsigned long offset)
11365 {
11366 struct mm_struct *mm = current->mm;
11367 unsigned long addr = addr0;
11368@@ -71,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11369 info.high_limit = mm->mmap_base;
11370 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
11371 info.align_offset = 0;
11372+ info.threadstack_offset = offset;
11373 addr = vm_unmapped_area(&info);
11374
11375 /*
11376@@ -83,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11377 VM_BUG_ON(addr != -ENOMEM);
11378 info.flags = 0;
11379 info.low_limit = TASK_UNMAPPED_BASE;
11380+
11381+#ifdef CONFIG_PAX_RANDMMAP
11382+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11383+ info.low_limit += mm->delta_mmap;
11384+#endif
11385+
11386 info.high_limit = STACK_TOP32;
11387 addr = vm_unmapped_area(&info);
11388 }
11389@@ -97,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
11390 struct mm_struct *mm = current->mm;
11391 struct vm_area_struct *vma;
11392 unsigned long task_size = TASK_SIZE;
11393+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
11394
11395 if (test_thread_flag(TIF_32BIT))
11396 task_size = STACK_TOP32;
11397@@ -112,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
11398 return addr;
11399 }
11400
11401+#ifdef CONFIG_PAX_RANDMMAP
11402+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
11403+#endif
11404+
11405 if (addr) {
11406 addr = ALIGN(addr, HPAGE_SIZE);
11407 vma = find_vma(mm, addr);
11408- if (task_size - len >= addr &&
11409- (!vma || addr + len <= vma->vm_start))
11410+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
11411 return addr;
11412 }
11413 if (mm->get_unmapped_area == arch_get_unmapped_area)
11414 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
11415- pgoff, flags);
11416+ pgoff, flags, offset);
11417 else
11418 return hugetlb_get_unmapped_area_topdown(file, addr, len,
11419- pgoff, flags);
11420+ pgoff, flags, offset);
11421 }
11422
11423 pte_t *huge_pte_alloc(struct mm_struct *mm,
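
[annotation] Two grsecurity mechanisms surface in this hugetlb hunk. gr_rand_threadstack_offset() yields a per-mapping random offset that vm_unmapped_area() (extended with the new info.threadstack_offset field elsewhere in this patch) uses to keep thread stacks a randomized distance from neighbouring mappings, and check_heap_stack_gap() replaces the bare "!vma || addr + len <= vma->vm_start" test so a caller-requested address is also rejected when it would butt up against a growing stack. A hedged sketch of what the gap check presumably enforces; the real helper is added to include/linux/mm.h by this patch, and the sysctl name and exact semantics here are assumptions:

/* Sketch only -- approximates the check_heap_stack_gap() this patch adds;
 * heap_stack_gap is an assumed sysctl-backed page count. */
static bool check_heap_stack_gap(const struct vm_area_struct *vma,
				 unsigned long addr, unsigned long len,
				 unsigned long offset)
{
	if (!vma)
		return true;			/* nothing above the request */

	/* plain case: the request must end below the next vma ... */
	if (addr + len > vma->vm_start)
		return false;

	/* ... and if that vma is a downward-growing stack, additionally keep
	 * a guard gap plus the per-thread random offset between them */
	if (vma->vm_flags & VM_GROWSDOWN)
		return addr + len + (heap_stack_gap << PAGE_SHIFT) + offset
			<= vma->vm_start;

	return true;
}
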
11424diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
11425index 5322e53..f820c5e 100644
11426--- a/arch/sparc/mm/init_64.c
11427+++ b/arch/sparc/mm/init_64.c
11428@@ -188,9 +188,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
11429 int num_kernel_image_mappings;
11430
11431 #ifdef CONFIG_DEBUG_DCFLUSH
11432-atomic_t dcpage_flushes = ATOMIC_INIT(0);
11433+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
11434 #ifdef CONFIG_SMP
11435-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
11436+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
11437 #endif
11438 #endif
11439
11440@@ -198,7 +198,7 @@ inline void flush_dcache_page_impl(struct page *page)
11441 {
11442 BUG_ON(tlb_type == hypervisor);
11443 #ifdef CONFIG_DEBUG_DCFLUSH
11444- atomic_inc(&dcpage_flushes);
11445+ atomic_inc_unchecked(&dcpage_flushes);
11446 #endif
11447
11448 #ifdef DCACHE_ALIASING_POSSIBLE
11449@@ -466,10 +466,10 @@ void mmu_info(struct seq_file *m)
11450
11451 #ifdef CONFIG_DEBUG_DCFLUSH
11452 seq_printf(m, "DCPageFlushes\t: %d\n",
11453- atomic_read(&dcpage_flushes));
11454+ atomic_read_unchecked(&dcpage_flushes));
11455 #ifdef CONFIG_SMP
11456 seq_printf(m, "DCPageFlushesXC\t: %d\n",
11457- atomic_read(&dcpage_flushes_xcall));
11458+ atomic_read_unchecked(&dcpage_flushes_xcall));
11459 #endif /* CONFIG_SMP */
11460 #endif /* CONFIG_DEBUG_DCFLUSH */
11461 }
11462diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
11463index b3692ce..e4517c9 100644
11464--- a/arch/tile/Kconfig
11465+++ b/arch/tile/Kconfig
11466@@ -184,6 +184,7 @@ source "kernel/Kconfig.hz"
11467
11468 config KEXEC
11469 bool "kexec system call"
11470+ depends on !GRKERNSEC_KMEM
11471 ---help---
11472 kexec is a system call that implements the ability to shutdown your
11473 current kernel, and to start another kernel. It is like a reboot
11474diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
11475index ad220ee..2f537b3 100644
11476--- a/arch/tile/include/asm/atomic_64.h
11477+++ b/arch/tile/include/asm/atomic_64.h
11478@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
11479
11480 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
11481
11482+#define atomic64_read_unchecked(v) atomic64_read(v)
11483+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
11484+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
11485+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
11486+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
11487+#define atomic64_inc_unchecked(v) atomic64_inc(v)
11488+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
11489+#define atomic64_dec_unchecked(v) atomic64_dec(v)
11490+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
11491+
11492 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
11493 #define smp_mb__before_atomic_dec() smp_mb()
11494 #define smp_mb__after_atomic_dec() smp_mb()
11495diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
11496index 6160761..00cac88 100644
11497--- a/arch/tile/include/asm/cache.h
11498+++ b/arch/tile/include/asm/cache.h
11499@@ -15,11 +15,12 @@
11500 #ifndef _ASM_TILE_CACHE_H
11501 #define _ASM_TILE_CACHE_H
11502
11503+#include <linux/const.h>
11504 #include <arch/chip.h>
11505
11506 /* bytes per L1 data cache line */
11507 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
11508-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11509+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
11510
11511 /* bytes per L2 cache line */
11512 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
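
[annotation] The cache.h edits here (and the identical um and unicore32 ones below) all rewrite L1_CACHE_BYTES from (1 << SHIFT) to (_AC(1,UL) << SHIFT). The point is type width: a plain int 1 shifted left can overflow or sign-extend surprisingly in mixed expressions, while an unsigned-long constant stays well-defined; yet the UL suffix must disappear when the same header is pulled into assembly. That is exactly what _AC() from the newly included linux/const.h does:

/* linux/const.h (abridged): paste the suffix only when compiling C */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)
#endif

/* so in C:   L1_CACHE_BYTES == (1UL << L1_CACHE_SHIFT)  (unsigned long)
 * in asm:    L1_CACHE_BYTES == (1   << L1_CACHE_SHIFT)  (plain integer) */
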
11513diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
11514index b6cde32..c0cb736 100644
11515--- a/arch/tile/include/asm/uaccess.h
11516+++ b/arch/tile/include/asm/uaccess.h
11517@@ -414,9 +414,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
11518 const void __user *from,
11519 unsigned long n)
11520 {
11521- int sz = __compiletime_object_size(to);
11522+ size_t sz = __compiletime_object_size(to);
11523
11524- if (likely(sz == -1 || sz >= n))
11525+ if (likely(sz == (size_t)-1 || sz >= n))
11526 n = _copy_from_user(to, from, n);
11527 else
11528 copy_from_user_overflow();
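
[annotation] The tile copy_from_user() fix is part of a tree-wide pattern in this patch: __compiletime_object_size() reports "size unknown" as (size_t)-1, so storing the result in a signed int truncates on 64-bit targets and drags signed/unsigned conversion rules into the "sz >= n" test. Widening sz to size_t and comparing against an explicit (size_t)-1 keeps the sentinel and the length in the same unsigned domain. A compact userspace illustration of the round trip the old code silently relied on:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t unknown = (size_t)-1;	/* the "size unknown" sentinel */
	int sz = unknown;		/* old code: truncates to -1 */
	unsigned long n = 8;

	/* the old comparison only works because sz is sign-extended back to
	 * 0xFFFF...FF before the unsigned compare; the size_t form states
	 * the intent directly instead of relying on that round trip */
	printf("int sentinel survives:    %d\n", sz == -1);
	printf("size_t sentinel explicit: %d\n", unknown == (size_t)-1);
	printf("sz >= n after promotion:  %d\n", (size_t)sz >= n);
	return 0;
}
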
11529diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
11530index 0cb3bba..7338b2d 100644
11531--- a/arch/tile/mm/hugetlbpage.c
11532+++ b/arch/tile/mm/hugetlbpage.c
11533@@ -212,6 +212,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
11534 info.high_limit = TASK_SIZE;
11535 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
11536 info.align_offset = 0;
11537+ info.threadstack_offset = 0;
11538 return vm_unmapped_area(&info);
11539 }
11540
11541@@ -229,6 +230,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
11542 info.high_limit = current->mm->mmap_base;
11543 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
11544 info.align_offset = 0;
11545+ info.threadstack_offset = 0;
11546 addr = vm_unmapped_area(&info);
11547
11548 /*
11549diff --git a/arch/um/Makefile b/arch/um/Makefile
11550index 36e658a..71a5c5a 100644
11551--- a/arch/um/Makefile
11552+++ b/arch/um/Makefile
11553@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
11554 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
11555 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
11556
11557+ifdef CONSTIFY_PLUGIN
11558+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
11559+endif
11560+
11561 #This will adjust *FLAGS accordingly to the platform.
11562 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
11563
11564diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
11565index 19e1bdd..3665b77 100644
11566--- a/arch/um/include/asm/cache.h
11567+++ b/arch/um/include/asm/cache.h
11568@@ -1,6 +1,7 @@
11569 #ifndef __UM_CACHE_H
11570 #define __UM_CACHE_H
11571
11572+#include <linux/const.h>
11573
11574 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
11575 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
11576@@ -12,6 +13,6 @@
11577 # define L1_CACHE_SHIFT 5
11578 #endif
11579
11580-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11581+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
11582
11583 #endif
11584diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
11585index 2e0a6b1..a64d0f5 100644
11586--- a/arch/um/include/asm/kmap_types.h
11587+++ b/arch/um/include/asm/kmap_types.h
11588@@ -8,6 +8,6 @@
11589
11590 /* No more #include "asm/arch/kmap_types.h" ! */
11591
11592-#define KM_TYPE_NR 14
11593+#define KM_TYPE_NR 15
11594
11595 #endif
11596diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
11597index 5ff53d9..5850cdf 100644
11598--- a/arch/um/include/asm/page.h
11599+++ b/arch/um/include/asm/page.h
11600@@ -14,6 +14,9 @@
11601 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
11602 #define PAGE_MASK (~(PAGE_SIZE-1))
11603
11604+#define ktla_ktva(addr) (addr)
11605+#define ktva_ktla(addr) (addr)
11606+
11607 #ifndef __ASSEMBLY__
11608
11609 struct page;
11610diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
11611index 0032f92..cd151e0 100644
11612--- a/arch/um/include/asm/pgtable-3level.h
11613+++ b/arch/um/include/asm/pgtable-3level.h
11614@@ -58,6 +58,7 @@
11615 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
11616 #define pud_populate(mm, pud, pmd) \
11617 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
11618+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
11619
11620 #ifdef CONFIG_64BIT
11621 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
11622diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
11623index eecc414..48adb87 100644
11624--- a/arch/um/kernel/process.c
11625+++ b/arch/um/kernel/process.c
11626@@ -356,22 +356,6 @@ int singlestepping(void * t)
11627 return 2;
11628 }
11629
11630-/*
11631- * Only x86 and x86_64 have an arch_align_stack().
11632- * All other arches have "#define arch_align_stack(x) (x)"
11633- * in their asm/system.h
11634- * As this is included in UML from asm-um/system-generic.h,
11635- * we can use it to behave as the subarch does.
11636- */
11637-#ifndef arch_align_stack
11638-unsigned long arch_align_stack(unsigned long sp)
11639-{
11640- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
11641- sp -= get_random_int() % 8192;
11642- return sp & ~0xf;
11643-}
11644-#endif
11645-
11646 unsigned long get_wchan(struct task_struct *p)
11647 {
11648 unsigned long stack_page, sp, ip;
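
[annotation] Deleting UML's local arch_align_stack() is deliberate: that helper jittered the stack pointer by up to 8KB per exec, a second, weaker randomisation source that PaX's RANDMMAP (which already randomises the whole stack base via delta_stack) would rather not have layered on top. With the definition gone, UML presumably falls back to the generic default that the removed comment itself describes; a sketch of that fallback:

/* assumed fallback, per the removed comment ("#define arch_align_stack(x) (x)"
 * in the generic headers): leave the stack pointer untouched */
#ifndef arch_align_stack
#define arch_align_stack(x) (x)
#endif
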
11649diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
11650index ad8f795..2c7eec6 100644
11651--- a/arch/unicore32/include/asm/cache.h
11652+++ b/arch/unicore32/include/asm/cache.h
11653@@ -12,8 +12,10 @@
11654 #ifndef __UNICORE_CACHE_H__
11655 #define __UNICORE_CACHE_H__
11656
11657-#define L1_CACHE_SHIFT (5)
11658-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11659+#include <linux/const.h>
11660+
11661+#define L1_CACHE_SHIFT 5
11662+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
11663
11664 /*
11665 * Memory returned by kmalloc() may be used for DMA, so we must make
11666diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
11667index 0952ecd..75e0e8a 100644
11668--- a/arch/x86/Kconfig
11669+++ b/arch/x86/Kconfig
11670@@ -249,7 +249,7 @@ config X86_HT
11671
11672 config X86_32_LAZY_GS
11673 def_bool y
11674- depends on X86_32 && !CC_STACKPROTECTOR
11675+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
11676
11677 config ARCH_HWEIGHT_CFLAGS
11678 string
11679@@ -1104,6 +1104,7 @@ config MICROCODE_EARLY
11680
11681 config X86_MSR
11682 tristate "/dev/cpu/*/msr - Model-specific register support"
11683+ depends on !GRKERNSEC_KMEM
11684 ---help---
11685 This device gives privileged processes access to the x86
11686 Model-Specific Registers (MSRs). It is a character device with
11687@@ -1127,7 +1128,7 @@ choice
11688
11689 config NOHIGHMEM
11690 bool "off"
11691- depends on !X86_NUMAQ
11692+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11693 ---help---
11694 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11695 However, the address space of 32-bit x86 processors is only 4
11696@@ -1164,7 +1165,7 @@ config NOHIGHMEM
11697
11698 config HIGHMEM4G
11699 bool "4GB"
11700- depends on !X86_NUMAQ
11701+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11702 ---help---
11703 Select this if you have a 32-bit processor and between 1 and 4
11704 gigabytes of physical RAM.
11705@@ -1217,7 +1218,7 @@ config PAGE_OFFSET
11706 hex
11707 default 0xB0000000 if VMSPLIT_3G_OPT
11708 default 0x80000000 if VMSPLIT_2G
11709- default 0x78000000 if VMSPLIT_2G_OPT
11710+ default 0x70000000 if VMSPLIT_2G_OPT
11711 default 0x40000000 if VMSPLIT_1G
11712 default 0xC0000000
11713 depends on X86_32
11714@@ -1619,6 +1620,7 @@ config SECCOMP
11715
11716 config CC_STACKPROTECTOR
11717 bool "Enable -fstack-protector buffer overflow detection"
11718+ depends on X86_64 || !PAX_MEMORY_UDEREF
11719 ---help---
11720 This option turns on the -fstack-protector GCC feature. This
11721 feature puts, at the beginning of functions, a canary value on
11722@@ -1637,6 +1639,7 @@ source kernel/Kconfig.hz
11723
11724 config KEXEC
11725 bool "kexec system call"
11726+ depends on !GRKERNSEC_KMEM
11727 ---help---
11728 kexec is a system call that implements the ability to shutdown your
11729 current kernel, and to start another kernel. It is like a reboot
11730@@ -1738,6 +1741,8 @@ config X86_NEED_RELOCS
11731 config PHYSICAL_ALIGN
11732 hex "Alignment value to which kernel should be aligned"
11733 default "0x1000000"
11734+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
11735+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
11736 range 0x2000 0x1000000 if X86_32
11737 range 0x200000 0x1000000 if X86_64
11738 ---help---
11739@@ -1817,9 +1822,10 @@ config DEBUG_HOTPLUG_CPU0
11740 If unsure, say N.
11741
11742 config COMPAT_VDSO
11743- def_bool y
11744+ def_bool n
11745 prompt "Compat VDSO support"
11746 depends on X86_32 || IA32_EMULATION
11747+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
11748 ---help---
11749 Map the 32-bit VDSO to the predictable old-style address too.
11750
11751diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
11752index c026cca..14657ae 100644
11753--- a/arch/x86/Kconfig.cpu
11754+++ b/arch/x86/Kconfig.cpu
11755@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
11756
11757 config X86_F00F_BUG
11758 def_bool y
11759- depends on M586MMX || M586TSC || M586 || M486
11760+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
11761
11762 config X86_INVD_BUG
11763 def_bool y
11764@@ -327,7 +327,7 @@ config X86_INVD_BUG
11765
11766 config X86_ALIGNMENT_16
11767 def_bool y
11768- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11769+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11770
11771 config X86_INTEL_USERCOPY
11772 def_bool y
11773@@ -373,7 +373,7 @@ config X86_CMPXCHG64
11774 # generates cmov.
11775 config X86_CMOV
11776 def_bool y
11777- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
11778+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
11779
11780 config X86_MINIMUM_CPU_FAMILY
11781 int
11782diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
11783index 0f3621e..282f24b 100644
11784--- a/arch/x86/Kconfig.debug
11785+++ b/arch/x86/Kconfig.debug
11786@@ -84,7 +84,7 @@ config X86_PTDUMP
11787 config DEBUG_RODATA
11788 bool "Write protect kernel read-only data structures"
11789 default y
11790- depends on DEBUG_KERNEL
11791+ depends on DEBUG_KERNEL && BROKEN
11792 ---help---
11793 Mark the kernel read-only data as write-protected in the pagetables,
11794 in order to catch accidental (and incorrect) writes to such const
11795@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
11796
11797 config DEBUG_SET_MODULE_RONX
11798 bool "Set loadable kernel module data as NX and text as RO"
11799- depends on MODULES
11800+ depends on MODULES && BROKEN
11801 ---help---
11802 This option helps catch unintended modifications to loadable
11803 kernel module's text and read-only data. It also prevents execution
11804diff --git a/arch/x86/Makefile b/arch/x86/Makefile
11805index 57d0215..b4373fb 100644
11806--- a/arch/x86/Makefile
11807+++ b/arch/x86/Makefile
11808@@ -49,14 +49,12 @@ ifeq ($(CONFIG_X86_32),y)
11809 # CPU-specific tuning. Anything which can be shared with UML should go here.
11810 include $(srctree)/arch/x86/Makefile_32.cpu
11811 KBUILD_CFLAGS += $(cflags-y)
11812-
11813- # temporary until string.h is fixed
11814- KBUILD_CFLAGS += -ffreestanding
11815 else
11816 BITS := 64
11817 UTS_MACHINE := x86_64
11818 CHECKFLAGS += -D__x86_64__ -m64
11819
11820+ biarch := $(call cc-option,-m64)
11821 KBUILD_AFLAGS += -m64
11822 KBUILD_CFLAGS += -m64
11823
11824@@ -89,6 +87,9 @@ else
11825 KBUILD_CFLAGS += -maccumulate-outgoing-args
11826 endif
11827
11828+# temporary until string.h is fixed
11829+KBUILD_CFLAGS += -ffreestanding
11830+
11831 ifdef CONFIG_CC_STACKPROTECTOR
11832 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
11833 ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y)
11834@@ -247,3 +248,12 @@ define archhelp
11835 echo ' FDINITRD=file initrd for the booted kernel'
11836 echo ' kvmconfig - Enable additional options for guest kernel support'
11837 endef
11838+
11839+define OLD_LD
11840+
11841+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
11842+*** Please upgrade your binutils to 2.18 or newer
11843+endef
11844+
11845+archprepare:
11846+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
11847diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
11848index d9c1195..a26ca0d 100644
11849--- a/arch/x86/boot/Makefile
11850+++ b/arch/x86/boot/Makefile
11851@@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ \
11852 $(call cc-option, -fno-unit-at-a-time)) \
11853 $(call cc-option, -fno-stack-protector) \
11854 $(call cc-option, -mpreferred-stack-boundary=2)
11855+ifdef CONSTIFY_PLUGIN
11856+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
11857+endif
11858 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11859 GCOV_PROFILE := n
11860
11861diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
11862index 878e4b9..20537ab 100644
11863--- a/arch/x86/boot/bitops.h
11864+++ b/arch/x86/boot/bitops.h
11865@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
11866 u8 v;
11867 const u32 *p = (const u32 *)addr;
11868
11869- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
11870+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
11871 return v;
11872 }
11873
11874@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
11875
11876 static inline void set_bit(int nr, void *addr)
11877 {
11878- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
11879+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
11880 }
11881
11882 #endif /* BOOT_BITOPS_H */
11883diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
11884index ef72bae..353a184 100644
11885--- a/arch/x86/boot/boot.h
11886+++ b/arch/x86/boot/boot.h
11887@@ -85,7 +85,7 @@ static inline void io_delay(void)
11888 static inline u16 ds(void)
11889 {
11890 u16 seg;
11891- asm("movw %%ds,%0" : "=rm" (seg));
11892+ asm volatile("movw %%ds,%0" : "=rm" (seg));
11893 return seg;
11894 }
11895
11896@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
11897 static inline int memcmp(const void *s1, const void *s2, size_t len)
11898 {
11899 u8 diff;
11900- asm("repe; cmpsb; setnz %0"
11901+ asm volatile("repe; cmpsb; setnz %0"
11902 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
11903 return diff;
11904 }
11905diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
11906index c8a6792..2402765 100644
11907--- a/arch/x86/boot/compressed/Makefile
11908+++ b/arch/x86/boot/compressed/Makefile
11909@@ -16,6 +16,9 @@ KBUILD_CFLAGS += $(cflags-y)
11910 KBUILD_CFLAGS += -mno-mmx -mno-sse
11911 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
11912 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
11913+ifdef CONSTIFY_PLUGIN
11914+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
11915+endif
11916
11917 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11918 GCOV_PROFILE := n
11919diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
11920index a53440e..c3dbf1e 100644
11921--- a/arch/x86/boot/compressed/efi_stub_32.S
11922+++ b/arch/x86/boot/compressed/efi_stub_32.S
11923@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
11924 * parameter 2, ..., param n. To make things easy, we save the return
11925 * address of efi_call_phys in a global variable.
11926 */
11927- popl %ecx
11928- movl %ecx, saved_return_addr(%edx)
11929- /* get the function pointer into ECX*/
11930- popl %ecx
11931- movl %ecx, efi_rt_function_ptr(%edx)
11932+ popl saved_return_addr(%edx)
11933+ popl efi_rt_function_ptr(%edx)
11934
11935 /*
11936 * 3. Call the physical function.
11937 */
11938- call *%ecx
11939+ call *efi_rt_function_ptr(%edx)
11940
11941 /*
11942 * 4. Balance the stack. And because EAX contain the return value,
11943@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
11944 1: popl %edx
11945 subl $1b, %edx
11946
11947- movl efi_rt_function_ptr(%edx), %ecx
11948- pushl %ecx
11949+ pushl efi_rt_function_ptr(%edx)
11950
11951 /*
11952 * 10. Push the saved return address onto the stack and return.
11953 */
11954- movl saved_return_addr(%edx), %ecx
11955- pushl %ecx
11956- ret
11957+ jmpl *saved_return_addr(%edx)
11958 ENDPROC(efi_call_phys)
11959 .previous
11960
11961diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
11962index 5d6f689..9d06730 100644
11963--- a/arch/x86/boot/compressed/head_32.S
11964+++ b/arch/x86/boot/compressed/head_32.S
11965@@ -118,7 +118,7 @@ preferred_addr:
11966 notl %eax
11967 andl %eax, %ebx
11968 #else
11969- movl $LOAD_PHYSICAL_ADDR, %ebx
11970+ movl $____LOAD_PHYSICAL_ADDR, %ebx
11971 #endif
11972
11973 /* Target address to relocate to for decompression */
11974diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
11975index c337422..2c5be72 100644
11976--- a/arch/x86/boot/compressed/head_64.S
11977+++ b/arch/x86/boot/compressed/head_64.S
11978@@ -95,7 +95,7 @@ ENTRY(startup_32)
11979 notl %eax
11980 andl %eax, %ebx
11981 #else
11982- movl $LOAD_PHYSICAL_ADDR, %ebx
11983+ movl $____LOAD_PHYSICAL_ADDR, %ebx
11984 #endif
11985
11986 /* Target address to relocate to for decompression */
11987@@ -270,7 +270,7 @@ preferred_addr:
11988 notq %rax
11989 andq %rax, %rbp
11990 #else
11991- movq $LOAD_PHYSICAL_ADDR, %rbp
11992+ movq $____LOAD_PHYSICAL_ADDR, %rbp
11993 #endif
11994
11995 /* Target address to relocate to for decompression */
11996@@ -362,8 +362,8 @@ gdt:
11997 .long gdt
11998 .word 0
11999 .quad 0x0000000000000000 /* NULL descriptor */
12000- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12001- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12002+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12003+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12004 .quad 0x0080890000000000 /* TS descriptor */
12005 .quad 0x0000000000000000 /* TS continued */
12006 gdt_end:
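
Note: the GDT hunk above changes the access bytes 0x9a/0x92 to 0x9b/0x93 for __KERNEL_CS and __KERNEL_DS, i.e. it pre-sets each descriptor's Accessed bit. The CPU otherwise sets that bit itself on first segment load, a hardware write that would fault once KERNEXEC maps the GDT read-only (that rationale is the usual PaX one, inferred here rather than quoted from the patch). A quick check of which bit differs:

#include <stdint.h>
#include <stdio.h>

/* The Accessed bit is bit 0 of the access byte, i.e. bit 40 of the
 * packed 64-bit descriptor. */
int main(void)
{
	uint64_t old_cs = 0x00af9a000000ffffULL;
	uint64_t new_cs = 0x00af9b000000ffffULL;

	printf("differing bits: %#llx\n",
	       (unsigned long long)(old_cs ^ new_cs));	/* 0x10000000000 */
	return 0;
}
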
12007diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
12008index 434f077..b6b4b38 100644
12009--- a/arch/x86/boot/compressed/misc.c
12010+++ b/arch/x86/boot/compressed/misc.c
12011@@ -283,7 +283,7 @@ static void handle_relocations(void *output, unsigned long output_len)
12012 * Calculate the delta between where vmlinux was linked to load
12013 * and where it was actually loaded.
12014 */
12015- delta = min_addr - LOAD_PHYSICAL_ADDR;
12016+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
12017 if (!delta) {
12018 debug_putstr("No relocation needed... ");
12019 return;
12020@@ -380,7 +380,7 @@ static void parse_elf(void *output)
12021 case PT_LOAD:
12022 #ifdef CONFIG_RELOCATABLE
12023 dest = output;
12024- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
12025+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
12026 #else
12027 dest = (void *)(phdr->p_paddr);
12028 #endif
12029@@ -432,7 +432,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
12030 error("Destination address too large");
12031 #endif
12032 #ifndef CONFIG_RELOCATABLE
12033- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
12034+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
12035 error("Wrong destination address");
12036 #endif
12037
12038diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
12039index 4d3ff03..e4972ff 100644
12040--- a/arch/x86/boot/cpucheck.c
12041+++ b/arch/x86/boot/cpucheck.c
12042@@ -74,7 +74,7 @@ static int has_fpu(void)
12043 u16 fcw = -1, fsw = -1;
12044 u32 cr0;
12045
12046- asm("movl %%cr0,%0" : "=r" (cr0));
12047+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
12048 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
12049 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
12050 asm volatile("movl %0,%%cr0" : : "r" (cr0));
12051@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
12052 {
12053 u32 f0, f1;
12054
12055- asm("pushfl ; "
12056+ asm volatile("pushfl ; "
12057 "pushfl ; "
12058 "popl %0 ; "
12059 "movl %0,%1 ; "
12060@@ -115,7 +115,7 @@ static void get_flags(void)
12061 set_bit(X86_FEATURE_FPU, cpu.flags);
12062
12063 if (has_eflag(X86_EFLAGS_ID)) {
12064- asm("cpuid"
12065+ asm volatile("cpuid"
12066 : "=a" (max_intel_level),
12067 "=b" (cpu_vendor[0]),
12068 "=d" (cpu_vendor[1]),
12069@@ -124,7 +124,7 @@ static void get_flags(void)
12070
12071 if (max_intel_level >= 0x00000001 &&
12072 max_intel_level <= 0x0000ffff) {
12073- asm("cpuid"
12074+ asm volatile("cpuid"
12075 : "=a" (tfms),
12076 "=c" (cpu.flags[4]),
12077 "=d" (cpu.flags[0])
12078@@ -136,7 +136,7 @@ static void get_flags(void)
12079 cpu.model += ((tfms >> 16) & 0xf) << 4;
12080 }
12081
12082- asm("cpuid"
12083+ asm volatile("cpuid"
12084 : "=a" (max_amd_level)
12085 : "a" (0x80000000)
12086 : "ebx", "ecx", "edx");
12087@@ -144,7 +144,7 @@ static void get_flags(void)
12088 if (max_amd_level >= 0x80000001 &&
12089 max_amd_level <= 0x8000ffff) {
12090 u32 eax = 0x80000001;
12091- asm("cpuid"
12092+ asm volatile("cpuid"
12093 : "+a" (eax),
12094 "=c" (cpu.flags[6]),
12095 "=d" (cpu.flags[1])
12096@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12097 u32 ecx = MSR_K7_HWCR;
12098 u32 eax, edx;
12099
12100- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12101+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12102 eax &= ~(1 << 15);
12103- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12104+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12105
12106 get_flags(); /* Make sure it really did something */
12107 err = check_flags();
12108@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12109 u32 ecx = MSR_VIA_FCR;
12110 u32 eax, edx;
12111
12112- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12113+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12114 eax |= (1<<1)|(1<<7);
12115- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12116+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12117
12118 set_bit(X86_FEATURE_CX8, cpu.flags);
12119 err = check_flags();
12120@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12121 u32 eax, edx;
12122 u32 level = 1;
12123
12124- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12125- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12126- asm("cpuid"
12127+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12128+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12129+ asm volatile("cpuid"
12130 : "+a" (level), "=d" (cpu.flags[0])
12131 : : "ecx", "ebx");
12132- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12133+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12134
12135 err = check_flags();
12136 }
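
Note: the cpucheck.c hunk converts every plain asm to asm volatile. Per the GCC manual, an asm with output operands is assumed to have no side effects beyond those outputs: the compiler may delete it when the outputs go unused, or fold two identical instances into one. volatile forbids both. A minimal sketch of the pattern (rdmsr needs ring 0, so this compiles but is illustrative rather than a runnable userspace program):

#include <stdint.h>

/* Reading an MSR twice must really read twice; the value can change
 * between reads.  Without volatile, GCC may CSE the two asm statements
 * because their operands are identical. */
static inline uint64_t rdmsr(uint32_t msr)
{
	uint32_t lo, hi;
	asm volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr));
	return ((uint64_t)hi << 32) | lo;
}
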
12137diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
12138index 9ec06a1..2c25e79 100644
12139--- a/arch/x86/boot/header.S
12140+++ b/arch/x86/boot/header.S
12141@@ -409,10 +409,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
12142 # single linked list of
12143 # struct setup_data
12144
12145-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
12146+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
12147
12148 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
12149+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
12150+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
12151+#else
12152 #define VO_INIT_SIZE (VO__end - VO__text)
12153+#endif
12154 #if ZO_INIT_SIZE > VO_INIT_SIZE
12155 #define INIT_SIZE ZO_INIT_SIZE
12156 #else
12157diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
12158index db75d07..8e6d0af 100644
12159--- a/arch/x86/boot/memory.c
12160+++ b/arch/x86/boot/memory.c
12161@@ -19,7 +19,7 @@
12162
12163 static int detect_memory_e820(void)
12164 {
12165- int count = 0;
12166+ unsigned int count = 0;
12167 struct biosregs ireg, oreg;
12168 struct e820entry *desc = boot_params.e820_map;
12169 static struct e820entry buf; /* static so it is zeroed */
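
Note: detect_memory_e820()'s counter becomes unsigned int. The counter only grows and is compared against a fixed table capacity, so unsigned is the natural type, and it keeps signed-overflow undefined behaviour out of reach of compiler assumptions (presumably what PaX's size_overflow instrumentation wants here; that motivation is inferred, not stated in the patch). A sketch with invented helper names:

#define E820MAX 128	/* capacity of boot_params.e820_map */

struct e820entry { unsigned long long addr, size; unsigned int type; };

/* next() is a hypothetical stand-in for the BIOS int 0x15/E820 call. */
static unsigned int fill_e820(struct e820entry *desc,
			      int (*next)(struct e820entry *))
{
	unsigned int count = 0;	/* unsigned: wrap is defined, and the
				   bound check never mixes signedness */
	struct e820entry e;

	while (count < E820MAX && next(&e))
		desc[count++] = e;
	return count;
}
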
12170diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
12171index 11e8c6e..fdbb1ed 100644
12172--- a/arch/x86/boot/video-vesa.c
12173+++ b/arch/x86/boot/video-vesa.c
12174@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
12175
12176 boot_params.screen_info.vesapm_seg = oreg.es;
12177 boot_params.screen_info.vesapm_off = oreg.di;
12178+ boot_params.screen_info.vesapm_size = oreg.cx;
12179 }
12180
12181 /*
12182diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
12183index 43eda28..5ab5fdb 100644
12184--- a/arch/x86/boot/video.c
12185+++ b/arch/x86/boot/video.c
12186@@ -96,7 +96,7 @@ static void store_mode_params(void)
12187 static unsigned int get_entry(void)
12188 {
12189 char entry_buf[4];
12190- int i, len = 0;
12191+ unsigned int i, len = 0;
12192 int key;
12193 unsigned int v;
12194
12195diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
12196index 9105655..41779c1 100644
12197--- a/arch/x86/crypto/aes-x86_64-asm_64.S
12198+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
12199@@ -8,6 +8,8 @@
12200 * including this sentence is retained in full.
12201 */
12202
12203+#include <asm/alternative-asm.h>
12204+
12205 .extern crypto_ft_tab
12206 .extern crypto_it_tab
12207 .extern crypto_fl_tab
12208@@ -70,6 +72,8 @@
12209 je B192; \
12210 leaq 32(r9),r9;
12211
12212+#define ret pax_force_retaddr; ret
12213+
12214 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
12215 movq r1,r2; \
12216 movq r3,r4; \
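
Note: from here on, every ret in the crypto .S files is preceded by pax_force_retaddr (in this file via the #define ret wrapper above, elsewhere written out before each ret). As a rough C model of the idea, and this is an assumption about the mechanism rather than the macro's actual body, KERNEXEC masks the saved return address so it can only point into the kernel half of the address space:

#include <stdint.h>

/* Model only: the real macro (asm/alternative-asm.h) rewrites the
 * on-stack return slot in place just before the ret executes. */
static inline uint64_t force_kernel_retaddr(uint64_t retaddr)
{
	/* amd64 kernel text lives in the upper canonical half, so its
	 * addresses have bit 63 set; OR-ing the bit in means a corrupted
	 * slot can no longer redirect the return into userland. */
	return retaddr | (1ULL << 63);
}
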
12217diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
12218index 477e9d7..c92c7d8 100644
12219--- a/arch/x86/crypto/aesni-intel_asm.S
12220+++ b/arch/x86/crypto/aesni-intel_asm.S
12221@@ -31,6 +31,7 @@
12222
12223 #include <linux/linkage.h>
12224 #include <asm/inst.h>
12225+#include <asm/alternative-asm.h>
12226
12227 #ifdef __x86_64__
12228 .data
12229@@ -205,7 +206,7 @@ enc: .octa 0x2
12230 * num_initial_blocks = b mod 4
12231 * encrypt the initial num_initial_blocks blocks and apply ghash on
12232 * the ciphertext
12233-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
12234+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
12235 * are clobbered
12236 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
12237 */
12238@@ -214,8 +215,8 @@ enc: .octa 0x2
12239 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
12240 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
12241 mov arg7, %r10 # %r10 = AAD
12242- mov arg8, %r12 # %r12 = aadLen
12243- mov %r12, %r11
12244+ mov arg8, %r15 # %r15 = aadLen
12245+ mov %r15, %r11
12246 pxor %xmm\i, %xmm\i
12247 _get_AAD_loop\num_initial_blocks\operation:
12248 movd (%r10), \TMP1
12249@@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operation:
12250 psrldq $4, %xmm\i
12251 pxor \TMP1, %xmm\i
12252 add $4, %r10
12253- sub $4, %r12
12254+ sub $4, %r15
12255 jne _get_AAD_loop\num_initial_blocks\operation
12256 cmp $16, %r11
12257 je _get_AAD_loop2_done\num_initial_blocks\operation
12258- mov $16, %r12
12259+ mov $16, %r15
12260 _get_AAD_loop2\num_initial_blocks\operation:
12261 psrldq $4, %xmm\i
12262- sub $4, %r12
12263- cmp %r11, %r12
12264+ sub $4, %r15
12265+ cmp %r11, %r15
12266 jne _get_AAD_loop2\num_initial_blocks\operation
12267 _get_AAD_loop2_done\num_initial_blocks\operation:
12268 movdqa SHUF_MASK(%rip), %xmm14
12269@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation:
12270 * num_initial_blocks = b mod 4
12271 * encrypt the initial num_initial_blocks blocks and apply ghash on
12272 * the ciphertext
12273-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
12274+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
12275 * are clobbered
12276 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
12277 */
12278@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
12279 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
12280 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
12281 mov arg7, %r10 # %r10 = AAD
12282- mov arg8, %r12 # %r12 = aadLen
12283- mov %r12, %r11
12284+ mov arg8, %r15 # %r15 = aadLen
12285+ mov %r15, %r11
12286 pxor %xmm\i, %xmm\i
12287 _get_AAD_loop\num_initial_blocks\operation:
12288 movd (%r10), \TMP1
12289@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
12290 psrldq $4, %xmm\i
12291 pxor \TMP1, %xmm\i
12292 add $4, %r10
12293- sub $4, %r12
12294+ sub $4, %r15
12295 jne _get_AAD_loop\num_initial_blocks\operation
12296 cmp $16, %r11
12297 je _get_AAD_loop2_done\num_initial_blocks\operation
12298- mov $16, %r12
12299+ mov $16, %r15
12300 _get_AAD_loop2\num_initial_blocks\operation:
12301 psrldq $4, %xmm\i
12302- sub $4, %r12
12303- cmp %r11, %r12
12304+ sub $4, %r15
12305+ cmp %r11, %r15
12306 jne _get_AAD_loop2\num_initial_blocks\operation
12307 _get_AAD_loop2_done\num_initial_blocks\operation:
12308 movdqa SHUF_MASK(%rip), %xmm14
12309@@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
12310 *
12311 *****************************************************************************/
12312 ENTRY(aesni_gcm_dec)
12313- push %r12
12314+ push %r15
12315 push %r13
12316 push %r14
12317 mov %rsp, %r14
12318@@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec)
12319 */
12320 sub $VARIABLE_OFFSET, %rsp
12321 and $~63, %rsp # align rsp to 64 bytes
12322- mov %arg6, %r12
12323- movdqu (%r12), %xmm13 # %xmm13 = HashKey
12324+ mov %arg6, %r15
12325+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
12326 movdqa SHUF_MASK(%rip), %xmm2
12327 PSHUFB_XMM %xmm2, %xmm13
12328
12329@@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec)
12330 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
12331 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
12332 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
12333- mov %r13, %r12
12334- and $(3<<4), %r12
12335+ mov %r13, %r15
12336+ and $(3<<4), %r15
12337 jz _initial_num_blocks_is_0_decrypt
12338- cmp $(2<<4), %r12
12339+ cmp $(2<<4), %r15
12340 jb _initial_num_blocks_is_1_decrypt
12341 je _initial_num_blocks_is_2_decrypt
12342 _initial_num_blocks_is_3_decrypt:
12343@@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt:
12344 sub $16, %r11
12345 add %r13, %r11
12346 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
12347- lea SHIFT_MASK+16(%rip), %r12
12348- sub %r13, %r12
12349+ lea SHIFT_MASK+16(%rip), %r15
12350+ sub %r13, %r15
12351 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
12352 # (%r13 is the number of bytes in plaintext mod 16)
12353- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
12354+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
12355 PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes
12356
12357 movdqa %xmm1, %xmm2
12358 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
12359- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
12360+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
12361 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
12362 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
12363 pand %xmm1, %xmm2
12364@@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt:
12365 sub $1, %r13
12366 jne _less_than_8_bytes_left_decrypt
12367 _multiple_of_16_bytes_decrypt:
12368- mov arg8, %r12 # %r13 = aadLen (number of bytes)
12369- shl $3, %r12 # convert into number of bits
12370- movd %r12d, %xmm15 # len(A) in %xmm15
12371+ mov arg8, %r15 # %r15 = aadLen (number of bytes)
12372+ shl $3, %r15 # convert into number of bits
12373+ movd %r15d, %xmm15 # len(A) in %xmm15
12374 shl $3, %arg4 # len(C) in bits (*128)
12375 MOVQ_R64_XMM %arg4, %xmm1
12376 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
12377@@ -1440,7 +1441,8 @@ _return_T_done_decrypt:
12378 mov %r14, %rsp
12379 pop %r14
12380 pop %r13
12381- pop %r12
12382+ pop %r15
12383+ pax_force_retaddr
12384 ret
12385 ENDPROC(aesni_gcm_dec)
12386
12387@@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec)
12388 * poly = x^128 + x^127 + x^126 + x^121 + 1
12389 ***************************************************************************/
12390 ENTRY(aesni_gcm_enc)
12391- push %r12
12392+ push %r15
12393 push %r13
12394 push %r14
12395 mov %rsp, %r14
12396@@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc)
12397 #
12398 sub $VARIABLE_OFFSET, %rsp
12399 and $~63, %rsp
12400- mov %arg6, %r12
12401- movdqu (%r12), %xmm13
12402+ mov %arg6, %r15
12403+ movdqu (%r15), %xmm13
12404 movdqa SHUF_MASK(%rip), %xmm2
12405 PSHUFB_XMM %xmm2, %xmm13
12406
12407@@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc)
12408 movdqa %xmm13, HashKey(%rsp)
12409 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
12410 and $-16, %r13
12411- mov %r13, %r12
12412+ mov %r13, %r15
12413
12414 # Encrypt first few blocks
12415
12416- and $(3<<4), %r12
12417+ and $(3<<4), %r15
12418 jz _initial_num_blocks_is_0_encrypt
12419- cmp $(2<<4), %r12
12420+ cmp $(2<<4), %r15
12421 jb _initial_num_blocks_is_1_encrypt
12422 je _initial_num_blocks_is_2_encrypt
12423 _initial_num_blocks_is_3_encrypt:
12424@@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt:
12425 sub $16, %r11
12426 add %r13, %r11
12427 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
12428- lea SHIFT_MASK+16(%rip), %r12
12429- sub %r13, %r12
12430+ lea SHIFT_MASK+16(%rip), %r15
12431+ sub %r13, %r15
12432 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
12433 # (%r13 is the number of bytes in plaintext mod 16)
12434- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
12435+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
12436 PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
12437 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
12438- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
12439+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
12440 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
12441 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
12442 movdqa SHUF_MASK(%rip), %xmm10
12443@@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt:
12444 sub $1, %r13
12445 jne _less_than_8_bytes_left_encrypt
12446 _multiple_of_16_bytes_encrypt:
12447- mov arg8, %r12 # %r12 = addLen (number of bytes)
12448- shl $3, %r12
12449- movd %r12d, %xmm15 # len(A) in %xmm15
12450+ mov arg8, %r15 # %r15 = aadLen (number of bytes)
12451+ shl $3, %r15
12452+ movd %r15d, %xmm15 # len(A) in %xmm15
12453 shl $3, %arg4 # len(C) in bits (*128)
12454 MOVQ_R64_XMM %arg4, %xmm1
12455 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
12456@@ -1704,7 +1706,8 @@ _return_T_done_encrypt:
12457 mov %r14, %rsp
12458 pop %r14
12459 pop %r13
12460- pop %r12
12461+ pop %r15
12462+ pax_force_retaddr
12463 ret
12464 ENDPROC(aesni_gcm_enc)
12465
12466@@ -1722,6 +1725,7 @@ _key_expansion_256a:
12467 pxor %xmm1, %xmm0
12468 movaps %xmm0, (TKEYP)
12469 add $0x10, TKEYP
12470+ pax_force_retaddr
12471 ret
12472 ENDPROC(_key_expansion_128)
12473 ENDPROC(_key_expansion_256a)
12474@@ -1748,6 +1752,7 @@ _key_expansion_192a:
12475 shufps $0b01001110, %xmm2, %xmm1
12476 movaps %xmm1, 0x10(TKEYP)
12477 add $0x20, TKEYP
12478+ pax_force_retaddr
12479 ret
12480 ENDPROC(_key_expansion_192a)
12481
12482@@ -1768,6 +1773,7 @@ _key_expansion_192b:
12483
12484 movaps %xmm0, (TKEYP)
12485 add $0x10, TKEYP
12486+ pax_force_retaddr
12487 ret
12488 ENDPROC(_key_expansion_192b)
12489
12490@@ -1781,6 +1787,7 @@ _key_expansion_256b:
12491 pxor %xmm1, %xmm2
12492 movaps %xmm2, (TKEYP)
12493 add $0x10, TKEYP
12494+ pax_force_retaddr
12495 ret
12496 ENDPROC(_key_expansion_256b)
12497
12498@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
12499 #ifndef __x86_64__
12500 popl KEYP
12501 #endif
12502+ pax_force_retaddr
12503 ret
12504 ENDPROC(aesni_set_key)
12505
12506@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
12507 popl KLEN
12508 popl KEYP
12509 #endif
12510+ pax_force_retaddr
12511 ret
12512 ENDPROC(aesni_enc)
12513
12514@@ -1974,6 +1983,7 @@ _aesni_enc1:
12515 AESENC KEY STATE
12516 movaps 0x70(TKEYP), KEY
12517 AESENCLAST KEY STATE
12518+ pax_force_retaddr
12519 ret
12520 ENDPROC(_aesni_enc1)
12521
12522@@ -2083,6 +2093,7 @@ _aesni_enc4:
12523 AESENCLAST KEY STATE2
12524 AESENCLAST KEY STATE3
12525 AESENCLAST KEY STATE4
12526+ pax_force_retaddr
12527 ret
12528 ENDPROC(_aesni_enc4)
12529
12530@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
12531 popl KLEN
12532 popl KEYP
12533 #endif
12534+ pax_force_retaddr
12535 ret
12536 ENDPROC(aesni_dec)
12537
12538@@ -2164,6 +2176,7 @@ _aesni_dec1:
12539 AESDEC KEY STATE
12540 movaps 0x70(TKEYP), KEY
12541 AESDECLAST KEY STATE
12542+ pax_force_retaddr
12543 ret
12544 ENDPROC(_aesni_dec1)
12545
12546@@ -2273,6 +2286,7 @@ _aesni_dec4:
12547 AESDECLAST KEY STATE2
12548 AESDECLAST KEY STATE3
12549 AESDECLAST KEY STATE4
12550+ pax_force_retaddr
12551 ret
12552 ENDPROC(_aesni_dec4)
12553
12554@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
12555 popl KEYP
12556 popl LEN
12557 #endif
12558+ pax_force_retaddr
12559 ret
12560 ENDPROC(aesni_ecb_enc)
12561
12562@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
12563 popl KEYP
12564 popl LEN
12565 #endif
12566+ pax_force_retaddr
12567 ret
12568 ENDPROC(aesni_ecb_dec)
12569
12570@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
12571 popl LEN
12572 popl IVP
12573 #endif
12574+ pax_force_retaddr
12575 ret
12576 ENDPROC(aesni_cbc_enc)
12577
12578@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
12579 popl LEN
12580 popl IVP
12581 #endif
12582+ pax_force_retaddr
12583 ret
12584 ENDPROC(aesni_cbc_dec)
12585
12586@@ -2550,6 +2568,7 @@ _aesni_inc_init:
12587 mov $1, TCTR_LOW
12588 MOVQ_R64_XMM TCTR_LOW INC
12589 MOVQ_R64_XMM CTR TCTR_LOW
12590+ pax_force_retaddr
12591 ret
12592 ENDPROC(_aesni_inc_init)
12593
12594@@ -2579,6 +2598,7 @@ _aesni_inc:
12595 .Linc_low:
12596 movaps CTR, IV
12597 PSHUFB_XMM BSWAP_MASK IV
12598+ pax_force_retaddr
12599 ret
12600 ENDPROC(_aesni_inc)
12601
12602@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
12603 .Lctr_enc_ret:
12604 movups IV, (IVP)
12605 .Lctr_enc_just_ret:
12606+ pax_force_retaddr
12607 ret
12608 ENDPROC(aesni_ctr_enc)
12609
12610@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
12611 pxor INC, STATE4
12612 movdqu STATE4, 0x70(OUTP)
12613
12614+ pax_force_retaddr
12615 ret
12616 ENDPROC(aesni_xts_crypt8)
12617
12618diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
12619index 246c670..466e2d6 100644
12620--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
12621+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
12622@@ -21,6 +21,7 @@
12623 */
12624
12625 #include <linux/linkage.h>
12626+#include <asm/alternative-asm.h>
12627
12628 .file "blowfish-x86_64-asm.S"
12629 .text
12630@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
12631 jnz .L__enc_xor;
12632
12633 write_block();
12634+ pax_force_retaddr
12635 ret;
12636 .L__enc_xor:
12637 xor_block();
12638+ pax_force_retaddr
12639 ret;
12640 ENDPROC(__blowfish_enc_blk)
12641
12642@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
12643
12644 movq %r11, %rbp;
12645
12646+ pax_force_retaddr
12647 ret;
12648 ENDPROC(blowfish_dec_blk)
12649
12650@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
12651
12652 popq %rbx;
12653 popq %rbp;
12654+ pax_force_retaddr
12655 ret;
12656
12657 .L__enc_xor4:
12658@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
12659
12660 popq %rbx;
12661 popq %rbp;
12662+ pax_force_retaddr
12663 ret;
12664 ENDPROC(__blowfish_enc_blk_4way)
12665
12666@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
12667 popq %rbx;
12668 popq %rbp;
12669
12670+ pax_force_retaddr
12671 ret;
12672 ENDPROC(blowfish_dec_blk_4way)
12673diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
12674index ce71f92..1dce7ec 100644
12675--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
12676+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
12677@@ -16,6 +16,7 @@
12678 */
12679
12680 #include <linux/linkage.h>
12681+#include <asm/alternative-asm.h>
12682
12683 #define CAMELLIA_TABLE_BYTE_LEN 272
12684
12685@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
12686 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
12687 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
12688 %rcx, (%r9));
12689+ pax_force_retaddr
12690 ret;
12691 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
12692
12693@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
12694 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
12695 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
12696 %rax, (%r9));
12697+ pax_force_retaddr
12698 ret;
12699 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
12700
12701@@ -780,6 +783,7 @@ __camellia_enc_blk16:
12702 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
12703 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
12704
12705+ pax_force_retaddr
12706 ret;
12707
12708 .align 8
12709@@ -865,6 +869,7 @@ __camellia_dec_blk16:
12710 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
12711 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
12712
12713+ pax_force_retaddr
12714 ret;
12715
12716 .align 8
12717@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
12718 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12719 %xmm8, %rsi);
12720
12721+ pax_force_retaddr
12722 ret;
12723 ENDPROC(camellia_ecb_enc_16way)
12724
12725@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
12726 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12727 %xmm8, %rsi);
12728
12729+ pax_force_retaddr
12730 ret;
12731 ENDPROC(camellia_ecb_dec_16way)
12732
12733@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
12734 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12735 %xmm8, %rsi);
12736
12737+ pax_force_retaddr
12738 ret;
12739 ENDPROC(camellia_cbc_dec_16way)
12740
12741@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
12742 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12743 %xmm8, %rsi);
12744
12745+ pax_force_retaddr
12746 ret;
12747 ENDPROC(camellia_ctr_16way)
12748
12749@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
12750 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12751 %xmm8, %rsi);
12752
12753+ pax_force_retaddr
12754 ret;
12755 ENDPROC(camellia_xts_crypt_16way)
12756
12757diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
12758index 0e0b886..5a3123c 100644
12759--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
12760+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
12761@@ -11,6 +11,7 @@
12762 */
12763
12764 #include <linux/linkage.h>
12765+#include <asm/alternative-asm.h>
12766
12767 #define CAMELLIA_TABLE_BYTE_LEN 272
12768
12769@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
12770 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
12771 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
12772 %rcx, (%r9));
12773+ pax_force_retaddr
12774 ret;
12775 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
12776
12777@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
12778 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
12779 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
12780 %rax, (%r9));
12781+ pax_force_retaddr
12782 ret;
12783 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
12784
12785@@ -820,6 +823,7 @@ __camellia_enc_blk32:
12786 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
12787 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
12788
12789+ pax_force_retaddr
12790 ret;
12791
12792 .align 8
12793@@ -905,6 +909,7 @@ __camellia_dec_blk32:
12794 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
12795 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
12796
12797+ pax_force_retaddr
12798 ret;
12799
12800 .align 8
12801@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
12802
12803 vzeroupper;
12804
12805+ pax_force_retaddr
12806 ret;
12807 ENDPROC(camellia_ecb_enc_32way)
12808
12809@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
12810
12811 vzeroupper;
12812
12813+ pax_force_retaddr
12814 ret;
12815 ENDPROC(camellia_ecb_dec_32way)
12816
12817@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
12818
12819 vzeroupper;
12820
12821+ pax_force_retaddr
12822 ret;
12823 ENDPROC(camellia_cbc_dec_32way)
12824
12825@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
12826
12827 vzeroupper;
12828
12829+ pax_force_retaddr
12830 ret;
12831 ENDPROC(camellia_ctr_32way)
12832
12833@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
12834
12835 vzeroupper;
12836
12837+ pax_force_retaddr
12838 ret;
12839 ENDPROC(camellia_xts_crypt_32way)
12840
12841diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
12842index 310319c..db3d7b5 100644
12843--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
12844+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
12845@@ -21,6 +21,7 @@
12846 */
12847
12848 #include <linux/linkage.h>
12849+#include <asm/alternative-asm.h>
12850
12851 .file "camellia-x86_64-asm_64.S"
12852 .text
12853@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
12854 enc_outunpack(mov, RT1);
12855
12856 movq RRBP, %rbp;
12857+ pax_force_retaddr
12858 ret;
12859
12860 .L__enc_xor:
12861 enc_outunpack(xor, RT1);
12862
12863 movq RRBP, %rbp;
12864+ pax_force_retaddr
12865 ret;
12866 ENDPROC(__camellia_enc_blk)
12867
12868@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
12869 dec_outunpack();
12870
12871 movq RRBP, %rbp;
12872+ pax_force_retaddr
12873 ret;
12874 ENDPROC(camellia_dec_blk)
12875
12876@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
12877
12878 movq RRBP, %rbp;
12879 popq %rbx;
12880+ pax_force_retaddr
12881 ret;
12882
12883 .L__enc2_xor:
12884@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
12885
12886 movq RRBP, %rbp;
12887 popq %rbx;
12888+ pax_force_retaddr
12889 ret;
12890 ENDPROC(__camellia_enc_blk_2way)
12891
12892@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
12893
12894 movq RRBP, %rbp;
12895 movq RXOR, %rbx;
12896+ pax_force_retaddr
12897 ret;
12898 ENDPROC(camellia_dec_blk_2way)
12899diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
12900index c35fd5d..2d8c7db 100644
12901--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
12902+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
12903@@ -24,6 +24,7 @@
12904 */
12905
12906 #include <linux/linkage.h>
12907+#include <asm/alternative-asm.h>
12908
12909 .file "cast5-avx-x86_64-asm_64.S"
12910
12911@@ -281,6 +282,7 @@ __cast5_enc_blk16:
12912 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
12913 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
12914
12915+ pax_force_retaddr
12916 ret;
12917 ENDPROC(__cast5_enc_blk16)
12918
12919@@ -352,6 +354,7 @@ __cast5_dec_blk16:
12920 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
12921 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
12922
12923+ pax_force_retaddr
12924 ret;
12925
12926 .L__skip_dec:
12927@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
12928 vmovdqu RR4, (6*4*4)(%r11);
12929 vmovdqu RL4, (7*4*4)(%r11);
12930
12931+ pax_force_retaddr
12932 ret;
12933 ENDPROC(cast5_ecb_enc_16way)
12934
12935@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
12936 vmovdqu RR4, (6*4*4)(%r11);
12937 vmovdqu RL4, (7*4*4)(%r11);
12938
12939+ pax_force_retaddr
12940 ret;
12941 ENDPROC(cast5_ecb_dec_16way)
12942
12943@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
12944 * %rdx: src
12945 */
12946
12947- pushq %r12;
12948+ pushq %r14;
12949
12950 movq %rsi, %r11;
12951- movq %rdx, %r12;
12952+ movq %rdx, %r14;
12953
12954 vmovdqu (0*16)(%rdx), RL1;
12955 vmovdqu (1*16)(%rdx), RR1;
12956@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
12957 call __cast5_dec_blk16;
12958
12959 /* xor with src */
12960- vmovq (%r12), RX;
12961+ vmovq (%r14), RX;
12962 vpshufd $0x4f, RX, RX;
12963 vpxor RX, RR1, RR1;
12964- vpxor 0*16+8(%r12), RL1, RL1;
12965- vpxor 1*16+8(%r12), RR2, RR2;
12966- vpxor 2*16+8(%r12), RL2, RL2;
12967- vpxor 3*16+8(%r12), RR3, RR3;
12968- vpxor 4*16+8(%r12), RL3, RL3;
12969- vpxor 5*16+8(%r12), RR4, RR4;
12970- vpxor 6*16+8(%r12), RL4, RL4;
12971+ vpxor 0*16+8(%r14), RL1, RL1;
12972+ vpxor 1*16+8(%r14), RR2, RR2;
12973+ vpxor 2*16+8(%r14), RL2, RL2;
12974+ vpxor 3*16+8(%r14), RR3, RR3;
12975+ vpxor 4*16+8(%r14), RL3, RL3;
12976+ vpxor 5*16+8(%r14), RR4, RR4;
12977+ vpxor 6*16+8(%r14), RL4, RL4;
12978
12979 vmovdqu RR1, (0*16)(%r11);
12980 vmovdqu RL1, (1*16)(%r11);
12981@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
12982 vmovdqu RR4, (6*16)(%r11);
12983 vmovdqu RL4, (7*16)(%r11);
12984
12985- popq %r12;
12986+ popq %r14;
12987
12988+ pax_force_retaddr
12989 ret;
12990 ENDPROC(cast5_cbc_dec_16way)
12991
12992@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
12993 * %rcx: iv (big endian, 64bit)
12994 */
12995
12996- pushq %r12;
12997+ pushq %r14;
12998
12999 movq %rsi, %r11;
13000- movq %rdx, %r12;
13001+ movq %rdx, %r14;
13002
13003 vpcmpeqd RTMP, RTMP, RTMP;
13004 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
13005@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
13006 call __cast5_enc_blk16;
13007
13008 /* dst = src ^ iv */
13009- vpxor (0*16)(%r12), RR1, RR1;
13010- vpxor (1*16)(%r12), RL1, RL1;
13011- vpxor (2*16)(%r12), RR2, RR2;
13012- vpxor (3*16)(%r12), RL2, RL2;
13013- vpxor (4*16)(%r12), RR3, RR3;
13014- vpxor (5*16)(%r12), RL3, RL3;
13015- vpxor (6*16)(%r12), RR4, RR4;
13016- vpxor (7*16)(%r12), RL4, RL4;
13017+ vpxor (0*16)(%r14), RR1, RR1;
13018+ vpxor (1*16)(%r14), RL1, RL1;
13019+ vpxor (2*16)(%r14), RR2, RR2;
13020+ vpxor (3*16)(%r14), RL2, RL2;
13021+ vpxor (4*16)(%r14), RR3, RR3;
13022+ vpxor (5*16)(%r14), RL3, RL3;
13023+ vpxor (6*16)(%r14), RR4, RR4;
13024+ vpxor (7*16)(%r14), RL4, RL4;
13025 vmovdqu RR1, (0*16)(%r11);
13026 vmovdqu RL1, (1*16)(%r11);
13027 vmovdqu RR2, (2*16)(%r11);
13028@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
13029 vmovdqu RR4, (6*16)(%r11);
13030 vmovdqu RL4, (7*16)(%r11);
13031
13032- popq %r12;
13033+ popq %r14;
13034
13035+ pax_force_retaddr
13036 ret;
13037 ENDPROC(cast5_ctr_16way)
13038diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13039index e3531f8..e123f35 100644
13040--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13041+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13042@@ -24,6 +24,7 @@
13043 */
13044
13045 #include <linux/linkage.h>
13046+#include <asm/alternative-asm.h>
13047 #include "glue_helper-asm-avx.S"
13048
13049 .file "cast6-avx-x86_64-asm_64.S"
13050@@ -295,6 +296,7 @@ __cast6_enc_blk8:
13051 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13052 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13053
13054+ pax_force_retaddr
13055 ret;
13056 ENDPROC(__cast6_enc_blk8)
13057
13058@@ -340,6 +342,7 @@ __cast6_dec_blk8:
13059 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13060 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13061
13062+ pax_force_retaddr
13063 ret;
13064 ENDPROC(__cast6_dec_blk8)
13065
13066@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
13067
13068 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13069
13070+ pax_force_retaddr
13071 ret;
13072 ENDPROC(cast6_ecb_enc_8way)
13073
13074@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
13075
13076 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13077
13078+ pax_force_retaddr
13079 ret;
13080 ENDPROC(cast6_ecb_dec_8way)
13081
13082@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
13083 * %rdx: src
13084 */
13085
13086- pushq %r12;
13087+ pushq %r14;
13088
13089 movq %rsi, %r11;
13090- movq %rdx, %r12;
13091+ movq %rdx, %r14;
13092
13093 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13094
13095 call __cast6_dec_blk8;
13096
13097- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13098+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13099
13100- popq %r12;
13101+ popq %r14;
13102
13103+ pax_force_retaddr
13104 ret;
13105 ENDPROC(cast6_cbc_dec_8way)
13106
13107@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
13108 * %rcx: iv (little endian, 128bit)
13109 */
13110
13111- pushq %r12;
13112+ pushq %r14;
13113
13114 movq %rsi, %r11;
13115- movq %rdx, %r12;
13116+ movq %rdx, %r14;
13117
13118 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
13119 RD2, RX, RKR, RKM);
13120
13121 call __cast6_enc_blk8;
13122
13123- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13124+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13125
13126- popq %r12;
13127+ popq %r14;
13128
13129+ pax_force_retaddr
13130 ret;
13131 ENDPROC(cast6_ctr_8way)
13132
13133@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
13134 /* dst <= regs xor IVs(in dst) */
13135 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13136
13137+ pax_force_retaddr
13138 ret;
13139 ENDPROC(cast6_xts_enc_8way)
13140
13141@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
13142 /* dst <= regs xor IVs(in dst) */
13143 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13144
13145+ pax_force_retaddr
13146 ret;
13147 ENDPROC(cast6_xts_dec_8way)
13148diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13149index dbc4339..de6e120 100644
13150--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13151+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13152@@ -45,6 +45,7 @@
13153
13154 #include <asm/inst.h>
13155 #include <linux/linkage.h>
13156+#include <asm/alternative-asm.h>
13157
13158 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
13159
13160@@ -312,6 +313,7 @@ do_return:
13161 popq %rsi
13162 popq %rdi
13163 popq %rbx
13164+ pax_force_retaddr
13165 ret
13166
13167 ################################################################
13168diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
13169index 586f41a..d02851e 100644
13170--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
13171+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
13172@@ -18,6 +18,7 @@
13173
13174 #include <linux/linkage.h>
13175 #include <asm/inst.h>
13176+#include <asm/alternative-asm.h>
13177
13178 .data
13179
13180@@ -93,6 +94,7 @@ __clmul_gf128mul_ble:
13181 psrlq $1, T2
13182 pxor T2, T1
13183 pxor T1, DATA
13184+ pax_force_retaddr
13185 ret
13186 ENDPROC(__clmul_gf128mul_ble)
13187
13188@@ -105,6 +107,7 @@ ENTRY(clmul_ghash_mul)
13189 call __clmul_gf128mul_ble
13190 PSHUFB_XMM BSWAP DATA
13191 movups DATA, (%rdi)
13192+ pax_force_retaddr
13193 ret
13194 ENDPROC(clmul_ghash_mul)
13195
13196@@ -132,6 +135,7 @@ ENTRY(clmul_ghash_update)
13197 PSHUFB_XMM BSWAP DATA
13198 movups DATA, (%rdi)
13199 .Lupdate_just_ret:
13200+ pax_force_retaddr
13201 ret
13202 ENDPROC(clmul_ghash_update)
13203
13204@@ -157,5 +161,6 @@ ENTRY(clmul_ghash_setkey)
13205 pand .Lpoly, %xmm1
13206 pxor %xmm1, %xmm0
13207 movups %xmm0, (%rdi)
13208+ pax_force_retaddr
13209 ret
13210 ENDPROC(clmul_ghash_setkey)
13211diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
13212index 9279e0b..c4b3d2c 100644
13213--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
13214+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
13215@@ -1,4 +1,5 @@
13216 #include <linux/linkage.h>
13217+#include <asm/alternative-asm.h>
13218
13219 # enter salsa20_encrypt_bytes
13220 ENTRY(salsa20_encrypt_bytes)
13221@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
13222 add %r11,%rsp
13223 mov %rdi,%rax
13224 mov %rsi,%rdx
13225+ pax_force_retaddr
13226 ret
13227 # bytesatleast65:
13228 ._bytesatleast65:
13229@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
13230 add %r11,%rsp
13231 mov %rdi,%rax
13232 mov %rsi,%rdx
13233+ pax_force_retaddr
13234 ret
13235 ENDPROC(salsa20_keysetup)
13236
13237@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
13238 add %r11,%rsp
13239 mov %rdi,%rax
13240 mov %rsi,%rdx
13241+ pax_force_retaddr
13242 ret
13243 ENDPROC(salsa20_ivsetup)
13244diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
13245index 2f202f4..d9164d6 100644
13246--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
13247+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
13248@@ -24,6 +24,7 @@
13249 */
13250
13251 #include <linux/linkage.h>
13252+#include <asm/alternative-asm.h>
13253 #include "glue_helper-asm-avx.S"
13254
13255 .file "serpent-avx-x86_64-asm_64.S"
13256@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
13257 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
13258 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
13259
13260+ pax_force_retaddr
13261 ret;
13262 ENDPROC(__serpent_enc_blk8_avx)
13263
13264@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
13265 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
13266 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
13267
13268+ pax_force_retaddr
13269 ret;
13270 ENDPROC(__serpent_dec_blk8_avx)
13271
13272@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
13273
13274 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13275
13276+ pax_force_retaddr
13277 ret;
13278 ENDPROC(serpent_ecb_enc_8way_avx)
13279
13280@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
13281
13282 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
13283
13284+ pax_force_retaddr
13285 ret;
13286 ENDPROC(serpent_ecb_dec_8way_avx)
13287
13288@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
13289
13290 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
13291
13292+ pax_force_retaddr
13293 ret;
13294 ENDPROC(serpent_cbc_dec_8way_avx)
13295
13296@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
13297
13298 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13299
13300+ pax_force_retaddr
13301 ret;
13302 ENDPROC(serpent_ctr_8way_avx)
13303
13304@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
13305 /* dst <= regs xor IVs(in dst) */
13306 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13307
13308+ pax_force_retaddr
13309 ret;
13310 ENDPROC(serpent_xts_enc_8way_avx)
13311
13312@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
13313 /* dst <= regs xor IVs(in dst) */
13314 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
13315
13316+ pax_force_retaddr
13317 ret;
13318 ENDPROC(serpent_xts_dec_8way_avx)
13319diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
13320index b222085..abd483c 100644
13321--- a/arch/x86/crypto/serpent-avx2-asm_64.S
13322+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
13323@@ -15,6 +15,7 @@
13324 */
13325
13326 #include <linux/linkage.h>
13327+#include <asm/alternative-asm.h>
13328 #include "glue_helper-asm-avx2.S"
13329
13330 .file "serpent-avx2-asm_64.S"
13331@@ -610,6 +611,7 @@ __serpent_enc_blk16:
13332 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
13333 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
13334
13335+ pax_force_retaddr
13336 ret;
13337 ENDPROC(__serpent_enc_blk16)
13338
13339@@ -664,6 +666,7 @@ __serpent_dec_blk16:
13340 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
13341 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
13342
13343+ pax_force_retaddr
13344 ret;
13345 ENDPROC(__serpent_dec_blk16)
13346
13347@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
13348
13349 vzeroupper;
13350
13351+ pax_force_retaddr
13352 ret;
13353 ENDPROC(serpent_ecb_enc_16way)
13354
13355@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
13356
13357 vzeroupper;
13358
13359+ pax_force_retaddr
13360 ret;
13361 ENDPROC(serpent_ecb_dec_16way)
13362
13363@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
13364
13365 vzeroupper;
13366
13367+ pax_force_retaddr
13368 ret;
13369 ENDPROC(serpent_cbc_dec_16way)
13370
13371@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
13372
13373 vzeroupper;
13374
13375+ pax_force_retaddr
13376 ret;
13377 ENDPROC(serpent_ctr_16way)
13378
13379@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
13380
13381 vzeroupper;
13382
13383+ pax_force_retaddr
13384 ret;
13385 ENDPROC(serpent_xts_enc_16way)
13386
13387@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
13388
13389 vzeroupper;
13390
13391+ pax_force_retaddr
13392 ret;
13393 ENDPROC(serpent_xts_dec_16way)
13394diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
13395index acc066c..1559cc4 100644
13396--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
13397+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
13398@@ -25,6 +25,7 @@
13399 */
13400
13401 #include <linux/linkage.h>
13402+#include <asm/alternative-asm.h>
13403
13404 .file "serpent-sse2-x86_64-asm_64.S"
13405 .text
13406@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
13407 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
13408 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
13409
13410+ pax_force_retaddr
13411 ret;
13412
13413 .L__enc_xor8:
13414 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
13415 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
13416
13417+ pax_force_retaddr
13418 ret;
13419 ENDPROC(__serpent_enc_blk_8way)
13420
13421@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
13422 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
13423 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
13424
13425+ pax_force_retaddr
13426 ret;
13427 ENDPROC(serpent_dec_blk_8way)
13428diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
13429index a410950..9dfe7ad 100644
13430--- a/arch/x86/crypto/sha1_ssse3_asm.S
13431+++ b/arch/x86/crypto/sha1_ssse3_asm.S
13432@@ -29,6 +29,7 @@
13433 */
13434
13435 #include <linux/linkage.h>
13436+#include <asm/alternative-asm.h>
13437
13438 #define CTX %rdi // arg1
13439 #define BUF %rsi // arg2
13440@@ -75,9 +76,9 @@
13441
13442 push %rbx
13443 push %rbp
13444- push %r12
13445+ push %r14
13446
13447- mov %rsp, %r12
13448+ mov %rsp, %r14
13449 sub $64, %rsp # allocate workspace
13450 and $~15, %rsp # align stack
13451
13452@@ -99,11 +100,12 @@
13453 xor %rax, %rax
13454 rep stosq
13455
13456- mov %r12, %rsp # deallocate workspace
13457+ mov %r14, %rsp # deallocate workspace
13458
13459- pop %r12
13460+ pop %r14
13461 pop %rbp
13462 pop %rbx
13463+ pax_force_retaddr
13464 ret
13465
13466 ENDPROC(\name)
13467diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
13468index 642f156..51a513c 100644
13469--- a/arch/x86/crypto/sha256-avx-asm.S
13470+++ b/arch/x86/crypto/sha256-avx-asm.S
13471@@ -49,6 +49,7 @@
13472
13473 #ifdef CONFIG_AS_AVX
13474 #include <linux/linkage.h>
13475+#include <asm/alternative-asm.h>
13476
13477 ## assume buffers not aligned
13478 #define VMOVDQ vmovdqu
13479@@ -460,6 +461,7 @@ done_hash:
13480 popq %r13
13481 popq %rbp
13482 popq %rbx
13483+ pax_force_retaddr
13484 ret
13485 ENDPROC(sha256_transform_avx)
13486
13487diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
13488index 9e86944..3795e6a 100644
13489--- a/arch/x86/crypto/sha256-avx2-asm.S
13490+++ b/arch/x86/crypto/sha256-avx2-asm.S
13491@@ -50,6 +50,7 @@
13492
13493 #ifdef CONFIG_AS_AVX2
13494 #include <linux/linkage.h>
13495+#include <asm/alternative-asm.h>
13496
13497 ## assume buffers not aligned
13498 #define VMOVDQ vmovdqu
13499@@ -720,6 +721,7 @@ done_hash:
13500 popq %r12
13501 popq %rbp
13502 popq %rbx
13503+ pax_force_retaddr
13504 ret
13505 ENDPROC(sha256_transform_rorx)
13506
13507diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
13508index f833b74..8c62a9e 100644
13509--- a/arch/x86/crypto/sha256-ssse3-asm.S
13510+++ b/arch/x86/crypto/sha256-ssse3-asm.S
13511@@ -47,6 +47,7 @@
13512 ########################################################################
13513
13514 #include <linux/linkage.h>
13515+#include <asm/alternative-asm.h>
13516
13517 ## assume buffers not aligned
13518 #define MOVDQ movdqu
13519@@ -471,6 +472,7 @@ done_hash:
13520 popq %rbp
13521 popq %rbx
13522
13523+ pax_force_retaddr
13524 ret
13525 ENDPROC(sha256_transform_ssse3)
13526
13527diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
13528index 974dde9..a823ff9 100644
13529--- a/arch/x86/crypto/sha512-avx-asm.S
13530+++ b/arch/x86/crypto/sha512-avx-asm.S
13531@@ -49,6 +49,7 @@
13532
13533 #ifdef CONFIG_AS_AVX
13534 #include <linux/linkage.h>
13535+#include <asm/alternative-asm.h>
13536
13537 .text
13538
13539@@ -364,6 +365,7 @@ updateblock:
13540 mov frame_RSPSAVE(%rsp), %rsp
13541
13542 nowork:
13543+ pax_force_retaddr
13544 ret
13545 ENDPROC(sha512_transform_avx)
13546
13547diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
13548index 568b961..ed20c37 100644
13549--- a/arch/x86/crypto/sha512-avx2-asm.S
13550+++ b/arch/x86/crypto/sha512-avx2-asm.S
13551@@ -51,6 +51,7 @@
13552
13553 #ifdef CONFIG_AS_AVX2
13554 #include <linux/linkage.h>
13555+#include <asm/alternative-asm.h>
13556
13557 .text
13558
13559@@ -678,6 +679,7 @@ done_hash:
13560
13561 # Restore Stack Pointer
13562 mov frame_RSPSAVE(%rsp), %rsp
13563+ pax_force_retaddr
13564 ret
13565 ENDPROC(sha512_transform_rorx)
13566
13567diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
13568index fb56855..6edd768 100644
13569--- a/arch/x86/crypto/sha512-ssse3-asm.S
13570+++ b/arch/x86/crypto/sha512-ssse3-asm.S
13571@@ -48,6 +48,7 @@
13572 ########################################################################
13573
13574 #include <linux/linkage.h>
13575+#include <asm/alternative-asm.h>
13576
13577 .text
13578
13579@@ -363,6 +364,7 @@ updateblock:
13580 mov frame_RSPSAVE(%rsp), %rsp
13581
13582 nowork:
13583+ pax_force_retaddr
13584 ret
13585 ENDPROC(sha512_transform_ssse3)
13586
13587diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
13588index 0505813..b067311 100644
13589--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
13590+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
13591@@ -24,6 +24,7 @@
13592 */
13593
13594 #include <linux/linkage.h>
13595+#include <asm/alternative-asm.h>
13596 #include "glue_helper-asm-avx.S"
13597
13598 .file "twofish-avx-x86_64-asm_64.S"
13599@@ -284,6 +285,7 @@ __twofish_enc_blk8:
13600 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
13601 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
13602
13603+ pax_force_retaddr
13604 ret;
13605 ENDPROC(__twofish_enc_blk8)
13606
13607@@ -324,6 +326,7 @@ __twofish_dec_blk8:
13608 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
13609 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
13610
13611+ pax_force_retaddr
13612 ret;
13613 ENDPROC(__twofish_dec_blk8)
13614
13615@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
13616
13617 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
13618
13619+ pax_force_retaddr
13620 ret;
13621 ENDPROC(twofish_ecb_enc_8way)
13622
13623@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
13624
13625 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13626
13627+ pax_force_retaddr
13628 ret;
13629 ENDPROC(twofish_ecb_dec_8way)
13630
13631@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
13632 * %rdx: src
13633 */
13634
13635- pushq %r12;
13636+ pushq %r14;
13637
13638 movq %rsi, %r11;
13639- movq %rdx, %r12;
13640+ movq %rdx, %r14;
13641
13642 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
13643
13644 call __twofish_dec_blk8;
13645
13646- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13647+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13648
13649- popq %r12;
13650+ popq %r14;
13651
13652+ pax_force_retaddr
13653 ret;
13654 ENDPROC(twofish_cbc_dec_8way)
13655
13656@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
13657 * %rcx: iv (little endian, 128bit)
13658 */
13659
13660- pushq %r12;
13661+ pushq %r14;
13662
13663 movq %rsi, %r11;
13664- movq %rdx, %r12;
13665+ movq %rdx, %r14;
13666
13667 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
13668 RD2, RX0, RX1, RY0);
13669
13670 call __twofish_enc_blk8;
13671
13672- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
13673+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
13674
13675- popq %r12;
13676+ popq %r14;
13677
13678+ pax_force_retaddr
13679 ret;
13680 ENDPROC(twofish_ctr_8way)
13681
13682@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
13683 /* dst <= regs xor IVs(in dst) */
13684 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
13685
13686+ pax_force_retaddr
13687 ret;
13688 ENDPROC(twofish_xts_enc_8way)
13689
13690@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
13691 /* dst <= regs xor IVs(in dst) */
13692 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13693
13694+ pax_force_retaddr
13695 ret;
13696 ENDPROC(twofish_xts_dec_8way)
13697diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
13698index 1c3b7ce..02f578d 100644
13699--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
13700+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
13701@@ -21,6 +21,7 @@
13702 */
13703
13704 #include <linux/linkage.h>
13705+#include <asm/alternative-asm.h>
13706
13707 .file "twofish-x86_64-asm-3way.S"
13708 .text
13709@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
13710 popq %r13;
13711 popq %r14;
13712 popq %r15;
13713+ pax_force_retaddr
13714 ret;
13715
13716 .L__enc_xor3:
13717@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
13718 popq %r13;
13719 popq %r14;
13720 popq %r15;
13721+ pax_force_retaddr
13722 ret;
13723 ENDPROC(__twofish_enc_blk_3way)
13724
13725@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
13726 popq %r13;
13727 popq %r14;
13728 popq %r15;
13729+ pax_force_retaddr
13730 ret;
13731 ENDPROC(twofish_dec_blk_3way)
13732diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
13733index a039d21..524b8b2 100644
13734--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
13735+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
13736@@ -22,6 +22,7 @@
13737
13738 #include <linux/linkage.h>
13739 #include <asm/asm-offsets.h>
13740+#include <asm/alternative-asm.h>
13741
13742 #define a_offset 0
13743 #define b_offset 4
13744@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
13745
13746 popq R1
13747 movq $1,%rax
13748+ pax_force_retaddr
13749 ret
13750 ENDPROC(twofish_enc_blk)
13751
13752@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
13753
13754 popq R1
13755 movq $1,%rax
13756+ pax_force_retaddr
13757 ret
13758 ENDPROC(twofish_dec_blk)
13759diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
13760index d21ff89..6da8e6e 100644
13761--- a/arch/x86/ia32/ia32_aout.c
13762+++ b/arch/x86/ia32/ia32_aout.c
13763@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
13764 unsigned long dump_start, dump_size;
13765 struct user32 dump;
13766
13767+ memset(&dump, 0, sizeof(dump));
13768+
13769 fs = get_fs();
13770 set_fs(KERNEL_DS);
13771 has_dumped = 1;
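
The single added memset above closes a classic infoleak: dump is a struct user32 living on the kernel stack, and the core-dump path writes the whole structure out, so any member or padding byte the function never assigns would otherwise carry stale kernel stack contents into the user-readable core file. A minimal kernel-style sketch of the bug class, with invented names and an invented layout:

    #include <linux/types.h>
    #include <linux/string.h>
    #include <linux/uaccess.h>

    struct example {        /* invented stand-in for struct user32 */
        u32 a;              /* 4 bytes of padding follow on x86_64 */
        u64 b;
    };

    static void leaky(void __user *out)
    {
        struct example e;
        e.a = 1;
        e.b = 2;            /* the padding hole is never written */
        (void)copy_to_user(out, &e, sizeof(e)); /* leaks stale stack bytes */
    }

    static void fixed(void __user *out)
    {
        struct example e;
        memset(&e, 0, sizeof(e));   /* the zeroing the hunk adds for "dump" */
        e.a = 1;
        e.b = 2;
        (void)copy_to_user(out, &e, sizeof(e)); /* holes now read as zero */
    }
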
13772diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
13773index 2206757..85cbcfa 100644
13774--- a/arch/x86/ia32/ia32_signal.c
13775+++ b/arch/x86/ia32/ia32_signal.c
13776@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
13777 if (__get_user(set.sig[0], &frame->sc.oldmask)
13778 || (_COMPAT_NSIG_WORDS > 1
13779 && __copy_from_user((((char *) &set.sig) + 4),
13780- &frame->extramask,
13781+ frame->extramask,
13782 sizeof(frame->extramask))))
13783 goto badframe;
13784
13785@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
13786 sp -= frame_size;
13787 /* Align the stack pointer according to the i386 ABI,
13788 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
13789- sp = ((sp + 4) & -16ul) - 4;
13790+ sp = ((sp - 12) & -16ul) - 4;
13791 return (void __user *) sp;
13792 }
13793
13794@@ -386,7 +386,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
13795 restorer = VDSO32_SYMBOL(current->mm->context.vdso,
13796 sigreturn);
13797 else
13798- restorer = &frame->retcode;
13799+ restorer = frame->retcode;
13800 }
13801
13802 put_user_try {
13803@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
13804 * These are actually not used anymore, but left because some
13805 * gdb versions depend on them as a marker.
13806 */
13807- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
13808+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
13809 } put_user_catch(err);
13810
13811 if (err)
13812@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
13813 0xb8,
13814 __NR_ia32_rt_sigreturn,
13815 0x80cd,
13816- 0,
13817+ 0
13818 };
13819
13820 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
13821@@ -461,16 +461,18 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
13822
13823 if (ksig->ka.sa.sa_flags & SA_RESTORER)
13824 restorer = ksig->ka.sa.sa_restorer;
13825+ else if (current->mm->context.vdso)
13826+ /* Return stub is in 32bit vsyscall page */
13827+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
13828 else
13829- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
13830- rt_sigreturn);
13831+ restorer = frame->retcode;
13832 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
13833
13834 /*
13835 * Not actually used anymore, but left because some gdb
13836 * versions need it.
13837 */
13838- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
13839+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
13840 } put_user_catch(err);
13841
13842 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
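
The get_sigframe() change swaps ((sp + 4) & -16ul) - 4 for ((sp - 12) & -16ul) - 4. Both placements satisfy the ABI comment above them, but the old form could come out at sp itself when sp % 16 == 12, while the patched form always selects the next-lower 16-byte slot, exactly 16 bytes below the old result and therefore always strictly below sp. A standalone check of both properties:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        for (uint64_t sp = 0x1000; sp < 0x1010; sp++) {
            uint64_t before = ((sp + 4) & ~15ull) - 4;   /* old formula */
            uint64_t after  = ((sp - 12) & ~15ull) - 4;  /* patched     */

            assert(((after + 4) & 15) == 0);  /* i386 ABI entry alignment */
            assert(after == before - 16);     /* always one slot lower    */
            assert(after <= sp - 16);         /* never at or above sp     */
        }
        return 0;
    }
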
13843diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
13844index 4299eb0..c0687a7 100644
13845--- a/arch/x86/ia32/ia32entry.S
13846+++ b/arch/x86/ia32/ia32entry.S
13847@@ -15,8 +15,10 @@
13848 #include <asm/irqflags.h>
13849 #include <asm/asm.h>
13850 #include <asm/smap.h>
13851+#include <asm/pgtable.h>
13852 #include <linux/linkage.h>
13853 #include <linux/err.h>
13854+#include <asm/alternative-asm.h>
13855
13856 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13857 #include <linux/elf-em.h>
13858@@ -62,12 +64,12 @@
13859 */
13860 .macro LOAD_ARGS32 offset, _r9=0
13861 .if \_r9
13862- movl \offset+16(%rsp),%r9d
13863+ movl \offset+R9(%rsp),%r9d
13864 .endif
13865- movl \offset+40(%rsp),%ecx
13866- movl \offset+48(%rsp),%edx
13867- movl \offset+56(%rsp),%esi
13868- movl \offset+64(%rsp),%edi
13869+ movl \offset+RCX(%rsp),%ecx
13870+ movl \offset+RDX(%rsp),%edx
13871+ movl \offset+RSI(%rsp),%esi
13872+ movl \offset+RDI(%rsp),%edi
13873 movl %eax,%eax /* zero extension */
13874 .endm
13875
13876@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
13877 ENDPROC(native_irq_enable_sysexit)
13878 #endif
13879
13880+ .macro pax_enter_kernel_user
13881+ pax_set_fptr_mask
13882+#ifdef CONFIG_PAX_MEMORY_UDEREF
13883+ call pax_enter_kernel_user
13884+#endif
13885+ .endm
13886+
13887+ .macro pax_exit_kernel_user
13888+#ifdef CONFIG_PAX_MEMORY_UDEREF
13889+ call pax_exit_kernel_user
13890+#endif
13891+#ifdef CONFIG_PAX_RANDKSTACK
13892+ pushq %rax
13893+ pushq %r11
13894+ call pax_randomize_kstack
13895+ popq %r11
13896+ popq %rax
13897+#endif
13898+ .endm
13899+
13900+ .macro pax_erase_kstack
13901+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13902+ call pax_erase_kstack
13903+#endif
13904+ .endm
13905+
13906 /*
13907 * 32bit SYSENTER instruction entry.
13908 *
13909@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
13910 CFI_REGISTER rsp,rbp
13911 SWAPGS_UNSAFE_STACK
13912 movq PER_CPU_VAR(kernel_stack), %rsp
13913- addq $(KERNEL_STACK_OFFSET),%rsp
13914- /*
13915- * No need to follow this irqs on/off section: the syscall
13916- * disabled irqs, here we enable it straight after entry:
13917- */
13918- ENABLE_INTERRUPTS(CLBR_NONE)
13919 movl %ebp,%ebp /* zero extension */
13920 pushq_cfi $__USER32_DS
13921 /*CFI_REL_OFFSET ss,0*/
13922@@ -135,24 +157,49 @@ ENTRY(ia32_sysenter_target)
13923 CFI_REL_OFFSET rsp,0
13924 pushfq_cfi
13925 /*CFI_REL_OFFSET rflags,0*/
13926- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
13927- CFI_REGISTER rip,r10
13928+ orl $X86_EFLAGS_IF,(%rsp)
13929+ GET_THREAD_INFO(%r11)
13930+ movl TI_sysenter_return(%r11), %r11d
13931+ CFI_REGISTER rip,r11
13932 pushq_cfi $__USER32_CS
13933 /*CFI_REL_OFFSET cs,0*/
13934 movl %eax, %eax
13935- pushq_cfi %r10
13936+ pushq_cfi %r11
13937 CFI_REL_OFFSET rip,0
13938 pushq_cfi %rax
13939 cld
13940 SAVE_ARGS 0,1,0
13941+ pax_enter_kernel_user
13942+
13943+#ifdef CONFIG_PAX_RANDKSTACK
13944+ pax_erase_kstack
13945+#endif
13946+
13947+ /*
13948+ * No need to follow this irqs on/off section: the syscall
13949+ * disabled irqs, here we enable it straight after entry:
13950+ */
13951+ ENABLE_INTERRUPTS(CLBR_NONE)
13952 /* no need to do an access_ok check here because rbp has been
13953 32bit zero extended */
13954+
13955+#ifdef CONFIG_PAX_MEMORY_UDEREF
13956+ addq pax_user_shadow_base,%rbp
13957+ ASM_PAX_OPEN_USERLAND
13958+#endif
13959+
13960 ASM_STAC
13961 1: movl (%rbp),%ebp
13962 _ASM_EXTABLE(1b,ia32_badarg)
13963 ASM_CLAC
13964- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13965- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13966+
13967+#ifdef CONFIG_PAX_MEMORY_UDEREF
13968+ ASM_PAX_CLOSE_USERLAND
13969+#endif
13970+
13971+ GET_THREAD_INFO(%r11)
13972+ orl $TS_COMPAT,TI_status(%r11)
13973+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
13974 CFI_REMEMBER_STATE
13975 jnz sysenter_tracesys
13976 cmpq $(IA32_NR_syscalls-1),%rax
13977@@ -162,15 +209,18 @@ sysenter_do_call:
13978 sysenter_dispatch:
13979 call *ia32_sys_call_table(,%rax,8)
13980 movq %rax,RAX-ARGOFFSET(%rsp)
13981+ GET_THREAD_INFO(%r11)
13982 DISABLE_INTERRUPTS(CLBR_NONE)
13983 TRACE_IRQS_OFF
13984- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13985+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
13986 jnz sysexit_audit
13987 sysexit_from_sys_call:
13988- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13989+ pax_exit_kernel_user
13990+ pax_erase_kstack
13991+ andl $~TS_COMPAT,TI_status(%r11)
13992 /* clear IF, that popfq doesn't enable interrupts early */
13993- andl $~0x200,EFLAGS-R11(%rsp)
13994- movl RIP-R11(%rsp),%edx /* User %eip */
13995+ andl $~X86_EFLAGS_IF,EFLAGS(%rsp)
13996+ movl RIP(%rsp),%edx /* User %eip */
13997 CFI_REGISTER rip,rdx
13998 RESTORE_ARGS 0,24,0,0,0,0
13999 xorq %r8,%r8
14000@@ -193,6 +243,9 @@ sysexit_from_sys_call:
14001 movl %eax,%esi /* 2nd arg: syscall number */
14002 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
14003 call __audit_syscall_entry
14004+
14005+ pax_erase_kstack
14006+
14007 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
14008 cmpq $(IA32_NR_syscalls-1),%rax
14009 ja ia32_badsys
14010@@ -204,7 +257,7 @@ sysexit_from_sys_call:
14011 .endm
14012
14013 .macro auditsys_exit exit
14014- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14015+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14016 jnz ia32_ret_from_sys_call
14017 TRACE_IRQS_ON
14018 ENABLE_INTERRUPTS(CLBR_NONE)
14019@@ -215,11 +268,12 @@ sysexit_from_sys_call:
14020 1: setbe %al /* 1 if error, 0 if not */
14021 movzbl %al,%edi /* zero-extend that into %edi */
14022 call __audit_syscall_exit
14023+ GET_THREAD_INFO(%r11)
14024 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
14025 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
14026 DISABLE_INTERRUPTS(CLBR_NONE)
14027 TRACE_IRQS_OFF
14028- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14029+ testl %edi,TI_flags(%r11)
14030 jz \exit
14031 CLEAR_RREGS -ARGOFFSET
14032 jmp int_with_check
14033@@ -237,7 +291,7 @@ sysexit_audit:
14034
14035 sysenter_tracesys:
14036 #ifdef CONFIG_AUDITSYSCALL
14037- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14038+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14039 jz sysenter_auditsys
14040 #endif
14041 SAVE_REST
14042@@ -249,6 +303,9 @@ sysenter_tracesys:
14043 RESTORE_REST
14044 cmpq $(IA32_NR_syscalls-1),%rax
14045 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
14046+
14047+ pax_erase_kstack
14048+
14049 jmp sysenter_do_call
14050 CFI_ENDPROC
14051 ENDPROC(ia32_sysenter_target)
14052@@ -276,19 +333,25 @@ ENDPROC(ia32_sysenter_target)
14053 ENTRY(ia32_cstar_target)
14054 CFI_STARTPROC32 simple
14055 CFI_SIGNAL_FRAME
14056- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14057+ CFI_DEF_CFA rsp,0
14058 CFI_REGISTER rip,rcx
14059 /*CFI_REGISTER rflags,r11*/
14060 SWAPGS_UNSAFE_STACK
14061 movl %esp,%r8d
14062 CFI_REGISTER rsp,r8
14063 movq PER_CPU_VAR(kernel_stack),%rsp
14064+ SAVE_ARGS 8*6,0,0
14065+ pax_enter_kernel_user
14066+
14067+#ifdef CONFIG_PAX_RANDKSTACK
14068+ pax_erase_kstack
14069+#endif
14070+
14071 /*
14072 * No need to follow this irqs on/off section: the syscall
14073 * disabled irqs and here we enable it straight after entry:
14074 */
14075 ENABLE_INTERRUPTS(CLBR_NONE)
14076- SAVE_ARGS 8,0,0
14077 movl %eax,%eax /* zero extension */
14078 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14079 movq %rcx,RIP-ARGOFFSET(%rsp)
14080@@ -304,12 +367,25 @@ ENTRY(ia32_cstar_target)
14081 /* no need to do an access_ok check here because r8 has been
14082 32bit zero extended */
14083 /* hardware stack frame is complete now */
14084+
14085+#ifdef CONFIG_PAX_MEMORY_UDEREF
14086+ ASM_PAX_OPEN_USERLAND
14087+ movq pax_user_shadow_base,%r8
14088+ addq RSP-ARGOFFSET(%rsp),%r8
14089+#endif
14090+
14091 ASM_STAC
14092 1: movl (%r8),%r9d
14093 _ASM_EXTABLE(1b,ia32_badarg)
14094 ASM_CLAC
14095- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14096- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14097+
14098+#ifdef CONFIG_PAX_MEMORY_UDEREF
14099+ ASM_PAX_CLOSE_USERLAND
14100+#endif
14101+
14102+ GET_THREAD_INFO(%r11)
14103+ orl $TS_COMPAT,TI_status(%r11)
14104+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14105 CFI_REMEMBER_STATE
14106 jnz cstar_tracesys
14107 cmpq $IA32_NR_syscalls-1,%rax
14108@@ -319,13 +395,16 @@ cstar_do_call:
14109 cstar_dispatch:
14110 call *ia32_sys_call_table(,%rax,8)
14111 movq %rax,RAX-ARGOFFSET(%rsp)
14112+ GET_THREAD_INFO(%r11)
14113 DISABLE_INTERRUPTS(CLBR_NONE)
14114 TRACE_IRQS_OFF
14115- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14116+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14117 jnz sysretl_audit
14118 sysretl_from_sys_call:
14119- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14120- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
14121+ pax_exit_kernel_user
14122+ pax_erase_kstack
14123+ andl $~TS_COMPAT,TI_status(%r11)
14124+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
14125 movl RIP-ARGOFFSET(%rsp),%ecx
14126 CFI_REGISTER rip,rcx
14127 movl EFLAGS-ARGOFFSET(%rsp),%r11d
14128@@ -352,7 +431,7 @@ sysretl_audit:
14129
14130 cstar_tracesys:
14131 #ifdef CONFIG_AUDITSYSCALL
14132- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14133+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14134 jz cstar_auditsys
14135 #endif
14136 xchgl %r9d,%ebp
14137@@ -366,11 +445,19 @@ cstar_tracesys:
14138 xchgl %ebp,%r9d
14139 cmpq $(IA32_NR_syscalls-1),%rax
14140 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
14141+
14142+ pax_erase_kstack
14143+
14144 jmp cstar_do_call
14145 END(ia32_cstar_target)
14146
14147 ia32_badarg:
14148 ASM_CLAC
14149+
14150+#ifdef CONFIG_PAX_MEMORY_UDEREF
14151+ ASM_PAX_CLOSE_USERLAND
14152+#endif
14153+
14154 movq $-EFAULT,%rax
14155 jmp ia32_sysret
14156 CFI_ENDPROC
14157@@ -407,19 +494,26 @@ ENTRY(ia32_syscall)
14158 CFI_REL_OFFSET rip,RIP-RIP
14159 PARAVIRT_ADJUST_EXCEPTION_FRAME
14160 SWAPGS
14161- /*
14162- * No need to follow this irqs on/off section: the syscall
14163- * disabled irqs and here we enable it straight after entry:
14164- */
14165- ENABLE_INTERRUPTS(CLBR_NONE)
14166 movl %eax,%eax
14167 pushq_cfi %rax
14168 cld
14169 /* note the registers are not zero extended to the sf.
14170 this could be a problem. */
14171 SAVE_ARGS 0,1,0
14172- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14173- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14174+ pax_enter_kernel_user
14175+
14176+#ifdef CONFIG_PAX_RANDKSTACK
14177+ pax_erase_kstack
14178+#endif
14179+
14180+ /*
14181+ * No need to follow this irqs on/off section: the syscall
14182+ * disabled irqs and here we enable it straight after entry:
14183+ */
14184+ ENABLE_INTERRUPTS(CLBR_NONE)
14185+ GET_THREAD_INFO(%r11)
14186+ orl $TS_COMPAT,TI_status(%r11)
14187+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14188 jnz ia32_tracesys
14189 cmpq $(IA32_NR_syscalls-1),%rax
14190 ja ia32_badsys
14191@@ -442,6 +536,9 @@ ia32_tracesys:
14192 RESTORE_REST
14193 cmpq $(IA32_NR_syscalls-1),%rax
14194 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
14195+
14196+ pax_erase_kstack
14197+
14198 jmp ia32_do_call
14199 END(ia32_syscall)
14200
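
Two motifs repeat through the ia32entry.S hunks. First, every TI_status+THREAD_INFO(%rsp,...) access becomes GET_THREAD_INFO(%r11) plus TI_xxx(%r11): with the fixed KERNEL_STACK_OFFSET bias removed and RANDKSTACK free to move the kernel stack, thread_info can no longer be found at a constant offset from %rsp. Second, under UDEREF the user-supplied argument is fetched through a rebased pointer (addq pax_user_shadow_base,%rbp) inside an OPEN/CLOSE_USERLAND bracket. A C-level sketch of that fetch; pax_user_shadow_base is real in this patch, the helper around it is invented:

    extern unsigned long pax_user_shadow_base;

    static inline unsigned int fetch_user_u32(unsigned long uaddr)
    {
        /* Under UDEREF the user half is not mapped at its own address
           while the kernel runs; it is reachable only through a shifted
           alias, so rebase the pointer before the load. */
        const unsigned int *shadow =
                (const unsigned int *)(uaddr + pax_user_shadow_base);
        return *shadow;  /* faults are caught via _ASM_EXTABLE -> ia32_badarg */
    }
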
14201diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
14202index 8e0ceec..af13504 100644
14203--- a/arch/x86/ia32/sys_ia32.c
14204+++ b/arch/x86/ia32/sys_ia32.c
14205@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
14206 */
14207 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
14208 {
14209- typeof(ubuf->st_uid) uid = 0;
14210- typeof(ubuf->st_gid) gid = 0;
14211+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
14212+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
14213 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
14214 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
14215 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
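
In cp_stat64(), typeof(ubuf->st_uid) becomes typeof(((struct stat64 *)0)->st_uid): the member's type is derived from the struct definition alone instead of through the __user-qualified pointer, the same null-pointer trick offsetof() is built on. The idiom in isolation (the macro name is invented):

    /* Type of member m of struct/union type T, no object required. */
    #define member_type(T, m) typeof(((T *)0)->m)

    struct stat64_like { unsigned int st_uid, st_gid; };

    int main(void)
    {
        member_type(struct stat64_like, st_uid) uid = 0; /* unsigned int */
        return (int)uid;
    }
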
14216diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
14217index 372231c..51b537d 100644
14218--- a/arch/x86/include/asm/alternative-asm.h
14219+++ b/arch/x86/include/asm/alternative-asm.h
14220@@ -18,6 +18,45 @@
14221 .endm
14222 #endif
14223
14224+#ifdef KERNEXEC_PLUGIN
14225+ .macro pax_force_retaddr_bts rip=0
14226+ btsq $63,\rip(%rsp)
14227+ .endm
14228+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
14229+ .macro pax_force_retaddr rip=0, reload=0
14230+ btsq $63,\rip(%rsp)
14231+ .endm
14232+ .macro pax_force_fptr ptr
14233+ btsq $63,\ptr
14234+ .endm
14235+ .macro pax_set_fptr_mask
14236+ .endm
14237+#endif
14238+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
14239+ .macro pax_force_retaddr rip=0, reload=0
14240+ .if \reload
14241+ pax_set_fptr_mask
14242+ .endif
14243+ orq %r12,\rip(%rsp)
14244+ .endm
14245+ .macro pax_force_fptr ptr
14246+ orq %r12,\ptr
14247+ .endm
14248+ .macro pax_set_fptr_mask
14249+ movabs $0x8000000000000000,%r12
14250+ .endm
14251+#endif
14252+#else
14253+ .macro pax_force_retaddr rip=0, reload=0
14254+ .endm
14255+ .macro pax_force_fptr ptr
14256+ .endm
14257+ .macro pax_force_retaddr_bts rip=0
14258+ .endm
14259+ .macro pax_set_fptr_mask
14260+ .endm
14261+#endif
14262+
14263 .macro altinstruction_entry orig alt feature orig_len alt_len
14264 .long \orig - .
14265 .long \alt - .
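
These macros are what every pax_force_retaddr seen before ret in the crypto .S hunks expands to. The idea: tag the saved return address with bit 63 just before returning. Genuine kernel text addresses already have that bit set, so tagging is a no-op for them, while an overwritten return address pointing at user memory turns non-canonical and the ret faults instead of pivoting. METHOD_BTS tags with btsq $63,(%rsp); METHOD_OR keeps the 0x8000000000000000 mask resident in %r12 (loaded by pax_set_fptr_mask), which is why %r12 becomes reserved and the crypto code earlier migrates its scratch uses of %r12 to %r14. The arithmetic, as a userspace check:

    #include <assert.h>
    #include <stdint.h>

    #define PAX_RETADDR_MASK 0x8000000000000000ull /* pax_set_fptr_mask value */

    int main(void)
    {
        uint64_t kernel_text = 0xffffffff81000000ull; /* canonical kernel address */
        uint64_t user_ptr    = 0x00007f1234567000ull; /* attacker-reachable value */

        /* btsq $63,(%rsp) and orq %r12,(%rsp) both reduce to this: */
        assert((kernel_text | PAX_RETADDR_MASK) == kernel_text);  /* no-op */
        assert((user_ptr | PAX_RETADDR_MASK) == 0x80007f1234567000ull);
        /* 0x80007f... is non-canonical on x86-64, so ret raises #GP. */
        return 0;
    }
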
14266diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
14267index 0a3f9c9..c9d081d 100644
14268--- a/arch/x86/include/asm/alternative.h
14269+++ b/arch/x86/include/asm/alternative.h
14270@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
14271 ".pushsection .discard,\"aw\",@progbits\n" \
14272 DISCARD_ENTRY(1) \
14273 ".popsection\n" \
14274- ".pushsection .altinstr_replacement, \"ax\"\n" \
14275+ ".pushsection .altinstr_replacement, \"a\"\n" \
14276 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
14277 ".popsection"
14278
14279@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
14280 DISCARD_ENTRY(1) \
14281 DISCARD_ENTRY(2) \
14282 ".popsection\n" \
14283- ".pushsection .altinstr_replacement, \"ax\"\n" \
14284+ ".pushsection .altinstr_replacement, \"a\"\n" \
14285 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
14286 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
14287 ".popsection"
14288diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
14289index 1d2091a..f5074c1 100644
14290--- a/arch/x86/include/asm/apic.h
14291+++ b/arch/x86/include/asm/apic.h
14292@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
14293
14294 #ifdef CONFIG_X86_LOCAL_APIC
14295
14296-extern unsigned int apic_verbosity;
14297+extern int apic_verbosity;
14298 extern int local_apic_timer_c2_ok;
14299
14300 extern int disable_apic;
14301diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
14302index 20370c6..a2eb9b0 100644
14303--- a/arch/x86/include/asm/apm.h
14304+++ b/arch/x86/include/asm/apm.h
14305@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
14306 __asm__ __volatile__(APM_DO_ZERO_SEGS
14307 "pushl %%edi\n\t"
14308 "pushl %%ebp\n\t"
14309- "lcall *%%cs:apm_bios_entry\n\t"
14310+ "lcall *%%ss:apm_bios_entry\n\t"
14311 "setc %%al\n\t"
14312 "popl %%ebp\n\t"
14313 "popl %%edi\n\t"
14314@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
14315 __asm__ __volatile__(APM_DO_ZERO_SEGS
14316 "pushl %%edi\n\t"
14317 "pushl %%ebp\n\t"
14318- "lcall *%%cs:apm_bios_entry\n\t"
14319+ "lcall *%%ss:apm_bios_entry\n\t"
14320 "setc %%bl\n\t"
14321 "popl %%ebp\n\t"
14322 "popl %%edi\n\t"
14323diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
14324index b17f4f4..9620151 100644
14325--- a/arch/x86/include/asm/atomic.h
14326+++ b/arch/x86/include/asm/atomic.h
14327@@ -23,7 +23,18 @@
14328 */
14329 static inline int atomic_read(const atomic_t *v)
14330 {
14331- return (*(volatile int *)&(v)->counter);
14332+ return (*(volatile const int *)&(v)->counter);
14333+}
14334+
14335+/**
14336+ * atomic_read_unchecked - read atomic variable
14337+ * @v: pointer of type atomic_unchecked_t
14338+ *
14339+ * Atomically reads the value of @v.
14340+ */
14341+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
14342+{
14343+ return (*(volatile const int *)&(v)->counter);
14344 }
14345
14346 /**
14347@@ -39,6 +50,18 @@ static inline void atomic_set(atomic_t *v, int i)
14348 }
14349
14350 /**
14351+ * atomic_set_unchecked - set atomic variable
14352+ * @v: pointer of type atomic_unchecked_t
14353+ * @i: required value
14354+ *
14355+ * Atomically sets the value of @v to @i.
14356+ */
14357+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
14358+{
14359+ v->counter = i;
14360+}
14361+
14362+/**
14363 * atomic_add - add integer to atomic variable
14364 * @i: integer value to add
14365 * @v: pointer of type atomic_t
14366@@ -47,7 +70,29 @@ static inline void atomic_set(atomic_t *v, int i)
14367 */
14368 static inline void atomic_add(int i, atomic_t *v)
14369 {
14370- asm volatile(LOCK_PREFIX "addl %1,%0"
14371+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
14372+
14373+#ifdef CONFIG_PAX_REFCOUNT
14374+ "jno 0f\n"
14375+ LOCK_PREFIX "subl %1,%0\n"
14376+ "int $4\n0:\n"
14377+ _ASM_EXTABLE(0b, 0b)
14378+#endif
14379+
14380+ : "+m" (v->counter)
14381+ : "ir" (i));
14382+}
14383+
14384+/**
14385+ * atomic_add_unchecked - add integer to atomic variable
14386+ * @i: integer value to add
14387+ * @v: pointer of type atomic_unchecked_t
14388+ *
14389+ * Atomically adds @i to @v.
14390+ */
14391+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
14392+{
14393+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
14394 : "+m" (v->counter)
14395 : "ir" (i));
14396 }
14397@@ -61,7 +106,29 @@ static inline void atomic_add(int i, atomic_t *v)
14398 */
14399 static inline void atomic_sub(int i, atomic_t *v)
14400 {
14401- asm volatile(LOCK_PREFIX "subl %1,%0"
14402+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
14403+
14404+#ifdef CONFIG_PAX_REFCOUNT
14405+ "jno 0f\n"
14406+ LOCK_PREFIX "addl %1,%0\n"
14407+ "int $4\n0:\n"
14408+ _ASM_EXTABLE(0b, 0b)
14409+#endif
14410+
14411+ : "+m" (v->counter)
14412+ : "ir" (i));
14413+}
14414+
14415+/**
14416+ * atomic_sub_unchecked - subtract integer from atomic variable
14417+ * @i: integer value to subtract
14418+ * @v: pointer of type atomic_unchecked_t
14419+ *
14420+ * Atomically subtracts @i from @v.
14421+ */
14422+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
14423+{
14424+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
14425 : "+m" (v->counter)
14426 : "ir" (i));
14427 }
14428@@ -77,7 +144,7 @@ static inline void atomic_sub(int i, atomic_t *v)
14429 */
14430 static inline int atomic_sub_and_test(int i, atomic_t *v)
14431 {
14432- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
14433+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
14434 }
14435
14436 /**
14437@@ -88,7 +155,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
14438 */
14439 static inline void atomic_inc(atomic_t *v)
14440 {
14441- asm volatile(LOCK_PREFIX "incl %0"
14442+ asm volatile(LOCK_PREFIX "incl %0\n"
14443+
14444+#ifdef CONFIG_PAX_REFCOUNT
14445+ "jno 0f\n"
14446+ LOCK_PREFIX "decl %0\n"
14447+ "int $4\n0:\n"
14448+ _ASM_EXTABLE(0b, 0b)
14449+#endif
14450+
14451+ : "+m" (v->counter));
14452+}
14453+
14454+/**
14455+ * atomic_inc_unchecked - increment atomic variable
14456+ * @v: pointer of type atomic_unchecked_t
14457+ *
14458+ * Atomically increments @v by 1.
14459+ */
14460+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
14461+{
14462+ asm volatile(LOCK_PREFIX "incl %0\n"
14463 : "+m" (v->counter));
14464 }
14465
14466@@ -100,7 +187,27 @@ static inline void atomic_inc(atomic_t *v)
14467 */
14468 static inline void atomic_dec(atomic_t *v)
14469 {
14470- asm volatile(LOCK_PREFIX "decl %0"
14471+ asm volatile(LOCK_PREFIX "decl %0\n"
14472+
14473+#ifdef CONFIG_PAX_REFCOUNT
14474+ "jno 0f\n"
14475+ LOCK_PREFIX "incl %0\n"
14476+ "int $4\n0:\n"
14477+ _ASM_EXTABLE(0b, 0b)
14478+#endif
14479+
14480+ : "+m" (v->counter));
14481+}
14482+
14483+/**
14484+ * atomic_dec_unchecked - decrement atomic variable
14485+ * @v: pointer of type atomic_unchecked_t
14486+ *
14487+ * Atomically decrements @v by 1.
14488+ */
14489+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
14490+{
14491+ asm volatile(LOCK_PREFIX "decl %0\n"
14492 : "+m" (v->counter));
14493 }
14494
14495@@ -114,7 +221,7 @@ static inline void atomic_dec(atomic_t *v)
14496 */
14497 static inline int atomic_dec_and_test(atomic_t *v)
14498 {
14499- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
14500+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
14501 }
14502
14503 /**
14504@@ -127,7 +234,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
14505 */
14506 static inline int atomic_inc_and_test(atomic_t *v)
14507 {
14508- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
14509+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
14510+}
14511+
14512+/**
14513+ * atomic_inc_and_test_unchecked - increment and test
14514+ * @v: pointer of type atomic_unchecked_t
14515+ *
14516+ * Atomically increments @v by 1
14517+ * and returns true if the result is zero, or false for all
14518+ * other cases.
14519+ */
14520+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
14521+{
14522+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
14523 }
14524
14525 /**
14526@@ -141,7 +261,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
14527 */
14528 static inline int atomic_add_negative(int i, atomic_t *v)
14529 {
14530- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
14531+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
14532 }
14533
14534 /**
14535@@ -153,6 +273,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
14536 */
14537 static inline int atomic_add_return(int i, atomic_t *v)
14538 {
14539+ return i + xadd_check_overflow(&v->counter, i);
14540+}
14541+
14542+/**
14543+ * atomic_add_return_unchecked - add integer and return
14544+ * @i: integer value to add
14545+ * @v: pointer of type atomic_unchecked_t
14546+ *
14547+ * Atomically adds @i to @v and returns @i + *@v
14548+ */
14549+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
14550+{
14551 return i + xadd(&v->counter, i);
14552 }
14553
14554@@ -169,9 +301,18 @@ static inline int atomic_sub_return(int i, atomic_t *v)
14555 }
14556
14557 #define atomic_inc_return(v) (atomic_add_return(1, v))
14558+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
14559+{
14560+ return atomic_add_return_unchecked(1, v);
14561+}
14562 #define atomic_dec_return(v) (atomic_sub_return(1, v))
14563
14564-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
14565+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
14566+{
14567+ return cmpxchg(&v->counter, old, new);
14568+}
14569+
14570+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
14571 {
14572 return cmpxchg(&v->counter, old, new);
14573 }
14574@@ -181,6 +322,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
14575 return xchg(&v->counter, new);
14576 }
14577
14578+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
14579+{
14580+ return xchg(&v->counter, new);
14581+}
14582+
14583 /**
14584 * __atomic_add_unless - add unless the number is already a given value
14585 * @v: pointer of type atomic_t
14586@@ -190,14 +336,27 @@ static inline int atomic_xchg(atomic_t *v, int new)
14587 * Atomically adds @a to @v, so long as @v was not already @u.
14588 * Returns the old value of @v.
14589 */
14590-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
14591+static inline int __intentional_overflow(-1) __atomic_add_unless(atomic_t *v, int a, int u)
14592 {
14593- int c, old;
14594+ int c, old, new;
14595 c = atomic_read(v);
14596 for (;;) {
14597- if (unlikely(c == (u)))
14598+ if (unlikely(c == u))
14599 break;
14600- old = atomic_cmpxchg((v), c, c + (a));
14601+
14602+ asm volatile("addl %2,%0\n"
14603+
14604+#ifdef CONFIG_PAX_REFCOUNT
14605+ "jno 0f\n"
14606+ "subl %2,%0\n"
14607+ "int $4\n0:\n"
14608+ _ASM_EXTABLE(0b, 0b)
14609+#endif
14610+
14611+ : "=r" (new)
14612+ : "0" (c), "ir" (a));
14613+
14614+ old = atomic_cmpxchg(v, c, new);
14615 if (likely(old == c))
14616 break;
14617 c = old;
14618@@ -206,6 +365,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
14619 }
14620
14621 /**
14622+ * atomic_inc_not_zero_hint - increment if not null
14623+ * @v: pointer of type atomic_t
14624+ * @hint: probable value of the atomic before the increment
14625+ *
14626+ * This version of atomic_inc_not_zero() takes a hint of the probable
14627+ * value of the atomic. The hint lets the processor avoid reading the
14628+ * memory before doing the atomic read/modify/write cycle, lowering the
14629+ * number of bus transactions on some arches.
14630+ *
14631+ * Returns: 0 if increment was not done, 1 otherwise.
14632+ */
14633+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
14634+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
14635+{
14636+ int val, c = hint, new;
14637+
14638+ /* sanity test, should be removed by compiler if hint is a constant */
14639+ if (!hint)
14640+ return __atomic_add_unless(v, 1, 0);
14641+
14642+ do {
14643+ asm volatile("incl %0\n"
14644+
14645+#ifdef CONFIG_PAX_REFCOUNT
14646+ "jno 0f\n"
14647+ "decl %0\n"
14648+ "int $4\n0:\n"
14649+ _ASM_EXTABLE(0b, 0b)
14650+#endif
14651+
14652+ : "=r" (new)
14653+ : "0" (c));
14654+
14655+ val = atomic_cmpxchg(v, c, new);
14656+ if (val == c)
14657+ return 1;
14658+ c = val;
14659+ } while (c);
14660+
14661+ return 0;
14662+}
14663+
14664+/**
14665 * atomic_inc_short - increment of a short integer
14666 * @v: pointer to type int
14667 *
14668@@ -234,14 +436,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
14669 #endif
14670
14671 /* These are x86-specific, used by some header files */
14672-#define atomic_clear_mask(mask, addr) \
14673- asm volatile(LOCK_PREFIX "andl %0,%1" \
14674- : : "r" (~(mask)), "m" (*(addr)) : "memory")
14675+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
14676+{
14677+ asm volatile(LOCK_PREFIX "andl %1,%0"
14678+ : "+m" (v->counter)
14679+ : "r" (~(mask))
14680+ : "memory");
14681+}
14682
14683-#define atomic_set_mask(mask, addr) \
14684- asm volatile(LOCK_PREFIX "orl %0,%1" \
14685- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
14686- : "memory")
14687+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
14688+{
14689+ asm volatile(LOCK_PREFIX "andl %1,%0"
14690+ : "+m" (v->counter)
14691+ : "r" (~(mask))
14692+ : "memory");
14693+}
14694+
14695+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
14696+{
14697+ asm volatile(LOCK_PREFIX "orl %1,%0"
14698+ : "+m" (v->counter)
14699+ : "r" (mask)
14700+ : "memory");
14701+}
14702+
14703+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
14704+{
14705+ asm volatile(LOCK_PREFIX "orl %1,%0"
14706+ : "+m" (v->counter)
14707+ : "r" (mask)
14708+ : "memory");
14709+}
14710
14711 /* Atomic operations are already serializing on x86 */
14712 #define smp_mb__before_atomic_dec() barrier()
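
The shape repeated all through this file: each checked operation grows a "jno 0f; <inverse op>; int $4; 0:" tail. jno skips the tail when the locked operation did not overflow; otherwise the inverse operation rolls the counter back and int $4 raises the overflow exception that PAX_REFCOUNT turns into a report, with _ASM_EXTABLE(0b, 0b) resuming at the same spot. Distilled to one function, minus the exception-table entry, so this is the kernel sequence rather than something to run from userspace:

    /* Minimal REFCOUNT-style increment: undo the op, then trap. */
    static inline void checked_inc(int *counter)
    {
        __asm__ __volatile__("lock incl %0\n"
                             "jno 0f\n"         /* OF clear: done           */
                             "lock decl %0\n"   /* overflowed: undo the inc */
                             "int $4\n"         /* raise #OF for the report */
                             "0:\n"
                             : "+m" (*counter));
    }

The *_unchecked variants and atomic_unchecked_t exist so counters that may legitimately wrap, such as statistics or sequence numbers, can opt out of the trap.
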
14713diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
14714index b154de7..bf18a5a 100644
14715--- a/arch/x86/include/asm/atomic64_32.h
14716+++ b/arch/x86/include/asm/atomic64_32.h
14717@@ -12,6 +12,14 @@ typedef struct {
14718 u64 __aligned(8) counter;
14719 } atomic64_t;
14720
14721+#ifdef CONFIG_PAX_REFCOUNT
14722+typedef struct {
14723+ u64 __aligned(8) counter;
14724+} atomic64_unchecked_t;
14725+#else
14726+typedef atomic64_t atomic64_unchecked_t;
14727+#endif
14728+
14729 #define ATOMIC64_INIT(val) { (val) }
14730
14731 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
14732@@ -37,21 +45,31 @@ typedef struct {
14733 ATOMIC64_DECL_ONE(sym##_386)
14734
14735 ATOMIC64_DECL_ONE(add_386);
14736+ATOMIC64_DECL_ONE(add_unchecked_386);
14737 ATOMIC64_DECL_ONE(sub_386);
14738+ATOMIC64_DECL_ONE(sub_unchecked_386);
14739 ATOMIC64_DECL_ONE(inc_386);
14740+ATOMIC64_DECL_ONE(inc_unchecked_386);
14741 ATOMIC64_DECL_ONE(dec_386);
14742+ATOMIC64_DECL_ONE(dec_unchecked_386);
14743 #endif
14744
14745 #define alternative_atomic64(f, out, in...) \
14746 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
14747
14748 ATOMIC64_DECL(read);
14749+ATOMIC64_DECL(read_unchecked);
14750 ATOMIC64_DECL(set);
14751+ATOMIC64_DECL(set_unchecked);
14752 ATOMIC64_DECL(xchg);
14753 ATOMIC64_DECL(add_return);
14754+ATOMIC64_DECL(add_return_unchecked);
14755 ATOMIC64_DECL(sub_return);
14756+ATOMIC64_DECL(sub_return_unchecked);
14757 ATOMIC64_DECL(inc_return);
14758+ATOMIC64_DECL(inc_return_unchecked);
14759 ATOMIC64_DECL(dec_return);
14760+ATOMIC64_DECL(dec_return_unchecked);
14761 ATOMIC64_DECL(dec_if_positive);
14762 ATOMIC64_DECL(inc_not_zero);
14763 ATOMIC64_DECL(add_unless);
14764@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
14765 }
14766
14767 /**
14768+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
14769+ * @v: pointer to type atomic64_unchecked_t
14770+ * @o: expected value
14771+ * @n: new value
14772+ *
14773+ * Atomically sets @v to @n if it was equal to @o and returns
14774+ * the old value.
14775+ */
14776+
14777+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
14778+{
14779+ return cmpxchg64(&v->counter, o, n);
14780+}
14781+
14782+/**
14783 * atomic64_xchg - xchg atomic64 variable
14784 * @v: pointer to type atomic64_t
14785 * @n: value to assign
14786@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
14787 }
14788
14789 /**
14790+ * atomic64_set_unchecked - set atomic64 variable
14791+ * @v: pointer to type atomic64_unchecked_t
14792+ * @n: value to assign
14793+ *
14794+ * Atomically sets the value of @v to @n.
14795+ */
14796+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
14797+{
14798+ unsigned high = (unsigned)(i >> 32);
14799+ unsigned low = (unsigned)i;
14800+ alternative_atomic64(set, /* no output */,
14801+ "S" (v), "b" (low), "c" (high)
14802+ : "eax", "edx", "memory");
14803+}
14804+
14805+/**
14806 * atomic64_read - read atomic64 variable
14807 * @v: pointer to type atomic64_t
14808 *
14809@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
14810 }
14811
14812 /**
14813+ * atomic64_read_unchecked - read atomic64 variable
14814+ * @v: pointer to type atomic64_unchecked_t
14815+ *
14816+ * Atomically reads the value of @v and returns it.
14817+ */
14818+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
14819+{
14820+ long long r;
14821+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
14822+ return r;
14823+ }
14824+
14825+/**
14826 * atomic64_add_return - add and return
14827 * @i: integer value to add
14828 * @v: pointer to type atomic64_t
14829@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
14830 return i;
14831 }
14832
14833+/**
14834+ * atomic64_add_return_unchecked - add and return
14835+ * @i: integer value to add
14836+ * @v: pointer to type atomic64_unchecked_t
14837+ *
14838+ * Atomically adds @i to @v and returns @i + *@v
14839+ */
14840+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
14841+{
14842+ alternative_atomic64(add_return_unchecked,
14843+ ASM_OUTPUT2("+A" (i), "+c" (v)),
14844+ ASM_NO_INPUT_CLOBBER("memory"));
14845+ return i;
14846+}
14847+
14848 /*
14849 * Other variants with different arithmetic operators:
14850 */
14851@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
14852 return a;
14853 }
14854
14855+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
14856+{
14857+ long long a;
14858+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
14859+ "S" (v) : "memory", "ecx");
14860+ return a;
14861+}
14862+
14863 static inline long long atomic64_dec_return(atomic64_t *v)
14864 {
14865 long long a;
14866@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
14867 }
14868
14869 /**
14870+ * atomic64_add_unchecked - add integer to atomic64 variable
14871+ * @i: integer value to add
14872+ * @v: pointer to type atomic64_unchecked_t
14873+ *
14874+ * Atomically adds @i to @v.
14875+ */
14876+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
14877+{
14878+ __alternative_atomic64(add_unchecked, add_return_unchecked,
14879+ ASM_OUTPUT2("+A" (i), "+c" (v)),
14880+ ASM_NO_INPUT_CLOBBER("memory"));
14881+ return i;
14882+}
14883+
14884+/**
14885 * atomic64_sub - subtract the atomic64 variable
14886 * @i: integer value to subtract
14887 * @v: pointer to type atomic64_t
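
API-wise the 64-bit split mirrors the 32-bit one above: atomic64_t keeps the overflow trap, atomic64_unchecked_t opts out. A hypothetical call site (both variable names invented):

    #include <linux/atomic.h>

    static atomic64_unchecked_t rx_bytes = ATOMIC64_INIT(0); /* wrap is harmless */
    static atomic64_t map_refcnt = ATOMIC64_INIT(1);         /* wrap must trap   */

    static void account_rx(long long len)
    {
        atomic64_add_unchecked(len, &rx_bytes);  /* never raises int $4 */
    }

    static void grab_map(void)
    {
        atomic64_inc(&map_refcnt);               /* traps on overflow   */
    }
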
14888diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
14889index 46e9052..ae45136 100644
14890--- a/arch/x86/include/asm/atomic64_64.h
14891+++ b/arch/x86/include/asm/atomic64_64.h
14892@@ -18,7 +18,19 @@
14893 */
14894 static inline long atomic64_read(const atomic64_t *v)
14895 {
14896- return (*(volatile long *)&(v)->counter);
14897+ return (*(volatile const long *)&(v)->counter);
14898+}
14899+
14900+/**
14901+ * atomic64_read_unchecked - read atomic64 variable
14902+ * @v: pointer of type atomic64_unchecked_t
14903+ *
14904+ * Atomically reads the value of @v.
14905+ * Doesn't imply a read memory barrier.
14906+ */
14907+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
14908+{
14909+ return (*(volatile const long *)&(v)->counter);
14910 }
14911
14912 /**
14913@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
14914 }
14915
14916 /**
14917+ * atomic64_set_unchecked - set atomic64 variable
14918+ * @v: pointer to type atomic64_unchecked_t
14919+ * @i: required value
14920+ *
14921+ * Atomically sets the value of @v to @i.
14922+ */
14923+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
14924+{
14925+ v->counter = i;
14926+}
14927+
14928+/**
14929 * atomic64_add - add integer to atomic64 variable
14930 * @i: integer value to add
14931 * @v: pointer to type atomic64_t
14932@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
14933 */
14934 static inline void atomic64_add(long i, atomic64_t *v)
14935 {
14936+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
14937+
14938+#ifdef CONFIG_PAX_REFCOUNT
14939+ "jno 0f\n"
14940+ LOCK_PREFIX "subq %1,%0\n"
14941+ "int $4\n0:\n"
14942+ _ASM_EXTABLE(0b, 0b)
14943+#endif
14944+
14945+ : "=m" (v->counter)
14946+ : "er" (i), "m" (v->counter));
14947+}
14948+
14949+/**
14950+ * atomic64_add_unchecked - add integer to atomic64 variable
14951+ * @i: integer value to add
14952+ * @v: pointer to type atomic64_unchecked_t
14953+ *
14954+ * Atomically adds @i to @v.
14955+ */
14956+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
14957+{
14958 asm volatile(LOCK_PREFIX "addq %1,%0"
14959 : "=m" (v->counter)
14960 : "er" (i), "m" (v->counter));
14961@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
14962 */
14963 static inline void atomic64_sub(long i, atomic64_t *v)
14964 {
14965- asm volatile(LOCK_PREFIX "subq %1,%0"
14966+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
14967+
14968+#ifdef CONFIG_PAX_REFCOUNT
14969+ "jno 0f\n"
14970+ LOCK_PREFIX "addq %1,%0\n"
14971+ "int $4\n0:\n"
14972+ _ASM_EXTABLE(0b, 0b)
14973+#endif
14974+
14975+ : "=m" (v->counter)
14976+ : "er" (i), "m" (v->counter));
14977+}
14978+
14979+/**
14980+ * atomic64_sub_unchecked - subtract the atomic64 variable
14981+ * @i: integer value to subtract
14982+ * @v: pointer to type atomic64_unchecked_t
14983+ *
14984+ * Atomically subtracts @i from @v.
14985+ */
14986+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
14987+{
14988+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
14989 : "=m" (v->counter)
14990 : "er" (i), "m" (v->counter));
14991 }
14992@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
14993 */
14994 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
14995 {
14996- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
14997+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
14998 }
14999
15000 /**
15001@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15002 */
15003 static inline void atomic64_inc(atomic64_t *v)
15004 {
15005+ asm volatile(LOCK_PREFIX "incq %0\n"
15006+
15007+#ifdef CONFIG_PAX_REFCOUNT
15008+ "jno 0f\n"
15009+ LOCK_PREFIX "decq %0\n"
15010+ "int $4\n0:\n"
15011+ _ASM_EXTABLE(0b, 0b)
15012+#endif
15013+
15014+ : "=m" (v->counter)
15015+ : "m" (v->counter));
15016+}
15017+
15018+/**
15019+ * atomic64_inc_unchecked - increment atomic64 variable
15020+ * @v: pointer to type atomic64_unchecked_t
15021+ *
15022+ * Atomically increments @v by 1.
15023+ */
15024+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
15025+{
15026 asm volatile(LOCK_PREFIX "incq %0"
15027 : "=m" (v->counter)
15028 : "m" (v->counter));
15029@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
15030 */
15031 static inline void atomic64_dec(atomic64_t *v)
15032 {
15033- asm volatile(LOCK_PREFIX "decq %0"
15034+ asm volatile(LOCK_PREFIX "decq %0\n"
15035+
15036+#ifdef CONFIG_PAX_REFCOUNT
15037+ "jno 0f\n"
15038+ LOCK_PREFIX "incq %0\n"
15039+ "int $4\n0:\n"
15040+ _ASM_EXTABLE(0b, 0b)
15041+#endif
15042+
15043+ : "=m" (v->counter)
15044+ : "m" (v->counter));
15045+}
15046+
15047+/**
15048+ * atomic64_dec_unchecked - decrement atomic64 variable
15049+ * @v: pointer to type atomic64_unchecked_t
15050+ *
15051+ * Atomically decrements @v by 1.
15052+ */
15053+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
15054+{
15055+ asm volatile(LOCK_PREFIX "decq %0\n"
15056 : "=m" (v->counter)
15057 : "m" (v->counter));
15058 }
15059@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
15060 */
15061 static inline int atomic64_dec_and_test(atomic64_t *v)
15062 {
15063- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
15064+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
15065 }
15066
15067 /**
15068@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
15069 */
15070 static inline int atomic64_inc_and_test(atomic64_t *v)
15071 {
15072- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
15073+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
15074 }
15075
15076 /**
15077@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
15078 */
15079 static inline int atomic64_add_negative(long i, atomic64_t *v)
15080 {
15081- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
15082+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
15083 }
15084
15085 /**
15086@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
15087 */
15088 static inline long atomic64_add_return(long i, atomic64_t *v)
15089 {
15090+ return i + xadd_check_overflow(&v->counter, i);
15091+}
15092+
15093+/**
15094+ * atomic64_add_return_unchecked - add and return
15095+ * @i: integer value to add
15096+ * @v: pointer to type atomic64_unchecked_t
15097+ *
15098+ * Atomically adds @i to @v and returns @i + *@v
15099+ */
15100+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
15101+{
15102 return i + xadd(&v->counter, i);
15103 }
15104
15105@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
15106 }
15107
15108 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
15109+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15110+{
15111+ return atomic64_add_return_unchecked(1, v);
15112+}
15113 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
15114
15115 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15116@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15117 return cmpxchg(&v->counter, old, new);
15118 }
15119
15120+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
15121+{
15122+ return cmpxchg(&v->counter, old, new);
15123+}
15124+
15125 static inline long atomic64_xchg(atomic64_t *v, long new)
15126 {
15127 return xchg(&v->counter, new);
15128@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
15129 */
15130 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
15131 {
15132- long c, old;
15133+ long c, old, new;
15134 c = atomic64_read(v);
15135 for (;;) {
15136- if (unlikely(c == (u)))
15137+ if (unlikely(c == u))
15138 break;
15139- old = atomic64_cmpxchg((v), c, c + (a));
15140+
15141+ asm volatile("add %2,%0\n"
15142+
15143+#ifdef CONFIG_PAX_REFCOUNT
15144+ "jno 0f\n"
15145+ "sub %2,%0\n"
15146+ "int $4\n0:\n"
15147+ _ASM_EXTABLE(0b, 0b)
15148+#endif
15149+
15150+ : "=r" (new)
15151+ : "0" (c), "ir" (a));
15152+
15153+ old = atomic64_cmpxchg(v, c, new);
15154 if (likely(old == c))
15155 break;
15156 c = old;
15157 }
15158- return c != (u);
15159+ return c != u;
15160 }
15161
15162 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
15163diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
15164index 9fc1af7..fc71228 100644
15165--- a/arch/x86/include/asm/bitops.h
15166+++ b/arch/x86/include/asm/bitops.h
15167@@ -49,7 +49,7 @@
15168 * a mask operation on a byte.
15169 */
15170 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
15171-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
15172+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
15173 #define CONST_MASK(nr) (1 << ((nr) & 7))
15174
15175 /**
15176@@ -205,7 +205,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
15177 */
15178 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
15179 {
15180- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
15181+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
15182 }
15183
15184 /**
15185@@ -251,7 +251,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
15186 */
15187 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
15188 {
15189- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
15190+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
15191 }
15192
15193 /**
15194@@ -304,7 +304,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
15195 */
15196 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
15197 {
15198- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
15199+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
15200 }
15201
15202 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
15203@@ -345,7 +345,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
15204 *
15205 * Undefined if no bit exists, so code should check against 0 first.
15206 */
15207-static inline unsigned long __ffs(unsigned long word)
15208+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
15209 {
15210 asm("rep; bsf %1,%0"
15211 : "=r" (word)
15212@@ -359,7 +359,7 @@ static inline unsigned long __ffs(unsigned long word)
15213 *
15214 * Undefined if no zero exists, so code should check against ~0UL first.
15215 */
15216-static inline unsigned long ffz(unsigned long word)
15217+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
15218 {
15219 asm("rep; bsf %1,%0"
15220 : "=r" (word)
15221@@ -373,7 +373,7 @@ static inline unsigned long ffz(unsigned long word)
15222 *
15223 * Undefined if no set bit exists, so code should check against 0 first.
15224 */
15225-static inline unsigned long __fls(unsigned long word)
15226+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
15227 {
15228 asm("bsr %1,%0"
15229 : "=r" (word)
15230@@ -436,7 +436,7 @@ static inline int ffs(int x)
15231 * set bit if value is nonzero. The last (most significant) bit is
15232 * at position 32.
15233 */
15234-static inline int fls(int x)
15235+static inline int __intentional_overflow(-1) fls(int x)
15236 {
15237 int r;
15238
15239@@ -478,7 +478,7 @@ static inline int fls(int x)
15240 * at position 64.
15241 */
15242 #ifdef CONFIG_X86_64
15243-static __always_inline int fls64(__u64 x)
15244+static __always_inline long fls64(__u64 x)
15245 {
15246 int bitpos = -1;
15247 /*
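
__intentional_overflow(-1), stamped here onto __ffs, ffz, __fls and fls, is an annotation for grsecurity's size_overflow GCC plugin marking functions whose arithmetic is allowed to wrap, so the plugin leaves them uninstrumented. Roughly how such a marker is plumbed; the real definition lives in the compiler headers elsewhere in this patch, and the guard macro below is an assumption:

    #ifdef SIZE_OVERFLOW_PLUGIN
    #define __intentional_overflow(...) \
            __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    #define __intentional_overflow(...)
    #endif

    /* -1 appears to flag the whole function as allowed to overflow. */
    static inline unsigned long __intentional_overflow(-1) wrap_ok(unsigned long x)
    {
        return x - 1;   /* underflows for x == 0 by design */
    }
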
15248diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
15249index 4fa687a..60f2d39 100644
15250--- a/arch/x86/include/asm/boot.h
15251+++ b/arch/x86/include/asm/boot.h
15252@@ -6,10 +6,15 @@
15253 #include <uapi/asm/boot.h>
15254
15255 /* Physical address where kernel should be loaded. */
15256-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
15257+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
15258 + (CONFIG_PHYSICAL_ALIGN - 1)) \
15259 & ~(CONFIG_PHYSICAL_ALIGN - 1))
15260
15261+#ifndef __ASSEMBLY__
15262+extern unsigned char __LOAD_PHYSICAL_ADDR[];
15263+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
15264+#endif
15265+
15266 /* Minimum kernel alignment, as a power of two */
15267 #ifdef CONFIG_X86_64
15268 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
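
LOAD_PHYSICAL_ADDR stops being a preprocessor constant for C code and becomes the address of the extern symbol __LOAD_PHYSICAL_ADDR, which a linker script can supply; assembly keeps the computed ____LOAD_PHYSICAL_ADDR form. The extern-array pattern on its own (symbol name invented, value assumed to come from a linker script):

    /* In the linker script:  __image_load_addr = 0x1000000; */
    extern unsigned char __image_load_addr[];         /* address, no storage */
    #define IMAGE_LOAD_ADDR ((unsigned long)__image_load_addr)

    unsigned long where_am_i(void)
    {
        return IMAGE_LOAD_ADDR;  /* resolved at link time, not compile time */
    }
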
15269diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
15270index 48f99f1..d78ebf9 100644
15271--- a/arch/x86/include/asm/cache.h
15272+++ b/arch/x86/include/asm/cache.h
15273@@ -5,12 +5,13 @@
15274
15275 /* L1 cache line size */
15276 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
15277-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
15278+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
15279
15280 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
15281+#define __read_only __attribute__((__section__(".data..read_only")))
15282
15283 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
15284-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
15285+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
15286
15287 #ifdef CONFIG_X86_VSMP
15288 #ifdef CONFIG_SMP
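
_AC(1,UL) is used above because these headers are shared between C and the assembler: "1UL" is a syntax error in asm, while a bare "1" shifted by a large cache shift can overflow int in C. The macro comes from the kernel's include/uapi/linux/const.h:

#include <stdio.h>

/* From include/uapi/linux/const.h: paste the type suffix only when
 * compiling C; the assembler sees the bare constant. */
#ifdef __ASSEMBLY__
#define _AC(X, Y)       X
#else
#define __AC(X, Y)      (X##Y)
#define _AC(X, Y)       __AC(X, Y)
#endif

int main(void)
{
        /* "1 << 31" would overflow int (undefined behaviour);
         * the suffixed constant is a clean unsigned long. */
        printf("%lu\n", _AC(1,UL) << 31);       /* 2147483648 */
        return 0;
}
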
15289diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
15290index 9863ee3..4a1f8e1 100644
15291--- a/arch/x86/include/asm/cacheflush.h
15292+++ b/arch/x86/include/asm/cacheflush.h
15293@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
15294 unsigned long pg_flags = pg->flags & _PGMT_MASK;
15295
15296 if (pg_flags == _PGMT_DEFAULT)
15297- return -1;
15298+ return ~0UL;
15299 else if (pg_flags == _PGMT_WC)
15300 return _PAGE_CACHE_WC;
15301 else if (pg_flags == _PGMT_UC_MINUS)
15302diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
15303index cb4c73b..c473c29 100644
15304--- a/arch/x86/include/asm/calling.h
15305+++ b/arch/x86/include/asm/calling.h
15306@@ -82,103 +82,113 @@ For 32-bit we have the following conventions - kernel is built with
15307 #define RSP 152
15308 #define SS 160
15309
15310-#define ARGOFFSET R11
15311-#define SWFRAME ORIG_RAX
15312+#define ARGOFFSET R15
15313
15314 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1
15315- subq $9*8+\addskip, %rsp
15316- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
15317- movq_cfi rdi, 8*8
15318- movq_cfi rsi, 7*8
15319- movq_cfi rdx, 6*8
15320+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
15321+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
15322+ movq_cfi rdi, RDI
15323+ movq_cfi rsi, RSI
15324+ movq_cfi rdx, RDX
15325
15326 .if \save_rcx
15327- movq_cfi rcx, 5*8
15328+ movq_cfi rcx, RCX
15329 .endif
15330
15331- movq_cfi rax, 4*8
15332+ movq_cfi rax, RAX
15333
15334 .if \save_r891011
15335- movq_cfi r8, 3*8
15336- movq_cfi r9, 2*8
15337- movq_cfi r10, 1*8
15338- movq_cfi r11, 0*8
15339+ movq_cfi r8, R8
15340+ movq_cfi r9, R9
15341+ movq_cfi r10, R10
15342+ movq_cfi r11, R11
15343 .endif
15344
15345+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15346+ movq_cfi r12, R12
15347+#endif
15348+
15349 .endm
15350
15351-#define ARG_SKIP (9*8)
15352+#define ARG_SKIP ORIG_RAX
15353
15354 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
15355 rstor_r8910=1, rstor_rdx=1
15356+
15357+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15358+ movq_cfi_restore R12, r12
15359+#endif
15360+
15361 .if \rstor_r11
15362- movq_cfi_restore 0*8, r11
15363+ movq_cfi_restore R11, r11
15364 .endif
15365
15366 .if \rstor_r8910
15367- movq_cfi_restore 1*8, r10
15368- movq_cfi_restore 2*8, r9
15369- movq_cfi_restore 3*8, r8
15370+ movq_cfi_restore R10, r10
15371+ movq_cfi_restore R9, r9
15372+ movq_cfi_restore R8, r8
15373 .endif
15374
15375 .if \rstor_rax
15376- movq_cfi_restore 4*8, rax
15377+ movq_cfi_restore RAX, rax
15378 .endif
15379
15380 .if \rstor_rcx
15381- movq_cfi_restore 5*8, rcx
15382+ movq_cfi_restore RCX, rcx
15383 .endif
15384
15385 .if \rstor_rdx
15386- movq_cfi_restore 6*8, rdx
15387+ movq_cfi_restore RDX, rdx
15388 .endif
15389
15390- movq_cfi_restore 7*8, rsi
15391- movq_cfi_restore 8*8, rdi
15392+ movq_cfi_restore RSI, rsi
15393+ movq_cfi_restore RDI, rdi
15394
15395- .if ARG_SKIP+\addskip > 0
15396- addq $ARG_SKIP+\addskip, %rsp
15397- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
15398+ .if ORIG_RAX+\addskip > 0
15399+ addq $ORIG_RAX+\addskip, %rsp
15400+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
15401 .endif
15402 .endm
15403
15404- .macro LOAD_ARGS offset, skiprax=0
15405- movq \offset(%rsp), %r11
15406- movq \offset+8(%rsp), %r10
15407- movq \offset+16(%rsp), %r9
15408- movq \offset+24(%rsp), %r8
15409- movq \offset+40(%rsp), %rcx
15410- movq \offset+48(%rsp), %rdx
15411- movq \offset+56(%rsp), %rsi
15412- movq \offset+64(%rsp), %rdi
15413+ .macro LOAD_ARGS skiprax=0
15414+ movq R11(%rsp), %r11
15415+ movq R10(%rsp), %r10
15416+ movq R9(%rsp), %r9
15417+ movq R8(%rsp), %r8
15418+ movq RCX(%rsp), %rcx
15419+ movq RDX(%rsp), %rdx
15420+ movq RSI(%rsp), %rsi
15421+ movq RDI(%rsp), %rdi
15422 .if \skiprax
15423 .else
15424- movq \offset+72(%rsp), %rax
15425+ movq RAX(%rsp), %rax
15426 .endif
15427 .endm
15428
15429-#define REST_SKIP (6*8)
15430-
15431 .macro SAVE_REST
15432- subq $REST_SKIP, %rsp
15433- CFI_ADJUST_CFA_OFFSET REST_SKIP
15434- movq_cfi rbx, 5*8
15435- movq_cfi rbp, 4*8
15436- movq_cfi r12, 3*8
15437- movq_cfi r13, 2*8
15438- movq_cfi r14, 1*8
15439- movq_cfi r15, 0*8
15440+ movq_cfi rbx, RBX
15441+ movq_cfi rbp, RBP
15442+
15443+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15444+ movq_cfi r12, R12
15445+#endif
15446+
15447+ movq_cfi r13, R13
15448+ movq_cfi r14, R14
15449+ movq_cfi r15, R15
15450 .endm
15451
15452 .macro RESTORE_REST
15453- movq_cfi_restore 0*8, r15
15454- movq_cfi_restore 1*8, r14
15455- movq_cfi_restore 2*8, r13
15456- movq_cfi_restore 3*8, r12
15457- movq_cfi_restore 4*8, rbp
15458- movq_cfi_restore 5*8, rbx
15459- addq $REST_SKIP, %rsp
15460- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
15461+ movq_cfi_restore R15, r15
15462+ movq_cfi_restore R14, r14
15463+ movq_cfi_restore R13, r13
15464+
15465+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15466+ movq_cfi_restore R12, r12
15467+#endif
15468+
15469+ movq_cfi_restore RBP, rbp
15470+ movq_cfi_restore RBX, rbx
15471 .endm
15472
15473 .macro SAVE_ALL
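
The SAVE_ARGS/RESTORE_ARGS rewrite replaces hand-counted stack offsets like "5*8" with the named pt_regs offsets (RDI, RSI, ... R15) that asm-offsets generates, so reserving a new slot (here R12 for KERNEXEC's method-OR register) cannot silently skew every literal. The same discipline in plain C is offsetof(); a toy sketch:

#include <stddef.h>
#include <stdio.h>

/* Toy register-save frame; the ordering mirrors the x86-64 pt_regs idea. */
struct frame {
        unsigned long r15, r14, r13, r12, rbp, rbx;
        unsigned long r11, r10, r9, r8, rax, rcx, rdx, rsi, rdi;
};

/* Named offsets derived from the struct (what asm-offsets.h automates);
 * adding a field shifts these automatically, unlike literal "5*8" counts. */
#define FRAME_OFF(reg)  offsetof(struct frame, reg)

int main(void)
{
        printf("rdi at %zu, r11 at %zu\n", FRAME_OFF(rdi), FRAME_OFF(r11));
        return 0;
}
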
15474diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
15475index f50de69..2b0a458 100644
15476--- a/arch/x86/include/asm/checksum_32.h
15477+++ b/arch/x86/include/asm/checksum_32.h
15478@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
15479 int len, __wsum sum,
15480 int *src_err_ptr, int *dst_err_ptr);
15481
15482+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
15483+ int len, __wsum sum,
15484+ int *src_err_ptr, int *dst_err_ptr);
15485+
15486+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
15487+ int len, __wsum sum,
15488+ int *src_err_ptr, int *dst_err_ptr);
15489+
15490 /*
15491 * Note: when you get a NULL pointer exception here this means someone
15492 * passed in an incorrect kernel address to one of these functions.
15493@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
15494
15495 might_sleep();
15496 stac();
15497- ret = csum_partial_copy_generic((__force void *)src, dst,
15498+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
15499 len, sum, err_ptr, NULL);
15500 clac();
15501
15502@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
15503 might_sleep();
15504 if (access_ok(VERIFY_WRITE, dst, len)) {
15505 stac();
15506- ret = csum_partial_copy_generic(src, (__force void *)dst,
15507+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
15508 len, sum, NULL, err_ptr);
15509 clac();
15510 return ret;
15511diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
15512index d47786a..ce1b05d 100644
15513--- a/arch/x86/include/asm/cmpxchg.h
15514+++ b/arch/x86/include/asm/cmpxchg.h
15515@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
15516 __compiletime_error("Bad argument size for cmpxchg");
15517 extern void __xadd_wrong_size(void)
15518 __compiletime_error("Bad argument size for xadd");
15519+extern void __xadd_check_overflow_wrong_size(void)
15520+ __compiletime_error("Bad argument size for xadd_check_overflow");
15521 extern void __add_wrong_size(void)
15522 __compiletime_error("Bad argument size for add");
15523+extern void __add_check_overflow_wrong_size(void)
15524+ __compiletime_error("Bad argument size for add_check_overflow");
15525
15526 /*
15527 * Constants for operation sizes. On 32-bit, the 64-bit size is set to
15528@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
15529 __ret; \
15530 })
15531
15532+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
15533+ ({ \
15534+ __typeof__ (*(ptr)) __ret = (arg); \
15535+ switch (sizeof(*(ptr))) { \
15536+ case __X86_CASE_L: \
15537+ asm volatile (lock #op "l %0, %1\n" \
15538+ "jno 0f\n" \
15539+ "mov %0,%1\n" \
15540+ "int $4\n0:\n" \
15541+ _ASM_EXTABLE(0b, 0b) \
15542+ : "+r" (__ret), "+m" (*(ptr)) \
15543+ : : "memory", "cc"); \
15544+ break; \
15545+ case __X86_CASE_Q: \
15546+ asm volatile (lock #op "q %q0, %1\n" \
15547+ "jno 0f\n" \
15548+ "mov %0,%1\n" \
15549+ "int $4\n0:\n" \
15550+ _ASM_EXTABLE(0b, 0b) \
15551+ : "+r" (__ret), "+m" (*(ptr)) \
15552+ : : "memory", "cc"); \
15553+ break; \
15554+ default: \
15555+ __ ## op ## _check_overflow_wrong_size(); \
15556+ } \
15557+ __ret; \
15558+ })
15559+
15560 /*
15561 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
15562 * Since this is generally used to protect other memory information, we
15563@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
15564 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
15565 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
15566
15567+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
15568+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
15569+
15570 #define __add(ptr, inc, lock) \
15571 ({ \
15572 __typeof__ (*(ptr)) __ret = (inc); \
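
__xchg_op_check_overflow above is the core PAX_REFCOUNT primitive: perform the locked operation, then "jno" skips the slow path; on signed overflow the pre-op value left in the register is written back (undoing the increment, with a benign race on the error path) and "int $4" raises the overflow exception for the kernel to report, with _ASM_EXTABLE resuming execution afterwards. A userspace sketch of the same sequence for x86-64 GCC/Clang; the overflow branch is only described, since "int $4" from user mode is delivered as a fatal signal:

#include <stdio.h>

/* After "lock xadd", the register holds the old value and memory holds
 * old+inc; OF set means the signed add wrapped: restore and trap. */
static long xadd_check_overflow(long *ptr, long inc)
{
        asm volatile("lock xaddq %0, %1\n\t"
                     "jno 1f\n\t"
                     "movq %0, %1\n\t"  /* undo: write the pre-add value back */
                     "int $4\n"         /* #OF; in-kernel an extable entry resumes here */
                     "1:"
                     : "+r" (inc), "+m" (*ptr)
                     : : "memory", "cc");
        return inc;                     /* the old value, as with plain xadd */
}

int main(void)
{
        long counter = 40;
        long old = xadd_check_overflow(&counter, 2);
        printf("old=%ld new=%ld\n", old, counter);      /* old=40 new=42 */
        /* Adding 1 to a counter at LONG_MAX would execute int $4 instead. */
        return 0;
}
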
15573diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
15574index 59c6c40..5e0b22c 100644
15575--- a/arch/x86/include/asm/compat.h
15576+++ b/arch/x86/include/asm/compat.h
15577@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
15578 typedef u32 compat_uint_t;
15579 typedef u32 compat_ulong_t;
15580 typedef u64 __attribute__((aligned(4))) compat_u64;
15581-typedef u32 compat_uptr_t;
15582+typedef u32 __user compat_uptr_t;
15583
15584 struct compat_timespec {
15585 compat_time_t tv_sec;
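
Adding __user to compat_uptr_t lets sparse flag any direct dereference of a compat user pointer. The annotation is purely a static-analysis marker; from include/linux/compiler.h:

/* Real only under sparse (__CHECKER__); invisible to the compiler. */
#ifdef __CHECKER__
# define __user         __attribute__((noderef, address_space(1)))
#else
# define __user
#endif

/* With the patch, a compat_uptr_t must pass through an explicit
 * compat_ptr()/copy_from_user()-style conversion to survive sparse. */
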
15586diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
15587index 89270b4..f0abf8e 100644
15588--- a/arch/x86/include/asm/cpufeature.h
15589+++ b/arch/x86/include/asm/cpufeature.h
15590@@ -203,7 +203,7 @@
15591 #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
15592 #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
15593 #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */
15594-
15595+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
15596
15597 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
15598 #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
15599@@ -211,7 +211,7 @@
15600 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
15601 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
15602 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
15603-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
15604+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
15605 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
15606 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
15607 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
15608@@ -353,6 +353,7 @@ extern const char * const x86_power_flags[32];
15609 #undef cpu_has_centaur_mcr
15610 #define cpu_has_centaur_mcr 0
15611
15612+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
15613 #endif /* CONFIG_X86_64 */
15614
15615 #if __GNUC__ >= 4
15616@@ -405,7 +406,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
15617
15618 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
15619 t_warn:
15620- warn_pre_alternatives();
15621+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
15622+ warn_pre_alternatives();
15623 return false;
15624 #endif
15625
15626@@ -425,7 +427,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
15627 ".section .discard,\"aw\",@progbits\n"
15628 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
15629 ".previous\n"
15630- ".section .altinstr_replacement,\"ax\"\n"
15631+ ".section .altinstr_replacement,\"a\"\n"
15632 "3: movb $1,%0\n"
15633 "4:\n"
15634 ".previous\n"
15635@@ -462,7 +464,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
15636 " .byte 2b - 1b\n" /* src len */
15637 " .byte 4f - 3f\n" /* repl len */
15638 ".previous\n"
15639- ".section .altinstr_replacement,\"ax\"\n"
15640+ ".section .altinstr_replacement,\"a\"\n"
15641 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
15642 "4:\n"
15643 ".previous\n"
15644@@ -495,7 +497,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
15645 ".section .discard,\"aw\",@progbits\n"
15646 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
15647 ".previous\n"
15648- ".section .altinstr_replacement,\"ax\"\n"
15649+ ".section .altinstr_replacement,\"a\"\n"
15650 "3: movb $0,%0\n"
15651 "4:\n"
15652 ".previous\n"
15653@@ -509,7 +511,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
15654 ".section .discard,\"aw\",@progbits\n"
15655 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
15656 ".previous\n"
15657- ".section .altinstr_replacement,\"ax\"\n"
15658+ ".section .altinstr_replacement,\"a\"\n"
15659 "5: movb $1,%0\n"
15660 "6:\n"
15661 ".previous\n"
15662diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
15663index 50d033a..37deb26 100644
15664--- a/arch/x86/include/asm/desc.h
15665+++ b/arch/x86/include/asm/desc.h
15666@@ -4,6 +4,7 @@
15667 #include <asm/desc_defs.h>
15668 #include <asm/ldt.h>
15669 #include <asm/mmu.h>
15670+#include <asm/pgtable.h>
15671
15672 #include <linux/smp.h>
15673 #include <linux/percpu.h>
15674@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
15675
15676 desc->type = (info->read_exec_only ^ 1) << 1;
15677 desc->type |= info->contents << 2;
15678+ desc->type |= info->seg_not_present ^ 1;
15679
15680 desc->s = 1;
15681 desc->dpl = 0x3;
15682@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
15683 }
15684
15685 extern struct desc_ptr idt_descr;
15686-extern gate_desc idt_table[];
15687-extern struct desc_ptr debug_idt_descr;
15688-extern gate_desc debug_idt_table[];
15689-
15690-struct gdt_page {
15691- struct desc_struct gdt[GDT_ENTRIES];
15692-} __attribute__((aligned(PAGE_SIZE)));
15693-
15694-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
15695+extern gate_desc idt_table[IDT_ENTRIES];
15696+extern const struct desc_ptr debug_idt_descr;
15697+extern gate_desc debug_idt_table[IDT_ENTRIES];
15698
15699+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
15700 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
15701 {
15702- return per_cpu(gdt_page, cpu).gdt;
15703+ return cpu_gdt_table[cpu];
15704 }
15705
15706 #ifdef CONFIG_X86_64
15707@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
15708 unsigned long base, unsigned dpl, unsigned flags,
15709 unsigned short seg)
15710 {
15711- gate->a = (seg << 16) | (base & 0xffff);
15712- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
15713+ gate->gate.offset_low = base;
15714+ gate->gate.seg = seg;
15715+ gate->gate.reserved = 0;
15716+ gate->gate.type = type;
15717+ gate->gate.s = 0;
15718+ gate->gate.dpl = dpl;
15719+ gate->gate.p = 1;
15720+ gate->gate.offset_high = base >> 16;
15721 }
15722
15723 #endif
15724@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
15725
15726 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
15727 {
15728+ pax_open_kernel();
15729 memcpy(&idt[entry], gate, sizeof(*gate));
15730+ pax_close_kernel();
15731 }
15732
15733 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
15734 {
15735+ pax_open_kernel();
15736 memcpy(&ldt[entry], desc, 8);
15737+ pax_close_kernel();
15738 }
15739
15740 static inline void
15741@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
15742 default: size = sizeof(*gdt); break;
15743 }
15744
15745+ pax_open_kernel();
15746 memcpy(&gdt[entry], desc, size);
15747+ pax_close_kernel();
15748 }
15749
15750 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
15751@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
15752
15753 static inline void native_load_tr_desc(void)
15754 {
15755+ pax_open_kernel();
15756 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
15757+ pax_close_kernel();
15758 }
15759
15760 static inline void native_load_gdt(const struct desc_ptr *dtr)
15761@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
15762 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
15763 unsigned int i;
15764
15765+ pax_open_kernel();
15766 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
15767 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
15768+ pax_close_kernel();
15769 }
15770
15771 #define _LDT_empty(info) \
15772@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
15773 preempt_enable();
15774 }
15775
15776-static inline unsigned long get_desc_base(const struct desc_struct *desc)
15777+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
15778 {
15779 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
15780 }
15781@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
15782 }
15783
15784 #ifdef CONFIG_X86_64
15785-static inline void set_nmi_gate(int gate, void *addr)
15786+static inline void set_nmi_gate(int gate, const void *addr)
15787 {
15788 gate_desc s;
15789
15790@@ -321,14 +334,14 @@ static inline void set_nmi_gate(int gate, void *addr)
15791 #endif
15792
15793 #ifdef CONFIG_TRACING
15794-extern struct desc_ptr trace_idt_descr;
15795-extern gate_desc trace_idt_table[];
15796+extern const struct desc_ptr trace_idt_descr;
15797+extern gate_desc trace_idt_table[IDT_ENTRIES];
15798 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
15799 {
15800 write_idt_entry(trace_idt_table, entry, gate);
15801 }
15802
15803-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
15804+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
15805 unsigned dpl, unsigned ist, unsigned seg)
15806 {
15807 gate_desc s;
15808@@ -348,7 +361,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
15809 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
15810 #endif
15811
15812-static inline void _set_gate(int gate, unsigned type, void *addr,
15813+static inline void _set_gate(int gate, unsigned type, const void *addr,
15814 unsigned dpl, unsigned ist, unsigned seg)
15815 {
15816 gate_desc s;
15817@@ -371,9 +384,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
15818 #define set_intr_gate(n, addr) \
15819 do { \
15820 BUG_ON((unsigned)n > 0xFF); \
15821- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
15822+ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
15823 __KERNEL_CS); \
15824- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
15825+ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
15826 0, 0, __KERNEL_CS); \
15827 } while (0)
15828
15829@@ -401,19 +414,19 @@ static inline void alloc_system_vector(int vector)
15830 /*
15831 * This routine sets up an interrupt gate at directory privilege level 3.
15832 */
15833-static inline void set_system_intr_gate(unsigned int n, void *addr)
15834+static inline void set_system_intr_gate(unsigned int n, const void *addr)
15835 {
15836 BUG_ON((unsigned)n > 0xFF);
15837 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
15838 }
15839
15840-static inline void set_system_trap_gate(unsigned int n, void *addr)
15841+static inline void set_system_trap_gate(unsigned int n, const void *addr)
15842 {
15843 BUG_ON((unsigned)n > 0xFF);
15844 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
15845 }
15846
15847-static inline void set_trap_gate(unsigned int n, void *addr)
15848+static inline void set_trap_gate(unsigned int n, const void *addr)
15849 {
15850 BUG_ON((unsigned)n > 0xFF);
15851 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
15852@@ -422,16 +435,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
15853 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
15854 {
15855 BUG_ON((unsigned)n > 0xFF);
15856- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
15857+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
15858 }
15859
15860-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
15861+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
15862 {
15863 BUG_ON((unsigned)n > 0xFF);
15864 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
15865 }
15866
15867-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
15868+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
15869 {
15870 BUG_ON((unsigned)n > 0xFF);
15871 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
15872@@ -503,4 +516,17 @@ static inline void load_current_idt(void)
15873 else
15874 load_idt((const struct desc_ptr *)&idt_descr);
15875 }
15876+
15877+#ifdef CONFIG_X86_32
15878+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
15879+{
15880+ struct desc_struct d;
15881+
15882+ if (likely(limit))
15883+ limit = (limit - 1UL) >> PAGE_SHIFT;
15884+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
15885+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
15886+}
15887+#endif
15888+
15889 #endif /* _ASM_X86_DESC_H */
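
desc.h now brackets every GDT/IDT/LDT write with pax_open_kernel()/pax_close_kernel() because KERNEXEC keeps those tables read-only; the pair opens a short write window (for instance by toggling CR0.WP) and closes it again. A userspace analogue of the pattern using mprotect(); names and the table size are illustrative:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

/* A page-aligned "descriptor table" kept read-only between updates. */
static unsigned char *table;

static void open_window(void)  { mprotect(table, 4096, PROT_READ | PROT_WRITE); }
static void close_window(void) { mprotect(table, 4096, PROT_READ); }

int main(void)
{
        table = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (table == MAP_FAILED)
                return 1;

        open_window();                  /* cf. pax_open_kernel() */
        memcpy(table, "entry", 6);      /* the write_*_entry() body */
        close_window();                 /* cf. pax_close_kernel() */

        printf("%s\n", table);          /* reads stay legal */
        /* table[0] = 'X' here would fault: the window is closed. */
        return 0;
}
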
15890diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
15891index 278441f..b95a174 100644
15892--- a/arch/x86/include/asm/desc_defs.h
15893+++ b/arch/x86/include/asm/desc_defs.h
15894@@ -31,6 +31,12 @@ struct desc_struct {
15895 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
15896 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
15897 };
15898+ struct {
15899+ u16 offset_low;
15900+ u16 seg;
15901+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
15902+ unsigned offset_high: 16;
15903+ } gate;
15904 };
15905 } __attribute__((packed));
15906
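
The new "gate" view in desc_struct replaces pack_gate()'s shift-and-mask arithmetic with named bitfields, which the 64-bit pack_gate above now assigns field by field. A quick check that the two encodings agree; this relies on x86 being little-endian and GCC allocating bitfields low-to-high there:

#include <stdio.h>
#include <stdint.h>

union gate {
        struct { uint32_t a, b; };                      /* raw words */
        struct {
                uint16_t offset_low, seg;
                unsigned reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
                unsigned offset_high : 16;
        } g;
};

int main(void)
{
        union gate x = {0}, y = {0};
        uint32_t base = 0xc1042af0, seg = 0x10, type = 0xE, dpl = 0;

        /* old pack_gate(): explicit shifting and masking */
        x.a = (seg << 16) | (base & 0xffff);
        x.b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);

        /* new style: named fields */
        y.g.offset_low = base; y.g.seg = seg; y.g.reserved = 0;
        y.g.type = type; y.g.s = 0; y.g.dpl = dpl; y.g.p = 1;
        y.g.offset_high = base >> 16;

        printf("%s\n", (x.a == y.a && x.b == y.b) ? "identical" : "differ");
        return 0;
}
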
15907diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
15908index ced283a..ffe04cc 100644
15909--- a/arch/x86/include/asm/div64.h
15910+++ b/arch/x86/include/asm/div64.h
15911@@ -39,7 +39,7 @@
15912 __mod; \
15913 })
15914
15915-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
15916+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
15917 {
15918 union {
15919 u64 v64;
15920diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
15921index 9c999c1..3860cb8 100644
15922--- a/arch/x86/include/asm/elf.h
15923+++ b/arch/x86/include/asm/elf.h
15924@@ -243,7 +243,25 @@ extern int force_personality32;
15925 the loader. We need to make sure that it is out of the way of the program
15926 that it will "exec", and that there is sufficient room for the brk. */
15927
15928+#ifdef CONFIG_PAX_SEGMEXEC
15929+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
15930+#else
15931 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
15932+#endif
15933+
15934+#ifdef CONFIG_PAX_ASLR
15935+#ifdef CONFIG_X86_32
15936+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
15937+
15938+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
15939+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
15940+#else
15941+#define PAX_ELF_ET_DYN_BASE 0x400000UL
15942+
15943+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
15944+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
15945+#endif
15946+#endif
15947
15948 /* This yields a mask that user programs can use to figure out what
15949 instruction set this CPU supports. This could be done in user space,
15950@@ -296,16 +314,12 @@ do { \
15951
15952 #define ARCH_DLINFO \
15953 do { \
15954- if (vdso_enabled) \
15955- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
15956- (unsigned long)current->mm->context.vdso); \
15957+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
15958 } while (0)
15959
15960 #define ARCH_DLINFO_X32 \
15961 do { \
15962- if (vdso_enabled) \
15963- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
15964- (unsigned long)current->mm->context.vdso); \
15965+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
15966 } while (0)
15967
15968 #define AT_SYSINFO 32
15969@@ -320,7 +334,7 @@ else \
15970
15971 #endif /* !CONFIG_X86_32 */
15972
15973-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
15974+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
15975
15976 #define VDSO_ENTRY \
15977 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
15978@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
15979 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
15980 #define compat_arch_setup_additional_pages syscall32_setup_pages
15981
15982-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
15983-#define arch_randomize_brk arch_randomize_brk
15984-
15985 /*
15986 * True on X86_32 or when emulating IA32 on X86_64
15987 */
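
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN above give the number of random bits applied to the mmap and stack bases: 16 on plain i386, 15 under SEGMEXEC (whose user address space is halved), and on 64-bit everything the address-space shift allows minus a 3-bit margin. A back-of-the-envelope of what a bit count buys, pure arithmetic rather than kernel code:

#include <stdio.h>

/* N random bits at page granularity: the base moves by up to 2^N - 1 pages. */
static void show(const char *name, unsigned bits)
{
        unsigned long long pages = (1ULL << bits) - 1;
        printf("%-8s %2u bits -> up to %llu pages (%#llx bytes)\n",
               name, bits, pages, pages << 12);
}

int main(void)
{
        show("i386", 16);       /* PAX_DELTA_MMAP_LEN, plain x86-32 */
        show("segmexec", 15);   /* halved user space, one bit fewer */
        show("x86_64", 32);     /* TASK_SIZE_MAX_SHIFT(47) - PAGE_SHIFT(12) - 3 */
        return 0;
}
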
15988diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
15989index 77a99ac..39ff7f5 100644
15990--- a/arch/x86/include/asm/emergency-restart.h
15991+++ b/arch/x86/include/asm/emergency-restart.h
15992@@ -1,6 +1,6 @@
15993 #ifndef _ASM_X86_EMERGENCY_RESTART_H
15994 #define _ASM_X86_EMERGENCY_RESTART_H
15995
15996-extern void machine_emergency_restart(void);
15997+extern void machine_emergency_restart(void) __noreturn;
15998
15999 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
16000diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
16001index d3d7469..677ef72 100644
16002--- a/arch/x86/include/asm/floppy.h
16003+++ b/arch/x86/include/asm/floppy.h
16004@@ -229,18 +229,18 @@ static struct fd_routine_l {
16005 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
16006 } fd_routine[] = {
16007 {
16008- request_dma,
16009- free_dma,
16010- get_dma_residue,
16011- dma_mem_alloc,
16012- hard_dma_setup
16013+ ._request_dma = request_dma,
16014+ ._free_dma = free_dma,
16015+ ._get_dma_residue = get_dma_residue,
16016+ ._dma_mem_alloc = dma_mem_alloc,
16017+ ._dma_setup = hard_dma_setup
16018 },
16019 {
16020- vdma_request_dma,
16021- vdma_nop,
16022- vdma_get_dma_residue,
16023- vdma_mem_alloc,
16024- vdma_dma_setup
16025+ ._request_dma = vdma_request_dma,
16026+ ._free_dma = vdma_nop,
16027+ ._get_dma_residue = vdma_get_dma_residue,
16028+ ._dma_mem_alloc = vdma_mem_alloc,
16029+ ._dma_setup = vdma_dma_setup
16030 }
16031 };
16032
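
Converting fd_routine[] to designated initializers ties each value to its field by name, so the table survives any reordering of struct fd_routine_l (a prerequisite once structure layout can be randomized) and reads better besides. A minimal contrast:

#include <stdio.h>

struct ops {
        int  (*open)(const char *name);
        void (*close)(int fd);
};

static int  my_open(const char *name) { (void)name; return 3; }
static void my_close(int fd)          { (void)fd; }

/* positional: breaks at compile time here if the fields swap (the
 * signatures differ), but silently misbinds when they happen to match */
/* static struct ops o = { my_open, my_close }; */

/* designated: each pointer is bound to its field by name */
static struct ops o = {
        .open  = my_open,
        .close = my_close,
};

int main(void)
{
        int fd = o.open("demo");
        o.close(fd);
        printf("ok\n");
        return 0;
}
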
16033diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
16034index cea1c76..6c0d79b 100644
16035--- a/arch/x86/include/asm/fpu-internal.h
16036+++ b/arch/x86/include/asm/fpu-internal.h
16037@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16038 #define user_insn(insn, output, input...) \
16039 ({ \
16040 int err; \
16041+ pax_open_userland(); \
16042 asm volatile(ASM_STAC "\n" \
16043- "1:" #insn "\n\t" \
16044+ "1:" \
16045+ __copyuser_seg \
16046+ #insn "\n\t" \
16047 "2: " ASM_CLAC "\n" \
16048 ".section .fixup,\"ax\"\n" \
16049 "3: movl $-1,%[err]\n" \
16050@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16051 _ASM_EXTABLE(1b, 3b) \
16052 : [err] "=r" (err), output \
16053 : "0"(0), input); \
16054+ pax_close_userland(); \
16055 err; \
16056 })
16057
16058@@ -298,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
16059 "fnclex\n\t"
16060 "emms\n\t"
16061 "fildl %P[addr]" /* set F?P to defined value */
16062- : : [addr] "m" (tsk->thread.fpu.has_fpu));
16063+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
16064 }
16065
16066 return fpu_restore_checking(&tsk->thread.fpu);
16067diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
16068index be27ba1..04a8801 100644
16069--- a/arch/x86/include/asm/futex.h
16070+++ b/arch/x86/include/asm/futex.h
16071@@ -12,6 +12,7 @@
16072 #include <asm/smap.h>
16073
16074 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
16075+ typecheck(u32 __user *, uaddr); \
16076 asm volatile("\t" ASM_STAC "\n" \
16077 "1:\t" insn "\n" \
16078 "2:\t" ASM_CLAC "\n" \
16079@@ -20,15 +21,16 @@
16080 "\tjmp\t2b\n" \
16081 "\t.previous\n" \
16082 _ASM_EXTABLE(1b, 3b) \
16083- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
16084+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
16085 : "i" (-EFAULT), "0" (oparg), "1" (0))
16086
16087 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
16088+ typecheck(u32 __user *, uaddr); \
16089 asm volatile("\t" ASM_STAC "\n" \
16090 "1:\tmovl %2, %0\n" \
16091 "\tmovl\t%0, %3\n" \
16092 "\t" insn "\n" \
16093- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
16094+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
16095 "\tjnz\t1b\n" \
16096 "3:\t" ASM_CLAC "\n" \
16097 "\t.section .fixup,\"ax\"\n" \
16098@@ -38,7 +40,7 @@
16099 _ASM_EXTABLE(1b, 4b) \
16100 _ASM_EXTABLE(2b, 4b) \
16101 : "=&a" (oldval), "=&r" (ret), \
16102- "+m" (*uaddr), "=&r" (tem) \
16103+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
16104 : "r" (oparg), "i" (-EFAULT), "1" (0))
16105
16106 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16107@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16108
16109 pagefault_disable();
16110
16111+ pax_open_userland();
16112 switch (op) {
16113 case FUTEX_OP_SET:
16114- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
16115+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
16116 break;
16117 case FUTEX_OP_ADD:
16118- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
16119+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
16120 uaddr, oparg);
16121 break;
16122 case FUTEX_OP_OR:
16123@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16124 default:
16125 ret = -ENOSYS;
16126 }
16127+ pax_close_userland();
16128
16129 pagefault_enable();
16130
16131@@ -115,18 +119,20 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
16132 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
16133 return -EFAULT;
16134
16135+ pax_open_userland();
16136 asm volatile("\t" ASM_STAC "\n"
16137- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
16138+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
16139 "2:\t" ASM_CLAC "\n"
16140 "\t.section .fixup, \"ax\"\n"
16141 "3:\tmov %3, %0\n"
16142 "\tjmp 2b\n"
16143 "\t.previous\n"
16144 _ASM_EXTABLE(1b, 3b)
16145- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
16146+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
16147 : "i" (-EFAULT), "r" (newval), "1" (oldval)
16148 : "memory"
16149 );
16150+ pax_close_userland();
16151
16152 *uval = oldval;
16153 return ret;
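
The typecheck(u32 __user *, uaddr) lines added to the futex macros force a compile-time type check before uaddr is laundered through ____m(), so the cast cannot hide a wrong pointer type. typecheck() is the stock kernel macro from include/linux/typecheck.h:

#include <stdio.h>

/* Comparing the two pointers warns (errs under -Werror) unless 'x' has
 * exactly the declared type; the expression evaluates to 1. */
#define typecheck(type, x) \
({      type __dummy; \
        typeof(x) __dummy2; \
        (void)(&__dummy == &__dummy2); \
        1; \
})

int main(void)
{
        unsigned int *p = 0;
        typecheck(unsigned int *, p);   /* fine */
        /* typecheck(unsigned long *, p); -> comparison of distinct
         *                                  pointer types */
        printf("type ok\n");
        return 0;
}
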
16154diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
16155index cba45d9..86344ba 100644
16156--- a/arch/x86/include/asm/hw_irq.h
16157+++ b/arch/x86/include/asm/hw_irq.h
16158@@ -165,8 +165,8 @@ extern void setup_ioapic_dest(void);
16159 extern void enable_IO_APIC(void);
16160
16161 /* Statistics */
16162-extern atomic_t irq_err_count;
16163-extern atomic_t irq_mis_count;
16164+extern atomic_unchecked_t irq_err_count;
16165+extern atomic_unchecked_t irq_mis_count;
16166
16167 /* EISA */
16168 extern void eisa_set_level_irq(unsigned int irq);
16169diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
16170index a203659..9889f1c 100644
16171--- a/arch/x86/include/asm/i8259.h
16172+++ b/arch/x86/include/asm/i8259.h
16173@@ -62,7 +62,7 @@ struct legacy_pic {
16174 void (*init)(int auto_eoi);
16175 int (*irq_pending)(unsigned int irq);
16176 void (*make_irq)(unsigned int irq);
16177-};
16178+} __do_const;
16179
16180 extern struct legacy_pic *legacy_pic;
16181 extern struct legacy_pic null_legacy_pic;
16182diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
16183index 34f69cb..6d95446 100644
16184--- a/arch/x86/include/asm/io.h
16185+++ b/arch/x86/include/asm/io.h
16186@@ -51,12 +51,12 @@ static inline void name(type val, volatile void __iomem *addr) \
16187 "m" (*(volatile type __force *)addr) barrier); }
16188
16189 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
16190-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
16191-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
16192+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
16193+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
16194
16195 build_mmio_read(__readb, "b", unsigned char, "=q", )
16196-build_mmio_read(__readw, "w", unsigned short, "=r", )
16197-build_mmio_read(__readl, "l", unsigned int, "=r", )
16198+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
16199+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
16200
16201 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
16202 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
16203@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
16204 return ioremap_nocache(offset, size);
16205 }
16206
16207-extern void iounmap(volatile void __iomem *addr);
16208+extern void iounmap(const volatile void __iomem *addr);
16209
16210 extern void set_iounmap_nonlazy(void);
16211
16212@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
16213
16214 #include <linux/vmalloc.h>
16215
16216+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
16217+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
16218+{
16219+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
16220+}
16221+
16222+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
16223+{
16224+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
16225+}
16226+
16227 /*
16228 * Convert a virtual cached pointer to an uncached pointer
16229 */
16230diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
16231index bba3cf8..06bc8da 100644
16232--- a/arch/x86/include/asm/irqflags.h
16233+++ b/arch/x86/include/asm/irqflags.h
16234@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
16235 sti; \
16236 sysexit
16237
16238+#define GET_CR0_INTO_RDI mov %cr0, %rdi
16239+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
16240+#define GET_CR3_INTO_RDI mov %cr3, %rdi
16241+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
16242+
16243 #else
16244 #define INTERRUPT_RETURN iret
16245 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
16246diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
16247index 9454c16..e4100e3 100644
16248--- a/arch/x86/include/asm/kprobes.h
16249+++ b/arch/x86/include/asm/kprobes.h
16250@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
16251 #define RELATIVEJUMP_SIZE 5
16252 #define RELATIVECALL_OPCODE 0xe8
16253 #define RELATIVE_ADDR_SIZE 4
16254-#define MAX_STACK_SIZE 64
16255-#define MIN_STACK_SIZE(ADDR) \
16256- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
16257- THREAD_SIZE - (unsigned long)(ADDR))) \
16258- ? (MAX_STACK_SIZE) \
16259- : (((unsigned long)current_thread_info()) + \
16260- THREAD_SIZE - (unsigned long)(ADDR)))
16261+#define MAX_STACK_SIZE 64UL
16262+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
16263
16264 #define flush_insn_slot(p) do { } while (0)
16265
16266diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
16267index 4ad6560..75c7bdd 100644
16268--- a/arch/x86/include/asm/local.h
16269+++ b/arch/x86/include/asm/local.h
16270@@ -10,33 +10,97 @@ typedef struct {
16271 atomic_long_t a;
16272 } local_t;
16273
16274+typedef struct {
16275+ atomic_long_unchecked_t a;
16276+} local_unchecked_t;
16277+
16278 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
16279
16280 #define local_read(l) atomic_long_read(&(l)->a)
16281+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
16282 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
16283+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
16284
16285 static inline void local_inc(local_t *l)
16286 {
16287- asm volatile(_ASM_INC "%0"
16288+ asm volatile(_ASM_INC "%0\n"
16289+
16290+#ifdef CONFIG_PAX_REFCOUNT
16291+ "jno 0f\n"
16292+ _ASM_DEC "%0\n"
16293+ "int $4\n0:\n"
16294+ _ASM_EXTABLE(0b, 0b)
16295+#endif
16296+
16297+ : "+m" (l->a.counter));
16298+}
16299+
16300+static inline void local_inc_unchecked(local_unchecked_t *l)
16301+{
16302+ asm volatile(_ASM_INC "%0\n"
16303 : "+m" (l->a.counter));
16304 }
16305
16306 static inline void local_dec(local_t *l)
16307 {
16308- asm volatile(_ASM_DEC "%0"
16309+ asm volatile(_ASM_DEC "%0\n"
16310+
16311+#ifdef CONFIG_PAX_REFCOUNT
16312+ "jno 0f\n"
16313+ _ASM_INC "%0\n"
16314+ "int $4\n0:\n"
16315+ _ASM_EXTABLE(0b, 0b)
16316+#endif
16317+
16318+ : "+m" (l->a.counter));
16319+}
16320+
16321+static inline void local_dec_unchecked(local_unchecked_t *l)
16322+{
16323+ asm volatile(_ASM_DEC "%0\n"
16324 : "+m" (l->a.counter));
16325 }
16326
16327 static inline void local_add(long i, local_t *l)
16328 {
16329- asm volatile(_ASM_ADD "%1,%0"
16330+ asm volatile(_ASM_ADD "%1,%0\n"
16331+
16332+#ifdef CONFIG_PAX_REFCOUNT
16333+ "jno 0f\n"
16334+ _ASM_SUB "%1,%0\n"
16335+ "int $4\n0:\n"
16336+ _ASM_EXTABLE(0b, 0b)
16337+#endif
16338+
16339+ : "+m" (l->a.counter)
16340+ : "ir" (i));
16341+}
16342+
16343+static inline void local_add_unchecked(long i, local_unchecked_t *l)
16344+{
16345+ asm volatile(_ASM_ADD "%1,%0\n"
16346 : "+m" (l->a.counter)
16347 : "ir" (i));
16348 }
16349
16350 static inline void local_sub(long i, local_t *l)
16351 {
16352- asm volatile(_ASM_SUB "%1,%0"
16353+ asm volatile(_ASM_SUB "%1,%0\n"
16354+
16355+#ifdef CONFIG_PAX_REFCOUNT
16356+ "jno 0f\n"
16357+ _ASM_ADD "%1,%0\n"
16358+ "int $4\n0:\n"
16359+ _ASM_EXTABLE(0b, 0b)
16360+#endif
16361+
16362+ : "+m" (l->a.counter)
16363+ : "ir" (i));
16364+}
16365+
16366+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
16367+{
16368+ asm volatile(_ASM_SUB "%1,%0\n"
16369 : "+m" (l->a.counter)
16370 : "ir" (i));
16371 }
16372@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
16373 */
16374 static inline int local_sub_and_test(long i, local_t *l)
16375 {
16376- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
16377+ GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
16378 }
16379
16380 /**
16381@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
16382 */
16383 static inline int local_dec_and_test(local_t *l)
16384 {
16385- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
16386+ GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
16387 }
16388
16389 /**
16390@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
16391 */
16392 static inline int local_inc_and_test(local_t *l)
16393 {
16394- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
16395+ GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
16396 }
16397
16398 /**
16399@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
16400 */
16401 static inline int local_add_negative(long i, local_t *l)
16402 {
16403- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
16404+ GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
16405 }
16406
16407 /**
16408@@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
16409 static inline long local_add_return(long i, local_t *l)
16410 {
16411 long __i = i;
16412+ asm volatile(_ASM_XADD "%0, %1\n"
16413+
16414+#ifdef CONFIG_PAX_REFCOUNT
16415+ "jno 0f\n"
16416+ _ASM_MOV "%0,%1\n"
16417+ "int $4\n0:\n"
16418+ _ASM_EXTABLE(0b, 0b)
16419+#endif
16420+
16421+ : "+r" (i), "+m" (l->a.counter)
16422+ : : "memory");
16423+ return i + __i;
16424+}
16425+
16426+/**
16427+ * local_add_return_unchecked - add and return
16428+ * @i: integer value to add
16429+ * @l: pointer to type local_unchecked_t
16430+ *
16431+ * Atomically adds @i to @l and returns @i + @l
16432+ */
16433+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
16434+{
16435+ long __i = i;
16436 asm volatile(_ASM_XADD "%0, %1;"
16437 : "+r" (i), "+m" (l->a.counter)
16438 : : "memory");
16439@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
16440
16441 #define local_cmpxchg(l, o, n) \
16442 (cmpxchg_local(&((l)->a.counter), (o), (n)))
16443+#define local_cmpxchg_unchecked(l, o, n) \
16444+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
16445 /* Always has a lock prefix */
16446 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
16447
16448diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
16449new file mode 100644
16450index 0000000..2bfd3ba
16451--- /dev/null
16452+++ b/arch/x86/include/asm/mman.h
16453@@ -0,0 +1,15 @@
16454+#ifndef _X86_MMAN_H
16455+#define _X86_MMAN_H
16456+
16457+#include <uapi/asm/mman.h>
16458+
16459+#ifdef __KERNEL__
16460+#ifndef __ASSEMBLY__
16461+#ifdef CONFIG_X86_32
16462+#define arch_mmap_check i386_mmap_check
16463+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
16464+#endif
16465+#endif
16466+#endif
16467+
16468+#endif /* X86_MMAN_H */
16469diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
16470index 5f55e69..e20bfb1 100644
16471--- a/arch/x86/include/asm/mmu.h
16472+++ b/arch/x86/include/asm/mmu.h
16473@@ -9,7 +9,7 @@
16474 * we put the segment information here.
16475 */
16476 typedef struct {
16477- void *ldt;
16478+ struct desc_struct *ldt;
16479 int size;
16480
16481 #ifdef CONFIG_X86_64
16482@@ -18,7 +18,19 @@ typedef struct {
16483 #endif
16484
16485 struct mutex lock;
16486- void *vdso;
16487+ unsigned long vdso;
16488+
16489+#ifdef CONFIG_X86_32
16490+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
16491+ unsigned long user_cs_base;
16492+ unsigned long user_cs_limit;
16493+
16494+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
16495+ cpumask_t cpu_user_cs_mask;
16496+#endif
16497+
16498+#endif
16499+#endif
16500 } mm_context_t;
16501
16502 #ifdef CONFIG_SMP
16503diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
16504index be12c53..4d24039 100644
16505--- a/arch/x86/include/asm/mmu_context.h
16506+++ b/arch/x86/include/asm/mmu_context.h
16507@@ -24,6 +24,20 @@ void destroy_context(struct mm_struct *mm);
16508
16509 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
16510 {
16511+
16512+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16513+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
16514+ unsigned int i;
16515+ pgd_t *pgd;
16516+
16517+ pax_open_kernel();
16518+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
16519+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
16520+ set_pgd_batched(pgd+i, native_make_pgd(0));
16521+ pax_close_kernel();
16522+ }
16523+#endif
16524+
16525 #ifdef CONFIG_SMP
16526 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
16527 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
16528@@ -34,16 +48,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
16529 struct task_struct *tsk)
16530 {
16531 unsigned cpu = smp_processor_id();
16532+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
16533+ int tlbstate = TLBSTATE_OK;
16534+#endif
16535
16536 if (likely(prev != next)) {
16537 #ifdef CONFIG_SMP
16538+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
16539+ tlbstate = this_cpu_read(cpu_tlbstate.state);
16540+#endif
16541 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
16542 this_cpu_write(cpu_tlbstate.active_mm, next);
16543 #endif
16544 cpumask_set_cpu(cpu, mm_cpumask(next));
16545
16546 /* Re-load page tables */
16547+#ifdef CONFIG_PAX_PER_CPU_PGD
16548+ pax_open_kernel();
16549+
16550+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16551+ if (static_cpu_has(X86_FEATURE_PCID))
16552+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
16553+ else
16554+#endif
16555+
16556+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
16557+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
16558+ pax_close_kernel();
16559+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
16560+
16561+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16562+ if (static_cpu_has(X86_FEATURE_PCID)) {
16563+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
16564+ u64 descriptor[2];
16565+ descriptor[0] = PCID_USER;
16566+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
16567+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
16568+ descriptor[0] = PCID_KERNEL;
16569+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
16570+ }
16571+ } else {
16572+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
16573+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
16574+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
16575+ else
16576+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
16577+ }
16578+ } else
16579+#endif
16580+
16581+ load_cr3(get_cpu_pgd(cpu, kernel));
16582+#else
16583 load_cr3(next->pgd);
16584+#endif
16585
16586 /* Stop flush ipis for the previous mm */
16587 cpumask_clear_cpu(cpu, mm_cpumask(prev));
16588@@ -51,9 +108,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
16589 /* Load the LDT, if the LDT is different: */
16590 if (unlikely(prev->context.ldt != next->context.ldt))
16591 load_LDT_nolock(&next->context);
16592+
16593+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
16594+ if (!(__supported_pte_mask & _PAGE_NX)) {
16595+ smp_mb__before_clear_bit();
16596+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
16597+ smp_mb__after_clear_bit();
16598+ cpu_set(cpu, next->context.cpu_user_cs_mask);
16599+ }
16600+#endif
16601+
16602+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
16603+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
16604+ prev->context.user_cs_limit != next->context.user_cs_limit))
16605+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
16606+#ifdef CONFIG_SMP
16607+ else if (unlikely(tlbstate != TLBSTATE_OK))
16608+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
16609+#endif
16610+#endif
16611+
16612 }
16613+ else {
16614+
16615+#ifdef CONFIG_PAX_PER_CPU_PGD
16616+ pax_open_kernel();
16617+
16618+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16619+ if (static_cpu_has(X86_FEATURE_PCID))
16620+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
16621+ else
16622+#endif
16623+
16624+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
16625+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
16626+ pax_close_kernel();
16627+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
16628+
16629+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16630+ if (static_cpu_has(X86_FEATURE_PCID)) {
16631+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
16632+ u64 descriptor[2];
16633+ descriptor[0] = PCID_USER;
16634+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
16635+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
16636+ descriptor[0] = PCID_KERNEL;
16637+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
16638+ }
16639+ } else {
16640+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
16641+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
16642+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
16643+ else
16644+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
16645+ }
16646+ } else
16647+#endif
16648+
16649+ load_cr3(get_cpu_pgd(cpu, kernel));
16650+#endif
16651+
16652 #ifdef CONFIG_SMP
16653- else {
16654 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
16655 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
16656
16657@@ -70,11 +185,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
16658 * tlb flush IPI delivery. We must reload CR3
16659 * to make sure to use no freed page tables.
16660 */
16661+
16662+#ifndef CONFIG_PAX_PER_CPU_PGD
16663 load_cr3(next->pgd);
16664+#endif
16665+
16666 load_LDT_nolock(&next->context);
16667+
16668+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
16669+ if (!(__supported_pte_mask & _PAGE_NX))
16670+ cpu_set(cpu, next->context.cpu_user_cs_mask);
16671+#endif
16672+
16673+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
16674+#ifdef CONFIG_PAX_PAGEEXEC
16675+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
16676+#endif
16677+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
16678+#endif
16679+
16680 }
16681+#endif
16682 }
16683-#endif
16684 }
16685
16686 #define activate_mm(prev, next) \
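
The switch_mm() rewrite above is the heart of PCID-based UDEREF: each CPU keeps a kernel PGD and a shadow user PGD, the user half is recloned on every switch, and stale translations are flushed either with INVPCID single-context invalidations (when available) or by writing CR3 twice with the PCID_USER/PCID_KERNEL tags, NOFLUSH preserving kernel TLB entries under STRONGUDEREF. A distilled, stubbed version of that decision tree; every lowercase helper below is a stand-in, not a kernel API:

#include <stdio.h>
#include <stdbool.h>

/* Stand-ins for privileged operations; each just reports what it would do. */
static void invpcid_single(unsigned pcid)      { printf("invpcid ctx %u\n", pcid); }
static void write_cr3(const char *what)        { printf("mov %s, %%cr3\n", what); }

enum { PCID_KERNEL = 0, PCID_USER = 1 };

static void reload_address_space(bool has_pcid, bool has_invpcid, bool strong_uderef)
{
        if (!has_pcid) {                /* legacy path: single PGD, full flush */
                write_cr3("kernel_pgd");
                return;
        }
        if (has_invpcid) {              /* CR3 already points at the per-CPU PGD;
                                           flush only the stale contexts */
                invpcid_single(PCID_USER);
                if (!strong_uderef)
                        invpcid_single(PCID_KERNEL);
        } else {                        /* no INVPCID: flush via CR3 writes */
                write_cr3("user_pgd | PCID_USER");
                write_cr3(strong_uderef ? "kernel_pgd | PCID_KERNEL | NOFLUSH"
                                        : "kernel_pgd | PCID_KERNEL");
        }
}

int main(void)
{
        reload_address_space(true, true, false);
        return 0;
}
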
16687diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
16688index e3b7819..b257c64 100644
16689--- a/arch/x86/include/asm/module.h
16690+++ b/arch/x86/include/asm/module.h
16691@@ -5,6 +5,7 @@
16692
16693 #ifdef CONFIG_X86_64
16694 /* X86_64 does not define MODULE_PROC_FAMILY */
16695+#define MODULE_PROC_FAMILY ""
16696 #elif defined CONFIG_M486
16697 #define MODULE_PROC_FAMILY "486 "
16698 #elif defined CONFIG_M586
16699@@ -57,8 +58,20 @@
16700 #error unknown processor family
16701 #endif
16702
16703-#ifdef CONFIG_X86_32
16704-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
16705+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
16706+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
16707+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
16708+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
16709+#else
16710+#define MODULE_PAX_KERNEXEC ""
16711 #endif
16712
16713+#ifdef CONFIG_PAX_MEMORY_UDEREF
16714+#define MODULE_PAX_UDEREF "UDEREF "
16715+#else
16716+#define MODULE_PAX_UDEREF ""
16717+#endif
16718+
16719+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
16720+
16721 #endif /* _ASM_X86_MODULE_H */
16722diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
16723index 86f9301..b365cda 100644
16724--- a/arch/x86/include/asm/nmi.h
16725+++ b/arch/x86/include/asm/nmi.h
16726@@ -40,11 +40,11 @@ struct nmiaction {
16727 nmi_handler_t handler;
16728 unsigned long flags;
16729 const char *name;
16730-};
16731+} __do_const;
16732
16733 #define register_nmi_handler(t, fn, fg, n, init...) \
16734 ({ \
16735- static struct nmiaction init fn##_na = { \
16736+ static const struct nmiaction init fn##_na = { \
16737 .handler = (fn), \
16738 .name = (n), \
16739 .flags = (fg), \
16740@@ -52,7 +52,7 @@ struct nmiaction {
16741 __register_nmi_handler((t), &fn##_na); \
16742 })
16743
16744-int __register_nmi_handler(unsigned int, struct nmiaction *);
16745+int __register_nmi_handler(unsigned int, const struct nmiaction *);
16746
16747 void unregister_nmi_handler(unsigned int, const char *);
16748
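
nmi.h shows the patch's constification theme: struct nmiaction holds only function pointers and flags, so __do_const (enforced by the PaX constify plugin) moves every instance into .rodata and the registration API takes const pointers, leaving nothing for an attacker to overwrite. The plain-C effect, without the plugin:

#include <stdio.h>

struct nmiaction {
        int          (*handler)(unsigned int, void *);
        unsigned long  flags;
        const char    *name;
};

static int my_handler(unsigned int type, void *regs)
{
        (void)type; (void)regs;
        return 1;
}

/* const => placed in .rodata: a runtime write faults instead of letting
 * anyone redirect the handler pointer. */
static const struct nmiaction my_na = {
        .handler = my_handler,
        .flags   = 0,
        .name    = "demo",
};

static int register_nmi_handler_demo(const struct nmiaction *na)
{
        printf("registered %s\n", na->name);
        return 0;
}

int main(void)
{
        return register_nmi_handler_demo(&my_na);
}
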
16749diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
16750index c878924..21f4889 100644
16751--- a/arch/x86/include/asm/page.h
16752+++ b/arch/x86/include/asm/page.h
16753@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
16754 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
16755
16756 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
16757+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
16758
16759 #define __boot_va(x) __va(x)
16760 #define __boot_pa(x) __pa(x)
16761diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
16762index 0f1ddee..e2fc3d1 100644
16763--- a/arch/x86/include/asm/page_64.h
16764+++ b/arch/x86/include/asm/page_64.h
16765@@ -7,9 +7,9 @@
16766
16767 /* duplicated to the one in bootmem.h */
16768 extern unsigned long max_pfn;
16769-extern unsigned long phys_base;
16770+extern const unsigned long phys_base;
16771
16772-static inline unsigned long __phys_addr_nodebug(unsigned long x)
16773+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
16774 {
16775 unsigned long y = x - __START_KERNEL_map;
16776
16777diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
16778index 401f350..dee5d13 100644
16779--- a/arch/x86/include/asm/paravirt.h
16780+++ b/arch/x86/include/asm/paravirt.h
16781@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
16782 return (pmd_t) { ret };
16783 }
16784
16785-static inline pmdval_t pmd_val(pmd_t pmd)
16786+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
16787 {
16788 pmdval_t ret;
16789
16790@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
16791 val);
16792 }
16793
16794+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
16795+{
16796+ pgdval_t val = native_pgd_val(pgd);
16797+
16798+ if (sizeof(pgdval_t) > sizeof(long))
16799+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
16800+ val, (u64)val >> 32);
16801+ else
16802+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
16803+ val);
16804+}
16805+
16806 static inline void pgd_clear(pgd_t *pgdp)
16807 {
16808 set_pgd(pgdp, __pgd(0));
16809@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
16810 pv_mmu_ops.set_fixmap(idx, phys, flags);
16811 }
16812
16813+#ifdef CONFIG_PAX_KERNEXEC
16814+static inline unsigned long pax_open_kernel(void)
16815+{
16816+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
16817+}
16818+
16819+static inline unsigned long pax_close_kernel(void)
16820+{
16821+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
16822+}
16823+#else
16824+static inline unsigned long pax_open_kernel(void) { return 0; }
16825+static inline unsigned long pax_close_kernel(void) { return 0; }
16826+#endif
16827+
16828 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
16829
16830 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
16831@@ -906,7 +933,7 @@ extern void default_banner(void);
16832
16833 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
16834 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
16835-#define PARA_INDIRECT(addr) *%cs:addr
16836+#define PARA_INDIRECT(addr) *%ss:addr
16837 #endif
16838
16839 #define INTERRUPT_RETURN \
16840@@ -981,6 +1008,21 @@ extern void default_banner(void);
16841 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
16842 CLBR_NONE, \
16843 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
16844+
16845+#define GET_CR0_INTO_RDI \
16846+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
16847+ mov %rax,%rdi
16848+
16849+#define SET_RDI_INTO_CR0 \
16850+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
16851+
16852+#define GET_CR3_INTO_RDI \
16853+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
16854+ mov %rax,%rdi
16855+
16856+#define SET_RDI_INTO_CR3 \
16857+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
16858+
16859 #endif /* CONFIG_X86_32 */
16860
16861 #endif /* __ASSEMBLY__ */
16862diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
16863index aab8f67..0fb0ee4 100644
16864--- a/arch/x86/include/asm/paravirt_types.h
16865+++ b/arch/x86/include/asm/paravirt_types.h
16866@@ -84,7 +84,7 @@ struct pv_init_ops {
16867 */
16868 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
16869 unsigned long addr, unsigned len);
16870-};
16871+} __no_const __no_randomize_layout;
16872
16873
16874 struct pv_lazy_ops {
16875@@ -92,13 +92,13 @@ struct pv_lazy_ops {
16876 void (*enter)(void);
16877 void (*leave)(void);
16878 void (*flush)(void);
16879-};
16880+} __no_randomize_layout;
16881
16882 struct pv_time_ops {
16883 unsigned long long (*sched_clock)(void);
16884 unsigned long long (*steal_clock)(int cpu);
16885 unsigned long (*get_tsc_khz)(void);
16886-};
16887+} __no_const __no_randomize_layout;
16888
16889 struct pv_cpu_ops {
16890 /* hooks for various privileged instructions */
16891@@ -192,7 +192,7 @@ struct pv_cpu_ops {
16892
16893 void (*start_context_switch)(struct task_struct *prev);
16894 void (*end_context_switch)(struct task_struct *next);
16895-};
16896+} __no_const __no_randomize_layout;
16897
16898 struct pv_irq_ops {
16899 /*
16900@@ -215,7 +215,7 @@ struct pv_irq_ops {
16901 #ifdef CONFIG_X86_64
16902 void (*adjust_exception_frame)(void);
16903 #endif
16904-};
16905+} __no_randomize_layout;
16906
16907 struct pv_apic_ops {
16908 #ifdef CONFIG_X86_LOCAL_APIC
16909@@ -223,7 +223,7 @@ struct pv_apic_ops {
16910 unsigned long start_eip,
16911 unsigned long start_esp);
16912 #endif
16913-};
16914+} __no_const __no_randomize_layout;
16915
16916 struct pv_mmu_ops {
16917 unsigned long (*read_cr2)(void);
16918@@ -313,6 +313,7 @@ struct pv_mmu_ops {
16919 struct paravirt_callee_save make_pud;
16920
16921 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
16922+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
16923 #endif /* PAGETABLE_LEVELS == 4 */
16924 #endif /* PAGETABLE_LEVELS >= 3 */
16925
16926@@ -324,7 +325,13 @@ struct pv_mmu_ops {
16927 an mfn. We can tell which is which from the index. */
16928 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
16929 phys_addr_t phys, pgprot_t flags);
16930-};
16931+
16932+#ifdef CONFIG_PAX_KERNEXEC
16933+ unsigned long (*pax_open_kernel)(void);
16934+ unsigned long (*pax_close_kernel)(void);
16935+#endif
16936+
16937+} __no_randomize_layout;
16938
16939 struct arch_spinlock;
16940 #ifdef CONFIG_SMP
16941@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
16942 struct pv_lock_ops {
16943 struct paravirt_callee_save lock_spinning;
16944 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
16945-};
16946+} __no_randomize_layout;
16947
16948 /* This contains all the paravirt structures: we get a convenient
16949 * number for each function using the offset which we use to indicate
16950- * what to patch. */
16951+ * what to patch.
16952+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
16953+ */
16954+
16955 struct paravirt_patch_template {
16956 struct pv_init_ops pv_init_ops;
16957 struct pv_time_ops pv_time_ops;
16958@@ -349,7 +359,7 @@ struct paravirt_patch_template {
16959 struct pv_apic_ops pv_apic_ops;
16960 struct pv_mmu_ops pv_mmu_ops;
16961 struct pv_lock_ops pv_lock_ops;
16962-};
16963+} __no_randomize_layout;
16964
16965 extern struct pv_info pv_info;
16966 extern struct pv_init_ops pv_init_ops;
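
The __no_const and __no_randomize_layout annotations added to every ops struct above opt these function-pointer tables out of the constify and structure-layout-randomization GCC plugins; as the new comment notes, paravirt_patch_template must keep its declared field order because patching is driven by field offsets. A sketch of how such annotations are typically kept buildable on a stock compiler follows; the macro names mirror the patch, but the empty fallbacks are an assumption about the supporting header.

/* Without the corresponding GCC plugins the markers expand to nothing,
 * so the same source builds with a plain compiler. */
#ifndef __no_const
#define __no_const            /* constify plugin: leave the table writable */
#endif
#ifndef __no_randomize_layout
#define __no_randomize_layout /* randstruct plugin: keep declared order */
#endif

struct pv_demo_ops {
    void (*enter)(void);
    void (*leave)(void);
} __no_const __no_randomize_layout;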
16967diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
16968index c4412e9..90e88c5 100644
16969--- a/arch/x86/include/asm/pgalloc.h
16970+++ b/arch/x86/include/asm/pgalloc.h
16971@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
16972 pmd_t *pmd, pte_t *pte)
16973 {
16974 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
16975+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
16976+}
16977+
16978+static inline void pmd_populate_user(struct mm_struct *mm,
16979+ pmd_t *pmd, pte_t *pte)
16980+{
16981+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
16982 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
16983 }
16984
16985@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
16986
16987 #ifdef CONFIG_X86_PAE
16988 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
16989+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
16990+{
16991+ pud_populate(mm, pudp, pmd);
16992+}
16993 #else /* !CONFIG_X86_PAE */
16994 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
16995 {
16996 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
16997 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
16998 }
16999+
17000+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17001+{
17002+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17003+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
17004+}
17005 #endif /* CONFIG_X86_PAE */
17006
17007 #if PAGETABLE_LEVELS > 3
17008@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17009 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
17010 }
17011
17012+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17013+{
17014+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
17015+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
17016+}
17017+
17018 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
17019 {
17020 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
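
The *_populate_kernel()/*_populate_user() split above comes down to one bit: _PAGE_TABLE is _KERNPG_TABLE plus _PAGE_USER, so kernel-only page-table links stop carrying the user/supervisor bit. A quick worked check with the standard x86 PTE bit positions (userspace demo, values printed match the 0x063/0x067 identity attributes seen later in this patch):

#include <stdio.h>

#define _PAGE_PRESENT  (1UL << 0)
#define _PAGE_RW       (1UL << 1)
#define _PAGE_USER     (1UL << 2)
#define _PAGE_ACCESSED (1UL << 5)
#define _PAGE_DIRTY    (1UL << 6)

#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_TABLE   (_KERNPG_TABLE | _PAGE_USER)

int main(void)
{
    printf("_KERNPG_TABLE = %#lx\n", _KERNPG_TABLE); /* 0x63 */
    printf("_PAGE_TABLE   = %#lx\n", _PAGE_TABLE);   /* 0x67 */
    return 0;
}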
17021diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
17022index 3bf2dd0..23d2a9f 100644
17023--- a/arch/x86/include/asm/pgtable-2level.h
17024+++ b/arch/x86/include/asm/pgtable-2level.h
17025@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
17026
17027 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17028 {
17029+ pax_open_kernel();
17030 *pmdp = pmd;
17031+ pax_close_kernel();
17032 }
17033
17034 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17035diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
17036index 81bb91b..9392125 100644
17037--- a/arch/x86/include/asm/pgtable-3level.h
17038+++ b/arch/x86/include/asm/pgtable-3level.h
17039@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17040
17041 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17042 {
17043+ pax_open_kernel();
17044 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
17045+ pax_close_kernel();
17046 }
17047
17048 static inline void native_set_pud(pud_t *pudp, pud_t pud)
17049 {
17050+ pax_open_kernel();
17051 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
17052+ pax_close_kernel();
17053 }
17054
17055 /*
17056diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
17057index bbc8b12..f228861 100644
17058--- a/arch/x86/include/asm/pgtable.h
17059+++ b/arch/x86/include/asm/pgtable.h
17060@@ -45,6 +45,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17061
17062 #ifndef __PAGETABLE_PUD_FOLDED
17063 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
17064+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
17065 #define pgd_clear(pgd) native_pgd_clear(pgd)
17066 #endif
17067
17068@@ -82,12 +83,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17069
17070 #define arch_end_context_switch(prev) do {} while(0)
17071
17072+#define pax_open_kernel() native_pax_open_kernel()
17073+#define pax_close_kernel() native_pax_close_kernel()
17074 #endif /* CONFIG_PARAVIRT */
17075
17076+#define __HAVE_ARCH_PAX_OPEN_KERNEL
17077+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
17078+
17079+#ifdef CONFIG_PAX_KERNEXEC
17080+static inline unsigned long native_pax_open_kernel(void)
17081+{
17082+ unsigned long cr0;
17083+
17084+ preempt_disable();
17085+ barrier();
17086+ cr0 = read_cr0() ^ X86_CR0_WP;
17087+ BUG_ON(cr0 & X86_CR0_WP);
17088+ write_cr0(cr0);
17089+ return cr0 ^ X86_CR0_WP;
17090+}
17091+
17092+static inline unsigned long native_pax_close_kernel(void)
17093+{
17094+ unsigned long cr0;
17095+
17096+ cr0 = read_cr0() ^ X86_CR0_WP;
17097+ BUG_ON(!(cr0 & X86_CR0_WP));
17098+ write_cr0(cr0);
17099+ barrier();
17100+ preempt_enable_no_resched();
17101+ return cr0 ^ X86_CR0_WP;
17102+}
17103+#else
17104+static inline unsigned long native_pax_open_kernel(void) { return 0; }
17105+static inline unsigned long native_pax_close_kernel(void) { return 0; }
17106+#endif
17107+
17108 /*
17109 * The following only work if pte_present() is true.
17110 * Undefined behaviour if not..
17111 */
17112+static inline int pte_user(pte_t pte)
17113+{
17114+ return pte_val(pte) & _PAGE_USER;
17115+}
17116+
17117 static inline int pte_dirty(pte_t pte)
17118 {
17119 return pte_flags(pte) & _PAGE_DIRTY;
17120@@ -148,6 +188,11 @@ static inline unsigned long pud_pfn(pud_t pud)
17121 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
17122 }
17123
17124+static inline unsigned long pgd_pfn(pgd_t pgd)
17125+{
17126+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
17127+}
17128+
17129 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
17130
17131 static inline int pmd_large(pmd_t pte)
17132@@ -201,9 +246,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
17133 return pte_clear_flags(pte, _PAGE_RW);
17134 }
17135
17136+static inline pte_t pte_mkread(pte_t pte)
17137+{
17138+ return __pte(pte_val(pte) | _PAGE_USER);
17139+}
17140+
17141 static inline pte_t pte_mkexec(pte_t pte)
17142 {
17143- return pte_clear_flags(pte, _PAGE_NX);
17144+#ifdef CONFIG_X86_PAE
17145+ if (__supported_pte_mask & _PAGE_NX)
17146+ return pte_clear_flags(pte, _PAGE_NX);
17147+ else
17148+#endif
17149+ return pte_set_flags(pte, _PAGE_USER);
17150+}
17151+
17152+static inline pte_t pte_exprotect(pte_t pte)
17153+{
17154+#ifdef CONFIG_X86_PAE
17155+ if (__supported_pte_mask & _PAGE_NX)
17156+ return pte_set_flags(pte, _PAGE_NX);
17157+ else
17158+#endif
17159+ return pte_clear_flags(pte, _PAGE_USER);
17160 }
17161
17162 static inline pte_t pte_mkdirty(pte_t pte)
17163@@ -430,6 +495,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
17164 #endif
17165
17166 #ifndef __ASSEMBLY__
17167+
17168+#ifdef CONFIG_PAX_PER_CPU_PGD
17169+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
17170+enum cpu_pgd_type {kernel = 0, user = 1};
17171+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
17172+{
17173+ return cpu_pgd[cpu][type];
17174+}
17175+#endif
17176+
17177 #include <linux/mm_types.h>
17178 #include <linux/mmdebug.h>
17179 #include <linux/log2.h>
17180@@ -570,7 +645,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
17181 * Currently stuck as a macro due to indirect forward reference to
17182 * linux/mmzone.h's __section_mem_map_addr() definition:
17183 */
17184-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
17185+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
17186
17187 /* Find an entry in the second-level page table.. */
17188 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
17189@@ -610,7 +685,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
17190 * Currently stuck as a macro due to indirect forward reference to
17191 * linux/mmzone.h's __section_mem_map_addr() definition:
17192 */
17193-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
17194+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
17195
17196 /* to find an entry in a page-table-directory. */
17197 static inline unsigned long pud_index(unsigned long address)
17198@@ -625,7 +700,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
17199
17200 static inline int pgd_bad(pgd_t pgd)
17201 {
17202- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
17203+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
17204 }
17205
17206 static inline int pgd_none(pgd_t pgd)
17207@@ -648,7 +723,12 @@ static inline int pgd_none(pgd_t pgd)
17208 * pgd_offset() returns a (pgd_t *)
17209 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
17210 */
17211-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
17212+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
17213+
17214+#ifdef CONFIG_PAX_PER_CPU_PGD
17215+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
17216+#endif
17217+
17218 /*
17219 * a shortcut which implies the use of the kernel's pgd, instead
17220 * of a process's
17221@@ -659,6 +739,23 @@ static inline int pgd_none(pgd_t pgd)
17222 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
17223 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
17224
17225+#ifdef CONFIG_X86_32
17226+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
17227+#else
17228+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
17229+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
17230+
17231+#ifdef CONFIG_PAX_MEMORY_UDEREF
17232+#ifdef __ASSEMBLY__
17233+#define pax_user_shadow_base pax_user_shadow_base(%rip)
17234+#else
17235+extern unsigned long pax_user_shadow_base;
17236+extern pgdval_t clone_pgd_mask;
17237+#endif
17238+#endif
17239+
17240+#endif
17241+
17242 #ifndef __ASSEMBLY__
17243
17244 extern int direct_gbpages;
17245@@ -825,11 +922,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
17246 * dst and src can be on the same page, but the range must not overlap,
17247 * and must not cross a page boundary.
17248 */
17249-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
17250+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
17251 {
17252- memcpy(dst, src, count * sizeof(pgd_t));
17253+ pax_open_kernel();
17254+ while (count--)
17255+ *dst++ = *src++;
17256+ pax_close_kernel();
17257 }
17258
17259+#ifdef CONFIG_PAX_PER_CPU_PGD
17260+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
17261+#endif
17262+
17263+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17264+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
17265+#else
17266+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
17267+#endif
17268+
17269 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
17270 static inline int page_level_shift(enum pg_level level)
17271 {
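
native_pax_open_kernel()/native_pax_close_kernel() above toggle CR0.WP with a single XOR, assert the expected prior state with BUG_ON, and return a value reporting the state being left behind. Real CR0 access is ring 0 only; this is a userspace model of just the bit arithmetic, with an assumed typical CR0 boot value.

#include <assert.h>
#include <stdio.h>

#define X86_CR0_WP (1UL << 16)

static unsigned long cr0 = 0x80050033UL; /* illustrative boot value, WP set */

static unsigned long open_kernel(void)
{
    unsigned long v = cr0 ^ X86_CR0_WP;   /* WP was set, now clear */
    assert(!(v & X86_CR0_WP));
    cr0 = v;
    return v ^ X86_CR0_WP;                /* reports WP as previously set */
}

static unsigned long close_kernel(void)
{
    unsigned long v = cr0 ^ X86_CR0_WP;   /* WP was clear, now set */
    assert(v & X86_CR0_WP);
    cr0 = v;
    return v ^ X86_CR0_WP;
}

int main(void)
{
    open_kernel();   /* ring 0 may now write through read-only mappings */
    /* ... modify otherwise write-protected data here ... */
    close_kernel();  /* write protection restored */
    printf("WP set: %d\n", !!(cr0 & X86_CR0_WP));
    return 0;
}

The preempt_disable()/preempt_enable_no_resched() bracketing in the real code is essential: CR0 is per-CPU state, so the task must not migrate between the open and the close.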
17272diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
17273index 9ee3221..b979c6b 100644
17274--- a/arch/x86/include/asm/pgtable_32.h
17275+++ b/arch/x86/include/asm/pgtable_32.h
17276@@ -25,9 +25,6 @@
17277 struct mm_struct;
17278 struct vm_area_struct;
17279
17280-extern pgd_t swapper_pg_dir[1024];
17281-extern pgd_t initial_page_table[1024];
17282-
17283 static inline void pgtable_cache_init(void) { }
17284 static inline void check_pgt_cache(void) { }
17285 void paging_init(void);
17286@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
17287 # include <asm/pgtable-2level.h>
17288 #endif
17289
17290+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
17291+extern pgd_t initial_page_table[PTRS_PER_PGD];
17292+#ifdef CONFIG_X86_PAE
17293+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
17294+#endif
17295+
17296 #if defined(CONFIG_HIGHPTE)
17297 #define pte_offset_map(dir, address) \
17298 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
17299@@ -62,12 +65,17 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
17300 /* Clear a kernel PTE and flush it from the TLB */
17301 #define kpte_clear_flush(ptep, vaddr) \
17302 do { \
17303+ pax_open_kernel(); \
17304 pte_clear(&init_mm, (vaddr), (ptep)); \
17305+ pax_close_kernel(); \
17306 __flush_tlb_one((vaddr)); \
17307 } while (0)
17308
17309 #endif /* !__ASSEMBLY__ */
17310
17311+#define HAVE_ARCH_UNMAPPED_AREA
17312+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
17313+
17314 /*
17315 * kern_addr_valid() is (1) for FLATMEM and (0) for
17316 * SPARSEMEM and DISCONTIGMEM
17317diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
17318index ed5903b..c7fe163 100644
17319--- a/arch/x86/include/asm/pgtable_32_types.h
17320+++ b/arch/x86/include/asm/pgtable_32_types.h
17321@@ -8,7 +8,7 @@
17322 */
17323 #ifdef CONFIG_X86_PAE
17324 # include <asm/pgtable-3level_types.h>
17325-# define PMD_SIZE (1UL << PMD_SHIFT)
17326+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
17327 # define PMD_MASK (~(PMD_SIZE - 1))
17328 #else
17329 # include <asm/pgtable-2level_types.h>
17330@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
17331 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
17332 #endif
17333
17334+#ifdef CONFIG_PAX_KERNEXEC
17335+#ifndef __ASSEMBLY__
17336+extern unsigned char MODULES_EXEC_VADDR[];
17337+extern unsigned char MODULES_EXEC_END[];
17338+#endif
17339+#include <asm/boot.h>
17340+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
17341+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
17342+#else
17343+#define ktla_ktva(addr) (addr)
17344+#define ktva_ktla(addr) (addr)
17345+#endif
17346+
17347 #define MODULES_VADDR VMALLOC_START
17348 #define MODULES_END VMALLOC_END
17349 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
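
Under 32-bit KERNEXEC the kernel text becomes reachable at two addresses, and the ktla_ktva()/ktva_ktla() macros above convert between them by a constant offset. A worked round trip, using common i386 defaults (the PAGE_OFFSET and LOAD_PHYSICAL_ADDR values here are illustrative assumptions):

#include <stdio.h>

#define PAGE_OFFSET        0xc0000000UL
#define LOAD_PHYSICAL_ADDR 0x01000000UL /* typical CONFIG_PHYSICAL_START */

#define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

int main(void)
{
    unsigned long ktla = 0x00100000UL;
    unsigned long ktva = ktla_ktva(ktla);
    printf("ktla %#lx -> ktva %#lx -> back %#lx\n",
           ktla, ktva, ktva_ktla(ktva));
    return 0;
}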
17350diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
17351index e22c1db..23a625a 100644
17352--- a/arch/x86/include/asm/pgtable_64.h
17353+++ b/arch/x86/include/asm/pgtable_64.h
17354@@ -16,10 +16,14 @@
17355
17356 extern pud_t level3_kernel_pgt[512];
17357 extern pud_t level3_ident_pgt[512];
17358+extern pud_t level3_vmalloc_start_pgt[512];
17359+extern pud_t level3_vmalloc_end_pgt[512];
17360+extern pud_t level3_vmemmap_pgt[512];
17361+extern pud_t level2_vmemmap_pgt[512];
17362 extern pmd_t level2_kernel_pgt[512];
17363 extern pmd_t level2_fixmap_pgt[512];
17364-extern pmd_t level2_ident_pgt[512];
17365-extern pgd_t init_level4_pgt[];
17366+extern pmd_t level2_ident_pgt[512*2];
17367+extern pgd_t init_level4_pgt[512];
17368
17369 #define swapper_pg_dir init_level4_pgt
17370
17371@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17372
17373 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17374 {
17375+ pax_open_kernel();
17376 *pmdp = pmd;
17377+ pax_close_kernel();
17378 }
17379
17380 static inline void native_pmd_clear(pmd_t *pmd)
17381@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
17382
17383 static inline void native_set_pud(pud_t *pudp, pud_t pud)
17384 {
17385+ pax_open_kernel();
17386 *pudp = pud;
17387+ pax_close_kernel();
17388 }
17389
17390 static inline void native_pud_clear(pud_t *pud)
17391@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
17392
17393 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
17394 {
17395+ pax_open_kernel();
17396+ *pgdp = pgd;
17397+ pax_close_kernel();
17398+}
17399+
17400+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17401+{
17402 *pgdp = pgd;
17403 }
17404
17405diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
17406index 2d88344..4679fc3 100644
17407--- a/arch/x86/include/asm/pgtable_64_types.h
17408+++ b/arch/x86/include/asm/pgtable_64_types.h
17409@@ -61,6 +61,11 @@ typedef struct { pteval_t pte; } pte_t;
17410 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
17411 #define MODULES_END _AC(0xffffffffff000000, UL)
17412 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
17413+#define MODULES_EXEC_VADDR MODULES_VADDR
17414+#define MODULES_EXEC_END MODULES_END
17415+
17416+#define ktla_ktva(addr) (addr)
17417+#define ktva_ktla(addr) (addr)
17418
17419 #define EARLY_DYNAMIC_PAGE_TABLES 64
17420
17421diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
17422index 0ecac25..7a15e09 100644
17423--- a/arch/x86/include/asm/pgtable_types.h
17424+++ b/arch/x86/include/asm/pgtable_types.h
17425@@ -16,13 +16,12 @@
17426 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
17427 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
17428 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
17429-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
17430+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
17431 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
17432 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
17433 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
17434-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
17435-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
17436-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
17437+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
17438+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
17439 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
17440
17441 /* If _PAGE_BIT_PRESENT is clear, we use these: */
17442@@ -40,7 +39,6 @@
17443 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
17444 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
17445 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
17446-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
17447 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
17448 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
17449 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
17450@@ -87,8 +85,10 @@
17451
17452 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
17453 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
17454-#else
17455+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
17456 #define _PAGE_NX (_AT(pteval_t, 0))
17457+#else
17458+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
17459 #endif
17460
17461 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
17462@@ -146,6 +146,9 @@
17463 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
17464 _PAGE_ACCESSED)
17465
17466+#define PAGE_READONLY_NOEXEC PAGE_READONLY
17467+#define PAGE_SHARED_NOEXEC PAGE_SHARED
17468+
17469 #define __PAGE_KERNEL_EXEC \
17470 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
17471 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
17472@@ -156,7 +159,7 @@
17473 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
17474 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
17475 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
17476-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
17477+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
17478 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
17479 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
17480 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
17481@@ -218,8 +221,8 @@
17482 * bits are combined, this will allow the user to access the high address mapped
17483 * VDSO in the presence of CONFIG_COMPAT_VDSO
17484 */
17485-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
17486-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
17487+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
17488+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
17489 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
17490 #endif
17491
17492@@ -257,7 +260,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
17493 {
17494 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
17495 }
17496+#endif
17497
17498+#if PAGETABLE_LEVELS == 3
17499+#include <asm-generic/pgtable-nopud.h>
17500+#endif
17501+
17502+#if PAGETABLE_LEVELS == 2
17503+#include <asm-generic/pgtable-nopmd.h>
17504+#endif
17505+
17506+#ifndef __ASSEMBLY__
17507 #if PAGETABLE_LEVELS > 3
17508 typedef struct { pudval_t pud; } pud_t;
17509
17510@@ -271,8 +284,6 @@ static inline pudval_t native_pud_val(pud_t pud)
17511 return pud.pud;
17512 }
17513 #else
17514-#include <asm-generic/pgtable-nopud.h>
17515-
17516 static inline pudval_t native_pud_val(pud_t pud)
17517 {
17518 return native_pgd_val(pud.pgd);
17519@@ -292,8 +303,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
17520 return pmd.pmd;
17521 }
17522 #else
17523-#include <asm-generic/pgtable-nopmd.h>
17524-
17525 static inline pmdval_t native_pmd_val(pmd_t pmd)
17526 {
17527 return native_pgd_val(pmd.pud.pgd);
17528@@ -333,7 +342,6 @@ typedef struct page *pgtable_t;
17529
17530 extern pteval_t __supported_pte_mask;
17531 extern void set_nx(void);
17532-extern int nx_enabled;
17533
17534 #define pgprot_writecombine pgprot_writecombine
17535 extern pgprot_t pgprot_writecombine(pgprot_t prot);
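
The _PAGE_NX fallback above is worth spelling out: with PAE or 64-bit the hardware bit 63 is used, kmemcheck/soft-dirty configs keep the old "NX is zero" behaviour, and otherwise the patch parks NX in software bit 11 so exec intent is still tracked even when the CPU cannot enforce it. A sketch of the soft-bit case (types and sample value illustrative):

#include <stdio.h>

typedef unsigned int pteval_t;          /* 32-bit PTEs on 2-level paging */

#define _PAGE_BIT_HIDDEN 11
#define _PAGE_NX ((pteval_t)1 << _PAGE_BIT_HIDDEN)

int main(void)
{
    pteval_t pte = 0x63;                /* kernel page: PRESENT+RW+A+D */
    pte |= _PAGE_NX;                    /* mark no-exec in software only */
    printf("pte=%#x, soft-NX=%d\n", pte, !!(pte & _PAGE_NX));
    return 0;
}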
17536diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
17537index c8b0519..fd29e73 100644
17538--- a/arch/x86/include/asm/preempt.h
17539+++ b/arch/x86/include/asm/preempt.h
17540@@ -87,7 +87,7 @@ static __always_inline void __preempt_count_sub(int val)
17541 */
17542 static __always_inline bool __preempt_count_dec_and_test(void)
17543 {
17544- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
17545+ GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
17546 }
17547
17548 /*
17549diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
17550index 7b034a4..4fe3e3f 100644
17551--- a/arch/x86/include/asm/processor.h
17552+++ b/arch/x86/include/asm/processor.h
17553@@ -128,7 +128,7 @@ struct cpuinfo_x86 {
17554 /* Index into per_cpu list: */
17555 u16 cpu_index;
17556 u32 microcode;
17557-} __attribute__((__aligned__(SMP_CACHE_BYTES)));
17558+} __attribute__((__aligned__(SMP_CACHE_BYTES))) __randomize_layout;
17559
17560 #define X86_VENDOR_INTEL 0
17561 #define X86_VENDOR_CYRIX 1
17562@@ -199,9 +199,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
17563 : "memory");
17564 }
17565
17566+/* invpcid (%rdx),%rax */
17567+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
17568+
17569+#define INVPCID_SINGLE_ADDRESS 0UL
17570+#define INVPCID_SINGLE_CONTEXT 1UL
17571+#define INVPCID_ALL_GLOBAL 2UL
17572+#define INVPCID_ALL_MONGLOBAL 3UL
17573+
17574+#define PCID_KERNEL 0UL
17575+#define PCID_USER 1UL
17576+#define PCID_NOFLUSH (1UL << 63)
17577+
17578 static inline void load_cr3(pgd_t *pgdir)
17579 {
17580- write_cr3(__pa(pgdir));
17581+ write_cr3(__pa(pgdir) | PCID_KERNEL);
17582 }
17583
17584 #ifdef CONFIG_X86_32
17585@@ -283,7 +295,7 @@ struct tss_struct {
17586
17587 } ____cacheline_aligned;
17588
17589-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
17590+extern struct tss_struct init_tss[NR_CPUS];
17591
17592 /*
17593 * Save the original ist values for checking stack pointers during debugging
17594@@ -453,6 +465,7 @@ struct thread_struct {
17595 unsigned short ds;
17596 unsigned short fsindex;
17597 unsigned short gsindex;
17598+ unsigned short ss;
17599 #endif
17600 #ifdef CONFIG_X86_32
17601 unsigned long ip;
17602@@ -562,29 +575,8 @@ static inline void load_sp0(struct tss_struct *tss,
17603 extern unsigned long mmu_cr4_features;
17604 extern u32 *trampoline_cr4_features;
17605
17606-static inline void set_in_cr4(unsigned long mask)
17607-{
17608- unsigned long cr4;
17609-
17610- mmu_cr4_features |= mask;
17611- if (trampoline_cr4_features)
17612- *trampoline_cr4_features = mmu_cr4_features;
17613- cr4 = read_cr4();
17614- cr4 |= mask;
17615- write_cr4(cr4);
17616-}
17617-
17618-static inline void clear_in_cr4(unsigned long mask)
17619-{
17620- unsigned long cr4;
17621-
17622- mmu_cr4_features &= ~mask;
17623- if (trampoline_cr4_features)
17624- *trampoline_cr4_features = mmu_cr4_features;
17625- cr4 = read_cr4();
17626- cr4 &= ~mask;
17627- write_cr4(cr4);
17628-}
17629+extern void set_in_cr4(unsigned long mask);
17630+extern void clear_in_cr4(unsigned long mask);
17631
17632 typedef struct {
17633 unsigned long seg;
17634@@ -833,11 +825,18 @@ static inline void spin_lock_prefetch(const void *x)
17635 */
17636 #define TASK_SIZE PAGE_OFFSET
17637 #define TASK_SIZE_MAX TASK_SIZE
17638+
17639+#ifdef CONFIG_PAX_SEGMEXEC
17640+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
17641+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
17642+#else
17643 #define STACK_TOP TASK_SIZE
17644-#define STACK_TOP_MAX STACK_TOP
17645+#endif
17646+
17647+#define STACK_TOP_MAX TASK_SIZE
17648
17649 #define INIT_THREAD { \
17650- .sp0 = sizeof(init_stack) + (long)&init_stack, \
17651+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
17652 .vm86_info = NULL, \
17653 .sysenter_cs = __KERNEL_CS, \
17654 .io_bitmap_ptr = NULL, \
17655@@ -851,7 +850,7 @@ static inline void spin_lock_prefetch(const void *x)
17656 */
17657 #define INIT_TSS { \
17658 .x86_tss = { \
17659- .sp0 = sizeof(init_stack) + (long)&init_stack, \
17660+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
17661 .ss0 = __KERNEL_DS, \
17662 .ss1 = __KERNEL_CS, \
17663 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
17664@@ -862,11 +861,7 @@ static inline void spin_lock_prefetch(const void *x)
17665 extern unsigned long thread_saved_pc(struct task_struct *tsk);
17666
17667 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
17668-#define KSTK_TOP(info) \
17669-({ \
17670- unsigned long *__ptr = (unsigned long *)(info); \
17671- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
17672-})
17673+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
17674
17675 /*
17676 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
17677@@ -881,7 +876,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
17678 #define task_pt_regs(task) \
17679 ({ \
17680 struct pt_regs *__regs__; \
17681- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
17682+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
17683 __regs__ - 1; \
17684 })
17685
17686@@ -891,13 +886,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
17687 /*
17688 * User space process size. 47bits minus one guard page.
17689 */
17690-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
17691+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
17692
17693 /* This decides where the kernel will search for a free chunk of vm
17694 * space during mmap's.
17695 */
17696 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
17697- 0xc0000000 : 0xFFFFe000)
17698+ 0xc0000000 : 0xFFFFf000)
17699
17700 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
17701 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
17702@@ -908,11 +903,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
17703 #define STACK_TOP_MAX TASK_SIZE_MAX
17704
17705 #define INIT_THREAD { \
17706- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
17707+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
17708 }
17709
17710 #define INIT_TSS { \
17711- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
17712+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
17713 }
17714
17715 /*
17716@@ -940,6 +935,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
17717 */
17718 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
17719
17720+#ifdef CONFIG_PAX_SEGMEXEC
17721+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
17722+#endif
17723+
17724 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
17725
17726 /* Get/set a process' ability to use the timestamp counter instruction */
17727@@ -966,7 +965,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
17728 return 0;
17729 }
17730
17731-extern unsigned long arch_align_stack(unsigned long sp);
17732+#define arch_align_stack(x) ((x) & ~0xfUL)
17733 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
17734
17735 void default_idle(void);
17736@@ -976,6 +975,6 @@ bool xen_set_default_idle(void);
17737 #define xen_set_default_idle 0
17738 #endif
17739
17740-void stop_this_cpu(void *dummy);
17741+void stop_this_cpu(void *dummy) __noreturn;
17742 void df_debug(struct pt_regs *regs, long error_code);
17743 #endif /* _ASM_X86_PROCESSOR_H */
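
load_cr3() above starts tagging address spaces: a PCID rides in the low 12 bits of CR3 and the no-flush hint in bit 63, so the kernel/user distinction travels alongside the page-table base address. A userspace model of the bit packing only (assumes an LP64 build; the physical address is a made-up example):

#include <stdio.h>

#define PCID_KERNEL  0UL
#define PCID_USER    1UL
#define PCID_NOFLUSH (1UL << 63)

static unsigned long make_cr3(unsigned long pgd_pa, unsigned long pcid)
{
    return pgd_pa | pcid;               /* pgd_pa is page-aligned */
}

int main(void)
{
    unsigned long pgd_pa = 0x1b4000UL;
    printf("kernel cr3: %#lx\n", make_cr3(pgd_pa, PCID_KERNEL));
    printf("user   cr3: %#lx\n", make_cr3(pgd_pa, PCID_USER));
    printf("no-flush:   %#lx\n", make_cr3(pgd_pa, PCID_USER | PCID_NOFLUSH));
    return 0;
}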
17744diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
17745index 942a086..6c26446 100644
17746--- a/arch/x86/include/asm/ptrace.h
17747+++ b/arch/x86/include/asm/ptrace.h
17748@@ -85,28 +85,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
17749 }
17750
17751 /*
17752- * user_mode_vm(regs) determines whether a register set came from user mode.
17753+ * user_mode(regs) determines whether a register set came from user mode.
17754 * This is true if V8086 mode was enabled OR if the register set was from
17755 * protected mode with RPL-3 CS value. This tricky test checks that with
17756 * one comparison. Many places in the kernel can bypass this full check
17757- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
17758+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
17759+ * be used.
17760 */
17761-static inline int user_mode(struct pt_regs *regs)
17762+static inline int user_mode_novm(struct pt_regs *regs)
17763 {
17764 #ifdef CONFIG_X86_32
17765 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
17766 #else
17767- return !!(regs->cs & 3);
17768+ return !!(regs->cs & SEGMENT_RPL_MASK);
17769 #endif
17770 }
17771
17772-static inline int user_mode_vm(struct pt_regs *regs)
17773+static inline int user_mode(struct pt_regs *regs)
17774 {
17775 #ifdef CONFIG_X86_32
17776 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
17777 USER_RPL;
17778 #else
17779- return user_mode(regs);
17780+ return user_mode_novm(regs);
17781 #endif
17782 }
17783
17784@@ -122,15 +123,16 @@ static inline int v8086_mode(struct pt_regs *regs)
17785 #ifdef CONFIG_X86_64
17786 static inline bool user_64bit_mode(struct pt_regs *regs)
17787 {
17788+ unsigned long cs = regs->cs & 0xffff;
17789 #ifndef CONFIG_PARAVIRT
17790 /*
17791 * On non-paravirt systems, this is the only long mode CPL 3
17792 * selector. We do not allow long mode selectors in the LDT.
17793 */
17794- return regs->cs == __USER_CS;
17795+ return cs == __USER_CS;
17796 #else
17797 /* Headers are too twisted for this to go in paravirt.h. */
17798- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
17799+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
17800 #endif
17801 }
17802
17803@@ -181,9 +183,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
17804 * Traps from the kernel do not save sp and ss.
17805 * Use the helper function to retrieve sp.
17806 */
17807- if (offset == offsetof(struct pt_regs, sp) &&
17808- regs->cs == __KERNEL_CS)
17809- return kernel_stack_pointer(regs);
17810+ if (offset == offsetof(struct pt_regs, sp)) {
17811+ unsigned long cs = regs->cs & 0xffff;
17812+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
17813+ return kernel_stack_pointer(regs);
17814+ }
17815 #endif
17816 return *(unsigned long *)((unsigned long)regs + offset);
17817 }
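
The user_mode() comment above calls its test "tricky"; concretely, OR-ing the CS RPL bits with the EFLAGS VM bit lets a single >= comparison catch both ring-3 code and vm86 mode, because the VM bit (1 << 17) is far above USER_RPL. A self-contained check with the real x86 constants (the sample CS and EFLAGS values are illustrative 32-bit ones):

#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3UL
#define USER_RPL         0x3UL
#define X86_VM_MASK      (1UL << 17)  /* EFLAGS.VM */

static int user_mode(unsigned long cs, unsigned long flags)
{
    return ((cs & SEGMENT_RPL_MASK) | (flags & X86_VM_MASK)) >= USER_RPL;
}

int main(void)
{
    printf("kernel: %d\n", user_mode(0x60, 0x246));               /* RPL 0 */
    printf("user:   %d\n", user_mode(0x73, 0x246));               /* RPL 3 */
    printf("vm86:   %d\n", user_mode(0x00, 0x246 | X86_VM_MASK)); /* VM=1 */
    return 0;
}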
17818diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
17819index 9c6b890..5305f53 100644
17820--- a/arch/x86/include/asm/realmode.h
17821+++ b/arch/x86/include/asm/realmode.h
17822@@ -22,16 +22,14 @@ struct real_mode_header {
17823 #endif
17824 /* APM/BIOS reboot */
17825 u32 machine_real_restart_asm;
17826-#ifdef CONFIG_X86_64
17827 u32 machine_real_restart_seg;
17828-#endif
17829 };
17830
17831 /* This must match data at trampoline_32/64.S */
17832 struct trampoline_header {
17833 #ifdef CONFIG_X86_32
17834 u32 start;
17835- u16 gdt_pad;
17836+ u16 boot_cs;
17837 u16 gdt_limit;
17838 u32 gdt_base;
17839 #else
17840diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
17841index a82c4f1..ac45053 100644
17842--- a/arch/x86/include/asm/reboot.h
17843+++ b/arch/x86/include/asm/reboot.h
17844@@ -6,13 +6,13 @@
17845 struct pt_regs;
17846
17847 struct machine_ops {
17848- void (*restart)(char *cmd);
17849- void (*halt)(void);
17850- void (*power_off)(void);
17851+ void (* __noreturn restart)(char *cmd);
17852+ void (* __noreturn halt)(void);
17853+ void (* __noreturn power_off)(void);
17854 void (*shutdown)(void);
17855 void (*crash_shutdown)(struct pt_regs *);
17856- void (*emergency_restart)(void);
17857-};
17858+ void (* __noreturn emergency_restart)(void);
17859+} __no_const;
17860
17861 extern struct machine_ops machine_ops;
17862
17863diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
17864index 8f7866a..e442f20 100644
17865--- a/arch/x86/include/asm/rmwcc.h
17866+++ b/arch/x86/include/asm/rmwcc.h
17867@@ -3,7 +3,34 @@
17868
17869 #ifdef CC_HAVE_ASM_GOTO
17870
17871-#define __GEN_RMWcc(fullop, var, cc, ...) \
17872+#ifdef CONFIG_PAX_REFCOUNT
17873+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
17874+do { \
17875+ asm_volatile_goto (fullop \
17876+ ";jno 0f\n" \
17877+ fullantiop \
17878+ ";int $4\n0:\n" \
17879+ _ASM_EXTABLE(0b, 0b) \
17880+ ";j" cc " %l[cc_label]" \
17881+ : : "m" (var), ## __VA_ARGS__ \
17882+ : "memory" : cc_label); \
17883+ return 0; \
17884+cc_label: \
17885+ return 1; \
17886+} while (0)
17887+#else
17888+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
17889+do { \
17890+ asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
17891+ : : "m" (var), ## __VA_ARGS__ \
17892+ : "memory" : cc_label); \
17893+ return 0; \
17894+cc_label: \
17895+ return 1; \
17896+} while (0)
17897+#endif
17898+
17899+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
17900 do { \
17901 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
17902 : : "m" (var), ## __VA_ARGS__ \
17903@@ -13,15 +40,46 @@ cc_label: \
17904 return 1; \
17905 } while (0)
17906
17907-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
17908- __GEN_RMWcc(op " " arg0, var, cc)
17909+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
17910+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
17911
17912-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
17913- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
17914+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
17915+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
17916+
17917+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
17918+ __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
17919+
17920+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
17921+ __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
17922
17923 #else /* !CC_HAVE_ASM_GOTO */
17924
17925-#define __GEN_RMWcc(fullop, var, cc, ...) \
17926+#ifdef CONFIG_PAX_REFCOUNT
17927+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
17928+do { \
17929+ char c; \
17930+ asm volatile (fullop \
17931+ ";jno 0f\n" \
17932+ fullantiop \
17933+ ";int $4\n0:\n" \
17934+ _ASM_EXTABLE(0b, 0b) \
17935+ "; set" cc " %1" \
17936+ : "+m" (var), "=qm" (c) \
17937+ : __VA_ARGS__ : "memory"); \
17938+ return c != 0; \
17939+} while (0)
17940+#else
17941+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
17942+do { \
17943+ char c; \
17944+ asm volatile (fullop "; set" cc " %1" \
17945+ : "+m" (var), "=qm" (c) \
17946+ : __VA_ARGS__ : "memory"); \
17947+ return c != 0; \
17948+} while (0)
17949+#endif
17950+
17951+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
17952 do { \
17953 char c; \
17954 asm volatile (fullop "; set" cc " %1" \
17955@@ -30,11 +88,17 @@ do { \
17956 return c != 0; \
17957 } while (0)
17958
17959-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
17960- __GEN_RMWcc(op " " arg0, var, cc)
17961+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
17962+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
17963+
17964+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
17965+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
17966+
17967+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
17968+ __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
17969
17970-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
17971- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
17972+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
17973+ __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
17974
17975 #endif /* CC_HAVE_ASM_GOTO */
17976
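The PAX_REFCOUNT variants above all share one shape: perform the read-modify-write, "jno" past the recovery path if no signed overflow occurred, otherwise undo the update with the anti-op and trap with "int $4" (the overflow exception). A compilable x86-64 model of the detect-and-undo half, with the trap replaced by a return flag so it can run in userspace:

#include <stdio.h>

static int inc_checked(int *v)
{
    int overflowed;
    asm volatile("incl %0\n\t"
                 "jno 1f\n\t"
                 "decl %0\n\t"          /* undo, like the patch's anti-op */
                 "movl $1, %1\n\t"
                 "jmp 2f\n"
                 "1:\tmovl $0, %1\n"
                 "2:"
                 : "+m" (*v), "=r" (overflowed) : : "cc", "memory");
    return overflowed;
}

int main(void)
{
    int a = 0x7fffffff;                 /* INT_MAX: the next inc overflows */
    printf("overflow=%d value=%#x\n", inc_checked(&a), a);
    return 0;
}

The undo-before-trap ordering matters: by the time the handler runs, the counter has already been restored to its pre-overflow value, so other CPUs never observe the wrapped state.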
17977diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
17978index cad82c9..2e5c5c1 100644
17979--- a/arch/x86/include/asm/rwsem.h
17980+++ b/arch/x86/include/asm/rwsem.h
17981@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
17982 {
17983 asm volatile("# beginning down_read\n\t"
17984 LOCK_PREFIX _ASM_INC "(%1)\n\t"
17985+
17986+#ifdef CONFIG_PAX_REFCOUNT
17987+ "jno 0f\n"
17988+ LOCK_PREFIX _ASM_DEC "(%1)\n"
17989+ "int $4\n0:\n"
17990+ _ASM_EXTABLE(0b, 0b)
17991+#endif
17992+
17993 /* adds 0x00000001 */
17994 " jns 1f\n"
17995 " call call_rwsem_down_read_failed\n"
17996@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
17997 "1:\n\t"
17998 " mov %1,%2\n\t"
17999 " add %3,%2\n\t"
18000+
18001+#ifdef CONFIG_PAX_REFCOUNT
18002+ "jno 0f\n"
18003+ "sub %3,%2\n"
18004+ "int $4\n0:\n"
18005+ _ASM_EXTABLE(0b, 0b)
18006+#endif
18007+
18008 " jle 2f\n\t"
18009 LOCK_PREFIX " cmpxchg %2,%0\n\t"
18010 " jnz 1b\n\t"
18011@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
18012 long tmp;
18013 asm volatile("# beginning down_write\n\t"
18014 LOCK_PREFIX " xadd %1,(%2)\n\t"
18015+
18016+#ifdef CONFIG_PAX_REFCOUNT
18017+ "jno 0f\n"
18018+ "mov %1,(%2)\n"
18019+ "int $4\n0:\n"
18020+ _ASM_EXTABLE(0b, 0b)
18021+#endif
18022+
18023 /* adds 0xffff0001, returns the old value */
18024 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
18025 /* was the active mask 0 before? */
18026@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
18027 long tmp;
18028 asm volatile("# beginning __up_read\n\t"
18029 LOCK_PREFIX " xadd %1,(%2)\n\t"
18030+
18031+#ifdef CONFIG_PAX_REFCOUNT
18032+ "jno 0f\n"
18033+ "mov %1,(%2)\n"
18034+ "int $4\n0:\n"
18035+ _ASM_EXTABLE(0b, 0b)
18036+#endif
18037+
18038 /* subtracts 1, returns the old value */
18039 " jns 1f\n\t"
18040 " call call_rwsem_wake\n" /* expects old value in %edx */
18041@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
18042 long tmp;
18043 asm volatile("# beginning __up_write\n\t"
18044 LOCK_PREFIX " xadd %1,(%2)\n\t"
18045+
18046+#ifdef CONFIG_PAX_REFCOUNT
18047+ "jno 0f\n"
18048+ "mov %1,(%2)\n"
18049+ "int $4\n0:\n"
18050+ _ASM_EXTABLE(0b, 0b)
18051+#endif
18052+
18053 /* subtracts 0xffff0001, returns the old value */
18054 " jns 1f\n\t"
18055 " call call_rwsem_wake\n" /* expects old value in %edx */
18056@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18057 {
18058 asm volatile("# beginning __downgrade_write\n\t"
18059 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
18060+
18061+#ifdef CONFIG_PAX_REFCOUNT
18062+ "jno 0f\n"
18063+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
18064+ "int $4\n0:\n"
18065+ _ASM_EXTABLE(0b, 0b)
18066+#endif
18067+
18068 /*
18069 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
18070 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
18071@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18072 */
18073 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18074 {
18075- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
18076+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
18077+
18078+#ifdef CONFIG_PAX_REFCOUNT
18079+ "jno 0f\n"
18080+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
18081+ "int $4\n0:\n"
18082+ _ASM_EXTABLE(0b, 0b)
18083+#endif
18084+
18085 : "+m" (sem->count)
18086 : "er" (delta));
18087 }
18088@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18089 */
18090 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
18091 {
18092- return delta + xadd(&sem->count, delta);
18093+ return delta + xadd_check_overflow(&sem->count, delta);
18094 }
18095
18096 #endif /* __KERNEL__ */

18097diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
18098index 6f1c3a8..7744f19 100644
18099--- a/arch/x86/include/asm/segment.h
18100+++ b/arch/x86/include/asm/segment.h
18101@@ -64,10 +64,15 @@
18102 * 26 - ESPFIX small SS
18103 * 27 - per-cpu [ offset to per-cpu data area ]
18104 * 28 - stack_canary-20 [ for stack protector ]
18105- * 29 - unused
18106- * 30 - unused
18107+ * 29 - PCI BIOS CS
18108+ * 30 - PCI BIOS DS
18109 * 31 - TSS for double fault handler
18110 */
18111+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
18112+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
18113+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
18114+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
18115+
18116 #define GDT_ENTRY_TLS_MIN 6
18117 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
18118
18119@@ -79,6 +84,8 @@
18120
18121 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
18122
18123+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
18124+
18125 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
18126
18127 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
18128@@ -104,6 +111,12 @@
18129 #define __KERNEL_STACK_CANARY 0
18130 #endif
18131
18132+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
18133+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
18134+
18135+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
18136+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
18137+
18138 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
18139
18140 /*
18141@@ -141,7 +154,7 @@
18142 */
18143
18144 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
18145-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
18146+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
18147
18148
18149 #else
18150@@ -165,6 +178,8 @@
18151 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
18152 #define __USER32_DS __USER_DS
18153
18154+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
18155+
18156 #define GDT_ENTRY_TSS 8 /* needs two entries */
18157 #define GDT_ENTRY_LDT 10 /* needs two entries */
18158 #define GDT_ENTRY_TLS_MIN 12
18159@@ -173,6 +188,8 @@
18160 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
18161 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
18162
18163+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
18164+
18165 /* TLS indexes for 64bit - hardcoded in arch_prctl */
18166 #define FS_TLS 0
18167 #define GS_TLS 1
18168@@ -180,12 +197,14 @@
18169 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
18170 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
18171
18172-#define GDT_ENTRIES 16
18173+#define GDT_ENTRIES 17
18174
18175 #endif
18176
18177 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
18178+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
18179 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
18180+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
18181 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
18182 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
18183 #ifndef CONFIG_PARAVIRT
18184@@ -268,7 +287,7 @@ static inline unsigned long get_limit(unsigned long segment)
18185 {
18186 unsigned long __limit;
18187 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
18188- return __limit + 1;
18189+ return __limit;
18190 }
18191
18192 #endif /* !__ASSEMBLY__ */
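
All the new selectors above follow the usual x86 encoding: GDT index in bits 3..15, table indicator in bit 2, requested privilege level in bits 0..1, which is where the "* 8" and "+ 3" patterns come from. A quick check against the 32-bit values (index constants taken from this hunk):

#include <stdio.h>

static unsigned sel(unsigned index, unsigned rpl) { return index * 8 + rpl; }

int main(void)
{
    printf("__KERNEL_CS          = %#x\n", sel(12, 0)); /* base 12 + 0 = 0x60 */
    printf("__USER_DS            = %#x\n", sel(15, 3)); /* 0x7b, RPL 3 */
    printf("__KERNEXEC_KERNEL_CS = %#x\n", sel(4, 0));  /* 0x20, per patch */
    return 0;
}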
18193diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
18194index 8d3120f..352b440 100644
18195--- a/arch/x86/include/asm/smap.h
18196+++ b/arch/x86/include/asm/smap.h
18197@@ -25,11 +25,40 @@
18198
18199 #include <asm/alternative-asm.h>
18200
18201+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18202+#define ASM_PAX_OPEN_USERLAND \
18203+ 661: jmp 663f; \
18204+ .pushsection .altinstr_replacement, "a" ; \
18205+ 662: pushq %rax; nop; \
18206+ .popsection ; \
18207+ .pushsection .altinstructions, "a" ; \
18208+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
18209+ .popsection ; \
18210+ call __pax_open_userland; \
18211+ popq %rax; \
18212+ 663:
18213+
18214+#define ASM_PAX_CLOSE_USERLAND \
18215+ 661: jmp 663f; \
18216+ .pushsection .altinstr_replacement, "a" ; \
18217+ 662: pushq %rax; nop; \
18218+ .popsection; \
18219+ .pushsection .altinstructions, "a" ; \
18220+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
18221+ .popsection; \
18222+ call __pax_close_userland; \
18223+ popq %rax; \
18224+ 663:
18225+#else
18226+#define ASM_PAX_OPEN_USERLAND
18227+#define ASM_PAX_CLOSE_USERLAND
18228+#endif
18229+
18230 #ifdef CONFIG_X86_SMAP
18231
18232 #define ASM_CLAC \
18233 661: ASM_NOP3 ; \
18234- .pushsection .altinstr_replacement, "ax" ; \
18235+ .pushsection .altinstr_replacement, "a" ; \
18236 662: __ASM_CLAC ; \
18237 .popsection ; \
18238 .pushsection .altinstructions, "a" ; \
18239@@ -38,7 +67,7 @@
18240
18241 #define ASM_STAC \
18242 661: ASM_NOP3 ; \
18243- .pushsection .altinstr_replacement, "ax" ; \
18244+ .pushsection .altinstr_replacement, "a" ; \
18245 662: __ASM_STAC ; \
18246 .popsection ; \
18247 .pushsection .altinstructions, "a" ; \
18248@@ -56,6 +85,37 @@
18249
18250 #include <asm/alternative.h>
18251
18252+#define __HAVE_ARCH_PAX_OPEN_USERLAND
18253+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
18254+
18255+extern void __pax_open_userland(void);
18256+static __always_inline unsigned long pax_open_userland(void)
18257+{
18258+
18259+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18260+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
18261+ :
18262+ : [open] "i" (__pax_open_userland)
18263+ : "memory", "rax");
18264+#endif
18265+
18266+ return 0;
18267+}
18268+
18269+extern void __pax_close_userland(void);
18270+static __always_inline unsigned long pax_close_userland(void)
18271+{
18272+
18273+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18274+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
18275+ :
18276+ : [close] "i" (__pax_close_userland)
18277+ : "memory", "rax");
18278+#endif
18279+
18280+ return 0;
18281+}
18282+
18283 #ifdef CONFIG_X86_SMAP
18284
18285 static __always_inline void clac(void)
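
pax_open_userland()/pax_close_userland() above lean on the kernel's alternatives engine: each site is assembled as a 5-byte NOP and rewritten at boot into a real call only if the CPU reports the PaX-defined STRONGUDEREF feature. A userspace stand-in that replaces in-place code patching with a single boot-time function-pointer choice (names illustrative):

#include <stdio.h>
#include <stdbool.h>

static void open_userland_real(void) { puts("userland mapped in"); }
static void nop5(void)               { puts("nop5: feature absent"); }

int main(void)
{
    bool has_stronguderef = false;      /* stands in for the CPUID probe */
    void (*patched)(void) = has_stronguderef ? open_userland_real : nop5;
    patched();  /* one indirect call models the one-time code rewrite */
    return 0;
}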
18286diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
18287index 4137890..03fa172 100644
18288--- a/arch/x86/include/asm/smp.h
18289+++ b/arch/x86/include/asm/smp.h
18290@@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
18291 /* cpus sharing the last level cache: */
18292 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
18293 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
18294-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
18295+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
18296
18297 static inline struct cpumask *cpu_sibling_mask(int cpu)
18298 {
18299@@ -79,7 +79,7 @@ struct smp_ops {
18300
18301 void (*send_call_func_ipi)(const struct cpumask *mask);
18302 void (*send_call_func_single_ipi)(int cpu);
18303-};
18304+} __no_const;
18305
18306 /* Globals due to paravirt */
18307 extern void set_cpu_sibling_map(int cpu);
18308@@ -191,14 +191,8 @@ extern unsigned disabled_cpus;
18309 extern int safe_smp_processor_id(void);
18310
18311 #elif defined(CONFIG_X86_64_SMP)
18312-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
18313-
18314-#define stack_smp_processor_id() \
18315-({ \
18316- struct thread_info *ti; \
18317- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
18318- ti->cpu; \
18319-})
18320+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
18321+#define stack_smp_processor_id() raw_smp_processor_id()
18322 #define safe_smp_processor_id() smp_processor_id()
18323
18324 #endif
18325diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
18326index bf156de..1a782ab 100644
18327--- a/arch/x86/include/asm/spinlock.h
18328+++ b/arch/x86/include/asm/spinlock.h
18329@@ -223,6 +223,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
18330 static inline void arch_read_lock(arch_rwlock_t *rw)
18331 {
18332 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
18333+
18334+#ifdef CONFIG_PAX_REFCOUNT
18335+ "jno 0f\n"
18336+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
18337+ "int $4\n0:\n"
18338+ _ASM_EXTABLE(0b, 0b)
18339+#endif
18340+
18341 "jns 1f\n"
18342 "call __read_lock_failed\n\t"
18343 "1:\n"
18344@@ -232,6 +240,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
18345 static inline void arch_write_lock(arch_rwlock_t *rw)
18346 {
18347 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
18348+
18349+#ifdef CONFIG_PAX_REFCOUNT
18350+ "jno 0f\n"
18351+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
18352+ "int $4\n0:\n"
18353+ _ASM_EXTABLE(0b, 0b)
18354+#endif
18355+
18356 "jz 1f\n"
18357 "call __write_lock_failed\n\t"
18358 "1:\n"
18359@@ -261,13 +277,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
18360
18361 static inline void arch_read_unlock(arch_rwlock_t *rw)
18362 {
18363- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
18364+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
18365+
18366+#ifdef CONFIG_PAX_REFCOUNT
18367+ "jno 0f\n"
18368+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
18369+ "int $4\n0:\n"
18370+ _ASM_EXTABLE(0b, 0b)
18371+#endif
18372+
18373 :"+m" (rw->lock) : : "memory");
18374 }
18375
18376 static inline void arch_write_unlock(arch_rwlock_t *rw)
18377 {
18378- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
18379+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
18380+
18381+#ifdef CONFIG_PAX_REFCOUNT
18382+ "jno 0f\n"
18383+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
18384+ "int $4\n0:\n"
18385+ _ASM_EXTABLE(0b, 0b)
18386+#endif
18387+
18388 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
18389 }
18390
18391diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
18392index 6a99859..03cb807 100644
18393--- a/arch/x86/include/asm/stackprotector.h
18394+++ b/arch/x86/include/asm/stackprotector.h
18395@@ -47,7 +47,7 @@
18396 * head_32 for boot CPU and setup_per_cpu_areas() for others.
18397 */
18398 #define GDT_STACK_CANARY_INIT \
18399- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
18400+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
18401
18402 /*
18403 * Initialize the stackprotector canary value.
18404@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
18405
18406 static inline void load_stack_canary_segment(void)
18407 {
18408-#ifdef CONFIG_X86_32
18409+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18410 asm volatile ("mov %0, %%gs" : : "r" (0));
18411 #endif
18412 }
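
The canary-descriptor tweak above (limit 0x18 -> 0x17) matches the x86 rule that segment limits are inclusive: a segment covering 24 bytes, offsets 0..23, carries limit 23 = 0x17. The get_limit() change earlier in this patch, which drops its "+ 1", keeps the same convention. Trivial arithmetic check:

#include <stdio.h>

int main(void)
{
    unsigned span = 24;                        /* canary segment: 24 bytes */
    printf("limit field = %#x\n", span - 1);   /* 0x17, inclusive limit */
    return 0;
}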
18413diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
18414index 70bbe39..4ae2bd4 100644
18415--- a/arch/x86/include/asm/stacktrace.h
18416+++ b/arch/x86/include/asm/stacktrace.h
18417@@ -11,28 +11,20 @@
18418
18419 extern int kstack_depth_to_print;
18420
18421-struct thread_info;
18422+struct task_struct;
18423 struct stacktrace_ops;
18424
18425-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
18426- unsigned long *stack,
18427- unsigned long bp,
18428- const struct stacktrace_ops *ops,
18429- void *data,
18430- unsigned long *end,
18431- int *graph);
18432+typedef unsigned long walk_stack_t(struct task_struct *task,
18433+ void *stack_start,
18434+ unsigned long *stack,
18435+ unsigned long bp,
18436+ const struct stacktrace_ops *ops,
18437+ void *data,
18438+ unsigned long *end,
18439+ int *graph);
18440
18441-extern unsigned long
18442-print_context_stack(struct thread_info *tinfo,
18443- unsigned long *stack, unsigned long bp,
18444- const struct stacktrace_ops *ops, void *data,
18445- unsigned long *end, int *graph);
18446-
18447-extern unsigned long
18448-print_context_stack_bp(struct thread_info *tinfo,
18449- unsigned long *stack, unsigned long bp,
18450- const struct stacktrace_ops *ops, void *data,
18451- unsigned long *end, int *graph);
18452+extern walk_stack_t print_context_stack;
18453+extern walk_stack_t print_context_stack_bp;
18454
18455 /* Generic stack tracer with callbacks */
18456
18457@@ -40,7 +32,7 @@ struct stacktrace_ops {
18458 void (*address)(void *data, unsigned long address, int reliable);
18459 /* On negative return stop dumping */
18460 int (*stack)(void *data, char *name);
18461- walk_stack_t walk_stack;
18462+ walk_stack_t *walk_stack;
18463 };
18464
18465 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
18466diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
18467index d7f3b3b..3cc39f1 100644
18468--- a/arch/x86/include/asm/switch_to.h
18469+++ b/arch/x86/include/asm/switch_to.h
18470@@ -108,7 +108,7 @@ do { \
18471 "call __switch_to\n\t" \
18472 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
18473 __switch_canary \
18474- "movq %P[thread_info](%%rsi),%%r8\n\t" \
18475+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
18476 "movq %%rax,%%rdi\n\t" \
18477 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
18478 "jnz ret_from_fork\n\t" \
18479@@ -119,7 +119,7 @@ do { \
18480 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
18481 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
18482 [_tif_fork] "i" (_TIF_FORK), \
18483- [thread_info] "i" (offsetof(struct task_struct, stack)), \
18484+ [thread_info] "m" (current_tinfo), \
18485 [current_task] "m" (current_task) \
18486 __switch_canary_iparam \
18487 : "memory", "cc" __EXTRA_CLOBBER)
18488diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
18489index 3ba3de4..6c113b2 100644
18490--- a/arch/x86/include/asm/thread_info.h
18491+++ b/arch/x86/include/asm/thread_info.h
18492@@ -10,6 +10,7 @@
18493 #include <linux/compiler.h>
18494 #include <asm/page.h>
18495 #include <asm/types.h>
18496+#include <asm/percpu.h>
18497
18498 /*
18499 * low level task data that entry.S needs immediate access to
18500@@ -23,7 +24,6 @@ struct exec_domain;
18501 #include <linux/atomic.h>
18502
18503 struct thread_info {
18504- struct task_struct *task; /* main task structure */
18505 struct exec_domain *exec_domain; /* execution domain */
18506 __u32 flags; /* low level flags */
18507 __u32 status; /* thread synchronous flags */
18508@@ -32,19 +32,13 @@ struct thread_info {
18509 mm_segment_t addr_limit;
18510 struct restart_block restart_block;
18511 void __user *sysenter_return;
18512-#ifdef CONFIG_X86_32
18513- unsigned long previous_esp; /* ESP of the previous stack in
18514- case of nested (IRQ) stacks
18515- */
18516- __u8 supervisor_stack[0];
18517-#endif
18518+ unsigned long lowest_stack;
18519 unsigned int sig_on_uaccess_error:1;
18520 unsigned int uaccess_err:1; /* uaccess failed */
18521 };
18522
18523-#define INIT_THREAD_INFO(tsk) \
18524+#define INIT_THREAD_INFO \
18525 { \
18526- .task = &tsk, \
18527 .exec_domain = &default_exec_domain, \
18528 .flags = 0, \
18529 .cpu = 0, \
18530@@ -55,7 +49,7 @@ struct thread_info {
18531 }, \
18532 }
18533
18534-#define init_thread_info (init_thread_union.thread_info)
18535+#define init_thread_info (init_thread_union.stack)
18536 #define init_stack (init_thread_union.stack)
18537
18538 #else /* !__ASSEMBLY__ */
18539@@ -95,6 +89,7 @@ struct thread_info {
18540 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
18541 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
18542 #define TIF_X32 30 /* 32-bit native x86-64 binary */
18543+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
18544
18545 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
18546 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
18547@@ -118,17 +113,18 @@ struct thread_info {
18548 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
18549 #define _TIF_ADDR32 (1 << TIF_ADDR32)
18550 #define _TIF_X32 (1 << TIF_X32)
18551+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
18552
18553 /* work to do in syscall_trace_enter() */
18554 #define _TIF_WORK_SYSCALL_ENTRY \
18555 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
18556 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
18557- _TIF_NOHZ)
18558+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
18559
18560 /* work to do in syscall_trace_leave() */
18561 #define _TIF_WORK_SYSCALL_EXIT \
18562 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
18563- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
18564+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
18565
18566 /* work to do on interrupt/exception return */
18567 #define _TIF_WORK_MASK \
18568@@ -139,7 +135,7 @@ struct thread_info {
18569 /* work to do on any return to user space */
18570 #define _TIF_ALLWORK_MASK \
18571 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
18572- _TIF_NOHZ)
18573+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
18574
18575 /* Only used for 64 bit */
18576 #define _TIF_DO_NOTIFY_MASK \
18577@@ -153,45 +149,40 @@ struct thread_info {
18578 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
18579 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
18580
18581-#ifdef CONFIG_X86_32
18582-
18583-#define STACK_WARN (THREAD_SIZE/8)
18584-/*
18585- * macros/functions for gaining access to the thread information structure
18586- *
18587- * preempt_count needs to be 1 initially, until the scheduler is functional.
18588- */
18589-#ifndef __ASSEMBLY__
18590-
18591-
18592-/* how to get the current stack pointer from C */
18593-register unsigned long current_stack_pointer asm("esp") __used;
18594-
18595-/* how to get the thread information struct from C */
18596-static inline struct thread_info *current_thread_info(void)
18597-{
18598- return (struct thread_info *)
18599- (current_stack_pointer & ~(THREAD_SIZE - 1));
18600-}
18601-
18602-#else /* !__ASSEMBLY__ */
18603-
18604+#ifdef __ASSEMBLY__
18605 /* how to get the thread information struct from ASM */
18606 #define GET_THREAD_INFO(reg) \
18607- movl $-THREAD_SIZE, reg; \
18608- andl %esp, reg
18609+ mov PER_CPU_VAR(current_tinfo), reg
18610
18611 /* use this one if reg already contains %esp */
18612-#define GET_THREAD_INFO_WITH_ESP(reg) \
18613- andl $-THREAD_SIZE, reg
18614+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
18615+#else
18616+/* how to get the thread information struct from C */
18617+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
18618+
18619+static __always_inline struct thread_info *current_thread_info(void)
18620+{
18621+ return this_cpu_read_stable(current_tinfo);
18622+}
18623+#endif
18624+
18625+#ifdef CONFIG_X86_32
18626+
18627+#define STACK_WARN (THREAD_SIZE/8)
18628+/*
18629+ * macros/functions for gaining access to the thread information structure
18630+ *
18631+ * preempt_count needs to be 1 initially, until the scheduler is functional.
18632+ */
18633+#ifndef __ASSEMBLY__
18634+
18635+/* how to get the current stack pointer from C */
18636+register unsigned long current_stack_pointer asm("esp") __used;
18637
18638 #endif
18639
18640 #else /* X86_32 */
18641
18642-#include <asm/percpu.h>
18643-#define KERNEL_STACK_OFFSET (5*8)
18644-
18645 /*
18646 * macros/functions for gaining access to the thread information structure
18647 * preempt_count needs to be 1 initially, until the scheduler is functional.
18648@@ -199,27 +190,8 @@ static inline struct thread_info *current_thread_info(void)
18649 #ifndef __ASSEMBLY__
18650 DECLARE_PER_CPU(unsigned long, kernel_stack);
18651
18652-static inline struct thread_info *current_thread_info(void)
18653-{
18654- struct thread_info *ti;
18655- ti = (void *)(this_cpu_read_stable(kernel_stack) +
18656- KERNEL_STACK_OFFSET - THREAD_SIZE);
18657- return ti;
18658-}
18659-
18660-#else /* !__ASSEMBLY__ */
18661-
18662-/* how to get the thread information struct from ASM */
18663-#define GET_THREAD_INFO(reg) \
18664- movq PER_CPU_VAR(kernel_stack),reg ; \
18665- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
18666-
18667-/*
18668- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
18669- * a certain register (to be used in assembler memory operands).
18670- */
18671-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
18672-
18673+/* how to get the current stack pointer from C */
18674+register unsigned long current_stack_pointer asm("rsp") __used;
18675 #endif
18676
18677 #endif /* !X86_32 */
18678@@ -278,5 +250,12 @@ static inline bool is_ia32_task(void)
18679 extern void arch_task_cache_init(void);
18680 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
18681 extern void arch_release_task_struct(struct task_struct *tsk);
18682+
18683+#define __HAVE_THREAD_FUNCTIONS
18684+#define task_thread_info(task) (&(task)->tinfo)
18685+#define task_stack_page(task) ((task)->stack)
18686+#define setup_thread_stack(p, org) do {} while (0)
18687+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
18688+
18689 #endif
18690 #endif /* _ASM_X86_THREAD_INFO_H */
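
The thread_info changes above are the heart of this part of the patch: the structure loses its embedded task pointer, moves off the kernel stack into the task itself (task_thread_info() becomes &task->tinfo), and current_thread_info() becomes a single per-cpu load instead of masking the stack pointer with ~(THREAD_SIZE - 1). A user-space analogy of the lookup change, with __thread standing in for a per-cpu variable:

#include <stdio.h>

struct thread_info { unsigned int flags; };

/* stand-in for DECLARE_PER_CPU(struct thread_info *, current_tinfo) */
static __thread struct thread_info *current_tinfo;

static struct thread_info *current_thread_info(void)
{
	return current_tinfo;	/* one load; no dependence on stack layout */
}

int main(void)
{
	struct thread_info ti = { .flags = 1 };

	current_tinfo = &ti;
	printf("flags=%u\n", current_thread_info()->flags);
	return 0;
}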
18691diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
18692index e6d90ba..f81f114 100644
18693--- a/arch/x86/include/asm/tlbflush.h
18694+++ b/arch/x86/include/asm/tlbflush.h
18695@@ -17,18 +17,44 @@
18696
18697 static inline void __native_flush_tlb(void)
18698 {
18699+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
18700+ u64 descriptor[2];
18701+
18702+ descriptor[0] = PCID_KERNEL;
18703+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_MONGLOBAL) : "memory");
18704+ return;
18705+ }
18706+
18707+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18708+ if (static_cpu_has(X86_FEATURE_PCID)) {
18709+ unsigned int cpu = raw_get_cpu();
18710+
18711+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
18712+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
18713+ raw_put_cpu_no_resched();
18714+ return;
18715+ }
18716+#endif
18717+
18718 native_write_cr3(native_read_cr3());
18719 }
18720
18721 static inline void __native_flush_tlb_global_irq_disabled(void)
18722 {
18723- unsigned long cr4;
18724+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
18725+ u64 descriptor[2];
18726
18727- cr4 = native_read_cr4();
18728- /* clear PGE */
18729- native_write_cr4(cr4 & ~X86_CR4_PGE);
18730- /* write old PGE again and flush TLBs */
18731- native_write_cr4(cr4);
18732+ descriptor[0] = PCID_KERNEL;
18733+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
18734+ } else {
18735+ unsigned long cr4;
18736+
18737+ cr4 = native_read_cr4();
18738+ /* clear PGE */
18739+ native_write_cr4(cr4 & ~X86_CR4_PGE);
18740+ /* write old PGE again and flush TLBs */
18741+ native_write_cr4(cr4);
18742+ }
18743 }
18744
18745 static inline void __native_flush_tlb_global(void)
18746@@ -49,6 +75,41 @@ static inline void __native_flush_tlb_global(void)
18747
18748 static inline void __native_flush_tlb_single(unsigned long addr)
18749 {
18750+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
18751+ u64 descriptor[2];
18752+
18753+ descriptor[0] = PCID_KERNEL;
18754+ descriptor[1] = addr;
18755+
18756+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18757+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
18758+ if (addr < TASK_SIZE_MAX)
18759+ descriptor[1] += pax_user_shadow_base;
18760+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
18761+ }
18762+
18763+ descriptor[0] = PCID_USER;
18764+ descriptor[1] = addr;
18765+#endif
18766+
18767+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
18768+ return;
18769+ }
18770+
18771+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18772+ if (static_cpu_has(X86_FEATURE_PCID)) {
18773+ unsigned int cpu = raw_get_cpu();
18774+
18775+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
18776+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
18777+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
18778+ raw_put_cpu_no_resched();
18779+
18780+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
18781+ addr += pax_user_shadow_base;
18782+ }
18783+#endif
18784+
18785 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
18786 }
18787
18788diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
18789index 8ec57c0..3ee58c9 100644
18790--- a/arch/x86/include/asm/uaccess.h
18791+++ b/arch/x86/include/asm/uaccess.h
18792@@ -7,6 +7,7 @@
18793 #include <linux/compiler.h>
18794 #include <linux/thread_info.h>
18795 #include <linux/string.h>
18796+#include <linux/spinlock.h>
18797 #include <asm/asm.h>
18798 #include <asm/page.h>
18799 #include <asm/smap.h>
18800@@ -29,7 +30,12 @@
18801
18802 #define get_ds() (KERNEL_DS)
18803 #define get_fs() (current_thread_info()->addr_limit)
18804+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18805+void __set_fs(mm_segment_t x);
18806+void set_fs(mm_segment_t x);
18807+#else
18808 #define set_fs(x) (current_thread_info()->addr_limit = (x))
18809+#endif
18810
18811 #define segment_eq(a, b) ((a).seg == (b).seg)
18812
18813@@ -77,8 +83,34 @@
18814 * checks that the pointer is in the user space range - after calling
18815 * this function, memory access functions may still return -EFAULT.
18816 */
18817-#define access_ok(type, addr, size) \
18818- (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
18819+extern int _cond_resched(void);
18820+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
18821+#define access_ok(type, addr, size) \
18822+({ \
18823+ long __size = size; \
18824+ unsigned long __addr = (unsigned long)addr; \
18825+ unsigned long __addr_ao = __addr & PAGE_MASK; \
18826+ unsigned long __end_ao = __addr + __size - 1; \
18827+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
18828+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
18829+ while(__addr_ao <= __end_ao) { \
18830+ char __c_ao; \
18831+ __addr_ao += PAGE_SIZE; \
18832+ if (__size > PAGE_SIZE) \
18833+ _cond_resched(); \
18834+ if (__get_user(__c_ao, (char __user *)__addr)) \
18835+ break; \
18836+ if (type != VERIFY_WRITE) { \
18837+ __addr = __addr_ao; \
18838+ continue; \
18839+ } \
18840+ if (__put_user(__c_ao, (char __user *)__addr)) \
18841+ break; \
18842+ __addr = __addr_ao; \
18843+ } \
18844+ } \
18845+ __ret_ao; \
18846+})
18847
18848 /*
18849 * The exception table consists of pairs of addresses relative to the
18850@@ -168,10 +200,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
18851 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
18852 __chk_user_ptr(ptr); \
18853 might_fault(); \
18854+ pax_open_userland(); \
18855 asm volatile("call __get_user_%P3" \
18856 : "=a" (__ret_gu), "=r" (__val_gu) \
18857 : "0" (ptr), "i" (sizeof(*(ptr)))); \
18858 (x) = (__typeof__(*(ptr))) __val_gu; \
18859+ pax_close_userland(); \
18860 __ret_gu; \
18861 })
18862
18863@@ -179,13 +213,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
18864 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
18865 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
18866
18867-
18868+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18869+#define __copyuser_seg "gs;"
18870+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
18871+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
18872+#else
18873+#define __copyuser_seg
18874+#define __COPYUSER_SET_ES
18875+#define __COPYUSER_RESTORE_ES
18876+#endif
18877
18878 #ifdef CONFIG_X86_32
18879 #define __put_user_asm_u64(x, addr, err, errret) \
18880 asm volatile(ASM_STAC "\n" \
18881- "1: movl %%eax,0(%2)\n" \
18882- "2: movl %%edx,4(%2)\n" \
18883+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
18884+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
18885 "3: " ASM_CLAC "\n" \
18886 ".section .fixup,\"ax\"\n" \
18887 "4: movl %3,%0\n" \
18888@@ -198,8 +240,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
18889
18890 #define __put_user_asm_ex_u64(x, addr) \
18891 asm volatile(ASM_STAC "\n" \
18892- "1: movl %%eax,0(%1)\n" \
18893- "2: movl %%edx,4(%1)\n" \
18894+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
18895+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
18896 "3: " ASM_CLAC "\n" \
18897 _ASM_EXTABLE_EX(1b, 2b) \
18898 _ASM_EXTABLE_EX(2b, 3b) \
18899@@ -249,7 +291,8 @@ extern void __put_user_8(void);
18900 __typeof__(*(ptr)) __pu_val; \
18901 __chk_user_ptr(ptr); \
18902 might_fault(); \
18903- __pu_val = x; \
18904+ __pu_val = (x); \
18905+ pax_open_userland(); \
18906 switch (sizeof(*(ptr))) { \
18907 case 1: \
18908 __put_user_x(1, __pu_val, ptr, __ret_pu); \
18909@@ -267,6 +310,7 @@ extern void __put_user_8(void);
18910 __put_user_x(X, __pu_val, ptr, __ret_pu); \
18911 break; \
18912 } \
18913+ pax_close_userland(); \
18914 __ret_pu; \
18915 })
18916
18917@@ -347,8 +391,10 @@ do { \
18918 } while (0)
18919
18920 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
18921+do { \
18922+ pax_open_userland(); \
18923 asm volatile(ASM_STAC "\n" \
18924- "1: mov"itype" %2,%"rtype"1\n" \
18925+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
18926 "2: " ASM_CLAC "\n" \
18927 ".section .fixup,\"ax\"\n" \
18928 "3: mov %3,%0\n" \
18929@@ -356,8 +402,10 @@ do { \
18930 " jmp 2b\n" \
18931 ".previous\n" \
18932 _ASM_EXTABLE(1b, 3b) \
18933- : "=r" (err), ltype(x) \
18934- : "m" (__m(addr)), "i" (errret), "0" (err))
18935+ : "=r" (err), ltype (x) \
18936+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
18937+ pax_close_userland(); \
18938+} while (0)
18939
18940 #define __get_user_size_ex(x, ptr, size) \
18941 do { \
18942@@ -381,7 +429,7 @@ do { \
18943 } while (0)
18944
18945 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
18946- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
18947+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
18948 "2:\n" \
18949 _ASM_EXTABLE_EX(1b, 2b) \
18950 : ltype(x) : "m" (__m(addr)))
18951@@ -398,13 +446,24 @@ do { \
18952 int __gu_err; \
18953 unsigned long __gu_val; \
18954 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
18955- (x) = (__force __typeof__(*(ptr)))__gu_val; \
18956+ (x) = (__typeof__(*(ptr)))__gu_val; \
18957 __gu_err; \
18958 })
18959
18960 /* FIXME: this hack is definitely wrong -AK */
18961 struct __large_struct { unsigned long buf[100]; };
18962-#define __m(x) (*(struct __large_struct __user *)(x))
18963+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18964+#define ____m(x) \
18965+({ \
18966+ unsigned long ____x = (unsigned long)(x); \
18967+ if (____x < pax_user_shadow_base) \
18968+ ____x += pax_user_shadow_base; \
18969+ (typeof(x))____x; \
18970+})
18971+#else
18972+#define ____m(x) (x)
18973+#endif
18974+#define __m(x) (*(struct __large_struct __user *)____m(x))
18975
18976 /*
18977 * Tell gcc we read from memory instead of writing: this is because
18978@@ -412,8 +471,10 @@ struct __large_struct { unsigned long buf[100]; };
18979 * aliasing issues.
18980 */
18981 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
18982+do { \
18983+ pax_open_userland(); \
18984 asm volatile(ASM_STAC "\n" \
18985- "1: mov"itype" %"rtype"1,%2\n" \
18986+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
18987 "2: " ASM_CLAC "\n" \
18988 ".section .fixup,\"ax\"\n" \
18989 "3: mov %3,%0\n" \
18990@@ -421,10 +482,12 @@ struct __large_struct { unsigned long buf[100]; };
18991 ".previous\n" \
18992 _ASM_EXTABLE(1b, 3b) \
18993 : "=r"(err) \
18994- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
18995+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
18996+ pax_close_userland(); \
18997+} while (0)
18998
18999 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
19000- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
19001+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
19002 "2:\n" \
19003 _ASM_EXTABLE_EX(1b, 2b) \
19004 : : ltype(x), "m" (__m(addr)))
19005@@ -434,11 +497,13 @@ struct __large_struct { unsigned long buf[100]; };
19006 */
19007 #define uaccess_try do { \
19008 current_thread_info()->uaccess_err = 0; \
19009+ pax_open_userland(); \
19010 stac(); \
19011 barrier();
19012
19013 #define uaccess_catch(err) \
19014 clac(); \
19015+ pax_close_userland(); \
19016 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
19017 } while (0)
19018
19019@@ -463,8 +528,12 @@ struct __large_struct { unsigned long buf[100]; };
19020 * On error, the variable @x is set to zero.
19021 */
19022
19023+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19024+#define __get_user(x, ptr) get_user((x), (ptr))
19025+#else
19026 #define __get_user(x, ptr) \
19027 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
19028+#endif
19029
19030 /**
19031 * __put_user: - Write a simple value into user space, with less checking.
19032@@ -486,8 +555,12 @@ struct __large_struct { unsigned long buf[100]; };
19033 * Returns zero on success, or -EFAULT on error.
19034 */
19035
19036+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19037+#define __put_user(x, ptr) put_user((x), (ptr))
19038+#else
19039 #define __put_user(x, ptr) \
19040 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
19041+#endif
19042
19043 #define __get_user_unaligned __get_user
19044 #define __put_user_unaligned __put_user
19045@@ -505,7 +578,7 @@ struct __large_struct { unsigned long buf[100]; };
19046 #define get_user_ex(x, ptr) do { \
19047 unsigned long __gue_val; \
19048 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
19049- (x) = (__force __typeof__(*(ptr)))__gue_val; \
19050+ (x) = (__typeof__(*(ptr)))__gue_val; \
19051 } while (0)
19052
19053 #define put_user_try uaccess_try
19054@@ -536,17 +609,6 @@ extern struct movsl_mask {
19055
19056 #define ARCH_HAS_NOCACHE_UACCESS 1
19057
19058-#ifdef CONFIG_X86_32
19059-# include <asm/uaccess_32.h>
19060-#else
19061-# include <asm/uaccess_64.h>
19062-#endif
19063-
19064-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
19065- unsigned n);
19066-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19067- unsigned n);
19068-
19069 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
19070 # define copy_user_diag __compiletime_error
19071 #else
19072@@ -556,7 +618,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19073 extern void copy_user_diag("copy_from_user() buffer size is too small")
19074 copy_from_user_overflow(void);
19075 extern void copy_user_diag("copy_to_user() buffer size is too small")
19076-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19077+copy_to_user_overflow(void);
19078
19079 #undef copy_user_diag
19080
19081@@ -569,7 +631,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
19082
19083 extern void
19084 __compiletime_warning("copy_to_user() buffer size is not provably correct")
19085-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19086+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
19087 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
19088
19089 #else
19090@@ -584,10 +646,16 @@ __copy_from_user_overflow(int size, unsigned long count)
19091
19092 #endif
19093
19094+#ifdef CONFIG_X86_32
19095+# include <asm/uaccess_32.h>
19096+#else
19097+# include <asm/uaccess_64.h>
19098+#endif
19099+
19100 static inline unsigned long __must_check
19101 copy_from_user(void *to, const void __user *from, unsigned long n)
19102 {
19103- int sz = __compiletime_object_size(to);
19104+ size_t sz = __compiletime_object_size(to);
19105
19106 might_fault();
19107
19108@@ -609,12 +677,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19109 * case, and do only runtime checking for non-constant sizes.
19110 */
19111
19112- if (likely(sz < 0 || sz >= n))
19113- n = _copy_from_user(to, from, n);
19114- else if(__builtin_constant_p(n))
19115- copy_from_user_overflow();
19116- else
19117- __copy_from_user_overflow(sz, n);
19118+ if (likely(sz != (size_t)-1 && sz < n)) {
19119+ if(__builtin_constant_p(n))
19120+ copy_from_user_overflow();
19121+ else
19122+ __copy_from_user_overflow(sz, n);
19123+	} else if (access_ok(VERIFY_READ, from, n))
19124+ n = __copy_from_user(to, from, n);
19125+ else if ((long)n > 0)
19126+ memset(to, 0, n);
19127
19128 return n;
19129 }
19130@@ -622,17 +693,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19131 static inline unsigned long __must_check
19132 copy_to_user(void __user *to, const void *from, unsigned long n)
19133 {
19134- int sz = __compiletime_object_size(from);
19135+ size_t sz = __compiletime_object_size(from);
19136
19137 might_fault();
19138
19139 /* See the comment in copy_from_user() above. */
19140- if (likely(sz < 0 || sz >= n))
19141- n = _copy_to_user(to, from, n);
19142- else if(__builtin_constant_p(n))
19143- copy_to_user_overflow();
19144- else
19145- __copy_to_user_overflow(sz, n);
19146+ if (likely(sz != (size_t)-1 && sz < n)) {
19147+ if(__builtin_constant_p(n))
19148+ copy_to_user_overflow();
19149+ else
19150+ __copy_to_user_overflow(sz, n);
19151+ } else if (access_ok(VERIFY_WRITE, to, n))
19152+ n = __copy_to_user(to, from, n);
19153
19154 return n;
19155 }
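
The new access_ok() above does more than the old range check: for a multi-page range it touches one byte per page up front (writing the byte back for VERIFY_WRITE), so a fault surfaces before the caller starts a long copy, with a _cond_resched() between pages to stay preemptible. A user-space sketch of just the probing loop, where probe_byte() stands in for __get_user():

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

static int probe_byte(const char *p, char *out)
{
	*out = *p;	/* stand-in for __get_user(); 0 means success */
	return 0;
}

static int range_ok(const char *addr, unsigned long size)
{
	unsigned long a = (unsigned long)addr;
	unsigned long page = a & PAGE_MASK;
	unsigned long end = a + size - 1;
	char c;

	while (page <= end) {	/* one probe per page of the range */
		page += PAGE_SIZE;
		if (probe_byte((const char *)a, &c))
			return 0;
		a = page;
	}
	return 1;
}

int main(void)
{
	static char buf[3 * 4096];

	printf("ok=%d\n", range_ok(buf, sizeof(buf)));
	return 0;
}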
19156diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
19157index 3c03a5d..1071638 100644
19158--- a/arch/x86/include/asm/uaccess_32.h
19159+++ b/arch/x86/include/asm/uaccess_32.h
19160@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
19161 static __always_inline unsigned long __must_check
19162 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
19163 {
19164+ if ((long)n < 0)
19165+ return n;
19166+
19167+ check_object_size(from, n, true);
19168+
19169 if (__builtin_constant_p(n)) {
19170 unsigned long ret;
19171
19172@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
19173 __copy_to_user(void __user *to, const void *from, unsigned long n)
19174 {
19175 might_fault();
19176+
19177 return __copy_to_user_inatomic(to, from, n);
19178 }
19179
19180 static __always_inline unsigned long
19181 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
19182 {
19183+ if ((long)n < 0)
19184+ return n;
19185+
19186 /* Avoid zeroing the tail if the copy fails..
19187 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
19188 * but as the zeroing behaviour is only significant when n is not
19189@@ -137,6 +146,12 @@ static __always_inline unsigned long
19190 __copy_from_user(void *to, const void __user *from, unsigned long n)
19191 {
19192 might_fault();
19193+
19194+ if ((long)n < 0)
19195+ return n;
19196+
19197+ check_object_size(to, n, false);
19198+
19199 if (__builtin_constant_p(n)) {
19200 unsigned long ret;
19201
19202@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
19203 const void __user *from, unsigned long n)
19204 {
19205 might_fault();
19206+
19207+ if ((long)n < 0)
19208+ return n;
19209+
19210 if (__builtin_constant_p(n)) {
19211 unsigned long ret;
19212
19213@@ -181,7 +200,10 @@ static __always_inline unsigned long
19214 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
19215 unsigned long n)
19216 {
19217- return __copy_from_user_ll_nocache_nozero(to, from, n);
19218+ if ((long)n < 0)
19219+ return n;
19220+
19221+ return __copy_from_user_ll_nocache_nozero(to, from, n);
19222 }
19223
19224 #endif /* _ASM_X86_UACCESS_32_H */
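
Each 32-bit copy helper above gains the same two preludes: a sign check that bounces a length whose top bit is set (almost always an underflowed subtraction), and, in the main entry points, a check_object_size() call feeding the PAX_USERCOPY slab checks. A minimal illustration of what the sign check catches:

#include <stdio.h>

static unsigned long copy_guarded(unsigned long n)
{
	if ((long)n < 0)	/* huge value from an underflowed length */
		return n;	/* report the whole request as uncopied */
	/* ...the real copy would run here... */
	return 0;
}

int main(void)
{
	unsigned long len = 4;
	unsigned long n = len - 8;	/* wraps to a huge unsigned value */

	printf("uncopied=%lu\n", copy_guarded(n));
	return 0;
}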
19225diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
19226index 190413d..8a80c2a 100644
19227--- a/arch/x86/include/asm/uaccess_64.h
19228+++ b/arch/x86/include/asm/uaccess_64.h
19229@@ -10,6 +10,9 @@
19230 #include <asm/alternative.h>
19231 #include <asm/cpufeature.h>
19232 #include <asm/page.h>
19233+#include <asm/pgtable.h>
19234+
19235+#define set_fs(x) (current_thread_info()->addr_limit = (x))
19236
19237 /*
19238 * Copy To/From Userspace
19239@@ -17,14 +20,14 @@
19240
19241 /* Handles exceptions in both to and from, but doesn't do access_ok */
19242 __must_check unsigned long
19243-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
19244+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
19245 __must_check unsigned long
19246-copy_user_generic_string(void *to, const void *from, unsigned len);
19247+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
19248 __must_check unsigned long
19249-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
19250+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
19251
19252 static __always_inline __must_check unsigned long
19253-copy_user_generic(void *to, const void *from, unsigned len)
19254+copy_user_generic(void *to, const void *from, unsigned long len)
19255 {
19256 unsigned ret;
19257
19258@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
19259 }
19260
19261 __must_check unsigned long
19262-copy_in_user(void __user *to, const void __user *from, unsigned len);
19263+copy_in_user(void __user *to, const void __user *from, unsigned long len);
19264
19265 static __always_inline __must_check
19266-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
19267+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
19268 {
19269- int ret = 0;
19270+ size_t sz = __compiletime_object_size(dst);
19271+ unsigned ret = 0;
19272+
19273+ if (size > INT_MAX)
19274+ return size;
19275+
19276+ check_object_size(dst, size, false);
19277+
19278+#ifdef CONFIG_PAX_MEMORY_UDEREF
19279+ if (!__access_ok(VERIFY_READ, src, size))
19280+ return size;
19281+#endif
19282+
19283+ if (unlikely(sz != (size_t)-1 && sz < size)) {
19284+ if(__builtin_constant_p(size))
19285+ copy_from_user_overflow();
19286+ else
19287+ __copy_from_user_overflow(sz, size);
19288+ return size;
19289+ }
19290
19291 if (!__builtin_constant_p(size))
19292- return copy_user_generic(dst, (__force void *)src, size);
19293+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
19294 switch (size) {
19295- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
19296+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
19297 ret, "b", "b", "=q", 1);
19298 return ret;
19299- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
19300+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
19301 ret, "w", "w", "=r", 2);
19302 return ret;
19303- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
19304+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
19305 ret, "l", "k", "=r", 4);
19306 return ret;
19307- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
19308+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
19309 ret, "q", "", "=r", 8);
19310 return ret;
19311 case 10:
19312- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
19313+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
19314 ret, "q", "", "=r", 10);
19315 if (unlikely(ret))
19316 return ret;
19317 __get_user_asm(*(u16 *)(8 + (char *)dst),
19318- (u16 __user *)(8 + (char __user *)src),
19319+ (const u16 __user *)(8 + (const char __user *)src),
19320 ret, "w", "w", "=r", 2);
19321 return ret;
19322 case 16:
19323- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
19324+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
19325 ret, "q", "", "=r", 16);
19326 if (unlikely(ret))
19327 return ret;
19328 __get_user_asm(*(u64 *)(8 + (char *)dst),
19329- (u64 __user *)(8 + (char __user *)src),
19330+ (const u64 __user *)(8 + (const char __user *)src),
19331 ret, "q", "", "=r", 8);
19332 return ret;
19333 default:
19334- return copy_user_generic(dst, (__force void *)src, size);
19335+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
19336 }
19337 }
19338
19339 static __always_inline __must_check
19340-int __copy_from_user(void *dst, const void __user *src, unsigned size)
19341+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
19342 {
19343 might_fault();
19344 return __copy_from_user_nocheck(dst, src, size);
19345 }
19346
19347 static __always_inline __must_check
19348-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
19349+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
19350 {
19351- int ret = 0;
19352+ size_t sz = __compiletime_object_size(src);
19353+ unsigned ret = 0;
19354+
19355+ if (size > INT_MAX)
19356+ return size;
19357+
19358+ check_object_size(src, size, true);
19359+
19360+#ifdef CONFIG_PAX_MEMORY_UDEREF
19361+ if (!__access_ok(VERIFY_WRITE, dst, size))
19362+ return size;
19363+#endif
19364+
19365+ if (unlikely(sz != (size_t)-1 && sz < size)) {
19366+ if(__builtin_constant_p(size))
19367+ copy_to_user_overflow();
19368+ else
19369+ __copy_to_user_overflow(sz, size);
19370+ return size;
19371+ }
19372
19373 if (!__builtin_constant_p(size))
19374- return copy_user_generic((__force void *)dst, src, size);
19375+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
19376 switch (size) {
19377- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
19378+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
19379 ret, "b", "b", "iq", 1);
19380 return ret;
19381- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
19382+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
19383 ret, "w", "w", "ir", 2);
19384 return ret;
19385- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
19386+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
19387 ret, "l", "k", "ir", 4);
19388 return ret;
19389- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
19390+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
19391 ret, "q", "", "er", 8);
19392 return ret;
19393 case 10:
19394- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
19395+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
19396 ret, "q", "", "er", 10);
19397 if (unlikely(ret))
19398 return ret;
19399 asm("":::"memory");
19400- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
19401+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
19402 ret, "w", "w", "ir", 2);
19403 return ret;
19404 case 16:
19405- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
19406+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
19407 ret, "q", "", "er", 16);
19408 if (unlikely(ret))
19409 return ret;
19410 asm("":::"memory");
19411- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
19412+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
19413 ret, "q", "", "er", 8);
19414 return ret;
19415 default:
19416- return copy_user_generic((__force void *)dst, src, size);
19417+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
19418 }
19419 }
19420
19421 static __always_inline __must_check
19422-int __copy_to_user(void __user *dst, const void *src, unsigned size)
19423+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
19424 {
19425 might_fault();
19426 return __copy_to_user_nocheck(dst, src, size);
19427 }
19428
19429 static __always_inline __must_check
19430-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19431+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19432 {
19433- int ret = 0;
19434+ unsigned ret = 0;
19435
19436 might_fault();
19437+
19438+ if (size > INT_MAX)
19439+ return size;
19440+
19441+#ifdef CONFIG_PAX_MEMORY_UDEREF
19442+ if (!__access_ok(VERIFY_READ, src, size))
19443+ return size;
19444+ if (!__access_ok(VERIFY_WRITE, dst, size))
19445+ return size;
19446+#endif
19447+
19448 if (!__builtin_constant_p(size))
19449- return copy_user_generic((__force void *)dst,
19450- (__force void *)src, size);
19451+ return copy_user_generic((__force_kernel void *)____m(dst),
19452+ (__force_kernel const void *)____m(src), size);
19453 switch (size) {
19454 case 1: {
19455 u8 tmp;
19456- __get_user_asm(tmp, (u8 __user *)src,
19457+ __get_user_asm(tmp, (const u8 __user *)src,
19458 ret, "b", "b", "=q", 1);
19459 if (likely(!ret))
19460 __put_user_asm(tmp, (u8 __user *)dst,
19461@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19462 }
19463 case 2: {
19464 u16 tmp;
19465- __get_user_asm(tmp, (u16 __user *)src,
19466+ __get_user_asm(tmp, (const u16 __user *)src,
19467 ret, "w", "w", "=r", 2);
19468 if (likely(!ret))
19469 __put_user_asm(tmp, (u16 __user *)dst,
19470@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19471
19472 case 4: {
19473 u32 tmp;
19474- __get_user_asm(tmp, (u32 __user *)src,
19475+ __get_user_asm(tmp, (const u32 __user *)src,
19476 ret, "l", "k", "=r", 4);
19477 if (likely(!ret))
19478 __put_user_asm(tmp, (u32 __user *)dst,
19479@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19480 }
19481 case 8: {
19482 u64 tmp;
19483- __get_user_asm(tmp, (u64 __user *)src,
19484+ __get_user_asm(tmp, (const u64 __user *)src,
19485 ret, "q", "", "=r", 8);
19486 if (likely(!ret))
19487 __put_user_asm(tmp, (u64 __user *)dst,
19488@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19489 return ret;
19490 }
19491 default:
19492- return copy_user_generic((__force void *)dst,
19493- (__force void *)src, size);
19494+ return copy_user_generic((__force_kernel void *)____m(dst),
19495+ (__force_kernel const void *)____m(src), size);
19496 }
19497 }
19498
19499-static __must_check __always_inline int
19500-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
19501+static __must_check __always_inline unsigned long
19502+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
19503 {
19504- return __copy_from_user_nocheck(dst, (__force const void *)src, size);
19505+ return __copy_from_user_nocheck(dst, src, size);
19506 }
19507
19508-static __must_check __always_inline int
19509-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
19510+static __must_check __always_inline unsigned long
19511+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
19512 {
19513- return __copy_to_user_nocheck((__force void *)dst, src, size);
19514+ return __copy_to_user_nocheck(dst, src, size);
19515 }
19516
19517-extern long __copy_user_nocache(void *dst, const void __user *src,
19518- unsigned size, int zerorest);
19519+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
19520+ unsigned long size, int zerorest);
19521
19522-static inline int
19523-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
19524+static inline unsigned long
19525+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
19526 {
19527 might_fault();
19528+
19529+ if (size > INT_MAX)
19530+ return size;
19531+
19532+#ifdef CONFIG_PAX_MEMORY_UDEREF
19533+ if (!__access_ok(VERIFY_READ, src, size))
19534+ return size;
19535+#endif
19536+
19537 return __copy_user_nocache(dst, src, size, 1);
19538 }
19539
19540-static inline int
19541+static inline unsigned long
19542 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
19543- unsigned size)
19544+ unsigned long size)
19545 {
19546+ if (size > INT_MAX)
19547+ return size;
19548+
19549+#ifdef CONFIG_PAX_MEMORY_UDEREF
19550+ if (!__access_ok(VERIFY_READ, src, size))
19551+ return size;
19552+#endif
19553+
19554 return __copy_user_nocache(dst, src, size, 0);
19555 }
19556
19557 unsigned long
19558-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
19559+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
19560
19561 #endif /* _ASM_X86_UACCESS_64_H */
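
On the 64-bit side the helpers above grow from int to unsigned long sizes and pick up the same battery of checks: the INT_MAX cap, __access_ok() under UDEREF, the ____m() shadow-base rewrite of user pointers, and a compile-time object-size comparison. The last one leans on GCC's __builtin_object_size(), which yields (size_t)-1 when the size is unknown; a sketch of the decision (whether the builtin folds can depend on optimization level):

#include <stddef.h>
#include <stdio.h>

static int would_overflow(size_t dst_sz, size_t n)
{
	/* (size_t)-1 means "unknown at compile time": skip the static check */
	return dst_sz != (size_t)-1 && dst_sz < n;
}

int main(void)
{
	char small[8];
	size_t sz = __builtin_object_size(small, 0);

	printf("overflow16=%d overflow4=%d\n",
	       would_overflow(sz, 16), would_overflow(sz, 4));
	return 0;
}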
19562diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
19563index 5b238981..77fdd78 100644
19564--- a/arch/x86/include/asm/word-at-a-time.h
19565+++ b/arch/x86/include/asm/word-at-a-time.h
19566@@ -11,7 +11,7 @@
19567 * and shift, for example.
19568 */
19569 struct word_at_a_time {
19570- const unsigned long one_bits, high_bits;
19571+ unsigned long one_bits, high_bits;
19572 };
19573
19574 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
19575diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
19576index 0f1be11..f7542bf 100644
19577--- a/arch/x86/include/asm/x86_init.h
19578+++ b/arch/x86/include/asm/x86_init.h
19579@@ -129,7 +129,7 @@ struct x86_init_ops {
19580 struct x86_init_timers timers;
19581 struct x86_init_iommu iommu;
19582 struct x86_init_pci pci;
19583-};
19584+} __no_const;
19585
19586 /**
19587 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
19588@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
19589 void (*setup_percpu_clockev)(void);
19590 void (*early_percpu_clock_init)(void);
19591 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
19592-};
19593+} __no_const;
19594
19595 struct timespec;
19596
19597@@ -168,7 +168,7 @@ struct x86_platform_ops {
19598 void (*save_sched_clock_state)(void);
19599 void (*restore_sched_clock_state)(void);
19600 void (*apic_post_init)(void);
19601-};
19602+} __no_const;
19603
19604 struct pci_dev;
19605 struct msi_msg;
19606@@ -185,7 +185,7 @@ struct x86_msi_ops {
19607 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
19608 u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag);
19609 u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag);
19610-};
19611+} __no_const;
19612
19613 struct IO_APIC_route_entry;
19614 struct io_apic_irq_attr;
19615@@ -206,7 +206,7 @@ struct x86_io_apic_ops {
19616 unsigned int destination, int vector,
19617 struct io_apic_irq_attr *attr);
19618 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
19619-};
19620+} __no_const;
19621
19622 extern struct x86_init_ops x86_init;
19623 extern struct x86_cpuinit_ops x86_cpuinit;
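
__no_const above is an annotation for the PaX constify GCC plugin: structures consisting purely of function pointers are normally moved to read-only memory wholesale, and these x86_init/x86_msi/x86_io_apic ops are opted out because the kernel genuinely rewrites their hooks at runtime. A hypothetical stand-in showing why such a table must stay writable:

#include <stdio.h>

struct timer_ops {
	void (*setup)(void);
};

static void default_setup(void)  { puts("default"); }
static void platform_setup(void) { puts("platform"); }

/* writable on purpose: boot code swaps the hook per platform */
static struct timer_ops timers = { .setup = default_setup };

int main(void)
{
	timers.setup = platform_setup;	/* would fault if placed in rodata */
	timers.setup();
	return 0;
}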
19624diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
19625index b913915..4f5a581 100644
19626--- a/arch/x86/include/asm/xen/page.h
19627+++ b/arch/x86/include/asm/xen/page.h
19628@@ -56,7 +56,7 @@ extern int m2p_remove_override(struct page *page,
19629 extern struct page *m2p_find_override(unsigned long mfn);
19630 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
19631
19632-static inline unsigned long pfn_to_mfn(unsigned long pfn)
19633+static inline unsigned long __intentional_overflow(-1) pfn_to_mfn(unsigned long pfn)
19634 {
19635 unsigned long mfn;
19636
19637diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
19638index 0415cda..3b22adc 100644
19639--- a/arch/x86/include/asm/xsave.h
19640+++ b/arch/x86/include/asm/xsave.h
19641@@ -70,8 +70,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
19642 if (unlikely(err))
19643 return -EFAULT;
19644
19645+ pax_open_userland();
19646 __asm__ __volatile__(ASM_STAC "\n"
19647- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
19648+ "1:"
19649+ __copyuser_seg
19650+ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
19651 "2: " ASM_CLAC "\n"
19652 ".section .fixup,\"ax\"\n"
19653 "3: movl $-1,%[err]\n"
19654@@ -81,18 +84,22 @@ static inline int xsave_user(struct xsave_struct __user *buf)
19655 : [err] "=r" (err)
19656 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
19657 : "memory");
19658+ pax_close_userland();
19659 return err;
19660 }
19661
19662 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
19663 {
19664 int err;
19665- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
19666+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
19667 u32 lmask = mask;
19668 u32 hmask = mask >> 32;
19669
19670+ pax_open_userland();
19671 __asm__ __volatile__(ASM_STAC "\n"
19672- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
19673+ "1:"
19674+ __copyuser_seg
19675+ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
19676 "2: " ASM_CLAC "\n"
19677 ".section .fixup,\"ax\"\n"
19678 "3: movl $-1,%[err]\n"
19679@@ -102,6 +109,7 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
19680 : [err] "=r" (err)
19681 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
19682 : "memory"); /* memory required? */
19683+ pax_close_userland();
19684 return err;
19685 }
19686
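
The xsave/xrstor user copies above get the same bracketing as every other uaccess primitive in this patch: pax_open_userland() before the access, pax_close_userland() after, plus the __copyuser_seg prefix on i386 so the access goes through the userland %gs segment. Schematically (the pax_* helpers are defined elsewhere in the patch; this toy model shows only the open/access/close discipline):

#include <stdio.h>

static int window_open;	/* models the per-cpu userland-access state */

static void open_userland(void)  { window_open = 1; }
static void close_userland(void) { window_open = 0; }

static int user_op(void)
{
	return window_open ? 0 : -14;	/* -EFAULT if the window is closed */
}

int main(void)
{
	int err;

	open_userland();
	err = user_op();	/* the one access allowed to touch user memory */
	close_userland();
	printf("err=%d\n", err);
	return 0;
}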
19687diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
19688index bbae024..e1528f9 100644
19689--- a/arch/x86/include/uapi/asm/e820.h
19690+++ b/arch/x86/include/uapi/asm/e820.h
19691@@ -63,7 +63,7 @@ struct e820map {
19692 #define ISA_START_ADDRESS 0xa0000
19693 #define ISA_END_ADDRESS 0x100000
19694
19695-#define BIOS_BEGIN 0x000a0000
19696+#define BIOS_BEGIN 0x000c0000
19697 #define BIOS_END 0x00100000
19698
19699 #define BIOS_ROM_BASE 0xffe00000
19700diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
19701index 7b0a55a..ad115bf 100644
19702--- a/arch/x86/include/uapi/asm/ptrace-abi.h
19703+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
19704@@ -49,7 +49,6 @@
19705 #define EFLAGS 144
19706 #define RSP 152
19707 #define SS 160
19708-#define ARGOFFSET R11
19709 #endif /* __ASSEMBLY__ */
19710
19711 /* top of stack page */
19712diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
19713index 9b0a34e..fc7e553 100644
19714--- a/arch/x86/kernel/Makefile
19715+++ b/arch/x86/kernel/Makefile
19716@@ -24,7 +24,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
19717 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
19718 obj-$(CONFIG_IRQ_WORK) += irq_work.o
19719 obj-y += probe_roms.o
19720-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
19721+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
19722 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
19723 obj-y += syscall_$(BITS).o
19724 obj-$(CONFIG_X86_64) += vsyscall_64.o
19725diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
19726index 6c0b43b..e67bb31 100644
19727--- a/arch/x86/kernel/acpi/boot.c
19728+++ b/arch/x86/kernel/acpi/boot.c
19729@@ -1315,7 +1315,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
19730 * If your system is blacklisted here, but you find that acpi=force
19731 * works for you, please contact linux-acpi@vger.kernel.org
19732 */
19733-static struct dmi_system_id __initdata acpi_dmi_table[] = {
19734+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
19735 /*
19736 * Boxes that need ACPI disabled
19737 */
19738@@ -1390,7 +1390,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
19739 };
19740
19741 /* second table for DMI checks that should run after early-quirks */
19742-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
19743+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
19744 /*
19745 * HP laptops which use a DSDT reporting as HP/SB400/10000,
19746 * which includes some code which overrides all temperature
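
The DMI tables above go from __initdata to const __initconst: they are only ever read, and only during boot, so they can live in discardable init read-only data. The same pattern in plain C, minus the kernel's section macros:

#include <stdio.h>

struct dmi_entry {
	const char *ident;
};

/* const lets the compiler place the table in read-only memory;
 * __initconst (kernel-only) additionally makes it discardable after boot */
static const struct dmi_entry table[] = {
	{ "Example Box" },
	{ NULL },
};

int main(void)
{
	const struct dmi_entry *e;

	for (e = table; e->ident; e++)
		printf("%s\n", e->ident);
	return 0;
}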
19747diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
19748index 3a2ae4c..9db31d6 100644
19749--- a/arch/x86/kernel/acpi/sleep.c
19750+++ b/arch/x86/kernel/acpi/sleep.c
19751@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
19752 #else /* CONFIG_64BIT */
19753 #ifdef CONFIG_SMP
19754 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
19755+
19756+ pax_open_kernel();
19757 early_gdt_descr.address =
19758 (unsigned long)get_cpu_gdt_table(smp_processor_id());
19759+ pax_close_kernel();
19760+
19761 initial_gs = per_cpu_offset(smp_processor_id());
19762 #endif
19763 initial_code = (unsigned long)wakeup_long64;
19764diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
19765index 665c6b7..eae4d56 100644
19766--- a/arch/x86/kernel/acpi/wakeup_32.S
19767+++ b/arch/x86/kernel/acpi/wakeup_32.S
19768@@ -29,13 +29,11 @@ wakeup_pmode_return:
19769 # and restore the stack ... but you need gdt for this to work
19770 movl saved_context_esp, %esp
19771
19772- movl %cs:saved_magic, %eax
19773- cmpl $0x12345678, %eax
19774+ cmpl $0x12345678, saved_magic
19775 jne bogus_magic
19776
19777 # jump to place where we left off
19778- movl saved_eip, %eax
19779- jmp *%eax
19780+ jmp *(saved_eip)
19781
19782 bogus_magic:
19783 jmp bogus_magic
19784diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
19785index df94598..f3b29bf 100644
19786--- a/arch/x86/kernel/alternative.c
19787+++ b/arch/x86/kernel/alternative.c
19788@@ -269,6 +269,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
19789 */
19790 for (a = start; a < end; a++) {
19791 instr = (u8 *)&a->instr_offset + a->instr_offset;
19792+
19793+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19794+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
19795+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
19796+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
19797+#endif
19798+
19799 replacement = (u8 *)&a->repl_offset + a->repl_offset;
19800 BUG_ON(a->replacementlen > a->instrlen);
19801 BUG_ON(a->instrlen > sizeof(insnbuf));
19802@@ -300,10 +307,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
19803 for (poff = start; poff < end; poff++) {
19804 u8 *ptr = (u8 *)poff + *poff;
19805
19806+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19807+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
19808+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
19809+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
19810+#endif
19811+
19812 if (!*poff || ptr < text || ptr >= text_end)
19813 continue;
19814 /* turn DS segment override prefix into lock prefix */
19815- if (*ptr == 0x3e)
19816+ if (*ktla_ktva(ptr) == 0x3e)
19817 text_poke(ptr, ((unsigned char []){0xf0}), 1);
19818 }
19819 mutex_unlock(&text_mutex);
19820@@ -318,10 +331,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
19821 for (poff = start; poff < end; poff++) {
19822 u8 *ptr = (u8 *)poff + *poff;
19823
19824+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19825+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
19826+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
19827+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
19828+#endif
19829+
19830 if (!*poff || ptr < text || ptr >= text_end)
19831 continue;
19832 /* turn lock prefix into DS segment override prefix */
19833- if (*ptr == 0xf0)
19834+ if (*ktla_ktva(ptr) == 0xf0)
19835 text_poke(ptr, ((unsigned char []){0x3E}), 1);
19836 }
19837 mutex_unlock(&text_mutex);
19838@@ -458,7 +477,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
19839
19840 BUG_ON(p->len > MAX_PATCH_LEN);
19841 /* prep the buffer with the original instructions */
19842- memcpy(insnbuf, p->instr, p->len);
19843+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
19844 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
19845 (unsigned long)p->instr, p->len);
19846
19847@@ -505,7 +524,7 @@ void __init alternative_instructions(void)
19848 if (!uniproc_patched || num_possible_cpus() == 1)
19849 free_init_pages("SMP alternatives",
19850 (unsigned long)__smp_locks,
19851- (unsigned long)__smp_locks_end);
19852+ PAGE_ALIGN((unsigned long)__smp_locks_end));
19853 #endif
19854
19855 apply_paravirt(__parainstructions, __parainstructions_end);
19856@@ -525,13 +544,17 @@ void __init alternative_instructions(void)
19857 * instructions. And on the local CPU you need to be protected again NMI or MCE
19858 * handlers seeing an inconsistent instruction while you patch.
19859 */
19860-void *__init_or_module text_poke_early(void *addr, const void *opcode,
19861+void *__kprobes text_poke_early(void *addr, const void *opcode,
19862 size_t len)
19863 {
19864 unsigned long flags;
19865 local_irq_save(flags);
19866- memcpy(addr, opcode, len);
19867+
19868+ pax_open_kernel();
19869+ memcpy(ktla_ktva(addr), opcode, len);
19870 sync_core();
19871+ pax_close_kernel();
19872+
19873 local_irq_restore(flags);
19874 /* Could also do a CLFLUSH here to speed up CPU recovery; but
19875 that causes hangs on some VIA CPUs. */
19876@@ -553,36 +576,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
19877 */
19878 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
19879 {
19880- unsigned long flags;
19881- char *vaddr;
19882+ unsigned char *vaddr = ktla_ktva(addr);
19883 struct page *pages[2];
19884- int i;
19885+ size_t i;
19886
19887 if (!core_kernel_text((unsigned long)addr)) {
19888- pages[0] = vmalloc_to_page(addr);
19889- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
19890+ pages[0] = vmalloc_to_page(vaddr);
19891+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
19892 } else {
19893- pages[0] = virt_to_page(addr);
19894+ pages[0] = virt_to_page(vaddr);
19895 WARN_ON(!PageReserved(pages[0]));
19896- pages[1] = virt_to_page(addr + PAGE_SIZE);
19897+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
19898 }
19899 BUG_ON(!pages[0]);
19900- local_irq_save(flags);
19901- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
19902- if (pages[1])
19903- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
19904- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
19905- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
19906- clear_fixmap(FIX_TEXT_POKE0);
19907- if (pages[1])
19908- clear_fixmap(FIX_TEXT_POKE1);
19909- local_flush_tlb();
19910- sync_core();
19911- /* Could also do a CLFLUSH here to speed up CPU recovery; but
19912- that causes hangs on some VIA CPUs. */
19913+ text_poke_early(addr, opcode, len);
19914 for (i = 0; i < len; i++)
19915- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
19916- local_irq_restore(flags);
19917+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
19918 return addr;
19919 }
19920
19921@@ -602,7 +611,7 @@ int poke_int3_handler(struct pt_regs *regs)
19922 if (likely(!bp_patching_in_progress))
19923 return 0;
19924
19925- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
19926+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
19927 return 0;
19928
19929 /* set up the specified breakpoint handler */
19930@@ -636,7 +645,7 @@ int poke_int3_handler(struct pt_regs *regs)
19931 */
19932 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
19933 {
19934- unsigned char int3 = 0xcc;
19935+ const unsigned char int3 = 0xcc;
19936
19937 bp_int3_handler = handler;
19938 bp_int3_addr = (u8 *)addr + sizeof(int3);
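
The alternative.c hunks above route every read and write of kernel text through ktla_ktva() (defined elsewhere in this patch): under KERNEXEC on i386 the kernel's code executes through one mapping while its bytes are read and patched through another, so text_poke_early() wraps its memcpy() in pax_open_kernel()/pax_close_kernel() and text_poke() is reduced to calling it. A toy model of a two-alias translation; the offset and direction here are purely illustrative:

#include <stdint.h>
#include <stdio.h>

#define ALIAS_SHIFT	0x10000000UL	/* illustrative offset only */

static uintptr_t exec_to_patch(uintptr_t addr) { return addr + ALIAS_SHIFT; }
static uintptr_t patch_to_exec(uintptr_t addr) { return addr - ALIAS_SHIFT; }

int main(void)
{
	uintptr_t run_at = 0xc1000000UL;	/* where the code executes */
	uintptr_t patch_at = exec_to_patch(run_at);

	printf("patch via %#lx, run at %#lx\n",
	       (unsigned long)patch_at,
	       (unsigned long)patch_to_exec(patch_at));
	return 0;
}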
19939diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
19940index d278736..0b4af9a8 100644
19941--- a/arch/x86/kernel/apic/apic.c
19942+++ b/arch/x86/kernel/apic/apic.c
19943@@ -191,7 +191,7 @@ int first_system_vector = 0xfe;
19944 /*
19945 * Debug level, exported for io_apic.c
19946 */
19947-unsigned int apic_verbosity;
19948+int apic_verbosity;
19949
19950 int pic_mode;
19951
19952@@ -1986,7 +1986,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
19953 apic_write(APIC_ESR, 0);
19954 v1 = apic_read(APIC_ESR);
19955 ack_APIC_irq();
19956- atomic_inc(&irq_err_count);
19957+ atomic_inc_unchecked(&irq_err_count);
19958
19959 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
19960 smp_processor_id(), v0 , v1);
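
atomic_inc_unchecked() above is the other half of PAX_REFCOUNT: regular atomic_t increments trap on overflow to stop reference-count wraps, so counters that are pure statistics and may legitimately wrap (irq_err_count here, irq_mis_count below) are switched to the unchecked variant. The split, modeled in plain C:

#include <limits.h>
#include <stdio.h>

static int checked_inc(int *v)
{
	if (*v == INT_MAX)	/* the kernel variant would trap here */
		return -1;
	return ++*v;
}

static int unchecked_inc(int *v)
{
	return ++*v;	/* statistics counter: wrapping is harmless */
}

int main(void)
{
	int refs = INT_MAX, stats = 0;

	printf("checked=%d unchecked=%d\n",
	       checked_inc(&refs), unchecked_inc(&stats));
	return 0;
}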
19961diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
19962index 00c77cf..2dc6a2d 100644
19963--- a/arch/x86/kernel/apic/apic_flat_64.c
19964+++ b/arch/x86/kernel/apic/apic_flat_64.c
19965@@ -157,7 +157,7 @@ static int flat_probe(void)
19966 return 1;
19967 }
19968
19969-static struct apic apic_flat = {
19970+static struct apic apic_flat __read_only = {
19971 .name = "flat",
19972 .probe = flat_probe,
19973 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
19974@@ -271,7 +271,7 @@ static int physflat_probe(void)
19975 return 0;
19976 }
19977
19978-static struct apic apic_physflat = {
19979+static struct apic apic_physflat __read_only = {
19980
19981 .name = "physical flat",
19982 .probe = physflat_probe,
19983diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
19984index e145f28..2752888 100644
19985--- a/arch/x86/kernel/apic/apic_noop.c
19986+++ b/arch/x86/kernel/apic/apic_noop.c
19987@@ -119,7 +119,7 @@ static void noop_apic_write(u32 reg, u32 v)
19988 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
19989 }
19990
19991-struct apic apic_noop = {
19992+struct apic apic_noop __read_only = {
19993 .name = "noop",
19994 .probe = noop_probe,
19995 .acpi_madt_oem_check = NULL,
19996diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
19997index d50e364..543bee3 100644
19998--- a/arch/x86/kernel/apic/bigsmp_32.c
19999+++ b/arch/x86/kernel/apic/bigsmp_32.c
20000@@ -152,7 +152,7 @@ static int probe_bigsmp(void)
20001 return dmi_bigsmp;
20002 }
20003
20004-static struct apic apic_bigsmp = {
20005+static struct apic apic_bigsmp __read_only = {
20006
20007 .name = "bigsmp",
20008 .probe = probe_bigsmp,
20009diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
20010index c552247..587a316 100644
20011--- a/arch/x86/kernel/apic/es7000_32.c
20012+++ b/arch/x86/kernel/apic/es7000_32.c
20013@@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
20014 return ret && es7000_apic_is_cluster();
20015 }
20016
20017-/* We've been warned by a false positive warning.Use __refdata to keep calm. */
20018-static struct apic __refdata apic_es7000_cluster = {
20019+static struct apic apic_es7000_cluster __read_only = {
20020
20021 .name = "es7000",
20022 .probe = probe_es7000,
20023@@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000_cluster = {
20024 .x86_32_early_logical_apicid = es7000_early_logical_apicid,
20025 };
20026
20027-static struct apic __refdata apic_es7000 = {
20028+static struct apic apic_es7000 __read_only = {
20029
20030 .name = "es7000",
20031 .probe = probe_es7000,
20032diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
20033index e63a5bd..c0babf8 100644
20034--- a/arch/x86/kernel/apic/io_apic.c
20035+++ b/arch/x86/kernel/apic/io_apic.c
20036@@ -1060,7 +1060,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
20037 }
20038 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
20039
20040-void lock_vector_lock(void)
20041+void lock_vector_lock(void) __acquires(vector_lock)
20042 {
20043 /* Used so that the online set of cpus does not change
20044 * during assign_irq_vector.
20045@@ -1068,7 +1068,7 @@ void lock_vector_lock(void)
20046 raw_spin_lock(&vector_lock);
20047 }
20048
20049-void unlock_vector_lock(void)
20050+void unlock_vector_lock(void) __releases(vector_lock)
20051 {
20052 raw_spin_unlock(&vector_lock);
20053 }
20054@@ -2367,7 +2367,7 @@ static void ack_apic_edge(struct irq_data *data)
20055 ack_APIC_irq();
20056 }
20057
20058-atomic_t irq_mis_count;
20059+atomic_unchecked_t irq_mis_count;
20060
20061 #ifdef CONFIG_GENERIC_PENDING_IRQ
20062 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
20063@@ -2508,7 +2508,7 @@ static void ack_apic_level(struct irq_data *data)
20064 * at the cpu.
20065 */
20066 if (!(v & (1 << (i & 0x1f)))) {
20067- atomic_inc(&irq_mis_count);
20068+ atomic_inc_unchecked(&irq_mis_count);
20069
20070 eoi_ioapic_irq(irq, cfg);
20071 }
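
The __acquires(vector_lock) and __releases(vector_lock) annotations added to lock_vector_lock() and unlock_vector_lock() are sparse context annotations: they tell the sparse static checker that the function changes lock state, silencing "context imbalance" warnings for locks taken and released in different functions. A self-contained sketch of how such annotations are typically defined and applied; the __CHECKER__ guard follows the kernel's compiler.h convention, with a pthread mutex standing in for the raw spinlock:

/* Sketch of sparse lock-context annotations; builds as plain C because
 * the attributes expand to nothing unless sparse (__CHECKER__) runs. */
#ifdef __CHECKER__
# define __acquires(x)	__attribute__((context(x, 0, 1)))
# define __releases(x)	__attribute__((context(x, 1, 0)))
#else
# define __acquires(x)
# define __releases(x)
#endif

#include <pthread.h>

static pthread_mutex_t vector_lock = PTHREAD_MUTEX_INITIALIZER;

void lock_vector_lock(void) __acquires(vector_lock);
void unlock_vector_lock(void) __releases(vector_lock);

void lock_vector_lock(void)
{
	pthread_mutex_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	pthread_mutex_unlock(&vector_lock);
}
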
20072diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
20073index 1e42e8f..daacf44 100644
20074--- a/arch/x86/kernel/apic/numaq_32.c
20075+++ b/arch/x86/kernel/apic/numaq_32.c
20076@@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(void)
20077 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
20078 }
20079
20080-/* Use __refdata to keep false positive warning calm. */
20081-static struct apic __refdata apic_numaq = {
20082+static struct apic apic_numaq __read_only = {
20083
20084 .name = "NUMAQ",
20085 .probe = probe_numaq,
20086diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
20087index eb35ef9..f184a21 100644
20088--- a/arch/x86/kernel/apic/probe_32.c
20089+++ b/arch/x86/kernel/apic/probe_32.c
20090@@ -72,7 +72,7 @@ static int probe_default(void)
20091 return 1;
20092 }
20093
20094-static struct apic apic_default = {
20095+static struct apic apic_default __read_only = {
20096
20097 .name = "default",
20098 .probe = probe_default,
20099diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
20100index 77c95c0..434f8a4 100644
20101--- a/arch/x86/kernel/apic/summit_32.c
20102+++ b/arch/x86/kernel/apic/summit_32.c
20103@@ -486,7 +486,7 @@ void setup_summit(void)
20104 }
20105 #endif
20106
20107-static struct apic apic_summit = {
20108+static struct apic apic_summit __read_only = {
20109
20110 .name = "summit",
20111 .probe = probe_summit,
20112diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
20113index 140e29d..d88bc95 100644
20114--- a/arch/x86/kernel/apic/x2apic_cluster.c
20115+++ b/arch/x86/kernel/apic/x2apic_cluster.c
20116@@ -183,7 +183,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
20117 return notifier_from_errno(err);
20118 }
20119
20120-static struct notifier_block __refdata x2apic_cpu_notifier = {
20121+static struct notifier_block x2apic_cpu_notifier = {
20122 .notifier_call = update_clusterinfo,
20123 };
20124
20125@@ -235,7 +235,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
20126 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
20127 }
20128
20129-static struct apic apic_x2apic_cluster = {
20130+static struct apic apic_x2apic_cluster __read_only = {
20131
20132 .name = "cluster x2apic",
20133 .probe = x2apic_cluster_probe,
20134diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
20135index 562a76d..a003c0f 100644
20136--- a/arch/x86/kernel/apic/x2apic_phys.c
20137+++ b/arch/x86/kernel/apic/x2apic_phys.c
20138@@ -89,7 +89,7 @@ static int x2apic_phys_probe(void)
20139 return apic == &apic_x2apic_phys;
20140 }
20141
20142-static struct apic apic_x2apic_phys = {
20143+static struct apic apic_x2apic_phys __read_only = {
20144
20145 .name = "physical x2apic",
20146 .probe = x2apic_phys_probe,
20147diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
20148index ad0dc04..0d9cc56 100644
20149--- a/arch/x86/kernel/apic/x2apic_uv_x.c
20150+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
20151@@ -350,7 +350,7 @@ static int uv_probe(void)
20152 return apic == &apic_x2apic_uv_x;
20153 }
20154
20155-static struct apic __refdata apic_x2apic_uv_x = {
20156+static struct apic apic_x2apic_uv_x __read_only = {
20157
20158 .name = "UV large system",
20159 .probe = uv_probe,
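
Every struct apic instance above drops __refdata and gains __read_only. Under PaX KERNEXEC the annotation places the object in a section that is mapped without write permission, so the function pointers in these dispatch tables cannot be overwritten at runtime; the old __refdata workaround for section-mismatch warnings becomes unnecessary once the structures reference no init-section code. A userspace approximation, assuming an ELF target; the real attribute additionally relies on the kernel mapping the section read-only:

#include <stdio.h>

/* Sketch: approximate the patch's __read_only with a named section.
 * In the hardened kernel that section is also mapped without write
 * permission, which this userspace analogue does not enforce. */
#define __read_only __attribute__((section(".data.read_only")))

struct apic_ops {
	const char *name;
	int (*probe)(void);
};

static int flat_probe(void) { return 1; }

static struct apic_ops apic_flat __read_only = {
	.name	= "flat",
	.probe	= flat_probe,
};

int main(void)
{
	printf("%s -> %d\n", apic_flat.name, apic_flat.probe());
	return 0;
}
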
20160diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
20161index 3ab0343..814c4787 100644
20162--- a/arch/x86/kernel/apm_32.c
20163+++ b/arch/x86/kernel/apm_32.c
20164@@ -433,7 +433,7 @@ static DEFINE_MUTEX(apm_mutex);
20165 * This is for buggy BIOSes that refer to (real mode) segment 0x40
20166 * even though they are called in protected mode.
20167 */
20168-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
20169+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
20170 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
20171
20172 static const char driver_version[] = "1.16ac"; /* no spaces */
20173@@ -611,7 +611,10 @@ static long __apm_bios_call(void *_call)
20174 BUG_ON(cpu != 0);
20175 gdt = get_cpu_gdt_table(cpu);
20176 save_desc_40 = gdt[0x40 / 8];
20177+
20178+ pax_open_kernel();
20179 gdt[0x40 / 8] = bad_bios_desc;
20180+ pax_close_kernel();
20181
20182 apm_irq_save(flags);
20183 APM_DO_SAVE_SEGS;
20184@@ -620,7 +623,11 @@ static long __apm_bios_call(void *_call)
20185 &call->esi);
20186 APM_DO_RESTORE_SEGS;
20187 apm_irq_restore(flags);
20188+
20189+ pax_open_kernel();
20190 gdt[0x40 / 8] = save_desc_40;
20191+ pax_close_kernel();
20192+
20193 put_cpu();
20194
20195 return call->eax & 0xff;
20196@@ -687,7 +694,10 @@ static long __apm_bios_call_simple(void *_call)
20197 BUG_ON(cpu != 0);
20198 gdt = get_cpu_gdt_table(cpu);
20199 save_desc_40 = gdt[0x40 / 8];
20200+
20201+ pax_open_kernel();
20202 gdt[0x40 / 8] = bad_bios_desc;
20203+ pax_close_kernel();
20204
20205 apm_irq_save(flags);
20206 APM_DO_SAVE_SEGS;
20207@@ -695,7 +705,11 @@ static long __apm_bios_call_simple(void *_call)
20208 &call->eax);
20209 APM_DO_RESTORE_SEGS;
20210 apm_irq_restore(flags);
20211+
20212+ pax_open_kernel();
20213 gdt[0x40 / 8] = save_desc_40;
20214+ pax_close_kernel();
20215+
20216 put_cpu();
20217 return error;
20218 }
20219@@ -2362,12 +2376,15 @@ static int __init apm_init(void)
20220 * code to that CPU.
20221 */
20222 gdt = get_cpu_gdt_table(0);
20223+
20224+ pax_open_kernel();
20225 set_desc_base(&gdt[APM_CS >> 3],
20226 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
20227 set_desc_base(&gdt[APM_CS_16 >> 3],
20228 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
20229 set_desc_base(&gdt[APM_DS >> 3],
20230 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
20231+ pax_close_kernel();
20232
20233 proc_create("apm", 0, NULL, &apm_file_ops);
20234
20235diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
20236index 9f6b934..cf5ffb3 100644
20237--- a/arch/x86/kernel/asm-offsets.c
20238+++ b/arch/x86/kernel/asm-offsets.c
20239@@ -32,6 +32,8 @@ void common(void) {
20240 OFFSET(TI_flags, thread_info, flags);
20241 OFFSET(TI_status, thread_info, status);
20242 OFFSET(TI_addr_limit, thread_info, addr_limit);
20243+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
20244+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
20245
20246 BLANK();
20247 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
20248@@ -52,8 +54,26 @@ void common(void) {
20249 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
20250 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
20251 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
20252+
20253+#ifdef CONFIG_PAX_KERNEXEC
20254+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
20255 #endif
20256
20257+#ifdef CONFIG_PAX_MEMORY_UDEREF
20258+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
20259+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
20260+#ifdef CONFIG_X86_64
20261+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
20262+#endif
20263+#endif
20264+
20265+#endif
20266+
20267+ BLANK();
20268+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
20269+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
20270+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
20271+
20272 #ifdef CONFIG_XEN
20273 BLANK();
20274 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
20275diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
20276index e7c798b..2b2019b 100644
20277--- a/arch/x86/kernel/asm-offsets_64.c
20278+++ b/arch/x86/kernel/asm-offsets_64.c
20279@@ -77,6 +77,7 @@ int main(void)
20280 BLANK();
20281 #undef ENTRY
20282
20283+ DEFINE(TSS_size, sizeof(struct tss_struct));
20284 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
20285 BLANK();
20286
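
The new OFFSET(TI_lowest_stack, ...), DEFINE(TI_task_thread_sp0, ...) and DEFINE(TSS_size, ...) lines feed the kernel's asm-offsets machinery: the C file is only ever compiled to assembly, markers in that output are scraped into the generated asm-offsets.h, and the resulting constants become usable from assembly such as the pax_erase_kstack code added to entry_32.S further below. A reduced model of the trick; build with "gcc -S offsets.c" and grep for "->" in offsets.s (the file is intentionally never assembled, since the emitted markers are not valid assembler input):

#include <stddef.h>

/* Minimal model of the kernel's asm-offsets mechanism. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define OFFSET(sym, str, mem)	DEFINE(sym, offsetof(struct str, mem))
#define BLANK()			asm volatile("\n->" : : )

struct thread_info {
	unsigned long flags;
	unsigned long lowest_stack;	/* field added by the PaX patch */
};

int main(void)
{
	DEFINE(THREAD_SIZE_asm, 8192);
	OFFSET(TI_lowest_stack, thread_info, lowest_stack);
	BLANK();
	return 0;
}
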
20287diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
20288index 47b56a7..efc2bc6 100644
20289--- a/arch/x86/kernel/cpu/Makefile
20290+++ b/arch/x86/kernel/cpu/Makefile
20291@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
20292 CFLAGS_REMOVE_perf_event.o = -pg
20293 endif
20294
20295-# Make sure load_percpu_segment has no stackprotector
20296-nostackp := $(call cc-option, -fno-stack-protector)
20297-CFLAGS_common.o := $(nostackp)
20298-
20299 obj-y := intel_cacheinfo.o scattered.o topology.o
20300 obj-y += proc.o capflags.o powerflags.o common.o
20301 obj-y += rdrand.o
20302diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
20303index 59bfebc..d8f27bd 100644
20304--- a/arch/x86/kernel/cpu/amd.c
20305+++ b/arch/x86/kernel/cpu/amd.c
20306@@ -753,7 +753,7 @@ static void init_amd(struct cpuinfo_x86 *c)
20307 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
20308 {
20309 /* AMD errata T13 (order #21922) */
20310- if ((c->x86 == 6)) {
20311+ if (c->x86 == 6) {
20312 /* Duron Rev A0 */
20313 if (c->x86_model == 3 && c->x86_mask == 0)
20314 size = 64;
20315diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
20316index 6abc172..3b0df94 100644
20317--- a/arch/x86/kernel/cpu/common.c
20318+++ b/arch/x86/kernel/cpu/common.c
20319@@ -88,60 +88,6 @@ static const struct cpu_dev default_cpu = {
20320
20321 static const struct cpu_dev *this_cpu = &default_cpu;
20322
20323-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
20324-#ifdef CONFIG_X86_64
20325- /*
20326- * We need valid kernel segments for data and code in long mode too
20327- * IRET will check the segment types kkeil 2000/10/28
20328- * Also sysret mandates a special GDT layout
20329- *
20330- * TLS descriptors are currently at a different place compared to i386.
20331- * Hopefully nobody expects them at a fixed place (Wine?)
20332- */
20333- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
20334- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
20335- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
20336- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
20337- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
20338- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
20339-#else
20340- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
20341- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
20342- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
20343- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
20344- /*
20345- * Segments used for calling PnP BIOS have byte granularity.
20346- * The code segments and data segments have fixed 64k limits,
20347- * the transfer segment sizes are set at run time.
20348- */
20349- /* 32-bit code */
20350- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
20351- /* 16-bit code */
20352- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
20353- /* 16-bit data */
20354- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
20355- /* 16-bit data */
20356- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
20357- /* 16-bit data */
20358- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
20359- /*
20360- * The APM segments have byte granularity and their bases
20361- * are set at run time. All have 64k limits.
20362- */
20363- /* 32-bit code */
20364- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
20365- /* 16-bit code */
20366- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
20367- /* data */
20368- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
20369-
20370- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
20371- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
20372- GDT_STACK_CANARY_INIT
20373-#endif
20374-} };
20375-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
20376-
20377 static int __init x86_xsave_setup(char *s)
20378 {
20379 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
20380@@ -288,6 +234,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
20381 set_in_cr4(X86_CR4_SMAP);
20382 }
20383
20384+#ifdef CONFIG_X86_64
20385+static __init int setup_disable_pcid(char *arg)
20386+{
20387+ setup_clear_cpu_cap(X86_FEATURE_PCID);
20388+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
20389+
20390+#ifdef CONFIG_PAX_MEMORY_UDEREF
20391+ if (clone_pgd_mask != ~(pgdval_t)0UL)
20392+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
20393+#endif
20394+
20395+ return 1;
20396+}
20397+__setup("nopcid", setup_disable_pcid);
20398+
20399+static void setup_pcid(struct cpuinfo_x86 *c)
20400+{
20401+ if (!cpu_has(c, X86_FEATURE_PCID)) {
20402+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
20403+
20404+#ifdef CONFIG_PAX_MEMORY_UDEREF
20405+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
20406+ pax_open_kernel();
20407+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
20408+ pax_close_kernel();
20409+ printk("PAX: slow and weak UDEREF enabled\n");
20410+ } else
20411+ printk("PAX: UDEREF disabled\n");
20412+#endif
20413+
20414+ return;
20415+ }
20416+
20417+ printk("PAX: PCID detected\n");
20418+ set_in_cr4(X86_CR4_PCIDE);
20419+
20420+#ifdef CONFIG_PAX_MEMORY_UDEREF
20421+ pax_open_kernel();
20422+ clone_pgd_mask = ~(pgdval_t)0UL;
20423+ pax_close_kernel();
20424+ if (pax_user_shadow_base)
20425+ printk("PAX: weak UDEREF enabled\n");
20426+ else {
20427+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
20428+ printk("PAX: strong UDEREF enabled\n");
20429+ }
20430+#endif
20431+
20432+ if (cpu_has(c, X86_FEATURE_INVPCID))
20433+ printk("PAX: INVPCID detected\n");
20434+}
20435+#endif
20436+
20437 /*
20438 * Some CPU features depend on higher CPUID levels, which may not always
20439 * be available due to CPUID level capping or broken virtualization
20440@@ -388,7 +387,7 @@ void switch_to_new_gdt(int cpu)
20441 {
20442 struct desc_ptr gdt_descr;
20443
20444- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
20445+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
20446 gdt_descr.size = GDT_SIZE - 1;
20447 load_gdt(&gdt_descr);
20448 /* Reload the per-cpu base */
20449@@ -877,6 +876,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
20450 setup_smep(c);
20451 setup_smap(c);
20452
20453+#ifdef CONFIG_X86_64
20454+ setup_pcid(c);
20455+#endif
20456+
20457 /*
20458 * The vendor-specific functions might have changed features.
20459 * Now we do "generic changes."
20460@@ -885,6 +888,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
20461 /* Filter out anything that depends on CPUID levels we don't have */
20462 filter_cpuid_features(c, true);
20463
20464+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
20465+ setup_clear_cpu_cap(X86_FEATURE_SEP);
20466+#endif
20467+
20468 /* If the model name is still unset, do table lookup. */
20469 if (!c->x86_model_id[0]) {
20470 const char *p;
20471@@ -1072,10 +1079,12 @@ static __init int setup_disablecpuid(char *arg)
20472 }
20473 __setup("clearcpuid=", setup_disablecpuid);
20474
20475+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
20476+EXPORT_PER_CPU_SYMBOL(current_tinfo);
20477+
20478 #ifdef CONFIG_X86_64
20479-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
20480-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
20481- (unsigned long) debug_idt_table };
20482+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
20483+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
20484
20485 DEFINE_PER_CPU_FIRST(union irq_stack_union,
20486 irq_stack_union) __aligned(PAGE_SIZE) __visible;
20487@@ -1089,7 +1098,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
20488 EXPORT_PER_CPU_SYMBOL(current_task);
20489
20490 DEFINE_PER_CPU(unsigned long, kernel_stack) =
20491- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
20492+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
20493 EXPORT_PER_CPU_SYMBOL(kernel_stack);
20494
20495 DEFINE_PER_CPU(char *, irq_stack_ptr) =
20496@@ -1239,7 +1248,7 @@ void cpu_init(void)
20497 load_ucode_ap();
20498
20499 cpu = stack_smp_processor_id();
20500- t = &per_cpu(init_tss, cpu);
20501+ t = init_tss + cpu;
20502 oist = &per_cpu(orig_ist, cpu);
20503
20504 #ifdef CONFIG_NUMA
20505@@ -1274,7 +1283,6 @@ void cpu_init(void)
20506 wrmsrl(MSR_KERNEL_GS_BASE, 0);
20507 barrier();
20508
20509- x86_configure_nx();
20510 enable_x2apic();
20511
20512 /*
20513@@ -1326,7 +1334,7 @@ void cpu_init(void)
20514 {
20515 int cpu = smp_processor_id();
20516 struct task_struct *curr = current;
20517- struct tss_struct *t = &per_cpu(init_tss, cpu);
20518+ struct tss_struct *t = init_tss + cpu;
20519 struct thread_struct *thread = &curr->thread;
20520
20521 show_ucode_info_early();
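
setup_pcid() selects the strongest UDEREF variant the hardware supports: with PCID available, kernel and userland address spaces can be separated without the shadow mapping ("strong UDEREF"), while without it the kernel falls back to shadowing userland at pax_user_shadow_base ("weak"), and the nopcid parameter forces that fallback. The handler itself uses the kernel's standard __setup() command-line hook. A userspace stand-in for that registration mechanism; the real __setup places an entry in the .init.setup section that early boot code walks against the command line:

#include <stdio.h>
#include <string.h>

/* Userspace model of __setup(): a table of (keyword, handler) pairs
 * matched against the boot command line. */
struct kernel_param {
	const char *str;
	int (*fn)(char *arg);
};

static int pcid_enabled = 1;

static int setup_disable_pcid(char *arg)
{
	(void)arg;
	pcid_enabled = 0;	/* the patch also clears INVPCID here */
	return 1;
}

static struct kernel_param params[] = {
	{ "nopcid", setup_disable_pcid },
};

static void parse_cmdline(const char *cmdline)
{
	for (size_t i = 0; i < sizeof(params) / sizeof(params[0]); i++)
		if (strstr(cmdline, params[i].str))
			params[i].fn(NULL);
}

int main(void)
{
	parse_cmdline("root=/dev/sda1 nopcid quiet");
	printf("PCID %s\n", pcid_enabled ? "on" : "forced off");
	return 0;
}
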
20522diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
20523index 0641113..06f5ba4 100644
20524--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
20525+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
20526@@ -1014,6 +1014,22 @@ static struct attribute *default_attrs[] = {
20527 };
20528
20529 #ifdef CONFIG_AMD_NB
20530+static struct attribute *default_attrs_amd_nb[] = {
20531+ &type.attr,
20532+ &level.attr,
20533+ &coherency_line_size.attr,
20534+ &physical_line_partition.attr,
20535+ &ways_of_associativity.attr,
20536+ &number_of_sets.attr,
20537+ &size.attr,
20538+ &shared_cpu_map.attr,
20539+ &shared_cpu_list.attr,
20540+ NULL,
20541+ NULL,
20542+ NULL,
20543+ NULL
20544+};
20545+
20546 static struct attribute **amd_l3_attrs(void)
20547 {
20548 static struct attribute **attrs;
20549@@ -1024,18 +1040,7 @@ static struct attribute **amd_l3_attrs(void)
20550
20551 n = ARRAY_SIZE(default_attrs);
20552
20553- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
20554- n += 2;
20555-
20556- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
20557- n += 1;
20558-
20559- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
20560- if (attrs == NULL)
20561- return attrs = default_attrs;
20562-
20563- for (n = 0; default_attrs[n]; n++)
20564- attrs[n] = default_attrs[n];
20565+ attrs = default_attrs_amd_nb;
20566
20567 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
20568 attrs[n++] = &cache_disable_0.attr;
20569@@ -1086,6 +1091,13 @@ static struct kobj_type ktype_cache = {
20570 .default_attrs = default_attrs,
20571 };
20572
20573+#ifdef CONFIG_AMD_NB
20574+static struct kobj_type ktype_cache_amd_nb = {
20575+ .sysfs_ops = &sysfs_ops,
20576+ .default_attrs = default_attrs_amd_nb,
20577+};
20578+#endif
20579+
20580 static struct kobj_type ktype_percpu_entry = {
20581 .sysfs_ops = &sysfs_ops,
20582 };
20583@@ -1151,20 +1163,26 @@ static int cache_add_dev(struct device *dev)
20584 return retval;
20585 }
20586
20587+#ifdef CONFIG_AMD_NB
20588+ amd_l3_attrs();
20589+#endif
20590+
20591 for (i = 0; i < num_cache_leaves; i++) {
20592+ struct kobj_type *ktype;
20593+
20594 this_object = INDEX_KOBJECT_PTR(cpu, i);
20595 this_object->cpu = cpu;
20596 this_object->index = i;
20597
20598 this_leaf = CPUID4_INFO_IDX(cpu, i);
20599
20600- ktype_cache.default_attrs = default_attrs;
20601+ ktype = &ktype_cache;
20602 #ifdef CONFIG_AMD_NB
20603 if (this_leaf->base.nb)
20604- ktype_cache.default_attrs = amd_l3_attrs();
20605+ ktype = &ktype_cache_amd_nb;
20606 #endif
20607 retval = kobject_init_and_add(&(this_object->kobj),
20608- &ktype_cache,
20609+ ktype,
20610 per_cpu(ici_cache_kobject, cpu),
20611 "index%1lu", i);
20612 if (unlikely(retval)) {
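
The cacheinfo change replaces a runtime-built attribute array with the static default_attrs_amd_nb[], whose trailing NULL entries reserve slots for the optional L3 cache-disable and partitioning attributes. That removes the kzalloc() and, more importantly, stops the code from writing to ktype_cache.default_attrs at runtime, so both kobj_type instances can eventually live in read-only memory. A small sketch of the reserved-NULL-slot pattern:

#include <stdio.h>

/* Sketch: reserve NULL slots in a static, NULL-terminated pointer
 * array so optional entries can be appended without kmalloc. */
static const char *base_attrs[] = {
	"type", "level", "size",
	NULL, NULL, NULL,	/* room for 2 optional entries + terminator */
};

int main(void)
{
	int n = 0;
	while (base_attrs[n])
		n++;			/* find current end */

	int have_l3_disable = 1;	/* stands in for amd_nb_has_feature() */
	if (have_l3_disable) {
		base_attrs[n++] = "cache_disable_0";
		base_attrs[n++] = "cache_disable_1";
	}

	for (int i = 0; base_attrs[i]; i++)
		printf("%s\n", base_attrs[i]);
	return 0;
}
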
20613diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
20614index b3218cd..99a75de 100644
20615--- a/arch/x86/kernel/cpu/mcheck/mce.c
20616+++ b/arch/x86/kernel/cpu/mcheck/mce.c
20617@@ -45,6 +45,7 @@
20618 #include <asm/processor.h>
20619 #include <asm/mce.h>
20620 #include <asm/msr.h>
20621+#include <asm/local.h>
20622
20623 #include "mce-internal.h"
20624
20625@@ -258,7 +259,7 @@ static void print_mce(struct mce *m)
20626 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
20627 m->cs, m->ip);
20628
20629- if (m->cs == __KERNEL_CS)
20630+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
20631 print_symbol("{%s}", m->ip);
20632 pr_cont("\n");
20633 }
20634@@ -291,10 +292,10 @@ static void print_mce(struct mce *m)
20635
20636 #define PANIC_TIMEOUT 5 /* 5 seconds */
20637
20638-static atomic_t mce_paniced;
20639+static atomic_unchecked_t mce_paniced;
20640
20641 static int fake_panic;
20642-static atomic_t mce_fake_paniced;
20643+static atomic_unchecked_t mce_fake_paniced;
20644
20645 /* Panic in progress. Enable interrupts and wait for final IPI */
20646 static void wait_for_panic(void)
20647@@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
20648 /*
20649 * Make sure only one CPU runs in machine check panic
20650 */
20651- if (atomic_inc_return(&mce_paniced) > 1)
20652+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
20653 wait_for_panic();
20654 barrier();
20655
20656@@ -326,7 +327,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
20657 console_verbose();
20658 } else {
20659 /* Don't log too much for fake panic */
20660- if (atomic_inc_return(&mce_fake_paniced) > 1)
20661+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
20662 return;
20663 }
20664 /* First print corrected ones that are still unlogged */
20665@@ -365,7 +366,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
20666 if (!fake_panic) {
20667 if (panic_timeout == 0)
20668 panic_timeout = mca_cfg.panic_timeout;
20669- panic(msg);
20670+ panic("%s", msg);
20671 } else
20672 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
20673 }
20674@@ -695,7 +696,7 @@ static int mce_timed_out(u64 *t)
20675 * might have been modified by someone else.
20676 */
20677 rmb();
20678- if (atomic_read(&mce_paniced))
20679+ if (atomic_read_unchecked(&mce_paniced))
20680 wait_for_panic();
20681 if (!mca_cfg.monarch_timeout)
20682 goto out;
20683@@ -1666,7 +1667,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
20684 }
20685
20686 /* Call the installed machine check handler for this CPU setup. */
20687-void (*machine_check_vector)(struct pt_regs *, long error_code) =
20688+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
20689 unexpected_machine_check;
20690
20691 /*
20692@@ -1689,7 +1690,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
20693 return;
20694 }
20695
20696+ pax_open_kernel();
20697 machine_check_vector = do_machine_check;
20698+ pax_close_kernel();
20699
20700 __mcheck_cpu_init_generic();
20701 __mcheck_cpu_init_vendor(c);
20702@@ -1703,7 +1706,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
20703 */
20704
20705 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
20706-static int mce_chrdev_open_count; /* #times opened */
20707+static local_t mce_chrdev_open_count; /* #times opened */
20708 static int mce_chrdev_open_exclu; /* already open exclusive? */
20709
20710 static int mce_chrdev_open(struct inode *inode, struct file *file)
20711@@ -1711,7 +1714,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
20712 spin_lock(&mce_chrdev_state_lock);
20713
20714 if (mce_chrdev_open_exclu ||
20715- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
20716+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
20717 spin_unlock(&mce_chrdev_state_lock);
20718
20719 return -EBUSY;
20720@@ -1719,7 +1722,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
20721
20722 if (file->f_flags & O_EXCL)
20723 mce_chrdev_open_exclu = 1;
20724- mce_chrdev_open_count++;
20725+ local_inc(&mce_chrdev_open_count);
20726
20727 spin_unlock(&mce_chrdev_state_lock);
20728
20729@@ -1730,7 +1733,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
20730 {
20731 spin_lock(&mce_chrdev_state_lock);
20732
20733- mce_chrdev_open_count--;
20734+ local_dec(&mce_chrdev_open_count);
20735 mce_chrdev_open_exclu = 0;
20736
20737 spin_unlock(&mce_chrdev_state_lock);
20738@@ -2404,7 +2407,7 @@ static __init void mce_init_banks(void)
20739
20740 for (i = 0; i < mca_cfg.banks; i++) {
20741 struct mce_bank *b = &mce_banks[i];
20742- struct device_attribute *a = &b->attr;
20743+ device_attribute_no_const *a = &b->attr;
20744
20745 sysfs_attr_init(&a->attr);
20746 a->attr.name = b->attrname;
20747@@ -2472,7 +2475,7 @@ struct dentry *mce_get_debugfs_dir(void)
20748 static void mce_reset(void)
20749 {
20750 cpu_missing = 0;
20751- atomic_set(&mce_fake_paniced, 0);
20752+ atomic_set_unchecked(&mce_fake_paniced, 0);
20753 atomic_set(&mce_executing, 0);
20754 atomic_set(&mce_callin, 0);
20755 atomic_set(&global_nwo, 0);
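
mce_chrdev_open_count changes from a plain int to local_t with local_read()/local_inc()/local_dec(). The counter is already serialized by mce_chrdev_state_lock, so this is not a race fix; the likely motivation, consistent with the rest of the patch, is to keep this bookkeeping out of the overflow-instrumented integer class while keeping the operations cheap. A userspace model of the local_t interface; the real kernel ops are single instructions that are safe against interruption on the owning CPU:

#include <stdio.h>

/* Userspace model of the kernel's local_t: a long wrapped in a
 * struct with accessor functions. */
typedef struct { long counter; } local_t;

static long local_read(const local_t *l)	{ return l->counter; }
static void local_inc(local_t *l)		{ l->counter++; }
static void local_dec(local_t *l)		{ l->counter--; }

static local_t mce_chrdev_open_count;	/* #times opened, as in the patch */

int main(void)
{
	local_inc(&mce_chrdev_open_count);	/* open() */
	printf("open count: %ld\n", local_read(&mce_chrdev_open_count));
	local_dec(&mce_chrdev_open_count);	/* release() */
	printf("open count: %ld\n", local_read(&mce_chrdev_open_count));
	return 0;
}
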
20756diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
20757index 1c044b1..37a2a43 100644
20758--- a/arch/x86/kernel/cpu/mcheck/p5.c
20759+++ b/arch/x86/kernel/cpu/mcheck/p5.c
20760@@ -11,6 +11,7 @@
20761 #include <asm/processor.h>
20762 #include <asm/mce.h>
20763 #include <asm/msr.h>
20764+#include <asm/pgtable.h>
20765
20766 /* By default disabled */
20767 int mce_p5_enabled __read_mostly;
20768@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
20769 if (!cpu_has(c, X86_FEATURE_MCE))
20770 return;
20771
20772+ pax_open_kernel();
20773 machine_check_vector = pentium_machine_check;
20774+ pax_close_kernel();
20775 /* Make sure the vector pointer is visible before we enable MCEs: */
20776 wmb();
20777
20778diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
20779index e9a701a..35317d6 100644
20780--- a/arch/x86/kernel/cpu/mcheck/winchip.c
20781+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
20782@@ -10,6 +10,7 @@
20783 #include <asm/processor.h>
20784 #include <asm/mce.h>
20785 #include <asm/msr.h>
20786+#include <asm/pgtable.h>
20787
20788 /* Machine check handler for WinChip C6: */
20789 static void winchip_machine_check(struct pt_regs *regs, long error_code)
20790@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
20791 {
20792 u32 lo, hi;
20793
20794+ pax_open_kernel();
20795 machine_check_vector = winchip_machine_check;
20796+ pax_close_kernel();
20797 /* Make sure the vector pointer is visible before we enable MCEs: */
20798 wmb();
20799
20800diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
20801index f961de9..8a9d332 100644
20802--- a/arch/x86/kernel/cpu/mtrr/main.c
20803+++ b/arch/x86/kernel/cpu/mtrr/main.c
20804@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
20805 u64 size_or_mask, size_and_mask;
20806 static bool mtrr_aps_delayed_init;
20807
20808-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
20809+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
20810
20811 const struct mtrr_ops *mtrr_if;
20812
20813diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
20814index df5e41f..816c719 100644
20815--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
20816+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
20817@@ -25,7 +25,7 @@ struct mtrr_ops {
20818 int (*validate_add_page)(unsigned long base, unsigned long size,
20819 unsigned int type);
20820 int (*have_wrcomb)(void);
20821-};
20822+} __do_const;
20823
20824 extern int generic_get_free_region(unsigned long base, unsigned long size,
20825 int replace_reg);
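
The __do_const marker on struct mtrr_ops is consumed by grsecurity's constify GCC plugin: structures consisting almost entirely of function pointers are forced const so every instance lands in .rodata, and the *_no_const companion typedefs seen elsewhere in this patch (attribute_group_no_const, device_attribute_no_const) opt individual variables back out when they legitimately must be written during init. The effect is the classic constified ops-table pattern, sketched here by hand:

#include <stdio.h>

/* Sketch of the constified-ops pattern __do_const automates: make the
 * ops table const so its function pointers live in .rodata. */
struct mtrr_ops {
	int vendor;
	int (*have_wrcomb)(void);
};

static int generic_have_wrcomb(void) { return 1; }

static const struct mtrr_ops generic_mtrr_ops = {
	.vendor		= 0,
	.have_wrcomb	= generic_have_wrcomb,
};

static const struct mtrr_ops *mtrr_if = &generic_mtrr_ops;

int main(void)
{
	printf("wrcomb: %d\n", mtrr_if->have_wrcomb());
	return 0;
}
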
20826diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
20827index 8e13293..9bfd68c 100644
20828--- a/arch/x86/kernel/cpu/perf_event.c
20829+++ b/arch/x86/kernel/cpu/perf_event.c
20830@@ -1348,7 +1348,7 @@ static void __init pmu_check_apic(void)
20831 pr_info("no hardware sampling interrupt available.\n");
20832 }
20833
20834-static struct attribute_group x86_pmu_format_group = {
20835+static attribute_group_no_const x86_pmu_format_group = {
20836 .name = "format",
20837 .attrs = NULL,
20838 };
20839@@ -1447,7 +1447,7 @@ static struct attribute *events_attr[] = {
20840 NULL,
20841 };
20842
20843-static struct attribute_group x86_pmu_events_group = {
20844+static attribute_group_no_const x86_pmu_events_group = {
20845 .name = "events",
20846 .attrs = events_attr,
20847 };
20848@@ -1958,7 +1958,7 @@ static unsigned long get_segment_base(unsigned int segment)
20849 if (idx > GDT_ENTRIES)
20850 return 0;
20851
20852- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
20853+ desc = get_cpu_gdt_table(smp_processor_id());
20854 }
20855
20856 return get_desc_base(desc + idx);
20857@@ -2048,7 +2048,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
20858 break;
20859
20860 perf_callchain_store(entry, frame.return_address);
20861- fp = frame.next_frame;
20862+ fp = (const void __force_user *)frame.next_frame;
20863 }
20864 }
20865
20866diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
20867index 639d128..e92d7e5 100644
20868--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
20869+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
20870@@ -405,7 +405,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
20871 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
20872 {
20873 struct attribute **attrs;
20874- struct attribute_group *attr_group;
20875+ attribute_group_no_const *attr_group;
20876 int i = 0, j;
20877
20878 while (amd_iommu_v2_event_descs[i].attr.attr.name)
20879diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
20880index 0fa4f24..17990ed 100644
20881--- a/arch/x86/kernel/cpu/perf_event_intel.c
20882+++ b/arch/x86/kernel/cpu/perf_event_intel.c
20883@@ -2314,10 +2314,10 @@ __init int intel_pmu_init(void)
20884 * v2 and above have a perf capabilities MSR
20885 */
20886 if (version > 1) {
20887- u64 capabilities;
20888+ u64 capabilities = x86_pmu.intel_cap.capabilities;
20889
20890- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
20891- x86_pmu.intel_cap.capabilities = capabilities;
20892+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
20893+ x86_pmu.intel_cap.capabilities = capabilities;
20894 }
20895
20896 intel_ds_init();
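
The perf change guards against hypervisors that advertise PMU version 2+ but do not implement MSR_IA32_PERF_CAPABILITIES: a plain rdmsrl() would take an unhandled #GP, whereas rdmsrl_safe() catches the fault via an exception-table entry and returns an error, in which case the rewritten code restores the previously saved capabilities value. A sketch of that read-with-fallback shape, with msr_read_safe() standing in for the real accessor:

#include <stdio.h>

/* Sketch of the rdmsrl_safe() fallback pattern; msr_read_safe() is a
 * stand-in for the real MSR accessor, which traps #GP through an
 * exception-table entry and returns nonzero on failure. */
static int msr_read_safe(unsigned int msr, unsigned long long *val)
{
	(void)msr;
	(void)val;
	return -1;	/* simulate a hypervisor lacking the MSR */
}

int main(void)
{
	unsigned long long capabilities = 0;	/* pre-initialized default */
	unsigned long long saved = capabilities;

	if (msr_read_safe(0x345 /* MSR_IA32_PERF_CAPABILITIES */, &capabilities))
		capabilities = saved;		/* keep the default on fault */

	printf("capabilities = %llu\n", capabilities);
	return 0;
}
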
20897diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
20898index 29c2487..a5606fa 100644
20899--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
20900+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
20901@@ -3318,7 +3318,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
20902 static int __init uncore_type_init(struct intel_uncore_type *type)
20903 {
20904 struct intel_uncore_pmu *pmus;
20905- struct attribute_group *attr_group;
20906+ attribute_group_no_const *attr_group;
20907 struct attribute **attrs;
20908 int i, j;
20909
20910diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
20911index a80ab71..4089da5 100644
20912--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
20913+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
20914@@ -498,7 +498,7 @@ struct intel_uncore_box {
20915 struct uncore_event_desc {
20916 struct kobj_attribute attr;
20917 const char *config;
20918-};
20919+} __do_const;
20920
20921 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
20922 { \
20923diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
20924index 7d9481c..99c7e4b 100644
20925--- a/arch/x86/kernel/cpuid.c
20926+++ b/arch/x86/kernel/cpuid.c
20927@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
20928 return notifier_from_errno(err);
20929 }
20930
20931-static struct notifier_block __refdata cpuid_class_cpu_notifier =
20932+static struct notifier_block cpuid_class_cpu_notifier =
20933 {
20934 .notifier_call = cpuid_class_cpu_callback,
20935 };
20936diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
20937index 18677a9..f67c45b 100644
20938--- a/arch/x86/kernel/crash.c
20939+++ b/arch/x86/kernel/crash.c
20940@@ -58,10 +58,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
20941 {
20942 #ifdef CONFIG_X86_32
20943 struct pt_regs fixed_regs;
20944-#endif
20945
20946-#ifdef CONFIG_X86_32
20947- if (!user_mode_vm(regs)) {
20948+ if (!user_mode(regs)) {
20949 crash_fixup_ss_esp(&fixed_regs, regs);
20950 regs = &fixed_regs;
20951 }
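
The crash, dumpstack and die() paths in this area all switch from user_mode_vm() to user_mode(). Upstream's 32-bit user_mode() tests only the CS RPL and can misclassify vm86 tasks, which user_mode_vm() handles by also checking EFLAGS.VM; the patch presumably relies on PaX's segment setup making the plain RPL test sufficient, but treat that rationale as an assumption. A sketch of the upstream distinction between the two predicates:

#include <stdio.h>

#define SEGMENT_RPL_MASK	0x3
#define USER_RPL		0x3
#define X86_EFLAGS_VM		(1UL << 17)

struct pt_regs { unsigned long cs, flags; };

/* Models the upstream x86-32 semantics only; the hardened kernel's
 * definitions differ, which is what makes the substitution safe there. */
static int user_mode(const struct pt_regs *r)
{
	return (r->cs & SEGMENT_RPL_MASK) == USER_RPL;
}

static int user_mode_vm(const struct pt_regs *r)
{
	return (r->flags & X86_EFLAGS_VM) || user_mode(r);
}

int main(void)
{
	struct pt_regs vm86 = { .cs = 0x0, .flags = X86_EFLAGS_VM };

	/* vm86 task: RPL test says kernel, VM-flag test says user */
	printf("user_mode=%d user_mode_vm=%d\n",
	       user_mode(&vm86), user_mode_vm(&vm86));
	return 0;
}
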
20952diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
20953index afa64ad..dce67dd 100644
20954--- a/arch/x86/kernel/crash_dump_64.c
20955+++ b/arch/x86/kernel/crash_dump_64.c
20956@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
20957 return -ENOMEM;
20958
20959 if (userbuf) {
20960- if (copy_to_user(buf, vaddr + offset, csize)) {
20961+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
20962 iounmap(vaddr);
20963 return -EFAULT;
20964 }
20965diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
20966index 5d3fe8d..02e1429 100644
20967--- a/arch/x86/kernel/doublefault.c
20968+++ b/arch/x86/kernel/doublefault.c
20969@@ -13,7 +13,7 @@
20970
20971 #define DOUBLEFAULT_STACKSIZE (1024)
20972 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
20973-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
20974+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
20975
20976 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
20977
20978@@ -23,7 +23,7 @@ static void doublefault_fn(void)
20979 unsigned long gdt, tss;
20980
20981 native_store_gdt(&gdt_desc);
20982- gdt = gdt_desc.address;
20983+ gdt = (unsigned long)gdt_desc.address;
20984
20985 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
20986
20987@@ -60,10 +60,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
20988 /* 0x2 bit is always set */
20989 .flags = X86_EFLAGS_SF | 0x2,
20990 .sp = STACK_START,
20991- .es = __USER_DS,
20992+ .es = __KERNEL_DS,
20993 .cs = __KERNEL_CS,
20994 .ss = __KERNEL_DS,
20995- .ds = __USER_DS,
20996+ .ds = __KERNEL_DS,
20997 .fs = __KERNEL_PERCPU,
20998
20999 .__cr3 = __pa_nodebug(swapper_pg_dir),
21000diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
21001index d9c12d3..7858b62 100644
21002--- a/arch/x86/kernel/dumpstack.c
21003+++ b/arch/x86/kernel/dumpstack.c
21004@@ -2,6 +2,9 @@
21005 * Copyright (C) 1991, 1992 Linus Torvalds
21006 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
21007 */
21008+#ifdef CONFIG_GRKERNSEC_HIDESYM
21009+#define __INCLUDED_BY_HIDESYM 1
21010+#endif
21011 #include <linux/kallsyms.h>
21012 #include <linux/kprobes.h>
21013 #include <linux/uaccess.h>
21014@@ -40,16 +43,14 @@ void printk_address(unsigned long address)
21015 static void
21016 print_ftrace_graph_addr(unsigned long addr, void *data,
21017 const struct stacktrace_ops *ops,
21018- struct thread_info *tinfo, int *graph)
21019+ struct task_struct *task, int *graph)
21020 {
21021- struct task_struct *task;
21022 unsigned long ret_addr;
21023 int index;
21024
21025 if (addr != (unsigned long)return_to_handler)
21026 return;
21027
21028- task = tinfo->task;
21029 index = task->curr_ret_stack;
21030
21031 if (!task->ret_stack || index < *graph)
21032@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21033 static inline void
21034 print_ftrace_graph_addr(unsigned long addr, void *data,
21035 const struct stacktrace_ops *ops,
21036- struct thread_info *tinfo, int *graph)
21037+ struct task_struct *task, int *graph)
21038 { }
21039 #endif
21040
21041@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21042 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
21043 */
21044
21045-static inline int valid_stack_ptr(struct thread_info *tinfo,
21046- void *p, unsigned int size, void *end)
21047+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
21048 {
21049- void *t = tinfo;
21050 if (end) {
21051 if (p < end && p >= (end-THREAD_SIZE))
21052 return 1;
21053@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
21054 }
21055
21056 unsigned long
21057-print_context_stack(struct thread_info *tinfo,
21058+print_context_stack(struct task_struct *task, void *stack_start,
21059 unsigned long *stack, unsigned long bp,
21060 const struct stacktrace_ops *ops, void *data,
21061 unsigned long *end, int *graph)
21062 {
21063 struct stack_frame *frame = (struct stack_frame *)bp;
21064
21065- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
21066+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
21067 unsigned long addr;
21068
21069 addr = *stack;
21070@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
21071 } else {
21072 ops->address(data, addr, 0);
21073 }
21074- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
21075+ print_ftrace_graph_addr(addr, data, ops, task, graph);
21076 }
21077 stack++;
21078 }
21079@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
21080 EXPORT_SYMBOL_GPL(print_context_stack);
21081
21082 unsigned long
21083-print_context_stack_bp(struct thread_info *tinfo,
21084+print_context_stack_bp(struct task_struct *task, void *stack_start,
21085 unsigned long *stack, unsigned long bp,
21086 const struct stacktrace_ops *ops, void *data,
21087 unsigned long *end, int *graph)
21088@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
21089 struct stack_frame *frame = (struct stack_frame *)bp;
21090 unsigned long *ret_addr = &frame->return_address;
21091
21092- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
21093+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
21094 unsigned long addr = *ret_addr;
21095
21096 if (!__kernel_text_address(addr))
21097@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
21098 ops->address(data, addr, 1);
21099 frame = frame->next_frame;
21100 ret_addr = &frame->return_address;
21101- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
21102+ print_ftrace_graph_addr(addr, data, ops, task, graph);
21103 }
21104
21105 return (unsigned long)frame;
21106@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
21107 static void print_trace_address(void *data, unsigned long addr, int reliable)
21108 {
21109 touch_nmi_watchdog();
21110- printk(data);
21111+ printk("%s", (char *)data);
21112 printk_stack_address(addr, reliable);
21113 }
21114
21115@@ -224,6 +223,8 @@ unsigned __kprobes long oops_begin(void)
21116 }
21117 EXPORT_SYMBOL_GPL(oops_begin);
21118
21119+extern void gr_handle_kernel_exploit(void);
21120+
21121 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
21122 {
21123 if (regs && kexec_should_crash(current))
21124@@ -245,7 +246,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
21125 panic("Fatal exception in interrupt");
21126 if (panic_on_oops)
21127 panic("Fatal exception");
21128- do_exit(signr);
21129+
21130+ gr_handle_kernel_exploit();
21131+
21132+ do_group_exit(signr);
21133 }
21134
21135 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
21136@@ -273,7 +277,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
21137 print_modules();
21138 show_regs(regs);
21139 #ifdef CONFIG_X86_32
21140- if (user_mode_vm(regs)) {
21141+ if (user_mode(regs)) {
21142 sp = regs->sp;
21143 ss = regs->ss & 0xffff;
21144 } else {
21145@@ -301,7 +305,7 @@ void die(const char *str, struct pt_regs *regs, long err)
21146 unsigned long flags = oops_begin();
21147 int sig = SIGSEGV;
21148
21149- if (!user_mode_vm(regs))
21150+ if (!user_mode(regs))
21151 report_bug(regs->ip, regs);
21152
21153 if (__die(str, regs, err))
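
The printk(data) to printk("%s", (char *)data) conversion above (and the matching panic("%s", msg) changes in e820.c below) closes a format-string hole: when caller-supplied text is used as the format, any stray % conversion makes the function read varargs that were never passed. A short userspace illustration of why only the fixed form is safe:

#include <stdio.h>

int main(void)
{
	const char *data = "oops at %s:%d\n";	/* hostile or accidental '%' */

	/* printf(data); would read two missing varargs: undefined
	 * behavior, and a warning under -Wformat-security. */
	printf("%s", data);			/* safe: printed verbatim */
	return 0;
}
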
21154diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
21155index f2a1770..540657f 100644
21156--- a/arch/x86/kernel/dumpstack_32.c
21157+++ b/arch/x86/kernel/dumpstack_32.c
21158@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21159 bp = stack_frame(task, regs);
21160
21161 for (;;) {
21162- struct thread_info *context;
21163+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
21164
21165- context = (struct thread_info *)
21166- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
21167- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
21168+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
21169
21170- stack = (unsigned long *)context->previous_esp;
21171- if (!stack)
21172+ if (stack_start == task_stack_page(task))
21173 break;
21174+ stack = *(unsigned long **)stack_start;
21175 if (ops->stack(data, "IRQ") < 0)
21176 break;
21177 touch_nmi_watchdog();
21178@@ -87,27 +85,28 @@ void show_regs(struct pt_regs *regs)
21179 int i;
21180
21181 show_regs_print_info(KERN_EMERG);
21182- __show_regs(regs, !user_mode_vm(regs));
21183+ __show_regs(regs, !user_mode(regs));
21184
21185 /*
21186 * When in-kernel, we also print out the stack and code at the
21187 * time of the fault..
21188 */
21189- if (!user_mode_vm(regs)) {
21190+ if (!user_mode(regs)) {
21191 unsigned int code_prologue = code_bytes * 43 / 64;
21192 unsigned int code_len = code_bytes;
21193 unsigned char c;
21194 u8 *ip;
21195+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
21196
21197 pr_emerg("Stack:\n");
21198 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
21199
21200 pr_emerg("Code:");
21201
21202- ip = (u8 *)regs->ip - code_prologue;
21203+ ip = (u8 *)regs->ip - code_prologue + cs_base;
21204 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
21205 /* try starting at IP */
21206- ip = (u8 *)regs->ip;
21207+ ip = (u8 *)regs->ip + cs_base;
21208 code_len = code_len - code_prologue + 1;
21209 }
21210 for (i = 0; i < code_len; i++, ip++) {
21211@@ -116,7 +115,7 @@ void show_regs(struct pt_regs *regs)
21212 pr_cont(" Bad EIP value.");
21213 break;
21214 }
21215- if (ip == (u8 *)regs->ip)
21216+ if (ip == (u8 *)regs->ip + cs_base)
21217 pr_cont(" <%02x>", c);
21218 else
21219 pr_cont(" %02x", c);
21220@@ -129,6 +128,7 @@ int is_valid_bugaddr(unsigned long ip)
21221 {
21222 unsigned short ud2;
21223
21224+ ip = ktla_ktva(ip);
21225 if (ip < PAGE_OFFSET)
21226 return 0;
21227 if (probe_kernel_address((unsigned short *)ip, ud2))
21228@@ -136,3 +136,15 @@ int is_valid_bugaddr(unsigned long ip)
21229
21230 return ud2 == 0x0b0f;
21231 }
21232+
21233+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
21234+void pax_check_alloca(unsigned long size)
21235+{
21236+ unsigned long sp = (unsigned long)&sp, stack_left;
21237+
21238+ /* all kernel stacks are of the same size */
21239+ stack_left = sp & (THREAD_SIZE - 1);
21240+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
21241+}
21242+EXPORT_SYMBOL(pax_check_alloca);
21243+#endif
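
pax_check_alloca() is the STACKLEAK runtime guard added at the end of dumpstack_32.c (a multi-stack 64-bit variant follows in dumpstack_64.c): it takes the address of a local variable as the current stack pointer, exploits the fact that kernel stacks are THREAD_SIZE-sized and THREAD_SIZE-aligned to compute the remaining room, and BUG()s if a pending variable-size allocation would come within 256 bytes of the bottom. A userspace model of the arithmetic, assuming an aligned stack of known size (which userspace cannot actually guarantee, so the stack pointer is passed in explicitly):

#include <stdio.h>

#define THREAD_SIZE 8192UL	/* assumed kernel stack size, power of two */

/* Model of pax_check_alloca(): on an aligned stack,
 * (sp & (THREAD_SIZE - 1)) is exactly the bytes free below sp.
 * The kernel BUG()s; here we just report the verdict. */
static int alloca_would_overflow(unsigned long sp, unsigned long size)
{
	unsigned long stack_left = sp & (THREAD_SIZE - 1);

	return stack_left < 256 || size >= stack_left - 256;
}

int main(void)
{
	unsigned long sp = 0x10000000UL + 1000;	/* 1000 bytes above bottom */

	printf("alloca(64):  %s\n",
	       alloca_would_overflow(sp, 64)  ? "BUG()" : "ok");
	printf("alloca(900): %s\n",
	       alloca_would_overflow(sp, 900) ? "BUG()" : "ok");
	return 0;
}
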
21244diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
21245index addb207..99635fa 100644
21246--- a/arch/x86/kernel/dumpstack_64.c
21247+++ b/arch/x86/kernel/dumpstack_64.c
21248@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21249 unsigned long *irq_stack_end =
21250 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
21251 unsigned used = 0;
21252- struct thread_info *tinfo;
21253 int graph = 0;
21254 unsigned long dummy;
21255+ void *stack_start;
21256
21257 if (!task)
21258 task = current;
21259@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21260 * current stack address. If the stacks consist of nested
21261 * exceptions
21262 */
21263- tinfo = task_thread_info(task);
21264 for (;;) {
21265 char *id;
21266 unsigned long *estack_end;
21267+
21268 estack_end = in_exception_stack(cpu, (unsigned long)stack,
21269 &used, &id);
21270
21271@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21272 if (ops->stack(data, id) < 0)
21273 break;
21274
21275- bp = ops->walk_stack(tinfo, stack, bp, ops,
21276+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
21277 data, estack_end, &graph);
21278 ops->stack(data, "<EOE>");
21279 /*
21280@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21281 * second-to-last pointer (index -2 to end) in the
21282 * exception stack:
21283 */
21284+ if ((u16)estack_end[-1] != __KERNEL_DS)
21285+ goto out;
21286 stack = (unsigned long *) estack_end[-2];
21287 continue;
21288 }
21289@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21290 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
21291 if (ops->stack(data, "IRQ") < 0)
21292 break;
21293- bp = ops->walk_stack(tinfo, stack, bp,
21294+ bp = ops->walk_stack(task, irq_stack, stack, bp,
21295 ops, data, irq_stack_end, &graph);
21296 /*
21297 * We link to the next stack (which would be
21298@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21299 /*
21300 * This handles the process stack:
21301 */
21302- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
21303+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
21304+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
21305+out:
21306 put_cpu();
21307 }
21308 EXPORT_SYMBOL(dump_trace);
21309@@ -300,3 +304,50 @@ int is_valid_bugaddr(unsigned long ip)
21310
21311 return ud2 == 0x0b0f;
21312 }
21313+
21314+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
21315+void pax_check_alloca(unsigned long size)
21316+{
21317+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
21318+ unsigned cpu, used;
21319+ char *id;
21320+
21321+ /* check the process stack first */
21322+ stack_start = (unsigned long)task_stack_page(current);
21323+ stack_end = stack_start + THREAD_SIZE;
21324+ if (likely(stack_start <= sp && sp < stack_end)) {
21325+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
21326+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
21327+ return;
21328+ }
21329+
21330+ cpu = get_cpu();
21331+
21332+ /* check the irq stacks */
21333+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
21334+ stack_start = stack_end - IRQ_STACK_SIZE;
21335+ if (stack_start <= sp && sp < stack_end) {
21336+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
21337+ put_cpu();
21338+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
21339+ return;
21340+ }
21341+
21342+ /* check the exception stacks */
21343+ used = 0;
21344+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
21345+ stack_start = stack_end - EXCEPTION_STKSZ;
21346+ if (stack_end && stack_start <= sp && sp < stack_end) {
21347+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
21348+ put_cpu();
21349+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
21350+ return;
21351+ }
21352+
21353+ put_cpu();
21354+
21355+ /* unknown stack */
21356+ BUG();
21357+}
21358+EXPORT_SYMBOL(pax_check_alloca);
21359+#endif
21360diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
21361index 174da5f..5e55606 100644
21362--- a/arch/x86/kernel/e820.c
21363+++ b/arch/x86/kernel/e820.c
21364@@ -803,8 +803,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
21365
21366 static void early_panic(char *msg)
21367 {
21368- early_printk(msg);
21369- panic(msg);
21370+ early_printk("%s", msg);
21371+ panic("%s", msg);
21372 }
21373
21374 static int userdef __initdata;
21375diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
21376index 01d1c18..8073693 100644
21377--- a/arch/x86/kernel/early_printk.c
21378+++ b/arch/x86/kernel/early_printk.c
21379@@ -7,6 +7,7 @@
21380 #include <linux/pci_regs.h>
21381 #include <linux/pci_ids.h>
21382 #include <linux/errno.h>
21383+#include <linux/sched.h>
21384 #include <asm/io.h>
21385 #include <asm/processor.h>
21386 #include <asm/fcntl.h>
21387diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
21388index a2a4f46..6cab058 100644
21389--- a/arch/x86/kernel/entry_32.S
21390+++ b/arch/x86/kernel/entry_32.S
21391@@ -177,13 +177,153 @@
21392 /*CFI_REL_OFFSET gs, PT_GS*/
21393 .endm
21394 .macro SET_KERNEL_GS reg
21395+
21396+#ifdef CONFIG_CC_STACKPROTECTOR
21397 movl $(__KERNEL_STACK_CANARY), \reg
21398+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
21399+ movl $(__USER_DS), \reg
21400+#else
21401+ xorl \reg, \reg
21402+#endif
21403+
21404 movl \reg, %gs
21405 .endm
21406
21407 #endif /* CONFIG_X86_32_LAZY_GS */
21408
21409-.macro SAVE_ALL
21410+.macro pax_enter_kernel
21411+#ifdef CONFIG_PAX_KERNEXEC
21412+ call pax_enter_kernel
21413+#endif
21414+.endm
21415+
21416+.macro pax_exit_kernel
21417+#ifdef CONFIG_PAX_KERNEXEC
21418+ call pax_exit_kernel
21419+#endif
21420+.endm
21421+
21422+#ifdef CONFIG_PAX_KERNEXEC
21423+ENTRY(pax_enter_kernel)
21424+#ifdef CONFIG_PARAVIRT
21425+ pushl %eax
21426+ pushl %ecx
21427+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
21428+ mov %eax, %esi
21429+#else
21430+ mov %cr0, %esi
21431+#endif
21432+ bts $16, %esi
21433+ jnc 1f
21434+ mov %cs, %esi
21435+ cmp $__KERNEL_CS, %esi
21436+ jz 3f
21437+ ljmp $__KERNEL_CS, $3f
21438+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
21439+2:
21440+#ifdef CONFIG_PARAVIRT
21441+ mov %esi, %eax
21442+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
21443+#else
21444+ mov %esi, %cr0
21445+#endif
21446+3:
21447+#ifdef CONFIG_PARAVIRT
21448+ popl %ecx
21449+ popl %eax
21450+#endif
21451+ ret
21452+ENDPROC(pax_enter_kernel)
21453+
21454+ENTRY(pax_exit_kernel)
21455+#ifdef CONFIG_PARAVIRT
21456+ pushl %eax
21457+ pushl %ecx
21458+#endif
21459+ mov %cs, %esi
21460+ cmp $__KERNEXEC_KERNEL_CS, %esi
21461+ jnz 2f
21462+#ifdef CONFIG_PARAVIRT
21463+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
21464+ mov %eax, %esi
21465+#else
21466+ mov %cr0, %esi
21467+#endif
21468+ btr $16, %esi
21469+ ljmp $__KERNEL_CS, $1f
21470+1:
21471+#ifdef CONFIG_PARAVIRT
21472+ mov %esi, %eax
21473+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
21474+#else
21475+ mov %esi, %cr0
21476+#endif
21477+2:
21478+#ifdef CONFIG_PARAVIRT
21479+ popl %ecx
21480+ popl %eax
21481+#endif
21482+ ret
21483+ENDPROC(pax_exit_kernel)
21484+#endif
21485+
21486+ .macro pax_erase_kstack
21487+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
21488+ call pax_erase_kstack
21489+#endif
21490+ .endm
21491+
21492+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
21493+/*
21494+ * ebp: thread_info
21495+ */
21496+ENTRY(pax_erase_kstack)
21497+ pushl %edi
21498+ pushl %ecx
21499+ pushl %eax
21500+
21501+ mov TI_lowest_stack(%ebp), %edi
21502+ mov $-0xBEEF, %eax
21503+ std
21504+
21505+1: mov %edi, %ecx
21506+ and $THREAD_SIZE_asm - 1, %ecx
21507+ shr $2, %ecx
21508+ repne scasl
21509+ jecxz 2f
21510+
21511+ cmp $2*16, %ecx
21512+ jc 2f
21513+
21514+ mov $2*16, %ecx
21515+ repe scasl
21516+ jecxz 2f
21517+ jne 1b
21518+
21519+2: cld
21520+ mov %esp, %ecx
21521+ sub %edi, %ecx
21522+
21523+ cmp $THREAD_SIZE_asm, %ecx
21524+ jb 3f
21525+ ud2
21526+3:
21527+
21528+ shr $2, %ecx
21529+ rep stosl
21530+
21531+ mov TI_task_thread_sp0(%ebp), %edi
21532+ sub $128, %edi
21533+ mov %edi, TI_lowest_stack(%ebp)
21534+
21535+ popl %eax
21536+ popl %ecx
21537+ popl %edi
21538+ ret
21539+ENDPROC(pax_erase_kstack)
21540+#endif
21541+
21542+.macro __SAVE_ALL _DS
21543 cld
21544 PUSH_GS
21545 pushl_cfi %fs
21546@@ -206,7 +346,7 @@
21547 CFI_REL_OFFSET ecx, 0
21548 pushl_cfi %ebx
21549 CFI_REL_OFFSET ebx, 0
21550- movl $(__USER_DS), %edx
21551+ movl $\_DS, %edx
21552 movl %edx, %ds
21553 movl %edx, %es
21554 movl $(__KERNEL_PERCPU), %edx
21555@@ -214,6 +354,15 @@
21556 SET_KERNEL_GS %edx
21557 .endm
21558
21559+.macro SAVE_ALL
21560+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
21561+ __SAVE_ALL __KERNEL_DS
21562+ pax_enter_kernel
21563+#else
21564+ __SAVE_ALL __USER_DS
21565+#endif
21566+.endm
21567+
21568 .macro RESTORE_INT_REGS
21569 popl_cfi %ebx
21570 CFI_RESTORE ebx
21571@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
21572 popfl_cfi
21573 jmp syscall_exit
21574 CFI_ENDPROC
21575-END(ret_from_fork)
21576+ENDPROC(ret_from_fork)
21577
21578 ENTRY(ret_from_kernel_thread)
21579 CFI_STARTPROC
21580@@ -344,7 +493,15 @@ ret_from_intr:
21581 andl $SEGMENT_RPL_MASK, %eax
21582 #endif
21583 cmpl $USER_RPL, %eax
21584+
21585+#ifdef CONFIG_PAX_KERNEXEC
21586+ jae resume_userspace
21587+
21588+ pax_exit_kernel
21589+ jmp resume_kernel
21590+#else
21591 jb resume_kernel # not returning to v8086 or userspace
21592+#endif
21593
21594 ENTRY(resume_userspace)
21595 LOCKDEP_SYS_EXIT
21596@@ -356,8 +513,8 @@ ENTRY(resume_userspace)
21597 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
21598 # int/exception return?
21599 jne work_pending
21600- jmp restore_all
21601-END(ret_from_exception)
21602+ jmp restore_all_pax
21603+ENDPROC(ret_from_exception)
21604
21605 #ifdef CONFIG_PREEMPT
21606 ENTRY(resume_kernel)
21607@@ -369,7 +526,7 @@ need_resched:
21608 jz restore_all
21609 call preempt_schedule_irq
21610 jmp need_resched
21611-END(resume_kernel)
21612+ENDPROC(resume_kernel)
21613 #endif
21614 CFI_ENDPROC
21615 /*
21616@@ -403,30 +560,45 @@ sysenter_past_esp:
21617 /*CFI_REL_OFFSET cs, 0*/
21618 /*
21619 * Push current_thread_info()->sysenter_return to the stack.
21620- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
21621- * pushed above; +8 corresponds to copy_thread's esp0 setting.
21622 */
21623- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
21624+ pushl_cfi $0
21625 CFI_REL_OFFSET eip, 0
21626
21627 pushl_cfi %eax
21628 SAVE_ALL
21629+ GET_THREAD_INFO(%ebp)
21630+ movl TI_sysenter_return(%ebp),%ebp
21631+ movl %ebp,PT_EIP(%esp)
21632 ENABLE_INTERRUPTS(CLBR_NONE)
21633
21634 /*
21635 * Load the potential sixth argument from user stack.
21636 * Careful about security.
21637 */
21638+ movl PT_OLDESP(%esp),%ebp
21639+
21640+#ifdef CONFIG_PAX_MEMORY_UDEREF
21641+ mov PT_OLDSS(%esp),%ds
21642+1: movl %ds:(%ebp),%ebp
21643+ push %ss
21644+ pop %ds
21645+#else
21646 cmpl $__PAGE_OFFSET-3,%ebp
21647 jae syscall_fault
21648 ASM_STAC
21649 1: movl (%ebp),%ebp
21650 ASM_CLAC
21651+#endif
21652+
21653 movl %ebp,PT_EBP(%esp)
21654 _ASM_EXTABLE(1b,syscall_fault)
21655
21656 GET_THREAD_INFO(%ebp)
21657
21658+#ifdef CONFIG_PAX_RANDKSTACK
21659+ pax_erase_kstack
21660+#endif
21661+
21662 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
21663 jnz sysenter_audit
21664 sysenter_do_call:
21665@@ -441,12 +613,24 @@ sysenter_do_call:
21666 testl $_TIF_ALLWORK_MASK, %ecx
21667 jne sysexit_audit
21668 sysenter_exit:
21669+
21670+#ifdef CONFIG_PAX_RANDKSTACK
21671+ pushl_cfi %eax
21672+ movl %esp, %eax
21673+ call pax_randomize_kstack
21674+ popl_cfi %eax
21675+#endif
21676+
21677+ pax_erase_kstack
21678+
21679 /* if something modifies registers it must also disable sysexit */
21680 movl PT_EIP(%esp), %edx
21681 movl PT_OLDESP(%esp), %ecx
21682 xorl %ebp,%ebp
21683 TRACE_IRQS_ON
21684 1: mov PT_FS(%esp), %fs
21685+2: mov PT_DS(%esp), %ds
21686+3: mov PT_ES(%esp), %es
21687 PTGS_TO_GS
21688 ENABLE_INTERRUPTS_SYSEXIT
21689
21690@@ -463,6 +647,9 @@ sysenter_audit:
21691 movl %eax,%edx /* 2nd arg: syscall number */
21692 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
21693 call __audit_syscall_entry
21694+
21695+ pax_erase_kstack
21696+
21697 pushl_cfi %ebx
21698 movl PT_EAX(%esp),%eax /* reload syscall number */
21699 jmp sysenter_do_call
21700@@ -488,10 +675,16 @@ sysexit_audit:
21701
21702 CFI_ENDPROC
21703 .pushsection .fixup,"ax"
21704-2: movl $0,PT_FS(%esp)
21705+4: movl $0,PT_FS(%esp)
21706+ jmp 1b
21707+5: movl $0,PT_DS(%esp)
21708+ jmp 1b
21709+6: movl $0,PT_ES(%esp)
21710 jmp 1b
21711 .popsection
21712- _ASM_EXTABLE(1b,2b)
21713+ _ASM_EXTABLE(1b,4b)
21714+ _ASM_EXTABLE(2b,5b)
21715+ _ASM_EXTABLE(3b,6b)
21716 PTGS_TO_GS_EX
21717 ENDPROC(ia32_sysenter_target)
21718
21719@@ -506,6 +699,11 @@ ENTRY(system_call)
21720 pushl_cfi %eax # save orig_eax
21721 SAVE_ALL
21722 GET_THREAD_INFO(%ebp)
21723+
21724+#ifdef CONFIG_PAX_RANDKSTACK
21725+ pax_erase_kstack
21726+#endif
21727+
21728 # system call tracing in operation / emulation
21729 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
21730 jnz syscall_trace_entry
21731@@ -524,6 +722,15 @@ syscall_exit:
21732 testl $_TIF_ALLWORK_MASK, %ecx # current->work
21733 jne syscall_exit_work
21734
21735+restore_all_pax:
21736+
21737+#ifdef CONFIG_PAX_RANDKSTACK
21738+ movl %esp, %eax
21739+ call pax_randomize_kstack
21740+#endif
21741+
21742+ pax_erase_kstack
21743+
21744 restore_all:
21745 TRACE_IRQS_IRET
21746 restore_all_notrace:
21747@@ -580,14 +787,34 @@ ldt_ss:
21748 * compensating for the offset by changing to the ESPFIX segment with
21749 * a base address that matches for the difference.
21750 */
21751-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
21752+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
21753 mov %esp, %edx /* load kernel esp */
21754 mov PT_OLDESP(%esp), %eax /* load userspace esp */
21755 mov %dx, %ax /* eax: new kernel esp */
21756 sub %eax, %edx /* offset (low word is 0) */
21757+#ifdef CONFIG_SMP
21758+ movl PER_CPU_VAR(cpu_number), %ebx
21759+ shll $PAGE_SHIFT_asm, %ebx
21760+ addl $cpu_gdt_table, %ebx
21761+#else
21762+ movl $cpu_gdt_table, %ebx
21763+#endif
21764 shr $16, %edx
21765- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
21766- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
21767+
21768+#ifdef CONFIG_PAX_KERNEXEC
21769+ mov %cr0, %esi
21770+ btr $16, %esi
21771+ mov %esi, %cr0
21772+#endif
21773+
21774+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
21775+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
21776+
21777+#ifdef CONFIG_PAX_KERNEXEC
21778+ bts $16, %esi
21779+ mov %esi, %cr0
21780+#endif
21781+
21782 pushl_cfi $__ESPFIX_SS
21783 pushl_cfi %eax /* new kernel esp */
21784 /* Disable interrupts, but do not irqtrace this section: we
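
The ldt_ss rewrite above does two things: it addresses the current CPU's page-sized GDT directly (cpu_gdt_table + cpu * PAGE_SIZE on SMP) instead of going through the per-cpu gdt_page, and under KERNEXEC it briefly clears CR0.WP (bit 16) so the otherwise read-only GDT can be patched. The two byte stores fill in bits 16..23 and 24..31 of the ESPFIX segment base inside the 8-byte descriptor; the low word of the base is already zero by construction ("offset (low word is 0)"). A small C sketch of that descriptor packing (layout per the x86 SDM; the helper name is ours):

#include <stdint.h>
#include <stdio.h>

/* Write a 32-bit base address into an 8-byte GDT descriptor:
 * bytes 2..3 hold base[15:0], byte 4 base[23:16], byte 7 base[31:24]. */
static void set_desc_base(uint8_t desc[8], uint32_t base)
{
    desc[2] = base & 0xff;
    desc[3] = (base >> 8) & 0xff;
    desc[4] = (base >> 16) & 0xff;   /* the 'mov %dl, 4 + GDT_ESPFIX_SS' store */
    desc[7] = (base >> 24) & 0xff;   /* the 'mov %dh, 7 + GDT_ESPFIX_SS' store */
}

int main(void)
{
    uint8_t desc[8] = {0};
    set_desc_base(desc, 0xc1230000);  /* low word zero, as in the espfix path */
    for (int i = 0; i < 8; i++)
        printf("%02x ", desc[i]);
    printf("\n");
    return 0;
}
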
21785@@ -616,20 +843,18 @@ work_resched:
21786 movl TI_flags(%ebp), %ecx
21787 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
21788 # than syscall tracing?
21789- jz restore_all
21790+ jz restore_all_pax
21791 testb $_TIF_NEED_RESCHED, %cl
21792 jnz work_resched
21793
21794 work_notifysig: # deal with pending signals and
21795 # notify-resume requests
21796+ movl %esp, %eax
21797 #ifdef CONFIG_VM86
21798 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
21799- movl %esp, %eax
21800 jne work_notifysig_v86 # returning to kernel-space or
21801 # vm86-space
21802 1:
21803-#else
21804- movl %esp, %eax
21805 #endif
21806 TRACE_IRQS_ON
21807 ENABLE_INTERRUPTS(CLBR_NONE)
21808@@ -650,7 +875,7 @@ work_notifysig_v86:
21809 movl %eax, %esp
21810 jmp 1b
21811 #endif
21812-END(work_pending)
21813+ENDPROC(work_pending)
21814
21815 # perform syscall exit tracing
21816 ALIGN
21817@@ -658,11 +883,14 @@ syscall_trace_entry:
21818 movl $-ENOSYS,PT_EAX(%esp)
21819 movl %esp, %eax
21820 call syscall_trace_enter
21821+
21822+ pax_erase_kstack
21823+
21824 /* What it returned is what we'll actually use. */
21825 cmpl $(NR_syscalls), %eax
21826 jnae syscall_call
21827 jmp syscall_exit
21828-END(syscall_trace_entry)
21829+ENDPROC(syscall_trace_entry)
21830
21831 # perform syscall exit tracing
21832 ALIGN
21833@@ -675,21 +903,25 @@ syscall_exit_work:
21834 movl %esp, %eax
21835 call syscall_trace_leave
21836 jmp resume_userspace
21837-END(syscall_exit_work)
21838+ENDPROC(syscall_exit_work)
21839 CFI_ENDPROC
21840
21841 RING0_INT_FRAME # can't unwind into user space anyway
21842 syscall_fault:
21843+#ifdef CONFIG_PAX_MEMORY_UDEREF
21844+ push %ss
21845+ pop %ds
21846+#endif
21847 ASM_CLAC
21848 GET_THREAD_INFO(%ebp)
21849 movl $-EFAULT,PT_EAX(%esp)
21850 jmp resume_userspace
21851-END(syscall_fault)
21852+ENDPROC(syscall_fault)
21853
21854 syscall_badsys:
21855 movl $-ENOSYS,PT_EAX(%esp)
21856 jmp resume_userspace
21857-END(syscall_badsys)
21858+ENDPROC(syscall_badsys)
21859 CFI_ENDPROC
21860 /*
21861 * End of kprobes section
21862@@ -705,8 +937,15 @@ END(syscall_badsys)
21863 * normal stack and adjusts ESP with the matching offset.
21864 */
21865 /* fixup the stack */
21866- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
21867- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
21868+#ifdef CONFIG_SMP
21869+ movl PER_CPU_VAR(cpu_number), %ebx
21870+ shll $PAGE_SHIFT_asm, %ebx
21871+ addl $cpu_gdt_table, %ebx
21872+#else
21873+ movl $cpu_gdt_table, %ebx
21874+#endif
21875+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
21876+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
21877 shl $16, %eax
21878 addl %esp, %eax /* the adjusted stack pointer */
21879 pushl_cfi $__KERNEL_DS
21880@@ -759,7 +998,7 @@ vector=vector+1
21881 .endr
21882 2: jmp common_interrupt
21883 .endr
21884-END(irq_entries_start)
21885+ENDPROC(irq_entries_start)
21886
21887 .previous
21888 END(interrupt)
21889@@ -820,7 +1059,7 @@ ENTRY(coprocessor_error)
21890 pushl_cfi $do_coprocessor_error
21891 jmp error_code
21892 CFI_ENDPROC
21893-END(coprocessor_error)
21894+ENDPROC(coprocessor_error)
21895
21896 ENTRY(simd_coprocessor_error)
21897 RING0_INT_FRAME
21898@@ -833,7 +1072,7 @@ ENTRY(simd_coprocessor_error)
21899 .section .altinstructions,"a"
21900 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
21901 .previous
21902-.section .altinstr_replacement,"ax"
21903+.section .altinstr_replacement,"a"
21904 663: pushl $do_simd_coprocessor_error
21905 664:
21906 .previous
21907@@ -842,7 +1081,7 @@ ENTRY(simd_coprocessor_error)
21908 #endif
21909 jmp error_code
21910 CFI_ENDPROC
21911-END(simd_coprocessor_error)
21912+ENDPROC(simd_coprocessor_error)
21913
21914 ENTRY(device_not_available)
21915 RING0_INT_FRAME
21916@@ -851,18 +1090,18 @@ ENTRY(device_not_available)
21917 pushl_cfi $do_device_not_available
21918 jmp error_code
21919 CFI_ENDPROC
21920-END(device_not_available)
21921+ENDPROC(device_not_available)
21922
21923 #ifdef CONFIG_PARAVIRT
21924 ENTRY(native_iret)
21925 iret
21926 _ASM_EXTABLE(native_iret, iret_exc)
21927-END(native_iret)
21928+ENDPROC(native_iret)
21929
21930 ENTRY(native_irq_enable_sysexit)
21931 sti
21932 sysexit
21933-END(native_irq_enable_sysexit)
21934+ENDPROC(native_irq_enable_sysexit)
21935 #endif
21936
21937 ENTRY(overflow)
21938@@ -872,7 +1111,7 @@ ENTRY(overflow)
21939 pushl_cfi $do_overflow
21940 jmp error_code
21941 CFI_ENDPROC
21942-END(overflow)
21943+ENDPROC(overflow)
21944
21945 ENTRY(bounds)
21946 RING0_INT_FRAME
21947@@ -881,7 +1120,7 @@ ENTRY(bounds)
21948 pushl_cfi $do_bounds
21949 jmp error_code
21950 CFI_ENDPROC
21951-END(bounds)
21952+ENDPROC(bounds)
21953
21954 ENTRY(invalid_op)
21955 RING0_INT_FRAME
21956@@ -890,7 +1129,7 @@ ENTRY(invalid_op)
21957 pushl_cfi $do_invalid_op
21958 jmp error_code
21959 CFI_ENDPROC
21960-END(invalid_op)
21961+ENDPROC(invalid_op)
21962
21963 ENTRY(coprocessor_segment_overrun)
21964 RING0_INT_FRAME
21965@@ -899,7 +1138,7 @@ ENTRY(coprocessor_segment_overrun)
21966 pushl_cfi $do_coprocessor_segment_overrun
21967 jmp error_code
21968 CFI_ENDPROC
21969-END(coprocessor_segment_overrun)
21970+ENDPROC(coprocessor_segment_overrun)
21971
21972 ENTRY(invalid_TSS)
21973 RING0_EC_FRAME
21974@@ -907,7 +1146,7 @@ ENTRY(invalid_TSS)
21975 pushl_cfi $do_invalid_TSS
21976 jmp error_code
21977 CFI_ENDPROC
21978-END(invalid_TSS)
21979+ENDPROC(invalid_TSS)
21980
21981 ENTRY(segment_not_present)
21982 RING0_EC_FRAME
21983@@ -915,7 +1154,7 @@ ENTRY(segment_not_present)
21984 pushl_cfi $do_segment_not_present
21985 jmp error_code
21986 CFI_ENDPROC
21987-END(segment_not_present)
21988+ENDPROC(segment_not_present)
21989
21990 ENTRY(stack_segment)
21991 RING0_EC_FRAME
21992@@ -923,7 +1162,7 @@ ENTRY(stack_segment)
21993 pushl_cfi $do_stack_segment
21994 jmp error_code
21995 CFI_ENDPROC
21996-END(stack_segment)
21997+ENDPROC(stack_segment)
21998
21999 ENTRY(alignment_check)
22000 RING0_EC_FRAME
22001@@ -931,7 +1170,7 @@ ENTRY(alignment_check)
22002 pushl_cfi $do_alignment_check
22003 jmp error_code
22004 CFI_ENDPROC
22005-END(alignment_check)
22006+ENDPROC(alignment_check)
22007
22008 ENTRY(divide_error)
22009 RING0_INT_FRAME
22010@@ -940,7 +1179,7 @@ ENTRY(divide_error)
22011 pushl_cfi $do_divide_error
22012 jmp error_code
22013 CFI_ENDPROC
22014-END(divide_error)
22015+ENDPROC(divide_error)
22016
22017 #ifdef CONFIG_X86_MCE
22018 ENTRY(machine_check)
22019@@ -950,7 +1189,7 @@ ENTRY(machine_check)
22020 pushl_cfi machine_check_vector
22021 jmp error_code
22022 CFI_ENDPROC
22023-END(machine_check)
22024+ENDPROC(machine_check)
22025 #endif
22026
22027 ENTRY(spurious_interrupt_bug)
22028@@ -960,7 +1199,7 @@ ENTRY(spurious_interrupt_bug)
22029 pushl_cfi $do_spurious_interrupt_bug
22030 jmp error_code
22031 CFI_ENDPROC
22032-END(spurious_interrupt_bug)
22033+ENDPROC(spurious_interrupt_bug)
22034 /*
22035 * End of kprobes section
22036 */
22037@@ -1070,7 +1309,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
22038
22039 ENTRY(mcount)
22040 ret
22041-END(mcount)
22042+ENDPROC(mcount)
22043
22044 ENTRY(ftrace_caller)
22045 cmpl $0, function_trace_stop
22046@@ -1103,7 +1342,7 @@ ftrace_graph_call:
22047 .globl ftrace_stub
22048 ftrace_stub:
22049 ret
22050-END(ftrace_caller)
22051+ENDPROC(ftrace_caller)
22052
22053 ENTRY(ftrace_regs_caller)
22054 pushf /* push flags before compare (in cs location) */
22055@@ -1207,7 +1446,7 @@ trace:
22056 popl %ecx
22057 popl %eax
22058 jmp ftrace_stub
22059-END(mcount)
22060+ENDPROC(mcount)
22061 #endif /* CONFIG_DYNAMIC_FTRACE */
22062 #endif /* CONFIG_FUNCTION_TRACER */
22063
22064@@ -1225,7 +1464,7 @@ ENTRY(ftrace_graph_caller)
22065 popl %ecx
22066 popl %eax
22067 ret
22068-END(ftrace_graph_caller)
22069+ENDPROC(ftrace_graph_caller)
22070
22071 .globl return_to_handler
22072 return_to_handler:
22073@@ -1291,15 +1530,18 @@ error_code:
22074 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
22075 REG_TO_PTGS %ecx
22076 SET_KERNEL_GS %ecx
22077- movl $(__USER_DS), %ecx
22078+ movl $(__KERNEL_DS), %ecx
22079 movl %ecx, %ds
22080 movl %ecx, %es
22081+
22082+ pax_enter_kernel
22083+
22084 TRACE_IRQS_OFF
22085 movl %esp,%eax # pt_regs pointer
22086 call *%edi
22087 jmp ret_from_exception
22088 CFI_ENDPROC
22089-END(page_fault)
22090+ENDPROC(page_fault)
22091
22092 /*
22093 * Debug traps and NMI can happen at the one SYSENTER instruction
22094@@ -1342,7 +1584,7 @@ debug_stack_correct:
22095 call do_debug
22096 jmp ret_from_exception
22097 CFI_ENDPROC
22098-END(debug)
22099+ENDPROC(debug)
22100
22101 /*
22102 * NMI is doubly nasty. It can happen _while_ we're handling
22103@@ -1380,6 +1622,9 @@ nmi_stack_correct:
22104 xorl %edx,%edx # zero error code
22105 movl %esp,%eax # pt_regs pointer
22106 call do_nmi
22107+
22108+ pax_exit_kernel
22109+
22110 jmp restore_all_notrace
22111 CFI_ENDPROC
22112
22113@@ -1416,12 +1661,15 @@ nmi_espfix_stack:
22114 FIXUP_ESPFIX_STACK # %eax == %esp
22115 xorl %edx,%edx # zero error code
22116 call do_nmi
22117+
22118+ pax_exit_kernel
22119+
22120 RESTORE_REGS
22121 lss 12+4(%esp), %esp # back to espfix stack
22122 CFI_ADJUST_CFA_OFFSET -24
22123 jmp irq_return
22124 CFI_ENDPROC
22125-END(nmi)
22126+ENDPROC(nmi)
22127
22128 ENTRY(int3)
22129 RING0_INT_FRAME
22130@@ -1434,14 +1682,14 @@ ENTRY(int3)
22131 call do_int3
22132 jmp ret_from_exception
22133 CFI_ENDPROC
22134-END(int3)
22135+ENDPROC(int3)
22136
22137 ENTRY(general_protection)
22138 RING0_EC_FRAME
22139 pushl_cfi $do_general_protection
22140 jmp error_code
22141 CFI_ENDPROC
22142-END(general_protection)
22143+ENDPROC(general_protection)
22144
22145 #ifdef CONFIG_KVM_GUEST
22146 ENTRY(async_page_fault)
22147@@ -1450,7 +1698,7 @@ ENTRY(async_page_fault)
22148 pushl_cfi $do_async_page_fault
22149 jmp error_code
22150 CFI_ENDPROC
22151-END(async_page_fault)
22152+ENDPROC(async_page_fault)
22153 #endif
22154
22155 /*
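
A pattern that repeats throughout this file (and in entry_64.S below): every END(sym) closing a function is converted to ENDPROC(sym). The practical difference is that ENDPROC additionally types the symbol as @function, presumably so that the PaX/grsecurity tooling (and anything else inspecting symbol types) can reliably tell code from data. Simplified from include/linux/linkage.h; the exact line-continuation details vary by kernel version:

/* Simplified; see include/linux/linkage.h for the real definitions. */
#define END(name)      .size name, .-name
#define ENDPROC(name)  .type name, @function; END(name)
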
22156diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
22157index 1e96c36..3ff710a 100644
22158--- a/arch/x86/kernel/entry_64.S
22159+++ b/arch/x86/kernel/entry_64.S
22160@@ -59,6 +59,8 @@
22161 #include <asm/context_tracking.h>
22162 #include <asm/smap.h>
22163 #include <linux/err.h>
22164+#include <asm/pgtable.h>
22165+#include <asm/alternative-asm.h>
22166
22167 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
22168 #include <linux/elf-em.h>
22169@@ -80,8 +82,9 @@
22170 #ifdef CONFIG_DYNAMIC_FTRACE
22171
22172 ENTRY(function_hook)
22173+ pax_force_retaddr
22174 retq
22175-END(function_hook)
22176+ENDPROC(function_hook)
22177
22178 /* skip is set if stack has been adjusted */
22179 .macro ftrace_caller_setup skip=0
22180@@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
22181 #endif
22182
22183 GLOBAL(ftrace_stub)
22184+ pax_force_retaddr
22185 retq
22186-END(ftrace_caller)
22187+ENDPROC(ftrace_caller)
22188
22189 ENTRY(ftrace_regs_caller)
22190 /* Save the current flags before compare (in SS location)*/
22191@@ -191,7 +195,7 @@ ftrace_restore_flags:
22192 popfq
22193 jmp ftrace_stub
22194
22195-END(ftrace_regs_caller)
22196+ENDPROC(ftrace_regs_caller)
22197
22198
22199 #else /* ! CONFIG_DYNAMIC_FTRACE */
22200@@ -212,6 +216,7 @@ ENTRY(function_hook)
22201 #endif
22202
22203 GLOBAL(ftrace_stub)
22204+ pax_force_retaddr
22205 retq
22206
22207 trace:
22208@@ -225,12 +230,13 @@ trace:
22209 #endif
22210 subq $MCOUNT_INSN_SIZE, %rdi
22211
22212+ pax_force_fptr ftrace_trace_function
22213 call *ftrace_trace_function
22214
22215 MCOUNT_RESTORE_FRAME
22216
22217 jmp ftrace_stub
22218-END(function_hook)
22219+ENDPROC(function_hook)
22220 #endif /* CONFIG_DYNAMIC_FTRACE */
22221 #endif /* CONFIG_FUNCTION_TRACER */
22222
22223@@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
22224
22225 MCOUNT_RESTORE_FRAME
22226
22227+ pax_force_retaddr
22228 retq
22229-END(ftrace_graph_caller)
22230+ENDPROC(ftrace_graph_caller)
22231
22232 GLOBAL(return_to_handler)
22233 subq $24, %rsp
22234@@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
22235 movq 8(%rsp), %rdx
22236 movq (%rsp), %rax
22237 addq $24, %rsp
22238+ pax_force_fptr %rdi
22239 jmp *%rdi
22240+ENDPROC(return_to_handler)
22241 #endif
22242
22243
22244@@ -284,6 +293,430 @@ ENTRY(native_usergs_sysret64)
22245 ENDPROC(native_usergs_sysret64)
22246 #endif /* CONFIG_PARAVIRT */
22247
22248+ .macro ljmpq sel, off
22249+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
22250+ .byte 0x48; ljmp *1234f(%rip)
22251+ .pushsection .rodata
22252+ .align 16
22253+ 1234: .quad \off; .word \sel
22254+ .popsection
22255+#else
22256+ pushq $\sel
22257+ pushq $\off
22258+ lretq
22259+#endif
22260+ .endm
22261+
22262+ .macro pax_enter_kernel
22263+ pax_set_fptr_mask
22264+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22265+ call pax_enter_kernel
22266+#endif
22267+ .endm
22268+
22269+ .macro pax_exit_kernel
22270+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22271+ call pax_exit_kernel
22272+#endif
22273+
22274+ .endm
22275+
22276+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22277+ENTRY(pax_enter_kernel)
22278+ pushq %rdi
22279+
22280+#ifdef CONFIG_PARAVIRT
22281+ PV_SAVE_REGS(CLBR_RDI)
22282+#endif
22283+
22284+#ifdef CONFIG_PAX_KERNEXEC
22285+ GET_CR0_INTO_RDI
22286+ bts $16,%rdi
22287+ jnc 3f
22288+ mov %cs,%edi
22289+ cmp $__KERNEL_CS,%edi
22290+ jnz 2f
22291+1:
22292+#endif
22293+
22294+#ifdef CONFIG_PAX_MEMORY_UDEREF
22295+ 661: jmp 111f
22296+ .pushsection .altinstr_replacement, "a"
22297+ 662: ASM_NOP2
22298+ .popsection
22299+ .pushsection .altinstructions, "a"
22300+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
22301+ .popsection
22302+ GET_CR3_INTO_RDI
22303+ cmp $0,%dil
22304+ jnz 112f
22305+ mov $__KERNEL_DS,%edi
22306+ mov %edi,%ss
22307+ jmp 111f
22308+112: cmp $1,%dil
22309+ jz 113f
22310+ ud2
22311+113: sub $4097,%rdi
22312+ bts $63,%rdi
22313+ SET_RDI_INTO_CR3
22314+ mov $__UDEREF_KERNEL_DS,%edi
22315+ mov %edi,%ss
22316+111:
22317+#endif
22318+
22319+#ifdef CONFIG_PARAVIRT
22320+ PV_RESTORE_REGS(CLBR_RDI)
22321+#endif
22322+
22323+ popq %rdi
22324+ pax_force_retaddr
22325+ retq
22326+
22327+#ifdef CONFIG_PAX_KERNEXEC
22328+2: ljmpq __KERNEL_CS,1b
22329+3: ljmpq __KERNEXEC_KERNEL_CS,4f
22330+4: SET_RDI_INTO_CR0
22331+ jmp 1b
22332+#endif
22333+ENDPROC(pax_enter_kernel)
22334+
22335+ENTRY(pax_exit_kernel)
22336+ pushq %rdi
22337+
22338+#ifdef CONFIG_PARAVIRT
22339+ PV_SAVE_REGS(CLBR_RDI)
22340+#endif
22341+
22342+#ifdef CONFIG_PAX_KERNEXEC
22343+ mov %cs,%rdi
22344+ cmp $__KERNEXEC_KERNEL_CS,%edi
22345+ jz 2f
22346+ GET_CR0_INTO_RDI
22347+ bts $16,%rdi
22348+ jnc 4f
22349+1:
22350+#endif
22351+
22352+#ifdef CONFIG_PAX_MEMORY_UDEREF
22353+ 661: jmp 111f
22354+ .pushsection .altinstr_replacement, "a"
22355+ 662: ASM_NOP2
22356+ .popsection
22357+ .pushsection .altinstructions, "a"
22358+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
22359+ .popsection
22360+ mov %ss,%edi
22361+ cmp $__UDEREF_KERNEL_DS,%edi
22362+ jnz 111f
22363+ GET_CR3_INTO_RDI
22364+ cmp $0,%dil
22365+ jz 112f
22366+ ud2
22367+112: add $4097,%rdi
22368+ bts $63,%rdi
22369+ SET_RDI_INTO_CR3
22370+ mov $__KERNEL_DS,%edi
22371+ mov %edi,%ss
22372+111:
22373+#endif
22374+
22375+#ifdef CONFIG_PARAVIRT
22376+ PV_RESTORE_REGS(CLBR_RDI);
22377+#endif
22378+
22379+ popq %rdi
22380+ pax_force_retaddr
22381+ retq
22382+
22383+#ifdef CONFIG_PAX_KERNEXEC
22384+2: GET_CR0_INTO_RDI
22385+ btr $16,%rdi
22386+ jnc 4f
22387+ ljmpq __KERNEL_CS,3f
22388+3: SET_RDI_INTO_CR0
22389+ jmp 1b
22390+4: ud2
22391+ jmp 4b
22392+#endif
22393+ENDPROC(pax_exit_kernel)
22394+#endif
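
In the UDEREF paths above, the user/kernel state is encoded directly in CR3: the per-CPU user PGD sits one page above the kernel PGD, and on PCID-capable hardware the user CR3 carries PCID 1 while the kernel CR3 carries PCID 0 — hence `sub/add $4097` (one page plus one PCID step) and `bts $63` to set the no-flush bit so the TLB survives the switch. Our reading of that arithmetic, as a checkable C sketch (constants taken from the asm above, interpretation ours):

#include <stdint.h>
#include <stdio.h>

#define CR3_NOFLUSH (1ULL << 63)   /* bts $63,%rdi */

/* kernel CR3 = user CR3 minus one page and one PCID step (1 -> 0) */
static uint64_t user_to_kernel_cr3(uint64_t user_cr3)
{
    return (user_cr3 - 4097) | CR3_NOFLUSH;   /* sub $4097; bts $63 */
}

static uint64_t kernel_to_user_cr3(uint64_t kernel_cr3)
{
    return ((kernel_cr3 & ~CR3_NOFLUSH) + 4097) | CR3_NOFLUSH;
}

int main(void)
{
    uint64_t user_cr3 = 0x1234000 + 1;   /* user PGD at 0x1234000, PCID 1 */
    uint64_t k = user_to_kernel_cr3(user_cr3);
    printf("kernel cr3: %#llx\n", (unsigned long long)k);  /* PGD 0x1233000, PCID 0, no-flush */
    printf("user   cr3: %#llx\n", (unsigned long long)kernel_to_user_cr3(k));
    return 0;
}
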
22395+
22396+ .macro pax_enter_kernel_user
22397+ pax_set_fptr_mask
22398+#ifdef CONFIG_PAX_MEMORY_UDEREF
22399+ call pax_enter_kernel_user
22400+#endif
22401+ .endm
22402+
22403+ .macro pax_exit_kernel_user
22404+#ifdef CONFIG_PAX_MEMORY_UDEREF
22405+ call pax_exit_kernel_user
22406+#endif
22407+#ifdef CONFIG_PAX_RANDKSTACK
22408+ pushq %rax
22409+ pushq %r11
22410+ call pax_randomize_kstack
22411+ popq %r11
22412+ popq %rax
22413+#endif
22414+ .endm
22415+
22416+#ifdef CONFIG_PAX_MEMORY_UDEREF
22417+ENTRY(pax_enter_kernel_user)
22418+ pushq %rdi
22419+ pushq %rbx
22420+
22421+#ifdef CONFIG_PARAVIRT
22422+ PV_SAVE_REGS(CLBR_RDI)
22423+#endif
22424+
22425+ 661: jmp 111f
22426+ .pushsection .altinstr_replacement, "a"
22427+ 662: ASM_NOP2
22428+ .popsection
22429+ .pushsection .altinstructions, "a"
22430+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
22431+ .popsection
22432+ GET_CR3_INTO_RDI
22433+ cmp $1,%dil
22434+ jnz 4f
22435+ sub $4097,%rdi
22436+ bts $63,%rdi
22437+ SET_RDI_INTO_CR3
22438+ jmp 3f
22439+111:
22440+
22441+ GET_CR3_INTO_RDI
22442+ mov %rdi,%rbx
22443+ add $__START_KERNEL_map,%rbx
22444+ sub phys_base(%rip),%rbx
22445+
22446+#ifdef CONFIG_PARAVIRT
22447+ cmpl $0, pv_info+PARAVIRT_enabled
22448+ jz 1f
22449+ pushq %rdi
22450+ i = 0
22451+ .rept USER_PGD_PTRS
22452+ mov i*8(%rbx),%rsi
22453+ mov $0,%sil
22454+ lea i*8(%rbx),%rdi
22455+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
22456+ i = i + 1
22457+ .endr
22458+ popq %rdi
22459+ jmp 2f
22460+1:
22461+#endif
22462+
22463+ i = 0
22464+ .rept USER_PGD_PTRS
22465+ movb $0,i*8(%rbx)
22466+ i = i + 1
22467+ .endr
22468+
22469+2: SET_RDI_INTO_CR3
22470+
22471+#ifdef CONFIG_PAX_KERNEXEC
22472+ GET_CR0_INTO_RDI
22473+ bts $16,%rdi
22474+ SET_RDI_INTO_CR0
22475+#endif
22476+
22477+3:
22478+
22479+#ifdef CONFIG_PARAVIRT
22480+ PV_RESTORE_REGS(CLBR_RDI)
22481+#endif
22482+
22483+ popq %rbx
22484+ popq %rdi
22485+ pax_force_retaddr
22486+ retq
22487+4: ud2
22488+ENDPROC(pax_enter_kernel_user)
22489+
22490+ENTRY(pax_exit_kernel_user)
22491+ pushq %rdi
22492+ pushq %rbx
22493+
22494+#ifdef CONFIG_PARAVIRT
22495+ PV_SAVE_REGS(CLBR_RDI)
22496+#endif
22497+
22498+ GET_CR3_INTO_RDI
22499+ 661: jmp 1f
22500+ .pushsection .altinstr_replacement, "a"
22501+ 662: ASM_NOP2
22502+ .popsection
22503+ .pushsection .altinstructions, "a"
22504+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
22505+ .popsection
22506+ cmp $0,%dil
22507+ jnz 3f
22508+ add $4097,%rdi
22509+ bts $63,%rdi
22510+ SET_RDI_INTO_CR3
22511+ jmp 2f
22512+1:
22513+
22514+ mov %rdi,%rbx
22515+
22516+#ifdef CONFIG_PAX_KERNEXEC
22517+ GET_CR0_INTO_RDI
22518+ btr $16,%rdi
22519+ jnc 3f
22520+ SET_RDI_INTO_CR0
22521+#endif
22522+
22523+ add $__START_KERNEL_map,%rbx
22524+ sub phys_base(%rip),%rbx
22525+
22526+#ifdef CONFIG_PARAVIRT
22527+ cmpl $0, pv_info+PARAVIRT_enabled
22528+ jz 1f
22529+ i = 0
22530+ .rept USER_PGD_PTRS
22531+ mov i*8(%rbx),%rsi
22532+ mov $0x67,%sil
22533+ lea i*8(%rbx),%rdi
22534+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
22535+ i = i + 1
22536+ .endr
22537+ jmp 2f
22538+1:
22539+#endif
22540+
22541+ i = 0
22542+ .rept USER_PGD_PTRS
22543+ movb $0x67,i*8(%rbx)
22544+ i = i + 1
22545+ .endr
22546+2:
22547+
22548+#ifdef CONFIG_PARAVIRT
22549+ PV_RESTORE_REGS(CLBR_RDI)
22550+#endif
22551+
22552+ popq %rbx
22553+ popq %rdi
22554+ pax_force_retaddr
22555+ retq
22556+3: ud2
22557+ENDPROC(pax_exit_kernel_user)
22558+#endif
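
Without PCID, pax_enter_kernel_user/pax_exit_kernel_user fall back to editing the shared PGD in place: on kernel entry the low USER_PGD_PTRS entries are zapped (movb $0 over the low flag byte clears _PAGE_PRESENT), and on exit the low byte is rewritten to 0x67 (PRESENT|RW|USER|ACCESSED|DIRTY), re-exposing userland. A C sketch of that byte-level toggle, mirroring the unconditional .rept loops above (the entry count is assumed; flag values match pgtable_types.h):

#include <stdint.h>
#include <stdio.h>

#define USER_PGD_PTRS 256   /* assumed: the user half of a 512-entry PGD */

/* movb $0,i*8(%rbx): clear the low flag byte -> entry not present */
static void hide_user_pgds(uint64_t *pgd)
{
    for (int i = 0; i < USER_PGD_PTRS; i++)
        pgd[i] &= ~0xffULL;
}

/* movb $0x67,i*8(%rbx): PRESENT|RW|USER|ACCESSED|DIRTY = 0x67 */
static void expose_user_pgds(uint64_t *pgd)
{
    for (int i = 0; i < USER_PGD_PTRS; i++)
        pgd[i] = (pgd[i] & ~0xffULL) | 0x67;
}

int main(void)
{
    uint64_t pgd[USER_PGD_PTRS] = { [0] = 0x1234067 };
    hide_user_pgds(pgd);
    printf("%#llx\n", (unsigned long long)pgd[0]);  /* 0x1234000: hidden */
    expose_user_pgds(pgd);
    printf("%#llx\n", (unsigned long long)pgd[0]);  /* 0x1234067: restored */
    return 0;
}
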
22559+
22560+ .macro pax_enter_kernel_nmi
22561+ pax_set_fptr_mask
22562+
22563+#ifdef CONFIG_PAX_KERNEXEC
22564+ GET_CR0_INTO_RDI
22565+ bts $16,%rdi
22566+ jc 110f
22567+ SET_RDI_INTO_CR0
22568+ or $2,%ebx
22569+110:
22570+#endif
22571+
22572+#ifdef CONFIG_PAX_MEMORY_UDEREF
22573+ 661: jmp 111f
22574+ .pushsection .altinstr_replacement, "a"
22575+ 662: ASM_NOP2
22576+ .popsection
22577+ .pushsection .altinstructions, "a"
22578+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
22579+ .popsection
22580+ GET_CR3_INTO_RDI
22581+ cmp $0,%dil
22582+ jz 111f
22583+ sub $4097,%rdi
22584+ or $4,%ebx
22585+ bts $63,%rdi
22586+ SET_RDI_INTO_CR3
22587+ mov $__UDEREF_KERNEL_DS,%edi
22588+ mov %edi,%ss
22589+111:
22590+#endif
22591+ .endm
22592+
22593+ .macro pax_exit_kernel_nmi
22594+#ifdef CONFIG_PAX_KERNEXEC
22595+ btr $1,%ebx
22596+ jnc 110f
22597+ GET_CR0_INTO_RDI
22598+ btr $16,%rdi
22599+ SET_RDI_INTO_CR0
22600+110:
22601+#endif
22602+
22603+#ifdef CONFIG_PAX_MEMORY_UDEREF
22604+ btr $2,%ebx
22605+ jnc 111f
22606+ GET_CR3_INTO_RDI
22607+ add $4097,%rdi
22608+ bts $63,%rdi
22609+ SET_RDI_INTO_CR3
22610+ mov $__KERNEL_DS,%edi
22611+ mov %edi,%ss
22612+111:
22613+#endif
22614+ .endm
22615+
22616+ .macro pax_erase_kstack
22617+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22618+ call pax_erase_kstack
22619+#endif
22620+ .endm
22621+
22622+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22623+ENTRY(pax_erase_kstack)
22624+ pushq %rdi
22625+ pushq %rcx
22626+ pushq %rax
22627+ pushq %r11
22628+
22629+ GET_THREAD_INFO(%r11)
22630+ mov TI_lowest_stack(%r11), %rdi
22631+ mov $-0xBEEF, %rax
22632+ std
22633+
22634+1: mov %edi, %ecx
22635+ and $THREAD_SIZE_asm - 1, %ecx
22636+ shr $3, %ecx
22637+ repne scasq
22638+ jecxz 2f
22639+
22640+ cmp $2*8, %ecx
22641+ jc 2f
22642+
22643+ mov $2*8, %ecx
22644+ repe scasq
22645+ jecxz 2f
22646+ jne 1b
22647+
22648+2: cld
22649+ mov %esp, %ecx
22650+ sub %edi, %ecx
22651+
22652+ cmp $THREAD_SIZE_asm, %rcx
22653+ jb 3f
22654+ ud2
22655+3:
22656+
22657+ shr $3, %ecx
22658+ rep stosq
22659+
22660+ mov TI_task_thread_sp0(%r11), %rdi
22661+ sub $256, %rdi
22662+ mov %rdi, TI_lowest_stack(%r11)
22663+
22664+ popq %r11
22665+ popq %rax
22666+ popq %rcx
22667+ popq %rdi
22668+ pax_force_retaddr
22669+ ret
22670+ENDPROC(pax_erase_kstack)
22671+#endif
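
pax_erase_kstack above is PAX_MEMORY_STACKLEAK's erase pass: starting from the deepest point the stack reached (TI_lowest_stack), it scans downward (std + repne scasq) for a run of the poison marker to find where the previous erase stopped, then overwrites everything from there up to the live %rsp (rep stosq). A userspace C sketch of the same scan-then-fill, with the poison value taken from the `mov $-0xBEEF, %rax` above; the run-of-two heuristic mirrors the repe scasq retry loop:

#include <stdint.h>
#include <stdio.h>

#define POISON ((uint64_t)-0xBEEFLL)   /* mov $-0xBEEF, %rax */
#define WORDS  64                      /* toy stack of 64 qwords */

/* Erase between the deepest previously-poisoned point and the live stack
 * pointer; stack[0] is the lowest address in this model. */
static void erase_kstack(uint64_t *stack, size_t sp_idx, size_t lowest_idx)
{
    size_t i = lowest_idx;

    /* Scan downward for a run of at least two poison words: that is where
     * the previous erase pass stopped. */
    while (i >= 2 && !(stack[i - 1] == POISON && stack[i - 2] == POISON))
        i--;

    /* Poison everything from there up to (excluding) live data at sp. */
    for (size_t j = i; j < sp_idx; j++)
        stack[j] = POISON;
}

int main(void)
{
    uint64_t stack[WORDS];
    for (size_t i = 0; i < WORDS; i++) stack[i] = POISON;
    for (size_t i = 10; i < 40; i++) stack[i] = i;   /* stale syscall data */
    erase_kstack(stack, 48, 40);                     /* sp above the garbage */
    printf("stack[20] now %#llx\n", (unsigned long long)stack[20]);
    return 0;
}
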
22672
22673 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
22674 #ifdef CONFIG_TRACE_IRQFLAGS
22675@@ -320,7 +753,7 @@ ENDPROC(native_usergs_sysret64)
22676 .endm
22677
22678 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
22679- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
22680+ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
22681 jnc 1f
22682 TRACE_IRQS_ON_DEBUG
22683 1:
22684@@ -358,27 +791,6 @@ ENDPROC(native_usergs_sysret64)
22685 movq \tmp,R11+\offset(%rsp)
22686 .endm
22687
22688- .macro FAKE_STACK_FRAME child_rip
22689- /* push in order ss, rsp, eflags, cs, rip */
22690- xorl %eax, %eax
22691- pushq_cfi $__KERNEL_DS /* ss */
22692- /*CFI_REL_OFFSET ss,0*/
22693- pushq_cfi %rax /* rsp */
22694- CFI_REL_OFFSET rsp,0
22695- pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
22696- /*CFI_REL_OFFSET rflags,0*/
22697- pushq_cfi $__KERNEL_CS /* cs */
22698- /*CFI_REL_OFFSET cs,0*/
22699- pushq_cfi \child_rip /* rip */
22700- CFI_REL_OFFSET rip,0
22701- pushq_cfi %rax /* orig rax */
22702- .endm
22703-
22704- .macro UNFAKE_STACK_FRAME
22705- addq $8*6, %rsp
22706- CFI_ADJUST_CFA_OFFSET -(6*8)
22707- .endm
22708-
22709 /*
22710 * initial frame state for interrupts (and exceptions without error code)
22711 */
22712@@ -445,25 +857,26 @@ ENDPROC(native_usergs_sysret64)
22713 /* save partial stack frame */
22714 .macro SAVE_ARGS_IRQ
22715 cld
22716- /* start from rbp in pt_regs and jump over */
22717- movq_cfi rdi, (RDI-RBP)
22718- movq_cfi rsi, (RSI-RBP)
22719- movq_cfi rdx, (RDX-RBP)
22720- movq_cfi rcx, (RCX-RBP)
22721- movq_cfi rax, (RAX-RBP)
22722- movq_cfi r8, (R8-RBP)
22723- movq_cfi r9, (R9-RBP)
22724- movq_cfi r10, (R10-RBP)
22725- movq_cfi r11, (R11-RBP)
22726+ /* start from r15 in pt_regs and jump over */
22727+ movq_cfi rdi, RDI
22728+ movq_cfi rsi, RSI
22729+ movq_cfi rdx, RDX
22730+ movq_cfi rcx, RCX
22731+ movq_cfi rax, RAX
22732+ movq_cfi r8, R8
22733+ movq_cfi r9, R9
22734+ movq_cfi r10, R10
22735+ movq_cfi r11, R11
22736+ movq_cfi r12, R12
22737
22738 /* Save rbp so that we can unwind from get_irq_regs() */
22739- movq_cfi rbp, 0
22740+ movq_cfi rbp, RBP
22741
22742 /* Save previous stack value */
22743 movq %rsp, %rsi
22744
22745- leaq -RBP(%rsp),%rdi /* arg1 for handler */
22746- testl $3, CS-RBP(%rsi)
22747+ movq %rsp,%rdi /* arg1 for handler */
22748+ testb $3, CS(%rsi)
22749 je 1f
22750 SWAPGS
22751 /*
22752@@ -483,6 +896,18 @@ ENDPROC(native_usergs_sysret64)
22753 0x06 /* DW_OP_deref */, \
22754 0x08 /* DW_OP_const1u */, SS+8-RBP, \
22755 0x22 /* DW_OP_plus */
22756+
22757+#ifdef CONFIG_PAX_MEMORY_UDEREF
22758+ testb $3, CS(%rdi)
22759+ jnz 1f
22760+ pax_enter_kernel
22761+ jmp 2f
22762+1: pax_enter_kernel_user
22763+2:
22764+#else
22765+ pax_enter_kernel
22766+#endif
22767+
22768 /* We entered an interrupt context - irqs are off: */
22769 TRACE_IRQS_OFF
22770 .endm
22771@@ -514,9 +939,52 @@ ENTRY(save_paranoid)
22772 js 1f /* negative -> in kernel */
22773 SWAPGS
22774 xorl %ebx,%ebx
22775-1: ret
22776+1:
22777+#ifdef CONFIG_PAX_MEMORY_UDEREF
22778+ testb $3, CS+8(%rsp)
22779+ jnz 1f
22780+ pax_enter_kernel
22781+ jmp 2f
22782+1: pax_enter_kernel_user
22783+2:
22784+#else
22785+ pax_enter_kernel
22786+#endif
22787+ pax_force_retaddr
22788+ ret
22789 CFI_ENDPROC
22790-END(save_paranoid)
22791+ENDPROC(save_paranoid)
22792+
22793+ENTRY(save_paranoid_nmi)
22794+ XCPT_FRAME 1 RDI+8
22795+ cld
22796+ movq_cfi rdi, RDI+8
22797+ movq_cfi rsi, RSI+8
22798+ movq_cfi rdx, RDX+8
22799+ movq_cfi rcx, RCX+8
22800+ movq_cfi rax, RAX+8
22801+ movq_cfi r8, R8+8
22802+ movq_cfi r9, R9+8
22803+ movq_cfi r10, R10+8
22804+ movq_cfi r11, R11+8
22805+ movq_cfi rbx, RBX+8
22806+ movq_cfi rbp, RBP+8
22807+ movq_cfi r12, R12+8
22808+ movq_cfi r13, R13+8
22809+ movq_cfi r14, R14+8
22810+ movq_cfi r15, R15+8
22811+ movl $1,%ebx
22812+ movl $MSR_GS_BASE,%ecx
22813+ rdmsr
22814+ testl %edx,%edx
22815+ js 1f /* negative -> in kernel */
22816+ SWAPGS
22817+ xorl %ebx,%ebx
22818+1: pax_enter_kernel_nmi
22819+ pax_force_retaddr
22820+ ret
22821+ CFI_ENDPROC
22822+ENDPROC(save_paranoid_nmi)
22823 .popsection
22824
22825 /*
22826@@ -538,7 +1006,7 @@ ENTRY(ret_from_fork)
22827
22828 RESTORE_REST
22829
22830- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
22831+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
22832 jz 1f
22833
22834 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
22835@@ -548,15 +1016,13 @@ ENTRY(ret_from_fork)
22836 jmp ret_from_sys_call # go to the SYSRET fastpath
22837
22838 1:
22839- subq $REST_SKIP, %rsp # leave space for volatiles
22840- CFI_ADJUST_CFA_OFFSET REST_SKIP
22841 movq %rbp, %rdi
22842 call *%rbx
22843 movl $0, RAX(%rsp)
22844 RESTORE_REST
22845 jmp int_ret_from_sys_call
22846 CFI_ENDPROC
22847-END(ret_from_fork)
22848+ENDPROC(ret_from_fork)
22849
22850 /*
22851 * System call entry. Up to 6 arguments in registers are supported.
22852@@ -593,7 +1059,7 @@ END(ret_from_fork)
22853 ENTRY(system_call)
22854 CFI_STARTPROC simple
22855 CFI_SIGNAL_FRAME
22856- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
22857+ CFI_DEF_CFA rsp,0
22858 CFI_REGISTER rip,rcx
22859 /*CFI_REGISTER rflags,r11*/
22860 SWAPGS_UNSAFE_STACK
22861@@ -606,16 +1072,23 @@ GLOBAL(system_call_after_swapgs)
22862
22863 movq %rsp,PER_CPU_VAR(old_rsp)
22864 movq PER_CPU_VAR(kernel_stack),%rsp
22865+ SAVE_ARGS 8*6,0
22866+ pax_enter_kernel_user
22867+
22868+#ifdef CONFIG_PAX_RANDKSTACK
22869+ pax_erase_kstack
22870+#endif
22871+
22872 /*
22873 * No need to follow this irqs off/on section - it's straight
22874 * and short:
22875 */
22876 ENABLE_INTERRUPTS(CLBR_NONE)
22877- SAVE_ARGS 8,0
22878 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
22879 movq %rcx,RIP-ARGOFFSET(%rsp)
22880 CFI_REL_OFFSET rip,RIP-ARGOFFSET
22881- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
22882+ GET_THREAD_INFO(%rcx)
22883+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
22884 jnz tracesys
22885 system_call_fastpath:
22886 #if __SYSCALL_MASK == ~0
22887@@ -639,10 +1112,13 @@ sysret_check:
22888 LOCKDEP_SYS_EXIT
22889 DISABLE_INTERRUPTS(CLBR_NONE)
22890 TRACE_IRQS_OFF
22891- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
22892+ GET_THREAD_INFO(%rcx)
22893+ movl TI_flags(%rcx),%edx
22894 andl %edi,%edx
22895 jnz sysret_careful
22896 CFI_REMEMBER_STATE
22897+ pax_exit_kernel_user
22898+ pax_erase_kstack
22899 /*
22900 * sysretq will re-enable interrupts:
22901 */
22902@@ -701,6 +1177,9 @@ auditsys:
22903 movq %rax,%rsi /* 2nd arg: syscall number */
22904 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
22905 call __audit_syscall_entry
22906+
22907+ pax_erase_kstack
22908+
22909 LOAD_ARGS 0 /* reload call-clobbered registers */
22910 jmp system_call_fastpath
22911
22912@@ -722,7 +1201,7 @@ sysret_audit:
22913 /* Do syscall tracing */
22914 tracesys:
22915 #ifdef CONFIG_AUDITSYSCALL
22916- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
22917+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
22918 jz auditsys
22919 #endif
22920 SAVE_REST
22921@@ -730,12 +1209,15 @@ tracesys:
22922 FIXUP_TOP_OF_STACK %rdi
22923 movq %rsp,%rdi
22924 call syscall_trace_enter
22925+
22926+ pax_erase_kstack
22927+
22928 /*
22929 * Reload arg registers from stack in case ptrace changed them.
22930 * We don't reload %rax because syscall_trace_enter() returned
22931 * the value it wants us to use in the table lookup.
22932 */
22933- LOAD_ARGS ARGOFFSET, 1
22934+ LOAD_ARGS 1
22935 RESTORE_REST
22936 #if __SYSCALL_MASK == ~0
22937 cmpq $__NR_syscall_max,%rax
22938@@ -765,7 +1247,9 @@ GLOBAL(int_with_check)
22939 andl %edi,%edx
22940 jnz int_careful
22941 andl $~TS_COMPAT,TI_status(%rcx)
22942- jmp retint_swapgs
22943+ pax_exit_kernel_user
22944+ pax_erase_kstack
22945+ jmp retint_swapgs_pax
22946
22947 /* Either reschedule or signal or syscall exit tracking needed. */
22948 /* First do a reschedule test. */
22949@@ -811,7 +1295,7 @@ int_restore_rest:
22950 TRACE_IRQS_OFF
22951 jmp int_with_check
22952 CFI_ENDPROC
22953-END(system_call)
22954+ENDPROC(system_call)
22955
22956 .macro FORK_LIKE func
22957 ENTRY(stub_\func)
22958@@ -824,9 +1308,10 @@ ENTRY(stub_\func)
22959 DEFAULT_FRAME 0 8 /* offset 8: return address */
22960 call sys_\func
22961 RESTORE_TOP_OF_STACK %r11, 8
22962- ret $REST_SKIP /* pop extended registers */
22963+ pax_force_retaddr
22964+ ret
22965 CFI_ENDPROC
22966-END(stub_\func)
22967+ENDPROC(stub_\func)
22968 .endm
22969
22970 .macro FIXED_FRAME label,func
22971@@ -836,9 +1321,10 @@ ENTRY(\label)
22972 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
22973 call \func
22974 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
22975+ pax_force_retaddr
22976 ret
22977 CFI_ENDPROC
22978-END(\label)
22979+ENDPROC(\label)
22980 .endm
22981
22982 FORK_LIKE clone
22983@@ -846,19 +1332,6 @@ END(\label)
22984 FORK_LIKE vfork
22985 FIXED_FRAME stub_iopl, sys_iopl
22986
22987-ENTRY(ptregscall_common)
22988- DEFAULT_FRAME 1 8 /* offset 8: return address */
22989- RESTORE_TOP_OF_STACK %r11, 8
22990- movq_cfi_restore R15+8, r15
22991- movq_cfi_restore R14+8, r14
22992- movq_cfi_restore R13+8, r13
22993- movq_cfi_restore R12+8, r12
22994- movq_cfi_restore RBP+8, rbp
22995- movq_cfi_restore RBX+8, rbx
22996- ret $REST_SKIP /* pop extended registers */
22997- CFI_ENDPROC
22998-END(ptregscall_common)
22999-
23000 ENTRY(stub_execve)
23001 CFI_STARTPROC
23002 addq $8, %rsp
23003@@ -870,7 +1343,7 @@ ENTRY(stub_execve)
23004 RESTORE_REST
23005 jmp int_ret_from_sys_call
23006 CFI_ENDPROC
23007-END(stub_execve)
23008+ENDPROC(stub_execve)
23009
23010 /*
23011 * sigreturn is special because it needs to restore all registers on return.
23012@@ -887,7 +1360,7 @@ ENTRY(stub_rt_sigreturn)
23013 RESTORE_REST
23014 jmp int_ret_from_sys_call
23015 CFI_ENDPROC
23016-END(stub_rt_sigreturn)
23017+ENDPROC(stub_rt_sigreturn)
23018
23019 #ifdef CONFIG_X86_X32_ABI
23020 ENTRY(stub_x32_rt_sigreturn)
23021@@ -901,7 +1374,7 @@ ENTRY(stub_x32_rt_sigreturn)
23022 RESTORE_REST
23023 jmp int_ret_from_sys_call
23024 CFI_ENDPROC
23025-END(stub_x32_rt_sigreturn)
23026+ENDPROC(stub_x32_rt_sigreturn)
23027
23028 ENTRY(stub_x32_execve)
23029 CFI_STARTPROC
23030@@ -915,7 +1388,7 @@ ENTRY(stub_x32_execve)
23031 RESTORE_REST
23032 jmp int_ret_from_sys_call
23033 CFI_ENDPROC
23034-END(stub_x32_execve)
23035+ENDPROC(stub_x32_execve)
23036
23037 #endif
23038
23039@@ -952,7 +1425,7 @@ vector=vector+1
23040 2: jmp common_interrupt
23041 .endr
23042 CFI_ENDPROC
23043-END(irq_entries_start)
23044+ENDPROC(irq_entries_start)
23045
23046 .previous
23047 END(interrupt)
23048@@ -969,8 +1442,8 @@ END(interrupt)
23049 /* 0(%rsp): ~(interrupt number) */
23050 .macro interrupt func
23051 /* reserve pt_regs for scratch regs and rbp */
23052- subq $ORIG_RAX-RBP, %rsp
23053- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
23054+ subq $ORIG_RAX, %rsp
23055+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
23056 SAVE_ARGS_IRQ
23057 call \func
23058 .endm
23059@@ -997,14 +1470,14 @@ ret_from_intr:
23060
23061 /* Restore saved previous stack */
23062 popq %rsi
23063- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
23064- leaq ARGOFFSET-RBP(%rsi), %rsp
23065+ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
23066+ movq %rsi, %rsp
23067 CFI_DEF_CFA_REGISTER rsp
23068- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
23069+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
23070
23071 exit_intr:
23072 GET_THREAD_INFO(%rcx)
23073- testl $3,CS-ARGOFFSET(%rsp)
23074+ testb $3,CS-ARGOFFSET(%rsp)
23075 je retint_kernel
23076
23077 /* Interrupt came from user space */
23078@@ -1026,12 +1499,16 @@ retint_swapgs: /* return to user-space */
23079 * The iretq could re-enable interrupts:
23080 */
23081 DISABLE_INTERRUPTS(CLBR_ANY)
23082+ pax_exit_kernel_user
23083+retint_swapgs_pax:
23084 TRACE_IRQS_IRETQ
23085 SWAPGS
23086 jmp restore_args
23087
23088 retint_restore_args: /* return to kernel space */
23089 DISABLE_INTERRUPTS(CLBR_ANY)
23090+ pax_exit_kernel
23091+ pax_force_retaddr (RIP-ARGOFFSET)
23092 /*
23093 * The iretq could re-enable interrupts:
23094 */
23095@@ -1112,7 +1589,7 @@ ENTRY(retint_kernel)
23096 #endif
23097
23098 CFI_ENDPROC
23099-END(common_interrupt)
23100+ENDPROC(common_interrupt)
23101 /*
23102 * End of kprobes section
23103 */
23104@@ -1130,7 +1607,7 @@ ENTRY(\sym)
23105 interrupt \do_sym
23106 jmp ret_from_intr
23107 CFI_ENDPROC
23108-END(\sym)
23109+ENDPROC(\sym)
23110 .endm
23111
23112 #ifdef CONFIG_TRACING
23113@@ -1218,7 +1695,7 @@ ENTRY(\sym)
23114 call \do_sym
23115 jmp error_exit /* %ebx: no swapgs flag */
23116 CFI_ENDPROC
23117-END(\sym)
23118+ENDPROC(\sym)
23119 .endm
23120
23121 .macro paranoidzeroentry sym do_sym
23122@@ -1236,10 +1713,10 @@ ENTRY(\sym)
23123 call \do_sym
23124 jmp paranoid_exit /* %ebx: no swapgs flag */
23125 CFI_ENDPROC
23126-END(\sym)
23127+ENDPROC(\sym)
23128 .endm
23129
23130-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
23131+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
23132 .macro paranoidzeroentry_ist sym do_sym ist
23133 ENTRY(\sym)
23134 INTR_FRAME
23135@@ -1252,12 +1729,18 @@ ENTRY(\sym)
23136 TRACE_IRQS_OFF_DEBUG
23137 movq %rsp,%rdi /* pt_regs pointer */
23138 xorl %esi,%esi /* no error code */
23139+#ifdef CONFIG_SMP
23140+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
23141+ lea init_tss(%r13), %r13
23142+#else
23143+ lea init_tss(%rip), %r13
23144+#endif
23145 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
23146 call \do_sym
23147 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
23148 jmp paranoid_exit /* %ebx: no swapgs flag */
23149 CFI_ENDPROC
23150-END(\sym)
23151+ENDPROC(\sym)
23152 .endm
23153
23154 .macro errorentry sym do_sym
23155@@ -1275,7 +1758,7 @@ ENTRY(\sym)
23156 call \do_sym
23157 jmp error_exit /* %ebx: no swapgs flag */
23158 CFI_ENDPROC
23159-END(\sym)
23160+ENDPROC(\sym)
23161 .endm
23162
23163 #ifdef CONFIG_TRACING
23164@@ -1306,7 +1789,7 @@ ENTRY(\sym)
23165 call \do_sym
23166 jmp paranoid_exit /* %ebx: no swapgs flag */
23167 CFI_ENDPROC
23168-END(\sym)
23169+ENDPROC(\sym)
23170 .endm
23171
23172 zeroentry divide_error do_divide_error
23173@@ -1336,9 +1819,10 @@ gs_change:
23174 2: mfence /* workaround */
23175 SWAPGS
23176 popfq_cfi
23177+ pax_force_retaddr
23178 ret
23179 CFI_ENDPROC
23180-END(native_load_gs_index)
23181+ENDPROC(native_load_gs_index)
23182
23183 _ASM_EXTABLE(gs_change,bad_gs)
23184 .section .fixup,"ax"
23185@@ -1366,9 +1850,10 @@ ENTRY(do_softirq_own_stack)
23186 CFI_DEF_CFA_REGISTER rsp
23187 CFI_ADJUST_CFA_OFFSET -8
23188 decl PER_CPU_VAR(irq_count)
23189+ pax_force_retaddr
23190 ret
23191 CFI_ENDPROC
23192-END(do_softirq_own_stack)
23193+ENDPROC(do_softirq_own_stack)
23194
23195 #ifdef CONFIG_XEN
23196 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
23197@@ -1406,7 +1891,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
23198 decl PER_CPU_VAR(irq_count)
23199 jmp error_exit
23200 CFI_ENDPROC
23201-END(xen_do_hypervisor_callback)
23202+ENDPROC(xen_do_hypervisor_callback)
23203
23204 /*
23205 * Hypervisor uses this for application faults while it executes.
23206@@ -1465,7 +1950,7 @@ ENTRY(xen_failsafe_callback)
23207 SAVE_ALL
23208 jmp error_exit
23209 CFI_ENDPROC
23210-END(xen_failsafe_callback)
23211+ENDPROC(xen_failsafe_callback)
23212
23213 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
23214 xen_hvm_callback_vector xen_evtchn_do_upcall
23215@@ -1517,18 +2002,33 @@ ENTRY(paranoid_exit)
23216 DEFAULT_FRAME
23217 DISABLE_INTERRUPTS(CLBR_NONE)
23218 TRACE_IRQS_OFF_DEBUG
23219- testl %ebx,%ebx /* swapgs needed? */
23220+ testl $1,%ebx /* swapgs needed? */
23221 jnz paranoid_restore
23222- testl $3,CS(%rsp)
23223+ testb $3,CS(%rsp)
23224 jnz paranoid_userspace
23225+#ifdef CONFIG_PAX_MEMORY_UDEREF
23226+ pax_exit_kernel
23227+ TRACE_IRQS_IRETQ 0
23228+ SWAPGS_UNSAFE_STACK
23229+ RESTORE_ALL 8
23230+ pax_force_retaddr_bts
23231+ jmp irq_return
23232+#endif
23233 paranoid_swapgs:
23234+#ifdef CONFIG_PAX_MEMORY_UDEREF
23235+ pax_exit_kernel_user
23236+#else
23237+ pax_exit_kernel
23238+#endif
23239 TRACE_IRQS_IRETQ 0
23240 SWAPGS_UNSAFE_STACK
23241 RESTORE_ALL 8
23242 jmp irq_return
23243 paranoid_restore:
23244+ pax_exit_kernel
23245 TRACE_IRQS_IRETQ_DEBUG 0
23246 RESTORE_ALL 8
23247+ pax_force_retaddr_bts
23248 jmp irq_return
23249 paranoid_userspace:
23250 GET_THREAD_INFO(%rcx)
23251@@ -1557,7 +2057,7 @@ paranoid_schedule:
23252 TRACE_IRQS_OFF
23253 jmp paranoid_userspace
23254 CFI_ENDPROC
23255-END(paranoid_exit)
23256+ENDPROC(paranoid_exit)
23257
23258 /*
23259 * Exception entry point. This expects an error code/orig_rax on the stack.
23260@@ -1584,12 +2084,23 @@ ENTRY(error_entry)
23261 movq_cfi r14, R14+8
23262 movq_cfi r15, R15+8
23263 xorl %ebx,%ebx
23264- testl $3,CS+8(%rsp)
23265+ testb $3,CS+8(%rsp)
23266 je error_kernelspace
23267 error_swapgs:
23268 SWAPGS
23269 error_sti:
23270+#ifdef CONFIG_PAX_MEMORY_UDEREF
23271+ testb $3, CS+8(%rsp)
23272+ jnz 1f
23273+ pax_enter_kernel
23274+ jmp 2f
23275+1: pax_enter_kernel_user
23276+2:
23277+#else
23278+ pax_enter_kernel
23279+#endif
23280 TRACE_IRQS_OFF
23281+ pax_force_retaddr
23282 ret
23283
23284 /*
23285@@ -1616,7 +2127,7 @@ bstep_iret:
23286 movq %rcx,RIP+8(%rsp)
23287 jmp error_swapgs
23288 CFI_ENDPROC
23289-END(error_entry)
23290+ENDPROC(error_entry)
23291
23292
23293 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
23294@@ -1627,7 +2138,7 @@ ENTRY(error_exit)
23295 DISABLE_INTERRUPTS(CLBR_NONE)
23296 TRACE_IRQS_OFF
23297 GET_THREAD_INFO(%rcx)
23298- testl %eax,%eax
23299+ testl $1,%eax
23300 jne retint_kernel
23301 LOCKDEP_SYS_EXIT_IRQ
23302 movl TI_flags(%rcx),%edx
23303@@ -1636,7 +2147,7 @@ ENTRY(error_exit)
23304 jnz retint_careful
23305 jmp retint_swapgs
23306 CFI_ENDPROC
23307-END(error_exit)
23308+ENDPROC(error_exit)
23309
23310 /*
23311 * Test if a given stack is an NMI stack or not.
23312@@ -1694,9 +2205,11 @@ ENTRY(nmi)
23313 * If %cs was not the kernel segment, then the NMI triggered in user
23314 * space, which means it is definitely not nested.
23315 */
23316+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
23317+ je 1f
23318 cmpl $__KERNEL_CS, 16(%rsp)
23319 jne first_nmi
23320-
23321+1:
23322 /*
23323 * Check the special variable on the stack to see if NMIs are
23324 * executing.
23325@@ -1730,8 +2243,7 @@ nested_nmi:
23326
23327 1:
23328 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
23329- leaq -1*8(%rsp), %rdx
23330- movq %rdx, %rsp
23331+ subq $8, %rsp
23332 CFI_ADJUST_CFA_OFFSET 1*8
23333 leaq -10*8(%rsp), %rdx
23334 pushq_cfi $__KERNEL_DS
23335@@ -1749,6 +2261,7 @@ nested_nmi_out:
23336 CFI_RESTORE rdx
23337
23338 /* No need to check faults here */
23339+# pax_force_retaddr_bts
23340 INTERRUPT_RETURN
23341
23342 CFI_RESTORE_STATE
23343@@ -1845,13 +2358,13 @@ end_repeat_nmi:
23344 subq $ORIG_RAX-R15, %rsp
23345 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
23346 /*
23347- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
23348+ * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
23349 * as we should not be calling schedule in NMI context.
23350 * Even with normal interrupts enabled. An NMI should not be
23351 * setting NEED_RESCHED or anything that normal interrupts and
23352 * exceptions might do.
23353 */
23354- call save_paranoid
23355+ call save_paranoid_nmi
23356 DEFAULT_FRAME 0
23357
23358 /*
23359@@ -1861,9 +2374,9 @@ end_repeat_nmi:
23360 * NMI itself takes a page fault, the page fault that was preempted
23361 * will read the information from the NMI page fault and not the
23362 * origin fault. Save it off and restore it if it changes.
23363- * Use the r12 callee-saved register.
23364+ * Use the r13 callee-saved register.
23365 */
23366- movq %cr2, %r12
23367+ movq %cr2, %r13
23368
23369 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
23370 movq %rsp,%rdi
23371@@ -1872,31 +2385,36 @@ end_repeat_nmi:
23372
23373 /* Did the NMI take a page fault? Restore cr2 if it did */
23374 movq %cr2, %rcx
23375- cmpq %rcx, %r12
23376+ cmpq %rcx, %r13
23377 je 1f
23378- movq %r12, %cr2
23379+ movq %r13, %cr2
23380 1:
23381
23382- testl %ebx,%ebx /* swapgs needed? */
23383+ testl $1,%ebx /* swapgs needed? */
23384 jnz nmi_restore
23385 nmi_swapgs:
23386 SWAPGS_UNSAFE_STACK
23387 nmi_restore:
23388+ pax_exit_kernel_nmi
23389 /* Pop the extra iret frame at once */
23390 RESTORE_ALL 6*8
23391+ testb $3, 8(%rsp)
23392+ jnz 1f
23393+ pax_force_retaddr_bts
23394+1:
23395
23396 /* Clear the NMI executing stack variable */
23397 movq $0, 5*8(%rsp)
23398 jmp irq_return
23399 CFI_ENDPROC
23400-END(nmi)
23401+ENDPROC(nmi)
23402
23403 ENTRY(ignore_sysret)
23404 CFI_STARTPROC
23405 mov $-ENOSYS,%eax
23406 sysret
23407 CFI_ENDPROC
23408-END(ignore_sysret)
23409+ENDPROC(ignore_sysret)
23410
23411 /*
23412 * End of kprobes section
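
Two details worth pulling out of the NMI rework above: %ebx becomes a bit mask rather than a boolean (bit 0 = "swapgs not needed", and pax_enter_kernel_nmi sets bits 1/2 when it toggled CR0.WP or switched CR3), which is why every `testl %ebx,%ebx` is changed to `testl $1,%ebx`; and CR2 is preserved in %r13 across do_nmi, because a page fault taken inside the NMI would clobber the interrupted fault's address. The CR2 dance in C form — a self-contained simulation with stand-in accessors, structured exactly like the asm:

#include <stdio.h>

/* Userspace stand-ins for the kernel's CR2 accessors (simulation only). */
static unsigned long fake_cr2;
static unsigned long read_cr2(void) { return fake_cr2; }
static void write_cr2(unsigned long v) { fake_cr2 = v; }

/* Pretend the NMI handler itself takes a page fault and clobbers CR2. */
static void do_nmi(void) { fake_cr2 = 0xdeadbeef; }

int main(void)
{
    fake_cr2 = 0x7fff12340000UL;        /* interrupted #PF's fault address */
    unsigned long cr2 = read_cr2();     /* movq %cr2, %r13 */
    do_nmi();
    if (read_cr2() != cr2)              /* cmpq %rcx, %r13; je 1f */
        write_cr2(cr2);                 /* movq %r13, %cr2 */
    printf("cr2 = %#lx\n", read_cr2()); /* original address, restored */
    return 0;
}
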
23413diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
23414index d4bdd25..912664c 100644
23415--- a/arch/x86/kernel/ftrace.c
23416+++ b/arch/x86/kernel/ftrace.c
23417@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
23418 {
23419 unsigned char replaced[MCOUNT_INSN_SIZE];
23420
23421+ ip = ktla_ktva(ip);
23422+
23423 /*
23424 * Note: Due to modules and __init, code can
23425 * disappear and change, we need to protect against faulting
23426@@ -227,7 +229,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
23427 unsigned char old[MCOUNT_INSN_SIZE], *new;
23428 int ret;
23429
23430- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
23431+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
23432 new = ftrace_call_replace(ip, (unsigned long)func);
23433
23434 /* See comment above by declaration of modifying_ftrace_code */
23435@@ -238,7 +240,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
23436 /* Also update the regs callback function */
23437 if (!ret) {
23438 ip = (unsigned long)(&ftrace_regs_call);
23439- memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
23440+ memcpy(old, ktla_ktva((void *)&ftrace_regs_call), MCOUNT_INSN_SIZE);
23441 new = ftrace_call_replace(ip, (unsigned long)func);
23442 ret = ftrace_modify_code(ip, old, new);
23443 }
23444@@ -291,7 +293,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
23445 * kernel identity mapping to modify code.
23446 */
23447 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
23448- ip = (unsigned long)__va(__pa_symbol(ip));
23449+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
23450
23451 return probe_kernel_write((void *)ip, val, size);
23452 }
23453@@ -301,7 +303,7 @@ static int add_break(unsigned long ip, const char *old)
23454 unsigned char replaced[MCOUNT_INSN_SIZE];
23455 unsigned char brk = BREAKPOINT_INSTRUCTION;
23456
23457- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
23458+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
23459 return -EFAULT;
23460
23461 /* Make sure it is what we expect it to be */
23462@@ -649,7 +651,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
23463 return ret;
23464
23465 fail_update:
23466- probe_kernel_write((void *)ip, &old_code[0], 1);
23467+ probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
23468 goto out;
23469 }
23470
23471@@ -682,6 +684,8 @@ static int ftrace_mod_jmp(unsigned long ip,
23472 {
23473 unsigned char code[MCOUNT_INSN_SIZE];
23474
23475+ ip = ktla_ktva(ip);
23476+
23477 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
23478 return -EFAULT;
23479
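
The ftrace.c changes all funnel through ktla_ktva(): under KERNEXEC on i386 the kernel text is mapped read-only at its link-time linear address, so code reads and patches have to go through a second, offset alias. Conceptually it looks like the following — a paraphrase of the PaX helpers, not the verbatim definition, and it collapses to the identity when KERNEXEC is off:

/* Paraphrase of the PaX helpers: convert a kernel text linear address
 * to/from the alias it can be modified through. Identity otherwise. */
#ifdef CONFIG_PAX_KERNEXEC
#define ktla_ktva(addr) ((addr) + __KERNEL_TEXT_OFFSET)
#define ktva_ktla(addr) ((addr) - __KERNEL_TEXT_OFFSET)
#else
#define ktla_ktva(addr) (addr)
#define ktva_ktla(addr) (addr)
#endif
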
23480diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
23481index 85126cc..1bbce17 100644
23482--- a/arch/x86/kernel/head64.c
23483+++ b/arch/x86/kernel/head64.c
23484@@ -67,12 +67,12 @@ again:
23485 pgd = *pgd_p;
23486
23487 /*
23488- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
23489- * critical -- __PAGE_OFFSET would point us back into the dynamic
23490+ * The use of __early_va rather than __va here is critical:
23491+ * __va would point us back into the dynamic
23492 * range and we might end up looping forever...
23493 */
23494 if (pgd)
23495- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
23496+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
23497 else {
23498 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
23499 reset_early_page_tables();
23500@@ -82,13 +82,13 @@ again:
23501 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
23502 for (i = 0; i < PTRS_PER_PUD; i++)
23503 pud_p[i] = 0;
23504- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
23505+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
23506 }
23507 pud_p += pud_index(address);
23508 pud = *pud_p;
23509
23510 if (pud)
23511- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
23512+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
23513 else {
23514 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
23515 reset_early_page_tables();
23516@@ -98,7 +98,7 @@ again:
23517 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
23518 for (i = 0; i < PTRS_PER_PMD; i++)
23519 pmd_p[i] = 0;
23520- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
23521+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
23522 }
23523 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
23524 pmd_p[pmd_index(address)] = pmd;
23525@@ -175,7 +175,6 @@ asmlinkage void __init x86_64_start_kernel(char * real_mode_data)
23526 if (console_loglevel == 10)
23527 early_printk("Kernel alive\n");
23528
23529- clear_page(init_level4_pgt);
23530 /* set init_level4_pgt kernel high mapping */
23531 init_level4_pgt[511] = early_level4_pgt[511];
23532
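
The head64.c hunks are a refactor plus one removal: the open-coded `phys + __START_KERNEL_map - phys_base` conversions become __early_va(), and clear_page(init_level4_pgt) is dropped — presumably because this patch prepopulates init_level4_pgt elsewhere, so wiping it here would throw those entries away. Given the expressions the hunk replaces, __early_va() must be equivalent to:

/* Reconstructed from the expressions this hunk replaces: map an early
 * physical address into the kernel's high virtual mapping. */
#define __early_va(x) \
    ((void *)((unsigned long)(x) + __START_KERNEL_map - phys_base))
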
23533diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
23534index 81ba276..30c5411 100644
23535--- a/arch/x86/kernel/head_32.S
23536+++ b/arch/x86/kernel/head_32.S
23537@@ -26,6 +26,12 @@
23538 /* Physical address */
23539 #define pa(X) ((X) - __PAGE_OFFSET)
23540
23541+#ifdef CONFIG_PAX_KERNEXEC
23542+#define ta(X) (X)
23543+#else
23544+#define ta(X) ((X) - __PAGE_OFFSET)
23545+#endif
23546+
23547 /*
23548 * References to members of the new_cpu_data structure.
23549 */
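
The new ta() macro sits alongside the existing pa(): pa(X) strips __PAGE_OFFSET to get the physical address used before paging is on, while ta(X) yields the jump-target form — under KERNEXEC the targets are used as-is (the code segment base, adjusted in the startup_32 hunk below, apparently makes up the difference), otherwise the usual virtual-to-physical adjustment applies. A worked example with the default i386 split (all values assumed):

#include <stdio.h>

#define __PAGE_OFFSET 0xC0000000UL   /* assumed default 3G/1G split */
#define pa(X) ((X) - __PAGE_OFFSET)

int main(void)
{
    unsigned long default_entry = 0xC1000100UL;  /* made-up symbol address */
    printf("pa(default_entry) = %#lx\n", pa(default_entry)); /* 0x1000100 */
    /* ta(default_entry) is 0xC1000100 under KERNEXEC, 0x1000100 otherwise */
    return 0;
}
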
23550@@ -55,11 +61,7 @@
23551 * and smaller than max_low_pfn, otherwise we will waste some page table entries
23552 */
23553
23554-#if PTRS_PER_PMD > 1
23555-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
23556-#else
23557-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
23558-#endif
23559+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
23560
23561 /* Number of possible pages in the lowmem region */
23562 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
23563@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
23564 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
23565
23566 /*
23567+ * Real beginning of normal "text" segment
23568+ */
23569+ENTRY(stext)
23570+ENTRY(_stext)
23571+
23572+/*
23573 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
23574 * %esi points to the real-mode code as a 32-bit pointer.
23575 * CS and DS must be 4 GB flat segments, but we don't depend on
23576@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
23577 * can.
23578 */
23579 __HEAD
23580+
23581+#ifdef CONFIG_PAX_KERNEXEC
23582+ jmp startup_32
23583+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
23584+.fill PAGE_SIZE-5,1,0xcc
23585+#endif
23586+
23587 ENTRY(startup_32)
23588 movl pa(stack_start),%ecx
23589
23590@@ -106,6 +121,59 @@ ENTRY(startup_32)
23591 2:
23592 leal -__PAGE_OFFSET(%ecx),%esp
23593
23594+#ifdef CONFIG_SMP
23595+ movl $pa(cpu_gdt_table),%edi
23596+ movl $__per_cpu_load,%eax
23597+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
23598+ rorl $16,%eax
23599+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
23600+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
23601+ movl $__per_cpu_end - 1,%eax
23602+ subl $__per_cpu_start,%eax
23603+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
23604+#endif
23605+
23606+#ifdef CONFIG_PAX_MEMORY_UDEREF
23607+ movl $NR_CPUS,%ecx
23608+ movl $pa(cpu_gdt_table),%edi
23609+1:
23610+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
23611+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
23612+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
23613+ addl $PAGE_SIZE_asm,%edi
23614+ loop 1b
23615+#endif
23616+
23617+#ifdef CONFIG_PAX_KERNEXEC
23618+ movl $pa(boot_gdt),%edi
23619+ movl $__LOAD_PHYSICAL_ADDR,%eax
23620+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
23621+ rorl $16,%eax
23622+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
23623+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
23624+ rorl $16,%eax
23625+
23626+ ljmp $(__BOOT_CS),$1f
23627+1:
23628+
23629+ movl $NR_CPUS,%ecx
23630+ movl $pa(cpu_gdt_table),%edi
23631+ addl $__PAGE_OFFSET,%eax
23632+1:
23633+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
23634+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
23635+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
23636+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
23637+ rorl $16,%eax
23638+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
23639+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
23640+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
23641+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
23642+ rorl $16,%eax
23643+ addl $PAGE_SIZE_asm,%edi
23644+ loop 1b
23645+#endif
23646+
23647 /*
23648 * Clear BSS first so that there are no surprises...
23649 */
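
Our reading of the UDEREF loop above: it rewrites the high dword of the kernel and user data/code descriptors in every per-CPU GDT, where `(((__PAGE_OFFSET-1) & 0xf0000000) >> 12)` drops the top nibble of the user/kernel boundary into the descriptor's limit[19:16] field. Assuming the low dword keeps limit 0xffff as in the stock descriptors, the user segments end up limited to user space, while the kernel data segment (type 0x7, expand-down) covers only the region above the boundary. Decoding that constant in C (descriptor layout per the x86 SDM):

#include <stdio.h>

#define __PAGE_OFFSET 0xC0000000UL   /* assumed default 3G/1G split */

int main(void)
{
    unsigned long hi = ((((__PAGE_OFFSET - 1) & 0xf0000000UL) >> 12)
                        | 0x00c09700UL);          /* KERNEL_DS high dword */
    unsigned long limit_hi = (hi >> 16) & 0xf;    /* limit[19:16] */
    unsigned long type = (hi >> 8) & 0xf;         /* 0x7: expand-down data */

    /* With limit[15:0] = 0xffff and 4K granularity the limit is 0xBFFFF
     * pages; expand-down means valid offsets lie ABOVE it: kernel space. */
    printf("high dword %#lx, limit[19:16]=%#lx, type=%#lx\n",
           hi, limit_hi, type);
    return 0;
}
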
23650@@ -201,8 +269,11 @@ ENTRY(startup_32)
23651 movl %eax, pa(max_pfn_mapped)
23652
23653 /* Do early initialization of the fixmap area */
23654- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
23655- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
23656+#ifdef CONFIG_COMPAT_VDSO
23657+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
23658+#else
23659+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
23660+#endif
23661 #else /* Not PAE */
23662
23663 page_pde_offset = (__PAGE_OFFSET >> 20);
23664@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
23665 movl %eax, pa(max_pfn_mapped)
23666
23667 /* Do early initialization of the fixmap area */
23668- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
23669- movl %eax,pa(initial_page_table+0xffc)
23670+#ifdef CONFIG_COMPAT_VDSO
23671+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
23672+#else
23673+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
23674+#endif
23675 #endif
23676
23677 #ifdef CONFIG_PARAVIRT
23678@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
23679 cmpl $num_subarch_entries, %eax
23680 jae bad_subarch
23681
23682- movl pa(subarch_entries)(,%eax,4), %eax
23683- subl $__PAGE_OFFSET, %eax
23684- jmp *%eax
23685+ jmp *pa(subarch_entries)(,%eax,4)
23686
23687 bad_subarch:
23688 WEAK(lguest_entry)
23689@@ -261,10 +333,10 @@ WEAK(xen_entry)
23690 __INITDATA
23691
23692 subarch_entries:
23693- .long default_entry /* normal x86/PC */
23694- .long lguest_entry /* lguest hypervisor */
23695- .long xen_entry /* Xen hypervisor */
23696- .long default_entry /* Moorestown MID */
23697+ .long ta(default_entry) /* normal x86/PC */
23698+ .long ta(lguest_entry) /* lguest hypervisor */
23699+ .long ta(xen_entry) /* Xen hypervisor */
23700+ .long ta(default_entry) /* Moorestown MID */
23701 num_subarch_entries = (. - subarch_entries) / 4
23702 .previous
23703 #else
23704@@ -354,6 +426,7 @@ default_entry:
23705 movl pa(mmu_cr4_features),%eax
23706 movl %eax,%cr4
23707
23708+#ifdef CONFIG_X86_PAE
23709 testb $X86_CR4_PAE, %al # check if PAE is enabled
23710 jz enable_paging
23711
23712@@ -382,6 +455,9 @@ default_entry:
23713 /* Make changes effective */
23714 wrmsr
23715
23716+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
23717+#endif
23718+
23719 enable_paging:
23720
23721 /*
23722@@ -449,14 +525,20 @@ is486:
23723 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
23724 movl %eax,%ss # after changing gdt.
23725
23726- movl $(__USER_DS),%eax # DS/ES contains default USER segment
23727+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
23728 movl %eax,%ds
23729 movl %eax,%es
23730
23731 movl $(__KERNEL_PERCPU), %eax
23732 movl %eax,%fs # set this cpu's percpu
23733
23734+#ifdef CONFIG_CC_STACKPROTECTOR
23735 movl $(__KERNEL_STACK_CANARY),%eax
23736+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
23737+ movl $(__USER_DS),%eax
23738+#else
23739+ xorl %eax,%eax
23740+#endif
23741 movl %eax,%gs
23742
23743 xorl %eax,%eax # Clear LDT
23744@@ -512,8 +594,11 @@ setup_once:
23745 * relocation. Manually set base address in stack canary
23746 * segment descriptor.
23747 */
23748- movl $gdt_page,%eax
23749+ movl $cpu_gdt_table,%eax
23750 movl $stack_canary,%ecx
23751+#ifdef CONFIG_SMP
23752+ addl $__per_cpu_load,%ecx
23753+#endif
23754 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
23755 shrl $16, %ecx
23756 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
23757@@ -544,7 +629,7 @@ ENDPROC(early_idt_handlers)
23758 /* This is global to keep gas from relaxing the jumps */
23759 ENTRY(early_idt_handler)
23760 cld
23761- cmpl $2,%ss:early_recursion_flag
23762+ cmpl $1,%ss:early_recursion_flag
23763 je hlt_loop
23764 incl %ss:early_recursion_flag
23765
23766@@ -582,8 +667,8 @@ ENTRY(early_idt_handler)
23767 pushl (20+6*4)(%esp) /* trapno */
23768 pushl $fault_msg
23769 call printk
23770-#endif
23771 call dump_stack
23772+#endif
23773 hlt_loop:
23774 hlt
23775 jmp hlt_loop
23776@@ -602,8 +687,11 @@ ENDPROC(early_idt_handler)
23777 /* This is the default interrupt "handler" :-) */
23778 ALIGN
23779 ignore_int:
23780- cld
23781 #ifdef CONFIG_PRINTK
23782+ cmpl $2,%ss:early_recursion_flag
23783+ je hlt_loop
23784+ incl %ss:early_recursion_flag
23785+ cld
23786 pushl %eax
23787 pushl %ecx
23788 pushl %edx
23789@@ -612,9 +700,6 @@ ignore_int:
23790 movl $(__KERNEL_DS),%eax
23791 movl %eax,%ds
23792 movl %eax,%es
23793- cmpl $2,early_recursion_flag
23794- je hlt_loop
23795- incl early_recursion_flag
23796 pushl 16(%esp)
23797 pushl 24(%esp)
23798 pushl 32(%esp)
23799@@ -648,29 +733,34 @@ ENTRY(setup_once_ref)
23800 /*
23801 * BSS section
23802 */
23803-__PAGE_ALIGNED_BSS
23804- .align PAGE_SIZE
23805 #ifdef CONFIG_X86_PAE
23806+.section .initial_pg_pmd,"a",@progbits
23807 initial_pg_pmd:
23808 .fill 1024*KPMDS,4,0
23809 #else
23810+.section .initial_page_table,"a",@progbits
23811 ENTRY(initial_page_table)
23812 .fill 1024,4,0
23813 #endif
23814+.section .initial_pg_fixmap,"a",@progbits
23815 initial_pg_fixmap:
23816 .fill 1024,4,0
23817+.section .empty_zero_page,"a",@progbits
23818 ENTRY(empty_zero_page)
23819 .fill 4096,1,0
23820+.section .swapper_pg_dir,"a",@progbits
23821 ENTRY(swapper_pg_dir)
23822+#ifdef CONFIG_X86_PAE
23823+ .fill 4,8,0
23824+#else
23825 .fill 1024,4,0
23826+#endif
23827
23828 /*
23829 * This starts the data section.
23830 */
23831 #ifdef CONFIG_X86_PAE
23832-__PAGE_ALIGNED_DATA
23833- /* Page-aligned for the benefit of paravirt? */
23834- .align PAGE_SIZE
23835+.section .initial_page_table,"a",@progbits
23836 ENTRY(initial_page_table)
23837 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
23838 # if KPMDS == 3
23839@@ -689,12 +779,20 @@ ENTRY(initial_page_table)
23840 # error "Kernel PMDs should be 1, 2 or 3"
23841 # endif
23842 .align PAGE_SIZE /* needs to be page-sized too */
23843+
23844+#ifdef CONFIG_PAX_PER_CPU_PGD
23845+ENTRY(cpu_pgd)
23846+ .rept 2*NR_CPUS
23847+ .fill 4,8,0
23848+ .endr
23849+#endif
23850+
23851 #endif
23852
23853 .data
23854 .balign 4
23855 ENTRY(stack_start)
23856- .long init_thread_union+THREAD_SIZE
23857+ .long init_thread_union+THREAD_SIZE-8
23858
23859 __INITRODATA
23860 int_msg:
23861@@ -722,7 +820,7 @@ fault_msg:
23862 * segment size, and 32-bit linear address value:
23863 */
23864
23865- .data
23866+.section .rodata,"a",@progbits
23867 .globl boot_gdt_descr
23868 .globl idt_descr
23869
23870@@ -731,7 +829,7 @@ fault_msg:
23871 .word 0 # 32 bit align gdt_desc.address
23872 boot_gdt_descr:
23873 .word __BOOT_DS+7
23874- .long boot_gdt - __PAGE_OFFSET
23875+ .long pa(boot_gdt)
23876
23877 .word 0 # 32-bit align idt_desc.address
23878 idt_descr:
23879@@ -742,7 +840,7 @@ idt_descr:
23880 .word 0 # 32 bit align gdt_desc.address
23881 ENTRY(early_gdt_descr)
23882 .word GDT_ENTRIES*8-1
23883- .long gdt_page /* Overwritten for secondary CPUs */
23884+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
23885
23886 /*
23887 * The boot_gdt must mirror the equivalent in setup.S and is
23888@@ -751,5 +849,65 @@ ENTRY(early_gdt_descr)
23889 .align L1_CACHE_BYTES
23890 ENTRY(boot_gdt)
23891 .fill GDT_ENTRY_BOOT_CS,8,0
23892- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
23893- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
23894+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
23895+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
23896+
23897+ .align PAGE_SIZE_asm
23898+ENTRY(cpu_gdt_table)
23899+ .rept NR_CPUS
23900+ .quad 0x0000000000000000 /* NULL descriptor */
23901+ .quad 0x0000000000000000 /* 0x0b reserved */
23902+ .quad 0x0000000000000000 /* 0x13 reserved */
23903+ .quad 0x0000000000000000 /* 0x1b reserved */
23904+
23905+#ifdef CONFIG_PAX_KERNEXEC
23906+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
23907+#else
23908+ .quad 0x0000000000000000 /* 0x20 unused */
23909+#endif
23910+
23911+ .quad 0x0000000000000000 /* 0x28 unused */
23912+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
23913+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
23914+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
23915+ .quad 0x0000000000000000 /* 0x4b reserved */
23916+ .quad 0x0000000000000000 /* 0x53 reserved */
23917+ .quad 0x0000000000000000 /* 0x5b reserved */
23918+
23919+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
23920+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
23921+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
23922+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
23923+
23924+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
23925+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
23926+
23927+ /*
23928+ * Segments used for calling PnP BIOS have byte granularity.
23929+ * The code segments and data segments have fixed 64k limits,
23930+ * the transfer segment sizes are set at run time.
23931+ */
23932+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
23933+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
23934+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
23935+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
23936+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
23937+
23938+ /*
23939+ * The APM segments have byte granularity and their bases
23940+ * are set at run time. All have 64k limits.
23941+ */
23942+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
23943+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
23944+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
23945+
23946+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
23947+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
23948+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
23949+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
23950+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
23951+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
23952+
23953+ /* Be sure this is zeroed to avoid false validations in Xen */
23954+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
23955+ .endr
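
The head_32.S hunks above replace the linker-relocated gdt_page with a statically sized per-CPU cpu_gdt_table and change the boot descriptors from type 0x9a/0x92 to 0x9b/0x93, i.e. with the accessed bit already set, so the CPU never needs to write it back into a GDT that PaX keeps read-only. A minimal userspace sketch of the 8-byte descriptor encoding those .quad values use (field layout per the Intel SDM; struct and function names here are ad hoc):

    /* Standalone sketch (ad hoc names): decode an x86 segment descriptor
     * quad such as the 0x00cf9b000000ffff entries above. Field layout
     * follows the Intel SDM. */
    #include <stdint.h>
    #include <stdio.h>

    struct seg_desc {
            uint32_t base;
            uint32_t limit;  /* in bytes, after granularity scaling */
            uint8_t type;    /* access byte; bit 0 is the accessed bit */
            uint8_t flags;   /* G/D/L/AVL nibble */
    };

    static struct seg_desc decode(uint64_t d)
    {
            struct seg_desc s;
            uint32_t limit = (d & 0xffff) | ((d >> 32) & 0xf0000);

            s.flags = (d >> 52) & 0xf;
            s.limit = (s.flags & 0x8) ? (limit << 12) | 0xfff : limit; /* G bit */
            s.base = ((d >> 16) & 0xffffff) | ((d >> 56) & 0xff) << 24;
            s.type = (d >> 40) & 0xff;
            return s;
    }

    int main(void)
    {
            /* kernel 4GB code segment, accessed bit (type bit 0) pre-set */
            struct seg_desc s = decode(0x00cf9b000000ffffULL);

            printf("base=%#x limit=%#x type=%#x accessed=%u\n",
                   s.base, s.limit, s.type, s.type & 1u);
            return 0;
    }
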
23956diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
23957index e1aabdb..fee4fee 100644
23958--- a/arch/x86/kernel/head_64.S
23959+++ b/arch/x86/kernel/head_64.S
23960@@ -20,6 +20,8 @@
23961 #include <asm/processor-flags.h>
23962 #include <asm/percpu.h>
23963 #include <asm/nops.h>
23964+#include <asm/cpufeature.h>
23965+#include <asm/alternative-asm.h>
23966
23967 #ifdef CONFIG_PARAVIRT
23968 #include <asm/asm-offsets.h>
23969@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
23970 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
23971 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
23972 L3_START_KERNEL = pud_index(__START_KERNEL_map)
23973+L4_VMALLOC_START = pgd_index(VMALLOC_START)
23974+L3_VMALLOC_START = pud_index(VMALLOC_START)
23975+L4_VMALLOC_END = pgd_index(VMALLOC_END)
23976+L3_VMALLOC_END = pud_index(VMALLOC_END)
23977+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
23978+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
23979
23980 .text
23981 __HEAD
23982@@ -89,11 +97,24 @@ startup_64:
23983 * Fixup the physical addresses in the page table
23984 */
23985 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
23986+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
23987+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
23988+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
23989+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
23990+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
23991
23992- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
23993- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
23994+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
23995+#ifndef CONFIG_XEN
23996+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
23997+#endif
23998+
23999+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
24000+
24001+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
24002+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
24003
24004 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
24005+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
24006
24007 /*
24008 * Set up the identity mapping for the switchover. These
24009@@ -177,8 +198,8 @@ ENTRY(secondary_startup_64)
24010 movq $(init_level4_pgt - __START_KERNEL_map), %rax
24011 1:
24012
24013- /* Enable PAE mode and PGE */
24014- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
24015+ /* Enable PAE mode and PSE/PGE */
24016+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
24017 movq %rcx, %cr4
24018
24019 /* Setup early boot stage 4 level pagetables. */
24020@@ -199,10 +220,19 @@ ENTRY(secondary_startup_64)
24021 movl $MSR_EFER, %ecx
24022 rdmsr
24023 btsl $_EFER_SCE, %eax /* Enable System Call */
24024- btl $20,%edi /* No Execute supported? */
24025+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
24026 jnc 1f
24027 btsl $_EFER_NX, %eax
24028 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
24029+#ifndef CONFIG_EFI
24030+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
24031+#endif
24032+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
24033+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
24034+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
24035+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
24036+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
24037+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
24038 1: wrmsr /* Make changes effective */
24039
24040 /* Setup cr0 */
24041@@ -282,6 +312,7 @@ ENTRY(secondary_startup_64)
24042 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
24043 * address given in m16:64.
24044 */
24045+ pax_set_fptr_mask
24046 movq initial_code(%rip),%rax
24047 pushq $0 # fake return address to stop unwinder
24048 pushq $__KERNEL_CS # set correct cs
24049@@ -388,7 +419,7 @@ ENTRY(early_idt_handler)
24050 call dump_stack
24051 #ifdef CONFIG_KALLSYMS
24052 leaq early_idt_ripmsg(%rip),%rdi
24053- movq 40(%rsp),%rsi # %rip again
24054+ movq 88(%rsp),%rsi # %rip again
24055 call __print_symbol
24056 #endif
24057 #endif /* EARLY_PRINTK */
24058@@ -416,6 +447,7 @@ ENDPROC(early_idt_handler)
24059 early_recursion_flag:
24060 .long 0
24061
24062+ .section .rodata,"a",@progbits
24063 #ifdef CONFIG_EARLY_PRINTK
24064 early_idt_msg:
24065 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
24066@@ -443,29 +475,52 @@ NEXT_PAGE(early_level4_pgt)
24067 NEXT_PAGE(early_dynamic_pgts)
24068 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
24069
24070- .data
24071+ .section .rodata,"a",@progbits
24072
24073-#ifndef CONFIG_XEN
24074 NEXT_PAGE(init_level4_pgt)
24075- .fill 512,8,0
24076-#else
24077-NEXT_PAGE(init_level4_pgt)
24078- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24079 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
24080 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24081+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
24082+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
24083+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
24084+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
24085+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
24086+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24087 .org init_level4_pgt + L4_START_KERNEL*8, 0
24088 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
24089 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
24090
24091+#ifdef CONFIG_PAX_PER_CPU_PGD
24092+NEXT_PAGE(cpu_pgd)
24093+ .rept 2*NR_CPUS
24094+ .fill 512,8,0
24095+ .endr
24096+#endif
24097+
24098 NEXT_PAGE(level3_ident_pgt)
24099 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24100+#ifdef CONFIG_XEN
24101 .fill 511, 8, 0
24102+#else
24103+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
24104+ .fill 510,8,0
24105+#endif
24106+
24107+NEXT_PAGE(level3_vmalloc_start_pgt)
24108+ .fill 512,8,0
24109+
24110+NEXT_PAGE(level3_vmalloc_end_pgt)
24111+ .fill 512,8,0
24112+
24113+NEXT_PAGE(level3_vmemmap_pgt)
24114+ .fill L3_VMEMMAP_START,8,0
24115+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24116+
24117 NEXT_PAGE(level2_ident_pgt)
24118- /* Since I easily can, map the first 1G.
24119+ /* Since I easily can, map the first 2G.
24120 * Don't set NX because code runs from these pages.
24121 */
24122- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
24123-#endif
24124+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
24125
24126 NEXT_PAGE(level3_kernel_pgt)
24127 .fill L3_START_KERNEL,8,0
24128@@ -473,6 +528,9 @@ NEXT_PAGE(level3_kernel_pgt)
24129 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
24130 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
24131
24132+NEXT_PAGE(level2_vmemmap_pgt)
24133+ .fill 512,8,0
24134+
24135 NEXT_PAGE(level2_kernel_pgt)
24136 /*
24137 * 512 MB kernel mapping. We spend a full page on this pagetable
24138@@ -490,28 +548,64 @@ NEXT_PAGE(level2_kernel_pgt)
24139 NEXT_PAGE(level2_fixmap_pgt)
24140 .fill 506,8,0
24141 .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
24142- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
24143- .fill 5,8,0
24144+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
24145+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
24146+ .fill 4,8,0
24147
24148 NEXT_PAGE(level1_fixmap_pgt)
24149 .fill 512,8,0
24150
24151+NEXT_PAGE(level1_vsyscall_pgt)
24152+ .fill 512,8,0
24153+
24154 #undef PMDS
24155
24156- .data
24157+ .align PAGE_SIZE
24158+ENTRY(cpu_gdt_table)
24159+ .rept NR_CPUS
24160+ .quad 0x0000000000000000 /* NULL descriptor */
24161+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
24162+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
24163+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
24164+ .quad 0x00cffb000000ffff /* __USER32_CS */
24165+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
24166+ .quad 0x00affb000000ffff /* __USER_CS */
24167+
24168+#ifdef CONFIG_PAX_KERNEXEC
24169+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
24170+#else
24171+ .quad 0x0 /* unused */
24172+#endif
24173+
24174+ .quad 0,0 /* TSS */
24175+ .quad 0,0 /* LDT */
24176+ .quad 0,0,0 /* three TLS descriptors */
24177+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
24178+ /* asm/segment.h:GDT_ENTRIES must match this */
24179+
24180+#ifdef CONFIG_PAX_MEMORY_UDEREF
24181+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
24182+#else
24183+ .quad 0x0 /* unused */
24184+#endif
24185+
24186+ /* zero the remaining page */
24187+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
24188+ .endr
24189+
24190 .align 16
24191 .globl early_gdt_descr
24192 early_gdt_descr:
24193 .word GDT_ENTRIES*8-1
24194 early_gdt_descr_base:
24195- .quad INIT_PER_CPU_VAR(gdt_page)
24196+ .quad cpu_gdt_table
24197
24198 ENTRY(phys_base)
24199 /* This must match the first entry in level2_kernel_pgt */
24200 .quad 0x0000000000000000
24201
24202 #include "../../x86/xen/xen-head.S"
24203-
24204- __PAGE_ALIGNED_BSS
24205+
24206+ .section .rodata,"a",@progbits
24207 NEXT_PAGE(empty_zero_page)
24208 .skip PAGE_SIZE
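
The head_64.S hunks above pre-set _PAGE_BIT_NX (bit 63) in the top-level entries covering vmalloc, vmemmap and the fixmap, and OR it into __supported_pte_mask, as soon as CPUID reports NX support. The 32-bit variant earlier in head_32.S does the same with btsl $_PAGE_BIT_NX-32 on the mask's high dword, since a 32-bit bts cannot reach bit 63 of a 64-bit PAE entry. A small sketch of that high-dword trick (little-endian layout assumed, aliasing done the way the kernel does it):

    /* Illustration of the high-dword trick: a 32-bit store flips
     * bit 63 (NX) of a 64-bit PAE entry by addressing its top word,
     * which is what "btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)"
     * does in head_32.S. */
    #include <stdint.h>
    #include <assert.h>

    #define PAGE_BIT_NX 63

    static void set_nx_from_32bit_code(uint64_t *pte)
    {
            uint32_t *hi = (uint32_t *)pte + 1;  /* the "+4" in the asm */

            *hi |= 1u << (PAGE_BIT_NX - 32);     /* bit 31 of the high word */
    }

    int main(void)
    {
            uint64_t mask = 0;

            set_nx_from_32bit_code(&mask);
            assert(mask == 1ULL << PAGE_BIT_NX);
            return 0;
    }
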
24209diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
24210index 05fd74f..c3548b1 100644
24211--- a/arch/x86/kernel/i386_ksyms_32.c
24212+++ b/arch/x86/kernel/i386_ksyms_32.c
24213@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
24214 EXPORT_SYMBOL(cmpxchg8b_emu);
24215 #endif
24216
24217+EXPORT_SYMBOL_GPL(cpu_gdt_table);
24218+
24219 /* Networking helper routines. */
24220 EXPORT_SYMBOL(csum_partial_copy_generic);
24221+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
24222+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
24223
24224 EXPORT_SYMBOL(__get_user_1);
24225 EXPORT_SYMBOL(__get_user_2);
24226@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
24227 EXPORT_SYMBOL(___preempt_schedule_context);
24228 #endif
24229 #endif
24230+
24231+#ifdef CONFIG_PAX_KERNEXEC
24232+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
24233+#endif
24234+
24235+#ifdef CONFIG_PAX_PER_CPU_PGD
24236+EXPORT_SYMBOL(cpu_pgd);
24237+#endif
24238diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
24239index e8368c6..9c1a712 100644
24240--- a/arch/x86/kernel/i387.c
24241+++ b/arch/x86/kernel/i387.c
24242@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
24243 static inline bool interrupted_user_mode(void)
24244 {
24245 struct pt_regs *regs = get_irq_regs();
24246- return regs && user_mode_vm(regs);
24247+ return regs && user_mode(regs);
24248 }
24249
24250 /*
24251diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
24252index 2e977b5..5f2c273 100644
24253--- a/arch/x86/kernel/i8259.c
24254+++ b/arch/x86/kernel/i8259.c
24255@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
24256 static void make_8259A_irq(unsigned int irq)
24257 {
24258 disable_irq_nosync(irq);
24259- io_apic_irqs &= ~(1<<irq);
24260+ io_apic_irqs &= ~(1UL<<irq);
24261 irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
24262 i8259A_chip.name);
24263 enable_irq(irq);
24264@@ -209,7 +209,7 @@ spurious_8259A_irq:
24265 "spurious 8259A interrupt: IRQ%d.\n", irq);
24266 spurious_irq_mask |= irqmask;
24267 }
24268- atomic_inc(&irq_err_count);
24269+ atomic_inc_unchecked(&irq_err_count);
24270 /*
24271 * Theoretically we do not have to handle this IRQ,
24272 * but in Linux this does not cause problems and is
24273@@ -332,14 +332,16 @@ static void init_8259A(int auto_eoi)
24274 /* (slave's support for AEOI in flat mode is to be investigated) */
24275 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
24276
24277+ pax_open_kernel();
24278 if (auto_eoi)
24279 /*
24280 * In AEOI mode we just have to mask the interrupt
24281 * when acking.
24282 */
24283- i8259A_chip.irq_mask_ack = disable_8259A_irq;
24284+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
24285 else
24286- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
24287+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
24288+ pax_close_kernel();
24289
24290 udelay(100); /* wait for 8259A to initialize */
24291
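
The i8259.c hunk above brackets the late rewrite of irq_mask_ack with pax_open_kernel()/pax_close_kernel(): under grsecurity, struct irq_chip instances live in read-only memory, and the pair briefly lifts kernel write protection (in the native KERNEXEC implementation, by toggling CR0.WP) around the one sanctioned store. A userspace analogy of the pattern, not the kernel implementation, using mprotect():

    /* Userspace analogy only, not the kernel code: open, store, close. */
    #include <sys/mman.h>
    #include <string.h>
    #include <unistd.h>
    #include <assert.h>

    static void sanctioned_store(void *page, size_t len, size_t off,
                                 const void *val, size_t n)
    {
            mprotect(page, len, PROT_READ | PROT_WRITE); /* "pax_open_kernel" */
            memcpy((char *)page + off, val, n);
            mprotect(page, len, PROT_READ);              /* "pax_close_kernel" */
    }

    int main(void)
    {
            size_t pg = (size_t)sysconf(_SC_PAGESIZE);
            void *p = mmap(NULL, pg, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            int v = 42;

            assert(p != MAP_FAILED);
            sanctioned_store(p, pg, 0, &v, sizeof(v));
            assert(*(int *)p == 42);
            return 0;
    }
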
24292diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
24293index a979b5b..1d6db75 100644
24294--- a/arch/x86/kernel/io_delay.c
24295+++ b/arch/x86/kernel/io_delay.c
24296@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
24297 * Quirk table for systems that misbehave (lock up, etc.) if port
24298 * 0x80 is used:
24299 */
24300-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
24301+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
24302 {
24303 .callback = dmi_io_delay_0xed_port,
24304 .ident = "Compaq Presario V6000",
24305diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
24306index 4ddaf66..49d5c18 100644
24307--- a/arch/x86/kernel/ioport.c
24308+++ b/arch/x86/kernel/ioport.c
24309@@ -6,6 +6,7 @@
24310 #include <linux/sched.h>
24311 #include <linux/kernel.h>
24312 #include <linux/capability.h>
24313+#include <linux/security.h>
24314 #include <linux/errno.h>
24315 #include <linux/types.h>
24316 #include <linux/ioport.h>
24317@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
24318 return -EINVAL;
24319 if (turn_on && !capable(CAP_SYS_RAWIO))
24320 return -EPERM;
24321+#ifdef CONFIG_GRKERNSEC_IO
24322+ if (turn_on && grsec_disable_privio) {
24323+ gr_handle_ioperm();
24324+ return -ENODEV;
24325+ }
24326+#endif
24327
24328 /*
24329 * If it's the first ioperm() call in this thread's lifetime, set the
24330@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
24331 * because the ->io_bitmap_max value must match the bitmap
24332 * contents:
24333 */
24334- tss = &per_cpu(init_tss, get_cpu());
24335+ tss = init_tss + get_cpu();
24336
24337 if (turn_on)
24338 bitmap_clear(t->io_bitmap_ptr, from, num);
24339@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
24340 if (level > old) {
24341 if (!capable(CAP_SYS_RAWIO))
24342 return -EPERM;
24343+#ifdef CONFIG_GRKERNSEC_IO
24344+ if (grsec_disable_privio) {
24345+ gr_handle_iopl();
24346+ return -ENODEV;
24347+ }
24348+#endif
24349 }
24350 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
24351 t->iopl = level << 12;
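
The ioport.c hunks add a grsecurity policy gate behind the capability check: with CONFIG_GRKERNSEC_IO and grsec_disable_privio set, even a CAP_SYS_RAWIO holder gets -ENODEV from ioperm()/iopl(), and the attempt is logged via gr_handle_ioperm()/gr_handle_iopl(). A compact sketch of the check ordering (the two booleans stand in for capable() and the grsec sysctl):

    /* Sketch of the layered policy (the booleans are stand-ins;
     * gr_handle_ioperm() would log the refused attempt in-kernel). */
    #include <errno.h>
    #include <stdbool.h>
    #include <assert.h>

    static bool capable_rawio;         /* capable(CAP_SYS_RAWIO) stand-in */
    static bool grsec_disable_privio;  /* grsecurity kill switch stand-in */

    static int check_ioperm(bool turn_on)
    {
            if (turn_on && !capable_rawio)
                    return -EPERM;     /* capability check comes first */
            if (turn_on && grsec_disable_privio)
                    return -ENODEV;    /* policy veto even for root */
            return 0;
    }

    int main(void)
    {
            capable_rawio = true;
            grsec_disable_privio = true;
            assert(check_ioperm(true) == -ENODEV);
            grsec_disable_privio = false;
            assert(check_ioperm(true) == 0);
            return 0;
    }
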
24352diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
24353index 22d0687..e07b2a5 100644
24354--- a/arch/x86/kernel/irq.c
24355+++ b/arch/x86/kernel/irq.c
24356@@ -21,7 +21,7 @@
24357 #define CREATE_TRACE_POINTS
24358 #include <asm/trace/irq_vectors.h>
24359
24360-atomic_t irq_err_count;
24361+atomic_unchecked_t irq_err_count;
24362
24363 /* Function pointer for generic interrupt vector handling */
24364 void (*x86_platform_ipi_callback)(void) = NULL;
24365@@ -125,9 +125,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
24366 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
24367 seq_printf(p, " Machine check polls\n");
24368 #endif
24369- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
24370+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
24371 #if defined(CONFIG_X86_IO_APIC)
24372- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
24373+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
24374 #endif
24375 return 0;
24376 }
24377@@ -167,7 +167,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
24378
24379 u64 arch_irq_stat(void)
24380 {
24381- u64 sum = atomic_read(&irq_err_count);
24382+ u64 sum = atomic_read_unchecked(&irq_err_count);
24383 return sum;
24384 }
24385
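
The irq.c hunks convert irq_err_count to atomic_unchecked_t. PaX's REFCOUNT hardening makes ordinary atomic_t arithmetic trap on overflow to stop reference-count exploits; pure statistics counters such as this one may legitimately wrap, so they are switched to the unchecked variant to avoid false positives. A simplified illustration of the distinction in C11 atomics (the real implementation traps on the overflow flag in asm rather than comparing):

    /* Simplified model of the checked/unchecked split; real REFCOUNT
     * traps in asm instead of testing against INT_MAX. */
    #include <stdatomic.h>
    #include <limits.h>
    #include <stdio.h>

    static void inc_checked(atomic_int *v)
    {
            if (atomic_fetch_add(v, 1) == INT_MAX) {
                    atomic_fetch_sub(v, 1);  /* saturate instead of wrapping */
                    fprintf(stderr, "refcount overflow blocked\n");
            }
    }

    static void inc_unchecked(atomic_int *v)
    {
            atomic_fetch_add(v, 1);          /* statistics may wrap freely */
    }

    int main(void)
    {
            atomic_int count = INT_MAX;

            inc_checked(&count);             /* stays at INT_MAX, warns */
            inc_unchecked(&count);           /* wraps to INT_MIN, by design */
            printf("%d\n", atomic_load(&count));
            return 0;
    }
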
24386diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
24387index d7fcbed..1f747f7 100644
24388--- a/arch/x86/kernel/irq_32.c
24389+++ b/arch/x86/kernel/irq_32.c
24390@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
24391 __asm__ __volatile__("andl %%esp,%0" :
24392 "=r" (sp) : "0" (THREAD_SIZE - 1));
24393
24394- return sp < (sizeof(struct thread_info) + STACK_WARN);
24395+ return sp < STACK_WARN;
24396 }
24397
24398 static void print_stack_overflow(void)
24399@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
24400 * per-CPU IRQ handling contexts (thread information and stack)
24401 */
24402 union irq_ctx {
24403- struct thread_info tinfo;
24404- u32 stack[THREAD_SIZE/sizeof(u32)];
24405+ unsigned long previous_esp;
24406+ u32 stack[THREAD_SIZE/sizeof(u32)];
24407 } __attribute__((aligned(THREAD_SIZE)));
24408
24409 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
24410@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
24411 static inline int
24412 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
24413 {
24414- union irq_ctx *curctx, *irqctx;
24415+ union irq_ctx *irqctx;
24416 u32 *isp, arg1, arg2;
24417
24418- curctx = (union irq_ctx *) current_thread_info();
24419 irqctx = __this_cpu_read(hardirq_ctx);
24420
24421 /*
24422@@ -92,13 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
24423 * handler) we can't do that and just have to keep using the
24424 * current stack (which is the irq stack already after all)
24425 */
24426- if (unlikely(curctx == irqctx))
24427+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
24428 return 0;
24429
24430 /* build the stack frame on the IRQ stack */
24431- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
24432- irqctx->tinfo.task = curctx->tinfo.task;
24433- irqctx->tinfo.previous_esp = current_stack_pointer;
24434+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
24435+ irqctx->previous_esp = current_stack_pointer;
24436+
24437+#ifdef CONFIG_PAX_MEMORY_UDEREF
24438+ __set_fs(MAKE_MM_SEG(0));
24439+#endif
24440
24441 if (unlikely(overflow))
24442 call_on_stack(print_stack_overflow, isp);
24443@@ -110,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
24444 : "0" (irq), "1" (desc), "2" (isp),
24445 "D" (desc->handle_irq)
24446 : "memory", "cc", "ecx");
24447+
24448+#ifdef CONFIG_PAX_MEMORY_UDEREF
24449+ __set_fs(current_thread_info()->addr_limit);
24450+#endif
24451+
24452 return 1;
24453 }
24454
24455@@ -118,48 +125,34 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
24456 */
24457 void irq_ctx_init(int cpu)
24458 {
24459- union irq_ctx *irqctx;
24460-
24461 if (per_cpu(hardirq_ctx, cpu))
24462 return;
24463
24464- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
24465- THREADINFO_GFP,
24466- THREAD_SIZE_ORDER));
24467- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
24468- irqctx->tinfo.cpu = cpu;
24469- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
24470-
24471- per_cpu(hardirq_ctx, cpu) = irqctx;
24472-
24473- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
24474- THREADINFO_GFP,
24475- THREAD_SIZE_ORDER));
24476- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
24477- irqctx->tinfo.cpu = cpu;
24478- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
24479-
24480- per_cpu(softirq_ctx, cpu) = irqctx;
24481-
24482- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
24483- cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
24484+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
24485+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
24486 }
24487
24488 void do_softirq_own_stack(void)
24489 {
24490- struct thread_info *curctx;
24491 union irq_ctx *irqctx;
24492 u32 *isp;
24493
24494- curctx = current_thread_info();
24495 irqctx = __this_cpu_read(softirq_ctx);
24496- irqctx->tinfo.task = curctx->task;
24497- irqctx->tinfo.previous_esp = current_stack_pointer;
24498+ irqctx->previous_esp = current_stack_pointer;
24499
24500 /* build the stack frame on the softirq stack */
24501- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
24502+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
24503+
24504+#ifdef CONFIG_PAX_MEMORY_UDEREF
24505+ __set_fs(MAKE_MM_SEG(0));
24506+#endif
24507
24508 call_on_stack(__do_softirq, isp);
24509+
24510+#ifdef CONFIG_PAX_MEMORY_UDEREF
24511+ __set_fs(current_thread_info()->addr_limit);
24512+#endif
24513+
24514 }
24515
24516 bool handle_irq(unsigned irq, struct pt_regs *regs)
24517@@ -173,7 +166,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
24518 if (unlikely(!desc))
24519 return false;
24520
24521- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
24522+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
24523 if (unlikely(overflow))
24524 print_stack_overflow();
24525 desc->handle_irq(irq, desc);
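
In irq_32.c the union irq_ctx loses its embedded thread_info (grsecurity moves thread_info off the stack), so "are we already on the IRQ stack?" can no longer be answered by comparing thread_info pointers. The replacement test checks whether the current stack pointer falls within THREAD_SIZE of the IRQ context, as in this standalone sketch:

    /* Standalone sketch of the containment test. */
    #include <stdint.h>
    #include <stdbool.h>
    #include <assert.h>

    #define THREAD_SIZE 8192u  /* matches the two-page i386 default */

    static bool on_irq_stack(uintptr_t sp, uintptr_t irqctx)
    {
            /* unsigned subtraction: also false when sp is below the base */
            return sp - irqctx < THREAD_SIZE;
    }

    int main(void)
    {
            uintptr_t base = 0x10000;

            assert(on_irq_stack(base + 100, base));
            assert(!on_irq_stack(base + THREAD_SIZE, base));
            assert(!on_irq_stack(base - 4, base));
            return 0;
    }
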
24526diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
24527index 4d1c746..232961d 100644
24528--- a/arch/x86/kernel/irq_64.c
24529+++ b/arch/x86/kernel/irq_64.c
24530@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
24531 u64 estack_top, estack_bottom;
24532 u64 curbase = (u64)task_stack_page(current);
24533
24534- if (user_mode_vm(regs))
24535+ if (user_mode(regs))
24536 return;
24537
24538 if (regs->sp >= curbase + sizeof(struct thread_info) +
24539diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
24540index 26d5a55..a01160a 100644
24541--- a/arch/x86/kernel/jump_label.c
24542+++ b/arch/x86/kernel/jump_label.c
24543@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
24544 * Jump label is enabled for the first time.
24545 * So we expect a default_nop...
24546 */
24547- if (unlikely(memcmp((void *)entry->code, default_nop, 5)
24548+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
24549 != 0))
24550 bug_at((void *)entry->code, __LINE__);
24551 } else {
24552@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
24553 * ...otherwise expect an ideal_nop. Otherwise
24554 * something went horribly wrong.
24555 */
24556- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
24557+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
24558 != 0))
24559 bug_at((void *)entry->code, __LINE__);
24560 }
24561@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
24562 * are converting the default nop to the ideal nop.
24563 */
24564 if (init) {
24565- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
24566+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
24567 bug_at((void *)entry->code, __LINE__);
24568 } else {
24569 code.jump = 0xe9;
24570 code.offset = entry->target -
24571 (entry->code + JUMP_LABEL_NOP_SIZE);
24572- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
24573+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
24574 bug_at((void *)entry->code, __LINE__);
24575 }
24576 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
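
jump_label.c now reads the 5-byte NOP/JMP patch sites through ktla_ktva(). Under KERNEXEC on i386 the kernel text is reachable at two addresses, the linear address the CPU executes from and the virtual alias used for data access, and ktla_ktva()/ktva_ktla() convert between the two views by a constant offset. A toy model of the round trip (the offset value is invented; only the inverse relationship is the point):

    /* Toy model; KERNEL_TEXT_OFFSET is a made-up stand-in for the
     * real __KERNEL_TEXT_OFFSET. */
    #include <stdint.h>
    #include <assert.h>

    #define KERNEL_TEXT_OFFSET 0x10000000u  /* hypothetical */

    static uintptr_t ktla_ktva(uintptr_t addr)  /* exec view -> data view */
    {
            return addr + KERNEL_TEXT_OFFSET;
    }

    static uintptr_t ktva_ktla(uintptr_t addr)  /* data view -> exec view */
    {
            return addr - KERNEL_TEXT_OFFSET;
    }

    int main(void)
    {
            uintptr_t code = 0xc1000000u;

            assert(ktva_ktla(ktla_ktva(code)) == code);
            return 0;
    }
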
24577diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
24578index 836f832..a8bda67 100644
24579--- a/arch/x86/kernel/kgdb.c
24580+++ b/arch/x86/kernel/kgdb.c
24581@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
24582 #ifdef CONFIG_X86_32
24583 switch (regno) {
24584 case GDB_SS:
24585- if (!user_mode_vm(regs))
24586+ if (!user_mode(regs))
24587 *(unsigned long *)mem = __KERNEL_DS;
24588 break;
24589 case GDB_SP:
24590- if (!user_mode_vm(regs))
24591+ if (!user_mode(regs))
24592 *(unsigned long *)mem = kernel_stack_pointer(regs);
24593 break;
24594 case GDB_GS:
24595@@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
24596 bp->attr.bp_addr = breakinfo[breakno].addr;
24597 bp->attr.bp_len = breakinfo[breakno].len;
24598 bp->attr.bp_type = breakinfo[breakno].type;
24599- info->address = breakinfo[breakno].addr;
24600+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
24601+ info->address = ktla_ktva(breakinfo[breakno].addr);
24602+ else
24603+ info->address = breakinfo[breakno].addr;
24604 info->len = breakinfo[breakno].len;
24605 info->type = breakinfo[breakno].type;
24606 val = arch_install_hw_breakpoint(bp);
24607@@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
24608 case 'k':
24609 /* clear the trace bit */
24610 linux_regs->flags &= ~X86_EFLAGS_TF;
24611- atomic_set(&kgdb_cpu_doing_single_step, -1);
24612+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
24613
24614 /* set the trace bit if we're stepping */
24615 if (remcomInBuffer[0] == 's') {
24616 linux_regs->flags |= X86_EFLAGS_TF;
24617- atomic_set(&kgdb_cpu_doing_single_step,
24618+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
24619 raw_smp_processor_id());
24620 }
24621
24622@@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
24623
24624 switch (cmd) {
24625 case DIE_DEBUG:
24626- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
24627+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
24628 if (user_mode(regs))
24629 return single_step_cont(regs, args);
24630 break;
24631@@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
24632 #endif /* CONFIG_DEBUG_RODATA */
24633
24634 bpt->type = BP_BREAKPOINT;
24635- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
24636+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
24637 BREAK_INSTR_SIZE);
24638 if (err)
24639 return err;
24640- err = probe_kernel_write((char *)bpt->bpt_addr,
24641+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
24642 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
24643 #ifdef CONFIG_DEBUG_RODATA
24644 if (!err)
24645@@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
24646 return -EBUSY;
24647 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
24648 BREAK_INSTR_SIZE);
24649- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
24650+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
24651 if (err)
24652 return err;
24653 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
24654@@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
24655 if (mutex_is_locked(&text_mutex))
24656 goto knl_write;
24657 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
24658- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
24659+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
24660 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
24661 goto knl_write;
24662 return err;
24663 knl_write:
24664 #endif /* CONFIG_DEBUG_RODATA */
24665- return probe_kernel_write((char *)bpt->bpt_addr,
24666+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
24667 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
24668 }
24669
24670diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
24671index 79a3f96..6ba030a 100644
24672--- a/arch/x86/kernel/kprobes/core.c
24673+++ b/arch/x86/kernel/kprobes/core.c
24674@@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
24675 s32 raddr;
24676 } __packed *insn;
24677
24678- insn = (struct __arch_relative_insn *)from;
24679+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
24680+
24681+ pax_open_kernel();
24682 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
24683 insn->op = op;
24684+ pax_close_kernel();
24685 }
24686
24687 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
24688@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
24689 kprobe_opcode_t opcode;
24690 kprobe_opcode_t *orig_opcodes = opcodes;
24691
24692- if (search_exception_tables((unsigned long)opcodes))
24693+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
24694 return 0; /* Page fault may occur on this address. */
24695
24696 retry:
24697@@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
24698 * for the first byte, we can recover the original instruction
24699 * from it and kp->opcode.
24700 */
24701- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
24702+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
24703 buf[0] = kp->opcode;
24704- return (unsigned long)buf;
24705+ return ktva_ktla((unsigned long)buf);
24706 }
24707
24708 /*
24709@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
24710 /* Another subsystem puts a breakpoint, failed to recover */
24711 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
24712 return 0;
24713+ pax_open_kernel();
24714 memcpy(dest, insn.kaddr, insn.length);
24715+ pax_close_kernel();
24716
24717 #ifdef CONFIG_X86_64
24718 if (insn_rip_relative(&insn)) {
24719@@ -359,7 +364,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
24720 return 0;
24721 }
24722 disp = (u8 *) dest + insn_offset_displacement(&insn);
24723+ pax_open_kernel();
24724 *(s32 *) disp = (s32) newdisp;
24725+ pax_close_kernel();
24726 }
24727 #endif
24728 return insn.length;
24729@@ -498,7 +505,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
24730 * nor set current_kprobe, because it doesn't use single
24731 * stepping.
24732 */
24733- regs->ip = (unsigned long)p->ainsn.insn;
24734+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
24735 preempt_enable_no_resched();
24736 return;
24737 }
24738@@ -515,9 +522,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
24739 regs->flags &= ~X86_EFLAGS_IF;
24740 /* single step inline if the instruction is an int3 */
24741 if (p->opcode == BREAKPOINT_INSTRUCTION)
24742- regs->ip = (unsigned long)p->addr;
24743+ regs->ip = ktla_ktva((unsigned long)p->addr);
24744 else
24745- regs->ip = (unsigned long)p->ainsn.insn;
24746+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
24747 }
24748
24749 /*
24750@@ -596,7 +603,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
24751 setup_singlestep(p, regs, kcb, 0);
24752 return 1;
24753 }
24754- } else if (*addr != BREAKPOINT_INSTRUCTION) {
24755+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
24756 /*
24757 * The breakpoint instruction was removed right
24758 * after we hit it. Another cpu has removed
24759@@ -642,6 +649,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
24760 " movq %rax, 152(%rsp)\n"
24761 RESTORE_REGS_STRING
24762 " popfq\n"
24763+#ifdef KERNEXEC_PLUGIN
24764+ " btsq $63,(%rsp)\n"
24765+#endif
24766 #else
24767 " pushf\n"
24768 SAVE_REGS_STRING
24769@@ -779,7 +789,7 @@ static void __kprobes
24770 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
24771 {
24772 unsigned long *tos = stack_addr(regs);
24773- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
24774+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
24775 unsigned long orig_ip = (unsigned long)p->addr;
24776 kprobe_opcode_t *insn = p->ainsn.insn;
24777
24778@@ -961,7 +971,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
24779 struct die_args *args = data;
24780 int ret = NOTIFY_DONE;
24781
24782- if (args->regs && user_mode_vm(args->regs))
24783+ if (args->regs && user_mode(args->regs))
24784 return ret;
24785
24786 switch (val) {
24787diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
24788index 898160b..758cde8 100644
24789--- a/arch/x86/kernel/kprobes/opt.c
24790+++ b/arch/x86/kernel/kprobes/opt.c
24791@@ -79,6 +79,7 @@ found:
24792 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
24793 static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
24794 {
24795+ pax_open_kernel();
24796 #ifdef CONFIG_X86_64
24797 *addr++ = 0x48;
24798 *addr++ = 0xbf;
24799@@ -86,6 +87,7 @@ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long v
24800 *addr++ = 0xb8;
24801 #endif
24802 *(unsigned long *)addr = val;
24803+ pax_close_kernel();
24804 }
24805
24806 asm (
24807@@ -335,7 +337,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
24808 * Verify if the address gap is in 2GB range, because this uses
24809 * a relative jump.
24810 */
24811- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
24812+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
24813 if (abs(rel) > 0x7fffffff)
24814 return -ERANGE;
24815
24816@@ -350,16 +352,18 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
24817 op->optinsn.size = ret;
24818
24819 /* Copy arch-dep-instance from template */
24820- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
24821+ pax_open_kernel();
24822+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
24823+ pax_close_kernel();
24824
24825 /* Set probe information */
24826 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
24827
24828 /* Set probe function call */
24829- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
24830+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
24831
24832 /* Set returning jmp instruction at the tail of out-of-line buffer */
24833- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
24834+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
24835 (u8 *)op->kp.addr + op->optinsn.size);
24836
24837 flush_icache_range((unsigned long) buf,
24838@@ -384,7 +388,7 @@ void __kprobes arch_optimize_kprobes(struct list_head *oplist)
24839 WARN_ON(kprobe_disabled(&op->kp));
24840
24841 /* Backup instructions which will be replaced by jump address */
24842- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
24843+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
24844 RELATIVE_ADDR_SIZE);
24845
24846 insn_buf[0] = RELATIVEJUMP_OPCODE;
24847@@ -433,7 +437,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
24848 /* This kprobe is really able to run optimized path. */
24849 op = container_of(p, struct optimized_kprobe, kp);
24850 /* Detour through copied instructions */
24851- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
24852+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
24853 if (!reenter)
24854 reset_current_kprobe();
24855 preempt_enable_no_resched();
24856diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
24857index ebc9873..1b9724b 100644
24858--- a/arch/x86/kernel/ldt.c
24859+++ b/arch/x86/kernel/ldt.c
24860@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
24861 if (reload) {
24862 #ifdef CONFIG_SMP
24863 preempt_disable();
24864- load_LDT(pc);
24865+ load_LDT_nolock(pc);
24866 if (!cpumask_equal(mm_cpumask(current->mm),
24867 cpumask_of(smp_processor_id())))
24868 smp_call_function(flush_ldt, current->mm, 1);
24869 preempt_enable();
24870 #else
24871- load_LDT(pc);
24872+ load_LDT_nolock(pc);
24873 #endif
24874 }
24875 if (oldsize) {
24876@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
24877 return err;
24878
24879 for (i = 0; i < old->size; i++)
24880- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
24881+ write_ldt_entry(new->ldt, i, old->ldt + i);
24882 return 0;
24883 }
24884
24885@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
24886 retval = copy_ldt(&mm->context, &old_mm->context);
24887 mutex_unlock(&old_mm->context.lock);
24888 }
24889+
24890+ if (tsk == current) {
24891+ mm->context.vdso = 0;
24892+
24893+#ifdef CONFIG_X86_32
24894+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24895+ mm->context.user_cs_base = 0UL;
24896+ mm->context.user_cs_limit = ~0UL;
24897+
24898+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
24899+ cpus_clear(mm->context.cpu_user_cs_mask);
24900+#endif
24901+
24902+#endif
24903+#endif
24904+
24905+ }
24906+
24907 return retval;
24908 }
24909
24910@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
24911 }
24912 }
24913
24914+#ifdef CONFIG_PAX_SEGMEXEC
24915+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
24916+ error = -EINVAL;
24917+ goto out_unlock;
24918+ }
24919+#endif
24920+
24921 fill_ldt(&ldt, &ldt_info);
24922 if (oldmode)
24923 ldt.avl = 0;
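
The write_ldt() hunk above refuses user-supplied code descriptors for SEGMEXEC tasks: SEGMEXEC emulates NX by splitting the address space, and an LDT code segment with an arbitrary base could reach around that split. A sketch of the check (the MF_PAX_SEGMEXEC bit value is hypothetical; MODIFY_LDT_CONTENTS_CODE mirrors the uapi constant):

    /* Sketch of the policy check; flag bit values are illustrative. */
    #include <errno.h>
    #include <assert.h>

    #define MF_PAX_SEGMEXEC          0x01u  /* hypothetical flag bit */
    #define MODIFY_LDT_CONTENTS_CODE 2u

    static int check_ldt_entry(unsigned int pax_flags, unsigned int contents)
    {
            if ((pax_flags & MF_PAX_SEGMEXEC) &&
                (contents & MODIFY_LDT_CONTENTS_CODE))
                    return -EINVAL;
            return 0;
    }

    int main(void)
    {
            assert(check_ldt_entry(MF_PAX_SEGMEXEC,
                                   MODIFY_LDT_CONTENTS_CODE) == -EINVAL);
            assert(check_ldt_entry(0, MODIFY_LDT_CONTENTS_CODE) == 0);
            return 0;
    }
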
24924diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
24925index 5b19e4d..6476a76 100644
24926--- a/arch/x86/kernel/machine_kexec_32.c
24927+++ b/arch/x86/kernel/machine_kexec_32.c
24928@@ -26,7 +26,7 @@
24929 #include <asm/cacheflush.h>
24930 #include <asm/debugreg.h>
24931
24932-static void set_idt(void *newidt, __u16 limit)
24933+static void set_idt(struct desc_struct *newidt, __u16 limit)
24934 {
24935 struct desc_ptr curidt;
24936
24937@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
24938 }
24939
24940
24941-static void set_gdt(void *newgdt, __u16 limit)
24942+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
24943 {
24944 struct desc_ptr curgdt;
24945
24946@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
24947 }
24948
24949 control_page = page_address(image->control_code_page);
24950- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
24951+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
24952
24953 relocate_kernel_ptr = control_page;
24954 page_list[PA_CONTROL_PAGE] = __pa(control_page);
24955diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
24956index 15c9876..0a43909 100644
24957--- a/arch/x86/kernel/microcode_core.c
24958+++ b/arch/x86/kernel/microcode_core.c
24959@@ -513,7 +513,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
24960 return NOTIFY_OK;
24961 }
24962
24963-static struct notifier_block __refdata mc_cpu_notifier = {
24964+static struct notifier_block mc_cpu_notifier = {
24965 .notifier_call = mc_cpu_callback,
24966 };
24967
24968diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
24969index 5fb2ceb..3ae90bb 100644
24970--- a/arch/x86/kernel/microcode_intel.c
24971+++ b/arch/x86/kernel/microcode_intel.c
24972@@ -293,13 +293,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
24973
24974 static int get_ucode_user(void *to, const void *from, size_t n)
24975 {
24976- return copy_from_user(to, from, n);
24977+ return copy_from_user(to, (const void __force_user *)from, n);
24978 }
24979
24980 static enum ucode_state
24981 request_microcode_user(int cpu, const void __user *buf, size_t size)
24982 {
24983- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
24984+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
24985 }
24986
24987 static void microcode_fini_cpu(int cpu)
24988diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
24989index 18be189..4a9fe40 100644
24990--- a/arch/x86/kernel/module.c
24991+++ b/arch/x86/kernel/module.c
24992@@ -43,15 +43,60 @@ do { \
24993 } while (0)
24994 #endif
24995
24996-void *module_alloc(unsigned long size)
24997+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
24998 {
24999- if (PAGE_ALIGN(size) > MODULES_LEN)
25000+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
25001 return NULL;
25002 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
25003- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
25004+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
25005 NUMA_NO_NODE, __builtin_return_address(0));
25006 }
25007
25008+void *module_alloc(unsigned long size)
25009+{
25010+
25011+#ifdef CONFIG_PAX_KERNEXEC
25012+ return __module_alloc(size, PAGE_KERNEL);
25013+#else
25014+ return __module_alloc(size, PAGE_KERNEL_EXEC);
25015+#endif
25016+
25017+}
25018+
25019+#ifdef CONFIG_PAX_KERNEXEC
25020+#ifdef CONFIG_X86_32
25021+void *module_alloc_exec(unsigned long size)
25022+{
25023+ struct vm_struct *area;
25024+
25025+ if (size == 0)
25026+ return NULL;
25027+
25028+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
25029+ return area ? area->addr : NULL;
25030+}
25031+EXPORT_SYMBOL(module_alloc_exec);
25032+
25033+void module_free_exec(struct module *mod, void *module_region)
25034+{
25035+ vunmap(module_region);
25036+}
25037+EXPORT_SYMBOL(module_free_exec);
25038+#else
25039+void module_free_exec(struct module *mod, void *module_region)
25040+{
25041+ module_free(mod, module_region);
25042+}
25043+EXPORT_SYMBOL(module_free_exec);
25044+
25045+void *module_alloc_exec(unsigned long size)
25046+{
25047+ return __module_alloc(size, PAGE_KERNEL_RX);
25048+}
25049+EXPORT_SYMBOL(module_alloc_exec);
25050+#endif
25051+#endif
25052+
25053 #ifdef CONFIG_X86_32
25054 int apply_relocate(Elf32_Shdr *sechdrs,
25055 const char *strtab,
25056@@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
25057 unsigned int i;
25058 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
25059 Elf32_Sym *sym;
25060- uint32_t *location;
25061+ uint32_t *plocation, location;
25062
25063 DEBUGP("Applying relocate section %u to %u\n",
25064 relsec, sechdrs[relsec].sh_info);
25065 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
25066 /* This is where to make the change */
25067- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
25068- + rel[i].r_offset;
25069+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
25070+ location = (uint32_t)plocation;
25071+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
25072+ plocation = ktla_ktva((void *)plocation);
25073 /* This is the symbol it is referring to. Note that all
25074 undefined symbols have been resolved. */
25075 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
25076@@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
25077 switch (ELF32_R_TYPE(rel[i].r_info)) {
25078 case R_386_32:
25079 /* We add the value into the location given */
25080- *location += sym->st_value;
25081+ pax_open_kernel();
25082+ *plocation += sym->st_value;
25083+ pax_close_kernel();
25084 break;
25085 case R_386_PC32:
25086 /* Add the value, subtract its position */
25087- *location += sym->st_value - (uint32_t)location;
25088+ pax_open_kernel();
25089+ *plocation += sym->st_value - location;
25090+ pax_close_kernel();
25091 break;
25092 default:
25093 pr_err("%s: Unknown relocation: %u\n",
25094@@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
25095 case R_X86_64_NONE:
25096 break;
25097 case R_X86_64_64:
25098+ pax_open_kernel();
25099 *(u64 *)loc = val;
25100+ pax_close_kernel();
25101 break;
25102 case R_X86_64_32:
25103+ pax_open_kernel();
25104 *(u32 *)loc = val;
25105+ pax_close_kernel();
25106 if (val != *(u32 *)loc)
25107 goto overflow;
25108 break;
25109 case R_X86_64_32S:
25110+ pax_open_kernel();
25111 *(s32 *)loc = val;
25112+ pax_close_kernel();
25113 if ((s64)val != *(s32 *)loc)
25114 goto overflow;
25115 break;
25116 case R_X86_64_PC32:
25117 val -= (u64)loc;
25118+ pax_open_kernel();
25119 *(u32 *)loc = val;
25120+ pax_close_kernel();
25121+
25122 #if 0
25123 if ((s64)val != *(s32 *)loc)
25124 goto overflow;
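
The module.c hunks split module memory along W^X lines under KERNEXEC: module_alloc() now returns non-executable PAGE_KERNEL memory for data, while the new module_alloc_exec() hands out executable but non-writable mappings for code (PAGE_KERNEL_RX on 64-bit, a dedicated MODULES_EXEC_VADDR window on 32-bit), with relocations applied through pax_open_kernel(). A userspace analogy of the never-writable-and-executable rule:

    /* Userspace analogy: code pages are filled while RW, then flipped
     * to RX before use, so no page is ever writable and executable at
     * the same time. x86-64 Linux assumed for the test payload. */
    #include <sys/mman.h>
    #include <string.h>
    #include <assert.h>

    static void *alloc_rw(size_t n)
    {
            void *p = mmap(NULL, n, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            assert(p != MAP_FAILED);
            return p;
    }

    static void *alloc_rx(const void *code, size_t n)
    {
            void *p = alloc_rw(n);

            memcpy(p, code, n);                     /* write while still RW */
            mprotect(p, n, PROT_READ | PROT_EXEC);  /* then drop the W bit */
            return p;
    }

    int main(void)
    {
    #if defined(__x86_64__)
            /* mov $42,%eax; ret */
            static const unsigned char ret42[] = { 0xb8, 42, 0, 0, 0, 0xc3 };
            int (*fn)(void) = (int (*)(void))alloc_rx(ret42, sizeof(ret42));

            assert(fn() == 42);
    #endif
            return 0;
    }
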
25125diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
25126index 05266b5..1577fde 100644
25127--- a/arch/x86/kernel/msr.c
25128+++ b/arch/x86/kernel/msr.c
25129@@ -233,7 +233,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
25130 return notifier_from_errno(err);
25131 }
25132
25133-static struct notifier_block __refdata msr_class_cpu_notifier = {
25134+static struct notifier_block msr_class_cpu_notifier = {
25135 .notifier_call = msr_class_cpu_callback,
25136 };
25137
25138diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
25139index 6fcb49c..5b3f4ff 100644
25140--- a/arch/x86/kernel/nmi.c
25141+++ b/arch/x86/kernel/nmi.c
25142@@ -138,7 +138,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
25143 return handled;
25144 }
25145
25146-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
25147+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
25148 {
25149 struct nmi_desc *desc = nmi_to_desc(type);
25150 unsigned long flags;
25151@@ -162,9 +162,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
25152 * event confuses some handlers (kdump uses this flag)
25153 */
25154 if (action->flags & NMI_FLAG_FIRST)
25155- list_add_rcu(&action->list, &desc->head);
25156+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
25157 else
25158- list_add_tail_rcu(&action->list, &desc->head);
25159+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
25160
25161 spin_unlock_irqrestore(&desc->lock, flags);
25162 return 0;
25163@@ -187,7 +187,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
25164 if (!strcmp(n->name, name)) {
25165 WARN(in_nmi(),
25166 "Trying to free NMI (%s) from NMI context!\n", n->name);
25167- list_del_rcu(&n->list);
25168+ pax_list_del_rcu((struct list_head *)&n->list);
25169 break;
25170 }
25171 }
25172@@ -512,6 +512,17 @@ static inline void nmi_nesting_postprocess(void)
25173 dotraplinkage notrace __kprobes void
25174 do_nmi(struct pt_regs *regs, long error_code)
25175 {
25176+
25177+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25178+ if (!user_mode(regs)) {
25179+ unsigned long cs = regs->cs & 0xFFFF;
25180+ unsigned long ip = ktva_ktla(regs->ip);
25181+
25182+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
25183+ regs->ip = ip;
25184+ }
25185+#endif
25186+
25187 nmi_nesting_preprocess(regs);
25188
25189 nmi_enter();
25190diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
25191index 6d9582e..f746287 100644
25192--- a/arch/x86/kernel/nmi_selftest.c
25193+++ b/arch/x86/kernel/nmi_selftest.c
25194@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
25195 {
25196 /* trap all the unknown NMIs we may generate */
25197 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
25198- __initdata);
25199+ __initconst);
25200 }
25201
25202 static void __init cleanup_nmi_testsuite(void)
25203@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
25204 unsigned long timeout;
25205
25206 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
25207- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
25208+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
25209 nmi_fail = FAILURE;
25210 return;
25211 }
25212diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
25213index bbb6c73..24a58ef 100644
25214--- a/arch/x86/kernel/paravirt-spinlocks.c
25215+++ b/arch/x86/kernel/paravirt-spinlocks.c
25216@@ -8,7 +8,7 @@
25217
25218 #include <asm/paravirt.h>
25219
25220-struct pv_lock_ops pv_lock_ops = {
25221+struct pv_lock_ops pv_lock_ops __read_only = {
25222 #ifdef CONFIG_SMP
25223 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
25224 .unlock_kick = paravirt_nop,
25225diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
25226index 1b10af8..0b58cbc 100644
25227--- a/arch/x86/kernel/paravirt.c
25228+++ b/arch/x86/kernel/paravirt.c
25229@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
25230 {
25231 return x;
25232 }
25233+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25234+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
25235+#endif
25236
25237 void __init default_banner(void)
25238 {
25239@@ -142,15 +145,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
25240 if (opfunc == NULL)
25241 /* If there's no function, patch it with a ud2a (BUG) */
25242 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
25243- else if (opfunc == _paravirt_nop)
25244+ else if (opfunc == (void *)_paravirt_nop)
25245 /* If the operation is a nop, then nop the callsite */
25246 ret = paravirt_patch_nop();
25247
25248 /* identity functions just return their single argument */
25249- else if (opfunc == _paravirt_ident_32)
25250+ else if (opfunc == (void *)_paravirt_ident_32)
25251 ret = paravirt_patch_ident_32(insnbuf, len);
25252- else if (opfunc == _paravirt_ident_64)
25253+ else if (opfunc == (void *)_paravirt_ident_64)
25254 ret = paravirt_patch_ident_64(insnbuf, len);
25255+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25256+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
25257+ ret = paravirt_patch_ident_64(insnbuf, len);
25258+#endif
25259
25260 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
25261 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
25262@@ -175,7 +182,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
25263 if (insn_len > len || start == NULL)
25264 insn_len = len;
25265 else
25266- memcpy(insnbuf, start, insn_len);
25267+ memcpy(insnbuf, ktla_ktva(start), insn_len);
25268
25269 return insn_len;
25270 }
25271@@ -299,7 +306,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
25272 return this_cpu_read(paravirt_lazy_mode);
25273 }
25274
25275-struct pv_info pv_info = {
25276+struct pv_info pv_info __read_only = {
25277 .name = "bare hardware",
25278 .paravirt_enabled = 0,
25279 .kernel_rpl = 0,
25280@@ -310,16 +317,16 @@ struct pv_info pv_info = {
25281 #endif
25282 };
25283
25284-struct pv_init_ops pv_init_ops = {
25285+struct pv_init_ops pv_init_ops __read_only = {
25286 .patch = native_patch,
25287 };
25288
25289-struct pv_time_ops pv_time_ops = {
25290+struct pv_time_ops pv_time_ops __read_only = {
25291 .sched_clock = native_sched_clock,
25292 .steal_clock = native_steal_clock,
25293 };
25294
25295-__visible struct pv_irq_ops pv_irq_ops = {
25296+__visible struct pv_irq_ops pv_irq_ops __read_only = {
25297 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
25298 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
25299 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
25300@@ -331,7 +338,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
25301 #endif
25302 };
25303
25304-__visible struct pv_cpu_ops pv_cpu_ops = {
25305+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
25306 .cpuid = native_cpuid,
25307 .get_debugreg = native_get_debugreg,
25308 .set_debugreg = native_set_debugreg,
25309@@ -389,21 +396,26 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
25310 .end_context_switch = paravirt_nop,
25311 };
25312
25313-struct pv_apic_ops pv_apic_ops = {
25314+struct pv_apic_ops pv_apic_ops __read_only= {
25315 #ifdef CONFIG_X86_LOCAL_APIC
25316 .startup_ipi_hook = paravirt_nop,
25317 #endif
25318 };
25319
25320-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
25321+#ifdef CONFIG_X86_32
25322+#ifdef CONFIG_X86_PAE
25323+/* 64-bit pagetable entries */
25324+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
25325+#else
25326 /* 32-bit pagetable entries */
25327 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
25328+#endif
25329 #else
25330 /* 64-bit pagetable entries */
25331 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
25332 #endif
25333
25334-struct pv_mmu_ops pv_mmu_ops = {
25335+struct pv_mmu_ops pv_mmu_ops __read_only = {
25336
25337 .read_cr2 = native_read_cr2,
25338 .write_cr2 = native_write_cr2,
25339@@ -453,6 +465,7 @@ struct pv_mmu_ops pv_mmu_ops = {
25340 .make_pud = PTE_IDENT,
25341
25342 .set_pgd = native_set_pgd,
25343+ .set_pgd_batched = native_set_pgd_batched,
25344 #endif
25345 #endif /* PAGETABLE_LEVELS >= 3 */
25346
25347@@ -473,6 +486,12 @@ struct pv_mmu_ops pv_mmu_ops = {
25348 },
25349
25350 .set_fixmap = native_set_fixmap,
25351+
25352+#ifdef CONFIG_PAX_KERNEXEC
25353+ .pax_open_kernel = native_pax_open_kernel,
25354+ .pax_close_kernel = native_pax_close_kernel,
25355+#endif
25356+
25357 };
25358
25359 EXPORT_SYMBOL_GPL(pv_time_ops);
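[note] The paravirt hunks above do two things: they move the op tables (pv_info, pv_init_ops, pv_time_ops, pv_irq_ops, pv_cpu_ops, pv_apic_ops, pv_mmu_ops) into __read_only storage, and they compare the stored opfunc pointer against the nop/identity helpers through explicit casts, presumably because the helper prototypes no longer match the stored pointer type once the tables change. A minimal user-space sketch of the read-only dispatch-table pattern (all names hypothetical):

#include <stdio.h>

static int op_ident(int x)  { return x; }        /* like _paravirt_ident_* */
static int op_double(int x) { return 2 * x; }

/* const table lands in .rodata: a stray write faults instead of
 * silently retargeting an op (the effect __read_only aims for). */
static int (* const ops[])(int) = { op_ident, op_double };

static int dispatch(unsigned idx, int arg)
{
    int (*f)(int) = ops[idx];
    /* compare through void *, as the hunk does */
    if ((void *)f == (void *)op_ident)
        return arg;               /* "patch" the call away */
    return f(arg);
}

int main(void) { printf("%d %d\n", dispatch(0, 21), dispatch(1, 21)); return 0; }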
25360diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
25361index 299d493..2ccb0ee 100644
25362--- a/arch/x86/kernel/pci-calgary_64.c
25363+++ b/arch/x86/kernel/pci-calgary_64.c
25364@@ -1339,7 +1339,7 @@ static void __init get_tce_space_from_tar(void)
25365 tce_space = be64_to_cpu(readq(target));
25366 tce_space = tce_space & TAR_SW_BITS;
25367
25368- tce_space = tce_space & (~specified_table_size);
25369+ tce_space = tce_space & (~(unsigned long)specified_table_size);
25370 info->tce_space = (u64 *)__va(tce_space);
25371 }
25372 }
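[note] The calgary change fixes an integer-promotion bug: specified_table_size is narrower than the 64-bit tce_space, so the complement is computed at 32 bits and then zero-extended, and the AND silently clears bits 32-63 of the address. A self-contained demonstration with stand-in values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t tce_space  = 0x0000123456789000ULL;
    uint32_t table_size = 0xFFFU;  /* stand-in for specified_table_size */

    uint64_t bad  = tce_space & ~table_size;            /* ~ done in 32 bits */
    uint64_t good = tce_space & ~(uint64_t)table_size;  /* widen first */

    printf("bad  = %#llx\n", (unsigned long long)bad);  /* 0x56789000 */
    printf("good = %#llx\n", (unsigned long long)good); /* 0x123456789000 */
    return 0;
}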
25373diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
25374index 35ccf75..7a15747 100644
25375--- a/arch/x86/kernel/pci-iommu_table.c
25376+++ b/arch/x86/kernel/pci-iommu_table.c
25377@@ -2,7 +2,7 @@
25378 #include <asm/iommu_table.h>
25379 #include <linux/string.h>
25380 #include <linux/kallsyms.h>
25381-
25382+#include <linux/sched.h>
25383
25384 #define DEBUG 1
25385
25386diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
25387index 6c483ba..d10ce2f 100644
25388--- a/arch/x86/kernel/pci-swiotlb.c
25389+++ b/arch/x86/kernel/pci-swiotlb.c
25390@@ -32,7 +32,7 @@ static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
25391 void *vaddr, dma_addr_t dma_addr,
25392 struct dma_attrs *attrs)
25393 {
25394- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
25395+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
25396 }
25397
25398 static struct dma_map_ops swiotlb_dma_ops = {
25399diff --git a/arch/x86/kernel/preempt.S b/arch/x86/kernel/preempt.S
25400index ca7f0d5..8996469 100644
25401--- a/arch/x86/kernel/preempt.S
25402+++ b/arch/x86/kernel/preempt.S
25403@@ -3,12 +3,14 @@
25404 #include <asm/dwarf2.h>
25405 #include <asm/asm.h>
25406 #include <asm/calling.h>
25407+#include <asm/alternative-asm.h>
25408
25409 ENTRY(___preempt_schedule)
25410 CFI_STARTPROC
25411 SAVE_ALL
25412 call preempt_schedule
25413 RESTORE_ALL
25414+ pax_force_retaddr
25415 ret
25416 CFI_ENDPROC
25417
25418@@ -19,6 +21,7 @@ ENTRY(___preempt_schedule_context)
25419 SAVE_ALL
25420 call preempt_schedule_context
25421 RESTORE_ALL
25422+ pax_force_retaddr
25423 ret
25424 CFI_ENDPROC
25425
25426diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
25427index 3fb8d95..254dc51 100644
25428--- a/arch/x86/kernel/process.c
25429+++ b/arch/x86/kernel/process.c
25430@@ -36,7 +36,8 @@
25431 * section. Since TSS's are completely CPU-local, we want them
25432 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
25433 */
25434-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
25435+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
25436+EXPORT_SYMBOL(init_tss);
25437
25438 #ifdef CONFIG_X86_64
25439 static DEFINE_PER_CPU(unsigned char, is_idle);
25440@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
25441 task_xstate_cachep =
25442 kmem_cache_create("task_xstate", xstate_size,
25443 __alignof__(union thread_xstate),
25444- SLAB_PANIC | SLAB_NOTRACK, NULL);
25445+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
25446 }
25447
25448 /*
25449@@ -105,7 +106,7 @@ void exit_thread(void)
25450 unsigned long *bp = t->io_bitmap_ptr;
25451
25452 if (bp) {
25453- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
25454+ struct tss_struct *tss = init_tss + get_cpu();
25455
25456 t->io_bitmap_ptr = NULL;
25457 clear_thread_flag(TIF_IO_BITMAP);
25458@@ -125,6 +126,9 @@ void flush_thread(void)
25459 {
25460 struct task_struct *tsk = current;
25461
25462+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
25463+ loadsegment(gs, 0);
25464+#endif
25465 flush_ptrace_hw_breakpoint(tsk);
25466 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
25467 drop_init_fpu(tsk);
25468@@ -271,7 +275,7 @@ static void __exit_idle(void)
25469 void exit_idle(void)
25470 {
25471 /* idle loop has pid 0 */
25472- if (current->pid)
25473+ if (task_pid_nr(current))
25474 return;
25475 __exit_idle();
25476 }
25477@@ -327,7 +331,7 @@ bool xen_set_default_idle(void)
25478 return ret;
25479 }
25480 #endif
25481-void stop_this_cpu(void *dummy)
25482+__noreturn void stop_this_cpu(void *dummy)
25483 {
25484 local_irq_disable();
25485 /*
25486@@ -456,16 +460,37 @@ static int __init idle_setup(char *str)
25487 }
25488 early_param("idle", idle_setup);
25489
25490-unsigned long arch_align_stack(unsigned long sp)
25491+#ifdef CONFIG_PAX_RANDKSTACK
25492+void pax_randomize_kstack(struct pt_regs *regs)
25493 {
25494- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
25495- sp -= get_random_int() % 8192;
25496- return sp & ~0xf;
25497-}
25498+ struct thread_struct *thread = &current->thread;
25499+ unsigned long time;
25500
25501-unsigned long arch_randomize_brk(struct mm_struct *mm)
25502-{
25503- unsigned long range_end = mm->brk + 0x02000000;
25504- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
25505-}
25506+ if (!randomize_va_space)
25507+ return;
25508+
25509+ if (v8086_mode(regs))
25510+ return;
25511
25512+ rdtscl(time);
25513+
25514+ /* P4 seems to return a 0 LSB, ignore it */
25515+#ifdef CONFIG_MPENTIUM4
25516+ time &= 0x3EUL;
25517+ time <<= 2;
25518+#elif defined(CONFIG_X86_64)
25519+ time &= 0xFUL;
25520+ time <<= 4;
25521+#else
25522+ time &= 0x1FUL;
25523+ time <<= 3;
25524+#endif
25525+
25526+ thread->sp0 ^= time;
25527+ load_sp0(init_tss + smp_processor_id(), thread);
25528+
25529+#ifdef CONFIG_X86_64
25530+ this_cpu_write(kernel_stack, thread->sp0);
25531+#endif
25532+}
25533+#endif
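[note] The RANDKSTACK hunk replaces the generic stack/brk randomizers with per-syscall kernel-stack randomization: a few masked TSC bits are XORed into thread->sp0, with mask and shift chosen per CPU family so the low bits stay clear and alignment survives the XOR (the Pentium 4 case additionally skips the TSC LSB, which that CPU reportedly returns as 0). A user-space sketch of the masking arithmetic, assuming __rdtsc() from <x86intrin.h> (x86 only; covers two of the hunk's three cases):

#include <stdint.h>
#include <x86intrin.h>   /* __rdtsc() */

static uint64_t randomize_sp0(uint64_t sp0)
{
    uint64_t t = __rdtsc();
#ifdef __x86_64__
    t &= 0xFUL;  t <<= 4;     /* 16 slots, 16-byte granularity */
#else
    t &= 0x1FUL; t <<= 3;     /* 32 slots, 8-byte granularity */
#endif
    return sp0 ^ t;           /* low bits of t are zero: alignment kept */
}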
25534diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
25535index 6f1236c..fd448d4 100644
25536--- a/arch/x86/kernel/process_32.c
25537+++ b/arch/x86/kernel/process_32.c
25538@@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
25539 unsigned long thread_saved_pc(struct task_struct *tsk)
25540 {
25541 return ((unsigned long *)tsk->thread.sp)[3];
25542+//XXX return tsk->thread.eip;
25543 }
25544
25545 void __show_regs(struct pt_regs *regs, int all)
25546@@ -74,19 +75,18 @@ void __show_regs(struct pt_regs *regs, int all)
25547 unsigned long sp;
25548 unsigned short ss, gs;
25549
25550- if (user_mode_vm(regs)) {
25551+ if (user_mode(regs)) {
25552 sp = regs->sp;
25553 ss = regs->ss & 0xffff;
25554- gs = get_user_gs(regs);
25555 } else {
25556 sp = kernel_stack_pointer(regs);
25557 savesegment(ss, ss);
25558- savesegment(gs, gs);
25559 }
25560+ gs = get_user_gs(regs);
25561
25562 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
25563 (u16)regs->cs, regs->ip, regs->flags,
25564- smp_processor_id());
25565+ raw_smp_processor_id());
25566 print_symbol("EIP is at %s\n", regs->ip);
25567
25568 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
25569@@ -133,20 +133,21 @@ void release_thread(struct task_struct *dead_task)
25570 int copy_thread(unsigned long clone_flags, unsigned long sp,
25571 unsigned long arg, struct task_struct *p)
25572 {
25573- struct pt_regs *childregs = task_pt_regs(p);
25574+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
25575 struct task_struct *tsk;
25576 int err;
25577
25578 p->thread.sp = (unsigned long) childregs;
25579 p->thread.sp0 = (unsigned long) (childregs+1);
25580+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
25581
25582 if (unlikely(p->flags & PF_KTHREAD)) {
25583 /* kernel thread */
25584 memset(childregs, 0, sizeof(struct pt_regs));
25585 p->thread.ip = (unsigned long) ret_from_kernel_thread;
25586- task_user_gs(p) = __KERNEL_STACK_CANARY;
25587- childregs->ds = __USER_DS;
25588- childregs->es = __USER_DS;
25589+ savesegment(gs, childregs->gs);
25590+ childregs->ds = __KERNEL_DS;
25591+ childregs->es = __KERNEL_DS;
25592 childregs->fs = __KERNEL_PERCPU;
25593 childregs->bx = sp; /* function */
25594 childregs->bp = arg;
25595@@ -253,7 +254,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25596 struct thread_struct *prev = &prev_p->thread,
25597 *next = &next_p->thread;
25598 int cpu = smp_processor_id();
25599- struct tss_struct *tss = &per_cpu(init_tss, cpu);
25600+ struct tss_struct *tss = init_tss + cpu;
25601 fpu_switch_t fpu;
25602
25603 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
25604@@ -277,6 +278,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25605 */
25606 lazy_save_gs(prev->gs);
25607
25608+#ifdef CONFIG_PAX_MEMORY_UDEREF
25609+ __set_fs(task_thread_info(next_p)->addr_limit);
25610+#endif
25611+
25612 /*
25613 * Load the per-thread Thread-Local Storage descriptor.
25614 */
25615@@ -315,6 +320,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25616 */
25617 arch_end_context_switch(next_p);
25618
25619+ this_cpu_write(current_task, next_p);
25620+ this_cpu_write(current_tinfo, &next_p->tinfo);
25621+
25622 /*
25623 * Restore %gs if needed (which is common)
25624 */
25625@@ -323,8 +331,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25626
25627 switch_fpu_finish(next_p, fpu);
25628
25629- this_cpu_write(current_task, next_p);
25630-
25631 return prev_p;
25632 }
25633
25634@@ -354,4 +360,3 @@ unsigned long get_wchan(struct task_struct *p)
25635 } while (count++ < 16);
25636 return 0;
25637 }
25638-
25639diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
25640index 9c0280f..5bbb1c0 100644
25641--- a/arch/x86/kernel/process_64.c
25642+++ b/arch/x86/kernel/process_64.c
25643@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
25644 struct pt_regs *childregs;
25645 struct task_struct *me = current;
25646
25647- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
25648+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
25649 childregs = task_pt_regs(p);
25650 p->thread.sp = (unsigned long) childregs;
25651 p->thread.usersp = me->thread.usersp;
25652+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
25653 set_tsk_thread_flag(p, TIF_FORK);
25654 p->thread.fpu_counter = 0;
25655 p->thread.io_bitmap_ptr = NULL;
25656@@ -172,6 +173,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
25657 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
25658 savesegment(es, p->thread.es);
25659 savesegment(ds, p->thread.ds);
25660+ savesegment(ss, p->thread.ss);
25661+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
25662 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
25663
25664 if (unlikely(p->flags & PF_KTHREAD)) {
25665@@ -280,7 +283,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25666 struct thread_struct *prev = &prev_p->thread;
25667 struct thread_struct *next = &next_p->thread;
25668 int cpu = smp_processor_id();
25669- struct tss_struct *tss = &per_cpu(init_tss, cpu);
25670+ struct tss_struct *tss = init_tss + cpu;
25671 unsigned fsindex, gsindex;
25672 fpu_switch_t fpu;
25673
25674@@ -303,6 +306,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25675 if (unlikely(next->ds | prev->ds))
25676 loadsegment(ds, next->ds);
25677
25678+ savesegment(ss, prev->ss);
25679+ if (unlikely(next->ss != prev->ss))
25680+ loadsegment(ss, next->ss);
25681
25682 /* We must save %fs and %gs before load_TLS() because
25683 * %fs and %gs may be cleared by load_TLS().
25684@@ -362,6 +368,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25685 prev->usersp = this_cpu_read(old_rsp);
25686 this_cpu_write(old_rsp, next->usersp);
25687 this_cpu_write(current_task, next_p);
25688+ this_cpu_write(current_tinfo, &next_p->tinfo);
25689
25690 /*
25691 * If it were not for PREEMPT_ACTIVE we could guarantee that the
25692@@ -371,9 +378,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25693 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
25694 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
25695
25696- this_cpu_write(kernel_stack,
25697- (unsigned long)task_stack_page(next_p) +
25698- THREAD_SIZE - KERNEL_STACK_OFFSET);
25699+ this_cpu_write(kernel_stack, next->sp0);
25700
25701 /*
25702 * Now maybe reload the debug registers and handle I/O bitmaps
25703@@ -442,12 +447,11 @@ unsigned long get_wchan(struct task_struct *p)
25704 if (!p || p == current || p->state == TASK_RUNNING)
25705 return 0;
25706 stack = (unsigned long)task_stack_page(p);
25707- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
25708+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
25709 return 0;
25710 fp = *(u64 *)(p->thread.sp);
25711 do {
25712- if (fp < (unsigned long)stack ||
25713- fp >= (unsigned long)stack+THREAD_SIZE)
25714+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
25715 return 0;
25716 ip = *(u64 *)(fp+8);
25717 if (!in_sched_functions(ip))
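[note] The get_wchan hunk tightens the frame-pointer walk: since sp0 now sits 16 bytes below the stack top, a frame is valid only if both the saved frame pointer (at fp) and the return address (at fp+8) fit below that limit; the old test rejected fp only at stack+THREAD_SIZE and beyond, letting the fp+8 read run past the end of the stack. The bound, as a standalone predicate:

#include <stdbool.h>
#include <stdint.h>

#define THREAD_SIZE 16384UL

/* fp itself and the return address one word above it must both
 * lie inside the stack; hence the -16 (sp0 offset) and the extra
 * -sizeof(uint64_t) headroom for the fp+8 dereference. */
static bool frame_ok(uintptr_t stack, uintptr_t fp)
{
    return fp >= stack &&
           fp <= stack + THREAD_SIZE - 16 - sizeof(uint64_t);
}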
25718diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
25719index 7461f50..1334029 100644
25720--- a/arch/x86/kernel/ptrace.c
25721+++ b/arch/x86/kernel/ptrace.c
25722@@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
25723 {
25724 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
25725 unsigned long sp = (unsigned long)&regs->sp;
25726- struct thread_info *tinfo;
25727
25728- if (context == (sp & ~(THREAD_SIZE - 1)))
25729+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
25730 return sp;
25731
25732- tinfo = (struct thread_info *)context;
25733- if (tinfo->previous_esp)
25734- return tinfo->previous_esp;
25735+ sp = *(unsigned long *)context;
25736+ if (sp)
25737+ return sp;
25738
25739 return (unsigned long)regs;
25740 }
25741@@ -588,7 +587,7 @@ static void ptrace_triggered(struct perf_event *bp,
25742 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
25743 {
25744 int i;
25745- int dr7 = 0;
25746+ unsigned long dr7 = 0;
25747 struct arch_hw_breakpoint *info;
25748
25749 for (i = 0; i < HBP_NUM; i++) {
25750@@ -822,7 +821,7 @@ long arch_ptrace(struct task_struct *child, long request,
25751 unsigned long addr, unsigned long data)
25752 {
25753 int ret;
25754- unsigned long __user *datap = (unsigned long __user *)data;
25755+ unsigned long __user *datap = (__force unsigned long __user *)data;
25756
25757 switch (request) {
25758 /* read the word at location addr in the USER area. */
25759@@ -907,14 +906,14 @@ long arch_ptrace(struct task_struct *child, long request,
25760 if ((int) addr < 0)
25761 return -EIO;
25762 ret = do_get_thread_area(child, addr,
25763- (struct user_desc __user *)data);
25764+ (__force struct user_desc __user *) data);
25765 break;
25766
25767 case PTRACE_SET_THREAD_AREA:
25768 if ((int) addr < 0)
25769 return -EIO;
25770 ret = do_set_thread_area(child, addr,
25771- (struct user_desc __user *)data, 0);
25772+ (__force struct user_desc __user *) data, 0);
25773 break;
25774 #endif
25775
25776@@ -1292,7 +1291,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
25777
25778 #ifdef CONFIG_X86_64
25779
25780-static struct user_regset x86_64_regsets[] __read_mostly = {
25781+static user_regset_no_const x86_64_regsets[] __read_only = {
25782 [REGSET_GENERAL] = {
25783 .core_note_type = NT_PRSTATUS,
25784 .n = sizeof(struct user_regs_struct) / sizeof(long),
25785@@ -1333,7 +1332,7 @@ static const struct user_regset_view user_x86_64_view = {
25786 #endif /* CONFIG_X86_64 */
25787
25788 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
25789-static struct user_regset x86_32_regsets[] __read_mostly = {
25790+static user_regset_no_const x86_32_regsets[] __read_only = {
25791 [REGSET_GENERAL] = {
25792 .core_note_type = NT_PRSTATUS,
25793 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
25794@@ -1386,7 +1385,7 @@ static const struct user_regset_view user_x86_32_view = {
25795 */
25796 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
25797
25798-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
25799+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
25800 {
25801 #ifdef CONFIG_X86_64
25802 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
25803@@ -1421,7 +1420,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
25804 memset(info, 0, sizeof(*info));
25805 info->si_signo = SIGTRAP;
25806 info->si_code = si_code;
25807- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
25808+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
25809 }
25810
25811 void user_single_step_siginfo(struct task_struct *tsk,
25812@@ -1450,6 +1449,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
25813 # define IS_IA32 0
25814 #endif
25815
25816+#ifdef CONFIG_GRKERNSEC_SETXID
25817+extern void gr_delayed_cred_worker(void);
25818+#endif
25819+
25820 /*
25821 * We must return the syscall number to actually look up in the table.
25822 * This can be -1L to skip running any syscall at all.
25823@@ -1460,6 +1463,11 @@ long syscall_trace_enter(struct pt_regs *regs)
25824
25825 user_exit();
25826
25827+#ifdef CONFIG_GRKERNSEC_SETXID
25828+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
25829+ gr_delayed_cred_worker();
25830+#endif
25831+
25832 /*
25833 * If we stepped into a sysenter/syscall insn, it trapped in
25834 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
25835@@ -1515,6 +1523,11 @@ void syscall_trace_leave(struct pt_regs *regs)
25836 */
25837 user_exit();
25838
25839+#ifdef CONFIG_GRKERNSEC_SETXID
25840+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
25841+ gr_delayed_cred_worker();
25842+#endif
25843+
25844 audit_syscall_exit(regs);
25845
25846 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
25847diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
25848index 2f355d2..e75ed0a 100644
25849--- a/arch/x86/kernel/pvclock.c
25850+++ b/arch/x86/kernel/pvclock.c
25851@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
25852 reset_hung_task_detector();
25853 }
25854
25855-static atomic64_t last_value = ATOMIC64_INIT(0);
25856+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
25857
25858 void pvclock_resume(void)
25859 {
25860- atomic64_set(&last_value, 0);
25861+ atomic64_set_unchecked(&last_value, 0);
25862 }
25863
25864 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
25865@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
25866 * updating at the same time, and one of them could be slightly behind,
25867 * making the assumption that last_value always go forward fail to hold.
25868 */
25869- last = atomic64_read(&last_value);
25870+ last = atomic64_read_unchecked(&last_value);
25871 do {
25872 if (ret < last)
25873 return last;
25874- last = atomic64_cmpxchg(&last_value, last, ret);
25875+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
25876 } while (unlikely(last != ret));
25877
25878 return ret;
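[note] The pvclock loop is a lock-free monotonic clamp: a CPU publishes its timestamp only if it is newer than the last globally seen value, so readers never observe time going backwards even when per-CPU clocks drift slightly. Switching to the _unchecked atomics merely exempts this timestamp, which is not a reference count, from PaX's overflow instrumentation. A C11 sketch of the same loop:

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t last_value;

static uint64_t monotonic_publish(uint64_t ret)
{
    uint64_t last = atomic_load(&last_value);
    for (;;) {
        if (ret < last)
            return last;   /* another CPU already saw a later time */
        if (atomic_compare_exchange_weak(&last_value, &last, ret))
            return ret;    /* we advanced the global watermark */
        /* CAS failure reloaded `last`; retry */
    }
}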
25879diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
25880index c752cb4..866c432 100644
25881--- a/arch/x86/kernel/reboot.c
25882+++ b/arch/x86/kernel/reboot.c
25883@@ -68,6 +68,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
25884
25885 void __noreturn machine_real_restart(unsigned int type)
25886 {
25887+
25888+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
25889+ struct desc_struct *gdt;
25890+#endif
25891+
25892 local_irq_disable();
25893
25894 /*
25895@@ -95,7 +100,29 @@ void __noreturn machine_real_restart(unsigned int type)
25896
25897 /* Jump to the identity-mapped low memory code */
25898 #ifdef CONFIG_X86_32
25899- asm volatile("jmpl *%0" : :
25900+
25901+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
25902+ gdt = get_cpu_gdt_table(smp_processor_id());
25903+ pax_open_kernel();
25904+#ifdef CONFIG_PAX_MEMORY_UDEREF
25905+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
25906+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
25907+ loadsegment(ds, __KERNEL_DS);
25908+ loadsegment(es, __KERNEL_DS);
25909+ loadsegment(ss, __KERNEL_DS);
25910+#endif
25911+#ifdef CONFIG_PAX_KERNEXEC
25912+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
25913+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
25914+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
25915+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
25916+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
25917+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
25918+#endif
25919+ pax_close_kernel();
25920+#endif
25921+
25922+ asm volatile("ljmpl *%0" : :
25923 "rm" (real_mode_header->machine_real_restart_asm),
25924 "a" (type));
25925 #else
25926@@ -470,7 +497,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
25927 * try to force a triple fault and then cycle between hitting the keyboard
25928 * controller and doing that
25929 */
25930-static void native_machine_emergency_restart(void)
25931+static void __noreturn native_machine_emergency_restart(void)
25932 {
25933 int i;
25934 int attempt = 0;
25935@@ -593,13 +620,13 @@ void native_machine_shutdown(void)
25936 #endif
25937 }
25938
25939-static void __machine_emergency_restart(int emergency)
25940+static void __noreturn __machine_emergency_restart(int emergency)
25941 {
25942 reboot_emergency = emergency;
25943 machine_ops.emergency_restart();
25944 }
25945
25946-static void native_machine_restart(char *__unused)
25947+static void __noreturn native_machine_restart(char *__unused)
25948 {
25949 pr_notice("machine restart\n");
25950
25951@@ -608,7 +635,7 @@ static void native_machine_restart(char *__unused)
25952 __machine_emergency_restart(0);
25953 }
25954
25955-static void native_machine_halt(void)
25956+static void __noreturn native_machine_halt(void)
25957 {
25958 /* Stop other cpus and apics */
25959 machine_shutdown();
25960@@ -618,7 +645,7 @@ static void native_machine_halt(void)
25961 stop_this_cpu(NULL);
25962 }
25963
25964-static void native_machine_power_off(void)
25965+static void __noreturn native_machine_power_off(void)
25966 {
25967 if (pm_power_off) {
25968 if (!reboot_force)
25969@@ -627,9 +654,10 @@ static void native_machine_power_off(void)
25970 }
25971 /* A fallback in case there is no PM info available */
25972 tboot_shutdown(TB_SHUTDOWN_HALT);
25973+ unreachable();
25974 }
25975
25976-struct machine_ops machine_ops = {
25977+struct machine_ops machine_ops __read_only = {
25978 .power_off = native_machine_power_off,
25979 .shutdown = native_machine_shutdown,
25980 .emergency_restart = native_machine_emergency_restart,
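[note] The reboot.c hunks annotate every handler that cannot return as __noreturn and add unreachable() after the tboot fallback, letting the compiler verify that no path falls out of them and drop dead code after their call sites; marking machine_ops __read_only again keeps a function-pointer table out of writable memory. The annotation in portable C11:

#include <stdnoreturn.h>

/* noreturn plays the role of the kernel's __noreturn; the
 * unreachable() in the hunk serves the same purpose after a call
 * that, absent any power-off method, never comes back. */
static noreturn void halt_forever(void)
{
    for (;;)
        ;   /* the kernel parks the CPU with hlt here */
}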
25981diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
25982index c8e41e9..64049ef 100644
25983--- a/arch/x86/kernel/reboot_fixups_32.c
25984+++ b/arch/x86/kernel/reboot_fixups_32.c
25985@@ -57,7 +57,7 @@ struct device_fixup {
25986 unsigned int vendor;
25987 unsigned int device;
25988 void (*reboot_fixup)(struct pci_dev *);
25989-};
25990+} __do_const;
25991
25992 /*
25993 * PCI ids solely used for fixups_table go here
25994diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
25995index 3fd2c69..16ef367 100644
25996--- a/arch/x86/kernel/relocate_kernel_64.S
25997+++ b/arch/x86/kernel/relocate_kernel_64.S
25998@@ -11,6 +11,7 @@
25999 #include <asm/kexec.h>
26000 #include <asm/processor-flags.h>
26001 #include <asm/pgtable_types.h>
26002+#include <asm/alternative-asm.h>
26003
26004 /*
26005 * Must be relocatable PIC code callable as a C function
26006@@ -96,8 +97,7 @@ relocate_kernel:
26007
26008 /* jump to identity mapped page */
26009 addq $(identity_mapped - relocate_kernel), %r8
26010- pushq %r8
26011- ret
26012+ jmp *%r8
26013
26014 identity_mapped:
26015 /* set return address to 0 if not preserving context */
26016@@ -167,6 +167,7 @@ identity_mapped:
26017 xorl %r14d, %r14d
26018 xorl %r15d, %r15d
26019
26020+ pax_force_retaddr 0, 1
26021 ret
26022
26023 1:
26024diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
26025index cb233bc..23b4879 100644
26026--- a/arch/x86/kernel/setup.c
26027+++ b/arch/x86/kernel/setup.c
26028@@ -110,6 +110,7 @@
26029 #include <asm/mce.h>
26030 #include <asm/alternative.h>
26031 #include <asm/prom.h>
26032+#include <asm/boot.h>
26033
26034 /*
26035 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
26036@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
26037 #endif
26038
26039
26040-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
26041-__visible unsigned long mmu_cr4_features;
26042+#ifdef CONFIG_X86_64
26043+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
26044+#elif defined(CONFIG_X86_PAE)
26045+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
26046 #else
26047-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
26048+__visible unsigned long mmu_cr4_features __read_only;
26049 #endif
26050
26051+void set_in_cr4(unsigned long mask)
26052+{
26053+ unsigned long cr4 = read_cr4();
26054+
26055+ if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
26056+ return;
26057+
26058+ pax_open_kernel();
26059+ mmu_cr4_features |= mask;
26060+ pax_close_kernel();
26061+
26062+ if (trampoline_cr4_features)
26063+ *trampoline_cr4_features = mmu_cr4_features;
26064+ cr4 |= mask;
26065+ write_cr4(cr4);
26066+}
26067+EXPORT_SYMBOL(set_in_cr4);
26068+
26069+void clear_in_cr4(unsigned long mask)
26070+{
26071+ unsigned long cr4 = read_cr4();
26072+
26073+ if (!(cr4 & mask) && cr4 == mmu_cr4_features)
26074+ return;
26075+
26076+ pax_open_kernel();
26077+ mmu_cr4_features &= ~mask;
26078+ pax_close_kernel();
26079+
26080+ if (trampoline_cr4_features)
26081+ *trampoline_cr4_features = mmu_cr4_features;
26082+ cr4 &= ~mask;
26083+ write_cr4(cr4);
26084+}
26085+EXPORT_SYMBOL(clear_in_cr4);
26086+
26087 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
26088 int bootloader_type, bootloader_version;
26089
26090@@ -768,7 +807,7 @@ static void __init trim_bios_range(void)
26091 * area (640->1Mb) as ram even though it is not.
26092 * take them out.
26093 */
26094- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
26095+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
26096
26097 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
26098 }
26099@@ -776,7 +815,7 @@ static void __init trim_bios_range(void)
26100 /* called before trim_bios_range() to spare extra sanitize */
26101 static void __init e820_add_kernel_range(void)
26102 {
26103- u64 start = __pa_symbol(_text);
26104+ u64 start = __pa_symbol(ktla_ktva(_text));
26105 u64 size = __pa_symbol(_end) - start;
26106
26107 /*
26108@@ -838,8 +877,12 @@ static void __init trim_low_memory_range(void)
26109
26110 void __init setup_arch(char **cmdline_p)
26111 {
26112+#ifdef CONFIG_X86_32
26113+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
26114+#else
26115 memblock_reserve(__pa_symbol(_text),
26116 (unsigned long)__bss_stop - (unsigned long)_text);
26117+#endif
26118
26119 early_reserve_initrd();
26120
26121@@ -931,14 +974,14 @@ void __init setup_arch(char **cmdline_p)
26122
26123 if (!boot_params.hdr.root_flags)
26124 root_mountflags &= ~MS_RDONLY;
26125- init_mm.start_code = (unsigned long) _text;
26126- init_mm.end_code = (unsigned long) _etext;
26127+ init_mm.start_code = ktla_ktva((unsigned long) _text);
26128+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
26129 init_mm.end_data = (unsigned long) _edata;
26130 init_mm.brk = _brk_end;
26131
26132- code_resource.start = __pa_symbol(_text);
26133- code_resource.end = __pa_symbol(_etext)-1;
26134- data_resource.start = __pa_symbol(_etext);
26135+ code_resource.start = __pa_symbol(ktla_ktva(_text));
26136+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
26137+ data_resource.start = __pa_symbol(_sdata);
26138 data_resource.end = __pa_symbol(_edata)-1;
26139 bss_resource.start = __pa_symbol(__bss_start);
26140 bss_resource.end = __pa_symbol(__bss_stop)-1;
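[note] set_in_cr4()/clear_in_cr4() maintain mmu_cr4_features as a read-only shadow of %cr4: the helpers open a PaX write window just long enough to update the shadow, mirror the change into the live register, and propagate it to the trampoline copy used to bring up secondary CPUs. A sketch of the shadow-register pattern with stand-in variables (trampoline propagation omitted):

#include <stdint.h>

static uint64_t shadow;    /* stands in for mmu_cr4_features (.rodata) */
static uint64_t hw_cr4;    /* stands in for the live %cr4 */

static void set_bits(uint64_t mask)
{
    uint64_t cr4 = hw_cr4;                  /* read_cr4() */
    if ((cr4 & mask) == mask && cr4 == shadow)
        return;                             /* already set and in sync */
    shadow |= mask;                         /* inside pax_open/close_kernel() */
    cr4 |= mask;
    hw_cr4 = cr4;                           /* write_cr4(cr4) */
}

static void clear_bits(uint64_t mask)
{
    uint64_t cr4 = hw_cr4;
    if (!(cr4 & mask) && cr4 == shadow)
        return;
    shadow &= ~mask;
    cr4 &= ~mask;
    hw_cr4 = cr4;
}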
26141diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
26142index 5cdff03..80fa283 100644
26143--- a/arch/x86/kernel/setup_percpu.c
26144+++ b/arch/x86/kernel/setup_percpu.c
26145@@ -21,19 +21,17 @@
26146 #include <asm/cpu.h>
26147 #include <asm/stackprotector.h>
26148
26149-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
26150+#ifdef CONFIG_SMP
26151+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
26152 EXPORT_PER_CPU_SYMBOL(cpu_number);
26153+#endif
26154
26155-#ifdef CONFIG_X86_64
26156 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
26157-#else
26158-#define BOOT_PERCPU_OFFSET 0
26159-#endif
26160
26161 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
26162 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
26163
26164-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
26165+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
26166 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
26167 };
26168 EXPORT_SYMBOL(__per_cpu_offset);
26169@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
26170 {
26171 #ifdef CONFIG_NEED_MULTIPLE_NODES
26172 pg_data_t *last = NULL;
26173- unsigned int cpu;
26174+ int cpu;
26175
26176 for_each_possible_cpu(cpu) {
26177 int node = early_cpu_to_node(cpu);
26178@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
26179 {
26180 #ifdef CONFIG_X86_32
26181 struct desc_struct gdt;
26182+ unsigned long base = per_cpu_offset(cpu);
26183
26184- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
26185- 0x2 | DESCTYPE_S, 0x8);
26186- gdt.s = 1;
26187+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
26188+ 0x83 | DESCTYPE_S, 0xC);
26189 write_gdt_entry(get_cpu_gdt_table(cpu),
26190 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
26191 #endif
26192@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
26193 /* alrighty, percpu areas up and running */
26194 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
26195 for_each_possible_cpu(cpu) {
26196+#ifdef CONFIG_CC_STACKPROTECTOR
26197+#ifdef CONFIG_X86_32
26198+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
26199+#endif
26200+#endif
26201 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
26202 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
26203 per_cpu(cpu_number, cpu) = cpu;
26204@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
26205 */
26206 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
26207 #endif
26208+#ifdef CONFIG_CC_STACKPROTECTOR
26209+#ifdef CONFIG_X86_32
26210+ if (!cpu)
26211+ per_cpu(stack_canary.canary, cpu) = canary;
26212+#endif
26213+#endif
26214 /*
26215 * Up to this point, the boot CPU has been using .init.data
26216 * area. Reload any changed state for the boot CPU.
26217diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
26218index 9e5de68..16c53cb 100644
26219--- a/arch/x86/kernel/signal.c
26220+++ b/arch/x86/kernel/signal.c
26221@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
26222 * Align the stack pointer according to the i386 ABI,
26223 * i.e. so that on function entry ((sp + 4) & 15) == 0.
26224 */
26225- sp = ((sp + 4) & -16ul) - 4;
26226+ sp = ((sp - 12) & -16ul) - 4;
26227 #else /* !CONFIG_X86_32 */
26228 sp = round_down(sp, 16) - 8;
26229 #endif
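[note] Both sigframe-alignment formulas satisfy the i386 ABI invariant that (sp + 4) be 16-byte aligned on handler entry; the difference is that the original could land anywhere from 0 to 15 bytes below the incoming sp, while ((sp - 12) & -16ul) - 4 always lands at least 16 bytes below it, presumably to guarantee fresh space under the interrupted frame. A quick exhaustive check over one alignment period:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    for (unsigned long sp = 64; sp < 80; sp++) {
        unsigned long v_old = ((sp + 4) & -16ul) - 4;
        unsigned long v_new = ((sp - 12) & -16ul) - 4;

        assert(((v_old + 4) & 15) == 0);  /* ABI: (sp'+4) 16-aligned */
        assert(((v_new + 4) & 15) == 0);
        assert(v_old <= sp);              /* old form: 0..15 bytes below */
        assert(v_new + 16 <= sp);         /* new form: >= 16 bytes below */
        printf("sp=%lu old=sp-%lu new=sp-%lu\n",
               sp, sp - v_old, sp - v_new);
    }
    return 0;
}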
26230@@ -298,9 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
26231 }
26232
26233 if (current->mm->context.vdso)
26234- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
26235+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
26236 else
26237- restorer = &frame->retcode;
26238+ restorer = (void __user *)&frame->retcode;
26239 if (ksig->ka.sa.sa_flags & SA_RESTORER)
26240 restorer = ksig->ka.sa.sa_restorer;
26241
26242@@ -314,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
26243 * reasons and because gdb uses it as a signature to notice
26244 * signal handler stack frames.
26245 */
26246- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
26247+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
26248
26249 if (err)
26250 return -EFAULT;
26251@@ -361,7 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
26252 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
26253
26254 /* Set up to return from userspace. */
26255- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
26256+ if (current->mm->context.vdso)
26257+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
26258+ else
26259+ restorer = (void __user *)&frame->retcode;
26260 if (ksig->ka.sa.sa_flags & SA_RESTORER)
26261 restorer = ksig->ka.sa.sa_restorer;
26262 put_user_ex(restorer, &frame->pretcode);
26263@@ -373,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
26264 * reasons and because gdb uses it as a signature to notice
26265 * signal handler stack frames.
26266 */
26267- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
26268+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
26269 } put_user_catch(err);
26270
26271 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
26272@@ -609,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
26273 {
26274 int usig = signr_convert(ksig->sig);
26275 sigset_t *set = sigmask_to_save();
26276- compat_sigset_t *cset = (compat_sigset_t *) set;
26277+ sigset_t sigcopy;
26278+ compat_sigset_t *cset;
26279+
26280+ sigcopy = *set;
26281+
26282+ cset = (compat_sigset_t *) &sigcopy;
26283
26284 /* Set up the stack frame */
26285 if (is_ia32_frame()) {
26286@@ -620,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
26287 } else if (is_x32_frame()) {
26288 return x32_setup_rt_frame(ksig, cset, regs);
26289 } else {
26290- return __setup_rt_frame(ksig->sig, ksig, set, regs);
26291+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
26292 }
26293 }
26294
26295diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
26296index 7c3a5a6..f0a8961 100644
26297--- a/arch/x86/kernel/smp.c
26298+++ b/arch/x86/kernel/smp.c
26299@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
26300
26301 __setup("nonmi_ipi", nonmi_ipi_setup);
26302
26303-struct smp_ops smp_ops = {
26304+struct smp_ops smp_ops __read_only = {
26305 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
26306 .smp_prepare_cpus = native_smp_prepare_cpus,
26307 .smp_cpus_done = native_smp_cpus_done,
26308diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
26309index 85dc05a..1241266 100644
26310--- a/arch/x86/kernel/smpboot.c
26311+++ b/arch/x86/kernel/smpboot.c
26312@@ -229,14 +229,18 @@ static void notrace start_secondary(void *unused)
26313
26314 enable_start_cpu0 = 0;
26315
26316-#ifdef CONFIG_X86_32
26317- /* switch away from the initial page table */
26318- load_cr3(swapper_pg_dir);
26319- __flush_tlb_all();
26320-#endif
26321-
26322 /* otherwise gcc will move up smp_processor_id before the cpu_init */
26323 barrier();
26324+
26325+ /* switch away from the initial page table */
26326+#ifdef CONFIG_PAX_PER_CPU_PGD
26327+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
26328+ __flush_tlb_all();
26329+#elif defined(CONFIG_X86_32)
26330+ load_cr3(swapper_pg_dir);
26331+ __flush_tlb_all();
26332+#endif
26333+
26334 /*
26335 * Check TSC synchronization with the BP:
26336 */
26337@@ -751,6 +755,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
26338 idle->thread.sp = (unsigned long) (((struct pt_regs *)
26339 (THREAD_SIZE + task_stack_page(idle))) - 1);
26340 per_cpu(current_task, cpu) = idle;
26341+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
26342
26343 #ifdef CONFIG_X86_32
26344 /* Stack for startup_32 can be just as for start_secondary onwards */
26345@@ -758,11 +763,13 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
26346 #else
26347 clear_tsk_thread_flag(idle, TIF_FORK);
26348 initial_gs = per_cpu_offset(cpu);
26349- per_cpu(kernel_stack, cpu) =
26350- (unsigned long)task_stack_page(idle) -
26351- KERNEL_STACK_OFFSET + THREAD_SIZE;
26352+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
26353 #endif
26354+
26355+ pax_open_kernel();
26356 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
26357+ pax_close_kernel();
26358+
26359 initial_code = (unsigned long)start_secondary;
26360 stack_start = idle->thread.sp;
26361
26362@@ -911,6 +918,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
26363 /* the FPU context is blank, nobody can own it */
26364 __cpu_disable_lazy_restore(cpu);
26365
26366+#ifdef CONFIG_PAX_PER_CPU_PGD
26367+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
26368+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26369+ KERNEL_PGD_PTRS);
26370+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
26371+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26372+ KERNEL_PGD_PTRS);
26373+#endif
26374+
26375 err = do_boot_cpu(apicid, cpu, tidle);
26376 if (err) {
26377 pr_debug("do_boot_cpu failed %d\n", err);
26378diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
26379index 9b4d51d..5d28b58 100644
26380--- a/arch/x86/kernel/step.c
26381+++ b/arch/x86/kernel/step.c
26382@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
26383 struct desc_struct *desc;
26384 unsigned long base;
26385
26386- seg &= ~7UL;
26387+ seg >>= 3;
26388
26389 mutex_lock(&child->mm->context.lock);
26390- if (unlikely((seg >> 3) >= child->mm->context.size))
26391+ if (unlikely(seg >= child->mm->context.size))
26392 addr = -1L; /* bogus selector, access would fault */
26393 else {
26394 desc = child->mm->context.ldt + seg;
26395@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
26396 addr += base;
26397 }
26398 mutex_unlock(&child->mm->context.lock);
26399- }
26400+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
26401+ addr = ktla_ktva(addr);
26402
26403 return addr;
26404 }
26405@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
26406 unsigned char opcode[15];
26407 unsigned long addr = convert_ip_to_linear(child, regs);
26408
26409+ if (addr == -EINVAL)
26410+ return 0;
26411+
26412 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
26413 for (i = 0; i < copied; i++) {
26414 switch (opcode[i]) {
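[note] The step.c change switches the LDT lookup from "mask off RPL/TI, then shift where needed" to a single shift up front: an x86 selector is index << 3 | TI | RPL, so seg >>= 3 extracts the descriptor index once, and the bounds check against context.size and the context.ldt + seg indexing then use the same units. For reference, the selector fields:

#include <stdint.h>

/* selector layout: | 13-bit index | TI (0=GDT, 1=LDT) | RPL (2 bits) | */
static inline unsigned sel_index(uint16_t sel) { return sel >> 3; }
static inline unsigned sel_ti(uint16_t sel)    { return (sel >> 2) & 1; }
static inline unsigned sel_rpl(uint16_t sel)   { return sel & 3; }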
26415diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
26416new file mode 100644
26417index 0000000..5877189
26418--- /dev/null
26419+++ b/arch/x86/kernel/sys_i386_32.c
26420@@ -0,0 +1,189 @@
26421+/*
26422+ * This file contains various random system calls that
26423+ * have a non-standard calling sequence on the Linux/i386
26424+ * platform.
26425+ */
26426+
26427+#include <linux/errno.h>
26428+#include <linux/sched.h>
26429+#include <linux/mm.h>
26430+#include <linux/fs.h>
26431+#include <linux/smp.h>
26432+#include <linux/sem.h>
26433+#include <linux/msg.h>
26434+#include <linux/shm.h>
26435+#include <linux/stat.h>
26436+#include <linux/syscalls.h>
26437+#include <linux/mman.h>
26438+#include <linux/file.h>
26439+#include <linux/utsname.h>
26440+#include <linux/ipc.h>
26441+#include <linux/elf.h>
26442+
26443+#include <linux/uaccess.h>
26444+#include <linux/unistd.h>
26445+
26446+#include <asm/syscalls.h>
26447+
26448+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
26449+{
26450+ unsigned long pax_task_size = TASK_SIZE;
26451+
26452+#ifdef CONFIG_PAX_SEGMEXEC
26453+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
26454+ pax_task_size = SEGMEXEC_TASK_SIZE;
26455+#endif
26456+
26457+ if (flags & MAP_FIXED)
26458+ if (len > pax_task_size || addr > pax_task_size - len)
26459+ return -EINVAL;
26460+
26461+ return 0;
26462+}
26463+
26464+/*
26465+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
26466+ */
26467+static unsigned long get_align_mask(void)
26468+{
26469+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
26470+ return 0;
26471+
26472+ if (!(current->flags & PF_RANDOMIZE))
26473+ return 0;
26474+
26475+ return va_align.mask;
26476+}
26477+
26478+unsigned long
26479+arch_get_unmapped_area(struct file *filp, unsigned long addr,
26480+ unsigned long len, unsigned long pgoff, unsigned long flags)
26481+{
26482+ struct mm_struct *mm = current->mm;
26483+ struct vm_area_struct *vma;
26484+ unsigned long pax_task_size = TASK_SIZE;
26485+ struct vm_unmapped_area_info info;
26486+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
26487+
26488+#ifdef CONFIG_PAX_SEGMEXEC
26489+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26490+ pax_task_size = SEGMEXEC_TASK_SIZE;
26491+#endif
26492+
26493+ pax_task_size -= PAGE_SIZE;
26494+
26495+ if (len > pax_task_size)
26496+ return -ENOMEM;
26497+
26498+ if (flags & MAP_FIXED)
26499+ return addr;
26500+
26501+#ifdef CONFIG_PAX_RANDMMAP
26502+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26503+#endif
26504+
26505+ if (addr) {
26506+ addr = PAGE_ALIGN(addr);
26507+ if (pax_task_size - len >= addr) {
26508+ vma = find_vma(mm, addr);
26509+ if (check_heap_stack_gap(vma, addr, len, offset))
26510+ return addr;
26511+ }
26512+ }
26513+
26514+ info.flags = 0;
26515+ info.length = len;
26516+ info.align_mask = filp ? get_align_mask() : 0;
26517+ info.align_offset = pgoff << PAGE_SHIFT;
26518+ info.threadstack_offset = offset;
26519+
26520+#ifdef CONFIG_PAX_PAGEEXEC
26521+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
26522+ info.low_limit = 0x00110000UL;
26523+ info.high_limit = mm->start_code;
26524+
26525+#ifdef CONFIG_PAX_RANDMMAP
26526+ if (mm->pax_flags & MF_PAX_RANDMMAP)
26527+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
26528+#endif
26529+
26530+ if (info.low_limit < info.high_limit) {
26531+ addr = vm_unmapped_area(&info);
26532+ if (!IS_ERR_VALUE(addr))
26533+ return addr;
26534+ }
26535+ } else
26536+#endif
26537+
26538+ info.low_limit = mm->mmap_base;
26539+ info.high_limit = pax_task_size;
26540+
26541+ return vm_unmapped_area(&info);
26542+}
26543+
26544+unsigned long
26545+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
26546+ const unsigned long len, const unsigned long pgoff,
26547+ const unsigned long flags)
26548+{
26549+ struct vm_area_struct *vma;
26550+ struct mm_struct *mm = current->mm;
26551+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
26552+ struct vm_unmapped_area_info info;
26553+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
26554+
26555+#ifdef CONFIG_PAX_SEGMEXEC
26556+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26557+ pax_task_size = SEGMEXEC_TASK_SIZE;
26558+#endif
26559+
26560+ pax_task_size -= PAGE_SIZE;
26561+
26562+ /* requested length too big for entire address space */
26563+ if (len > pax_task_size)
26564+ return -ENOMEM;
26565+
26566+ if (flags & MAP_FIXED)
26567+ return addr;
26568+
26569+#ifdef CONFIG_PAX_PAGEEXEC
26570+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
26571+ goto bottomup;
26572+#endif
26573+
26574+#ifdef CONFIG_PAX_RANDMMAP
26575+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26576+#endif
26577+
26578+ /* requesting a specific address */
26579+ if (addr) {
26580+ addr = PAGE_ALIGN(addr);
26581+ if (pax_task_size - len >= addr) {
26582+ vma = find_vma(mm, addr);
26583+ if (check_heap_stack_gap(vma, addr, len, offset))
26584+ return addr;
26585+ }
26586+ }
26587+
26588+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
26589+ info.length = len;
26590+ info.low_limit = PAGE_SIZE;
26591+ info.high_limit = mm->mmap_base;
26592+ info.align_mask = filp ? get_align_mask() : 0;
26593+ info.align_offset = pgoff << PAGE_SHIFT;
26594+ info.threadstack_offset = offset;
26595+
26596+ addr = vm_unmapped_area(&info);
26597+ if (!(addr & ~PAGE_MASK))
26598+ return addr;
26599+ VM_BUG_ON(addr != -ENOMEM);
26600+
26601+bottomup:
26602+ /*
26603+ * A failed mmap() very likely causes application failure,
26604+ * so fall back to the bottom-up function here. This scenario
26605+ * can happen with large stack limits and large mmap()
26606+ * allocations.
26607+ */
26608+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
26609+}
26610diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
26611index 30277e2..5664a29 100644
26612--- a/arch/x86/kernel/sys_x86_64.c
26613+++ b/arch/x86/kernel/sys_x86_64.c
26614@@ -81,8 +81,8 @@ out:
26615 return error;
26616 }
26617
26618-static void find_start_end(unsigned long flags, unsigned long *begin,
26619- unsigned long *end)
26620+static void find_start_end(struct mm_struct *mm, unsigned long flags,
26621+ unsigned long *begin, unsigned long *end)
26622 {
26623 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
26624 unsigned long new_begin;
26625@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
26626 *begin = new_begin;
26627 }
26628 } else {
26629- *begin = current->mm->mmap_legacy_base;
26630+ *begin = mm->mmap_legacy_base;
26631 *end = TASK_SIZE;
26632 }
26633 }
26634@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
26635 struct vm_area_struct *vma;
26636 struct vm_unmapped_area_info info;
26637 unsigned long begin, end;
26638+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
26639
26640 if (flags & MAP_FIXED)
26641 return addr;
26642
26643- find_start_end(flags, &begin, &end);
26644+ find_start_end(mm, flags, &begin, &end);
26645
26646 if (len > end)
26647 return -ENOMEM;
26648
26649+#ifdef CONFIG_PAX_RANDMMAP
26650+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26651+#endif
26652+
26653 if (addr) {
26654 addr = PAGE_ALIGN(addr);
26655 vma = find_vma(mm, addr);
26656- if (end - len >= addr &&
26657- (!vma || addr + len <= vma->vm_start))
26658+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
26659 return addr;
26660 }
26661
26662@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
26663 info.high_limit = end;
26664 info.align_mask = filp ? get_align_mask() : 0;
26665 info.align_offset = pgoff << PAGE_SHIFT;
26666+ info.threadstack_offset = offset;
26667 return vm_unmapped_area(&info);
26668 }
26669
26670@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
26671 struct mm_struct *mm = current->mm;
26672 unsigned long addr = addr0;
26673 struct vm_unmapped_area_info info;
26674+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
26675
26676 /* requested length too big for entire address space */
26677 if (len > TASK_SIZE)
26678@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
26679 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
26680 goto bottomup;
26681
26682+#ifdef CONFIG_PAX_RANDMMAP
26683+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26684+#endif
26685+
26686 /* requesting a specific address */
26687 if (addr) {
26688 addr = PAGE_ALIGN(addr);
26689 vma = find_vma(mm, addr);
26690- if (TASK_SIZE - len >= addr &&
26691- (!vma || addr + len <= vma->vm_start))
26692+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
26693 return addr;
26694 }
26695
26696@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
26697 info.high_limit = mm->mmap_base;
26698 info.align_mask = filp ? get_align_mask() : 0;
26699 info.align_offset = pgoff << PAGE_SHIFT;
26700+ info.threadstack_offset = offset;
26701 addr = vm_unmapped_area(&info);
26702 if (!(addr & ~PAGE_MASK))
26703 return addr;
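[note] Both the new i386 file above and the x86_64 hunks swap the bare `!vma || addr + len <= vma->vm_start` test for check_heap_stack_gap(), which as used here also enforces a guard gap against an adjacent stack mapping and accounts for the randomized offset passed via info.threadstack_offset. A sketch of the general shape of such a check (types and names hypothetical, not the kernel's):

#include <stdbool.h>

struct vma_sketch {
    unsigned long start;   /* vm_start */
    bool is_stack;         /* VM_GROWSDOWN in the real code */
};

/* Accept [addr, addr+len) only if it leaves `gap` bytes before a
 * following stack mapping; plain mappings only need no overlap. */
static bool gap_ok(const struct vma_sketch *next, unsigned long addr,
                   unsigned long len, unsigned long gap)
{
    if (!next)
        return true;
    if (next->is_stack)
        return addr + len + gap <= next->start;
    return addr + len <= next->start;
}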
26704diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
26705index 91a4496..bb87552 100644
26706--- a/arch/x86/kernel/tboot.c
26707+++ b/arch/x86/kernel/tboot.c
26708@@ -221,7 +221,7 @@ static int tboot_setup_sleep(void)
26709
26710 void tboot_shutdown(u32 shutdown_type)
26711 {
26712- void (*shutdown)(void);
26713+ void (* __noreturn shutdown)(void);
26714
26715 if (!tboot_enabled())
26716 return;
26717@@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type)
26718
26719 switch_to_tboot_pt();
26720
26721- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
26722+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
26723 shutdown();
26724
26725 /* should not reach here */
26726@@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
26727 return -ENODEV;
26728 }
26729
26730-static atomic_t ap_wfs_count;
26731+static atomic_unchecked_t ap_wfs_count;
26732
26733 static int tboot_wait_for_aps(int num_aps)
26734 {
26735@@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
26736 {
26737 switch (action) {
26738 case CPU_DYING:
26739- atomic_inc(&ap_wfs_count);
26740+ atomic_inc_unchecked(&ap_wfs_count);
26741 if (num_online_cpus() == 1)
26742- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
26743+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
26744 return NOTIFY_BAD;
26745 break;
26746 }
26747@@ -422,7 +422,7 @@ static __init int tboot_late_init(void)
26748
26749 tboot_create_trampoline();
26750
26751- atomic_set(&ap_wfs_count, 0);
26752+ atomic_set_unchecked(&ap_wfs_count, 0);
26753 register_hotcpu_notifier(&tboot_cpu_notifier);
26754
26755 #ifdef CONFIG_DEBUG_FS
26756diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
26757index 24d3c91..d06b473 100644
26758--- a/arch/x86/kernel/time.c
26759+++ b/arch/x86/kernel/time.c
26760@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
26761 {
26762 unsigned long pc = instruction_pointer(regs);
26763
26764- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
26765+ if (!user_mode(regs) && in_lock_functions(pc)) {
26766 #ifdef CONFIG_FRAME_POINTER
26767- return *(unsigned long *)(regs->bp + sizeof(long));
26768+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
26769 #else
26770 unsigned long *sp =
26771 (unsigned long *)kernel_stack_pointer(regs);
26772@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
26773 * or above a saved flags. Eflags has bits 22-31 zero,
26774 * kernel addresses don't.
26775 */
26776+
26777+#ifdef CONFIG_PAX_KERNEXEC
26778+ return ktla_ktva(sp[0]);
26779+#else
26780 if (sp[0] >> 22)
26781 return sp[0];
26782 if (sp[1] >> 22)
26783 return sp[1];
26784 #endif
26785+
26786+#endif
26787 }
26788 return pc;
26789 }
26790diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
26791index f7fec09..9991981 100644
26792--- a/arch/x86/kernel/tls.c
26793+++ b/arch/x86/kernel/tls.c
26794@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
26795 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
26796 return -EINVAL;
26797
26798+#ifdef CONFIG_PAX_SEGMEXEC
26799+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
26800+ return -EINVAL;
26801+#endif
26802+
26803 set_tls_desc(p, idx, &info, 1);
26804
26805 return 0;
26806@@ -200,7 +205,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
26807
26808 if (kbuf)
26809 info = kbuf;
26810- else if (__copy_from_user(infobuf, ubuf, count))
26811+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
26812 return -EFAULT;
26813 else
26814 info = infobuf;
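[note] The tls.c hunk adds a bounds check before __copy_from_user() into the fixed-size on-stack infobuf; without it, a count larger than the buffer would overflow the kernel stack. The generic pattern, with memcpy standing in for the user copy in this user-space sketch:

#include <string.h>

/* Validate against the destination capacity *before* copying. */
static int copy_bounded(void *dst, size_t dst_size,
                        const void *src, size_t count)
{
    if (count > dst_size)
        return -1;          /* the kernel returns -EFAULT here */
    memcpy(dst, src, count);
    return 0;
}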
26815diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
26816index 1c113db..287b42e 100644
26817--- a/arch/x86/kernel/tracepoint.c
26818+++ b/arch/x86/kernel/tracepoint.c
26819@@ -9,11 +9,11 @@
26820 #include <linux/atomic.h>
26821
26822 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
26823-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
26824+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
26825 (unsigned long) trace_idt_table };
26826
26827 /* No need to be aligned, but done to keep all IDTs defined the same way. */
26828-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
26829+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
26830
26831 static int trace_irq_vector_refcount;
26832 static DEFINE_MUTEX(irq_vector_mutex);
26833diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
26834index b857ed8..51ae4cb 100644
26835--- a/arch/x86/kernel/traps.c
26836+++ b/arch/x86/kernel/traps.c
26837@@ -66,7 +66,7 @@
26838 #include <asm/proto.h>
26839
26840 /* No need to be aligned, but done to keep all IDTs defined the same way. */
26841-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
26842+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
26843 #else
26844 #include <asm/processor-flags.h>
26845 #include <asm/setup.h>
26846@@ -75,7 +75,7 @@ asmlinkage int system_call(void);
26847 #endif
26848
26849 /* Must be page-aligned because the real IDT is used in a fixmap. */
26850-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
26851+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
26852
26853 DECLARE_BITMAP(used_vectors, NR_VECTORS);
26854 EXPORT_SYMBOL_GPL(used_vectors);
26855@@ -107,11 +107,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
26856 }
26857
26858 static int __kprobes
26859-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
26860+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
26861 struct pt_regs *regs, long error_code)
26862 {
26863 #ifdef CONFIG_X86_32
26864- if (regs->flags & X86_VM_MASK) {
26865+ if (v8086_mode(regs)) {
26866 /*
26867 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
26868 * On nmi (interrupt 2), do_trap should not be called.
26869@@ -124,12 +124,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
26870 return -1;
26871 }
26872 #endif
26873- if (!user_mode(regs)) {
26874+ if (!user_mode_novm(regs)) {
26875 if (!fixup_exception(regs)) {
26876 tsk->thread.error_code = error_code;
26877 tsk->thread.trap_nr = trapnr;
26878+
26879+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26880+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
26881+ str = "PAX: suspicious stack segment fault";
26882+#endif
26883+
26884 die(str, regs, error_code);
26885 }
26886+
26887+#ifdef CONFIG_PAX_REFCOUNT
26888+ if (trapnr == 4)
26889+ pax_report_refcount_overflow(regs);
26890+#endif
26891+
26892 return 0;
26893 }
26894
26895@@ -137,7 +149,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
26896 }
26897
26898 static void __kprobes
26899-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
26900+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
26901 long error_code, siginfo_t *info)
26902 {
26903 struct task_struct *tsk = current;
26904@@ -161,7 +173,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
26905 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
26906 printk_ratelimit()) {
26907 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
26908- tsk->comm, tsk->pid, str,
26909+ tsk->comm, task_pid_nr(tsk), str,
26910 regs->ip, regs->sp, error_code);
26911 print_vma_addr(" in ", regs->ip);
26912 pr_cont("\n");
26913@@ -277,7 +289,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
26914 conditional_sti(regs);
26915
26916 #ifdef CONFIG_X86_32
26917- if (regs->flags & X86_VM_MASK) {
26918+ if (v8086_mode(regs)) {
26919 local_irq_enable();
26920 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
26921 goto exit;
26922@@ -285,18 +297,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
26923 #endif
26924
26925 tsk = current;
26926- if (!user_mode(regs)) {
26927+ if (!user_mode_novm(regs)) {
26928 if (fixup_exception(regs))
26929 goto exit;
26930
26931 tsk->thread.error_code = error_code;
26932 tsk->thread.trap_nr = X86_TRAP_GP;
26933 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
26934- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
26935+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
26936+
26937+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26938+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
26939+ die("PAX: suspicious general protection fault", regs, error_code);
26940+ else
26941+#endif
26942+
26943 die("general protection fault", regs, error_code);
26944+ }
26945 goto exit;
26946 }
26947
26948+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
26949+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
26950+ struct mm_struct *mm = tsk->mm;
26951+ unsigned long limit;
26952+
26953+ down_write(&mm->mmap_sem);
26954+ limit = mm->context.user_cs_limit;
26955+ if (limit < TASK_SIZE) {
26956+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
26957+ up_write(&mm->mmap_sem);
26958+ return;
26959+ }
26960+ up_write(&mm->mmap_sem);
26961+ }
26962+#endif
26963+
26964 tsk->thread.error_code = error_code;
26965 tsk->thread.trap_nr = X86_TRAP_GP;
26966
26967@@ -457,7 +493,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
26968 /* It's safe to allow irq's after DR6 has been saved */
26969 preempt_conditional_sti(regs);
26970
26971- if (regs->flags & X86_VM_MASK) {
26972+ if (v8086_mode(regs)) {
26973 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
26974 X86_TRAP_DB);
26975 preempt_conditional_cli(regs);
26976@@ -472,7 +508,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
26977 * We already checked v86 mode above, so we can check for kernel mode
26978 * by just checking the CPL of CS.
26979 */
26980- if ((dr6 & DR_STEP) && !user_mode(regs)) {
26981+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
26982 tsk->thread.debugreg6 &= ~DR_STEP;
26983 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
26984 regs->flags &= ~X86_EFLAGS_TF;
26985@@ -504,7 +540,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
26986 return;
26987 conditional_sti(regs);
26988
26989- if (!user_mode_vm(regs))
26990+ if (!user_mode(regs))
26991 {
26992 if (!fixup_exception(regs)) {
26993 task->thread.error_code = error_code;
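The traps.c hunks above route two PaX features through the generic trap path: under KERNEXEC a stack-segment fault (trap 12) arriving with a kernel CS is relabelled as suspicious before die(), and under PAX_REFCOUNT an overflow trap (trap 4, raised by the "into"/"int $4" instructions added to the atomic asm further down) is reported via pax_report_refcount_overflow() once the exception fixup has run. A minimal userspace sketch of that dispatch; all names here are illustrative stand-ins, not kernel API:

#include <stdio.h>

#define X86_TRAP_OF  4   /* overflow, raised by "into" / "int $4" */
#define X86_TRAP_SS 12   /* stack segment fault */

/* stand-in for the kernel's pax_report_refcount_overflow() */
static void report_refcount_overflow(void)
{
    printf("PAX: refcount overflow detected\n");
}

static void kernel_trap_no_signal(int trapnr, const char *str, int fixed_up)
{
    if (!fixed_up) {
        /* KERNEXEC path: relabel a kernel-CS #SS before dying */
        if (trapnr == X86_TRAP_SS)
            str = "PAX: suspicious stack segment fault";
        printf("die: %s (trap %d)\n", str, trapnr);
        return;
    }
    /* fixup succeeded: an overflow trap means a saturated refcount */
    if (trapnr == X86_TRAP_OF)
        report_refcount_overflow();
}

int main(void)
{
    kernel_trap_no_signal(X86_TRAP_OF, "overflow", 1);
    kernel_trap_no_signal(X86_TRAP_SS, "stack segment", 0);
    return 0;
}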
26994diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
26995index 2ed8459..7cf329f 100644
26996--- a/arch/x86/kernel/uprobes.c
26997+++ b/arch/x86/kernel/uprobes.c
26998@@ -629,7 +629,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
26999 int ret = NOTIFY_DONE;
27000
27001 /* We are only interested in userspace traps */
27002- if (regs && !user_mode_vm(regs))
27003+ if (regs && !user_mode(regs))
27004 return NOTIFY_DONE;
27005
27006 switch (val) {
27007@@ -719,7 +719,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
27008
27009 if (ncopied != rasize) {
27010 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
27011- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
27012+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
27013
27014 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
27015 }
27016diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
27017index b9242ba..50c5edd 100644
27018--- a/arch/x86/kernel/verify_cpu.S
27019+++ b/arch/x86/kernel/verify_cpu.S
27020@@ -20,6 +20,7 @@
27021 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
27022 * arch/x86/kernel/trampoline_64.S: secondary processor verification
27023 * arch/x86/kernel/head_32.S: processor startup
27024+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
27025 *
27026 * verify_cpu, returns the status of longmode and SSE in register %eax.
27027 * 0: Success 1: Failure
27028diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
27029index e8edcf5..27f9344 100644
27030--- a/arch/x86/kernel/vm86_32.c
27031+++ b/arch/x86/kernel/vm86_32.c
27032@@ -44,6 +44,7 @@
27033 #include <linux/ptrace.h>
27034 #include <linux/audit.h>
27035 #include <linux/stddef.h>
27036+#include <linux/grsecurity.h>
27037
27038 #include <asm/uaccess.h>
27039 #include <asm/io.h>
27040@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
27041 do_exit(SIGSEGV);
27042 }
27043
27044- tss = &per_cpu(init_tss, get_cpu());
27045+ tss = init_tss + get_cpu();
27046 current->thread.sp0 = current->thread.saved_sp0;
27047 current->thread.sysenter_cs = __KERNEL_CS;
27048 load_sp0(tss, &current->thread);
27049@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
27050
27051 if (tsk->thread.saved_sp0)
27052 return -EPERM;
27053+
27054+#ifdef CONFIG_GRKERNSEC_VM86
27055+ if (!capable(CAP_SYS_RAWIO)) {
27056+ gr_handle_vm86();
27057+ return -EPERM;
27058+ }
27059+#endif
27060+
27061 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
27062 offsetof(struct kernel_vm86_struct, vm86plus) -
27063 sizeof(info.regs));
27064@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
27065 int tmp;
27066 struct vm86plus_struct __user *v86;
27067
27068+#ifdef CONFIG_GRKERNSEC_VM86
27069+ if (!capable(CAP_SYS_RAWIO)) {
27070+ gr_handle_vm86();
27071+ return -EPERM;
27072+ }
27073+#endif
27074+
27075 tsk = current;
27076 switch (cmd) {
27077 case VM86_REQUEST_IRQ:
27078@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
27079 tsk->thread.saved_fs = info->regs32->fs;
27080 tsk->thread.saved_gs = get_user_gs(info->regs32);
27081
27082- tss = &per_cpu(init_tss, get_cpu());
27083+ tss = init_tss + get_cpu();
27084 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
27085 if (cpu_has_sep)
27086 tsk->thread.sysenter_cs = 0;
27087@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
27088 goto cannot_handle;
27089 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
27090 goto cannot_handle;
27091- intr_ptr = (unsigned long __user *) (i << 2);
27092+ intr_ptr = (__force unsigned long __user *) (i << 2);
27093 if (get_user(segoffs, intr_ptr))
27094 goto cannot_handle;
27095 if ((segoffs >> 16) == BIOSSEG)
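Both vm86 entry points above gain the same GRKERNSEC_VM86 gate: without CAP_SYS_RAWIO the syscall is logged and refused before any user state is copied in. A hedged sketch of the pattern, with local helpers standing in for capable() and gr_handle_vm86():

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* stand-ins for capable(CAP_SYS_RAWIO) and gr_handle_vm86() */
static bool capable_sys_rawio(void) { return false; }
static void gr_log_vm86_attempt(void) { fprintf(stderr, "vm86 denied\n"); }

static int sys_vm86_entry(void)
{
    if (!capable_sys_rawio()) {
        gr_log_vm86_attempt();
        return -EPERM;   /* refused before any user state is copied */
    }
    /* ... copy_vm86_regs_from_user() and the rest would follow ... */
    return 0;
}

int main(void)
{
    printf("vm86: %d\n", sys_vm86_entry());
    return 0;
}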
27096diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
27097index da6b35a..977e9cf 100644
27098--- a/arch/x86/kernel/vmlinux.lds.S
27099+++ b/arch/x86/kernel/vmlinux.lds.S
27100@@ -26,6 +26,13 @@
27101 #include <asm/page_types.h>
27102 #include <asm/cache.h>
27103 #include <asm/boot.h>
27104+#include <asm/segment.h>
27105+
27106+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27107+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
27108+#else
27109+#define __KERNEL_TEXT_OFFSET 0
27110+#endif
27111
27112 #undef i386 /* in case the preprocessor is a 32bit one */
27113
27114@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
27115
27116 PHDRS {
27117 text PT_LOAD FLAGS(5); /* R_E */
27118+#ifdef CONFIG_X86_32
27119+ module PT_LOAD FLAGS(5); /* R_E */
27120+#endif
27121+#ifdef CONFIG_XEN
27122+ rodata PT_LOAD FLAGS(5); /* R_E */
27123+#else
27124+ rodata PT_LOAD FLAGS(4); /* R__ */
27125+#endif
27126 data PT_LOAD FLAGS(6); /* RW_ */
27127-#ifdef CONFIG_X86_64
27128+ init.begin PT_LOAD FLAGS(6); /* RW_ */
27129 #ifdef CONFIG_SMP
27130 percpu PT_LOAD FLAGS(6); /* RW_ */
27131 #endif
27132+ text.init PT_LOAD FLAGS(5); /* R_E */
27133+ text.exit PT_LOAD FLAGS(5); /* R_E */
27134 init PT_LOAD FLAGS(7); /* RWE */
27135-#endif
27136 note PT_NOTE FLAGS(0); /* ___ */
27137 }
27138
27139 SECTIONS
27140 {
27141 #ifdef CONFIG_X86_32
27142- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
27143- phys_startup_32 = startup_32 - LOAD_OFFSET;
27144+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
27145 #else
27146- . = __START_KERNEL;
27147- phys_startup_64 = startup_64 - LOAD_OFFSET;
27148+ . = __START_KERNEL;
27149 #endif
27150
27151 /* Text and read-only data */
27152- .text : AT(ADDR(.text) - LOAD_OFFSET) {
27153- _text = .;
27154+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
27155 /* bootstrapping code */
27156+#ifdef CONFIG_X86_32
27157+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
27158+#else
27159+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
27160+#endif
27161+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
27162+ _text = .;
27163 HEAD_TEXT
27164 . = ALIGN(8);
27165 _stext = .;
27166@@ -104,13 +124,47 @@ SECTIONS
27167 IRQENTRY_TEXT
27168 *(.fixup)
27169 *(.gnu.warning)
27170- /* End of text section */
27171- _etext = .;
27172 } :text = 0x9090
27173
27174- NOTES :text :note
27175+ . += __KERNEL_TEXT_OFFSET;
27176
27177- EXCEPTION_TABLE(16) :text = 0x9090
27178+#ifdef CONFIG_X86_32
27179+ . = ALIGN(PAGE_SIZE);
27180+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
27181+
27182+#ifdef CONFIG_PAX_KERNEXEC
27183+ MODULES_EXEC_VADDR = .;
27184+ BYTE(0)
27185+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
27186+ . = ALIGN(HPAGE_SIZE) - 1;
27187+ MODULES_EXEC_END = .;
27188+#endif
27189+
27190+ } :module
27191+#endif
27192+
27193+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
27194+ /* End of text section */
27195+ BYTE(0)
27196+ _etext = . - __KERNEL_TEXT_OFFSET;
27197+ }
27198+
27199+#ifdef CONFIG_X86_32
27200+ . = ALIGN(PAGE_SIZE);
27201+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
27202+ . = ALIGN(PAGE_SIZE);
27203+ *(.empty_zero_page)
27204+ *(.initial_pg_fixmap)
27205+ *(.initial_pg_pmd)
27206+ *(.initial_page_table)
27207+ *(.swapper_pg_dir)
27208+ } :rodata
27209+#endif
27210+
27211+ . = ALIGN(PAGE_SIZE);
27212+ NOTES :rodata :note
27213+
27214+ EXCEPTION_TABLE(16) :rodata
27215
27216 #if defined(CONFIG_DEBUG_RODATA)
27217 /* .text should occupy whole number of pages */
27218@@ -122,16 +176,20 @@ SECTIONS
27219
27220 /* Data */
27221 .data : AT(ADDR(.data) - LOAD_OFFSET) {
27222+
27223+#ifdef CONFIG_PAX_KERNEXEC
27224+ . = ALIGN(HPAGE_SIZE);
27225+#else
27226+ . = ALIGN(PAGE_SIZE);
27227+#endif
27228+
27229 /* Start of data section */
27230 _sdata = .;
27231
27232 /* init_task */
27233 INIT_TASK_DATA(THREAD_SIZE)
27234
27235-#ifdef CONFIG_X86_32
27236- /* 32 bit has nosave before _edata */
27237 NOSAVE_DATA
27238-#endif
27239
27240 PAGE_ALIGNED_DATA(PAGE_SIZE)
27241
27242@@ -172,12 +230,19 @@ SECTIONS
27243 #endif /* CONFIG_X86_64 */
27244
27245 /* Init code and data - will be freed after init */
27246- . = ALIGN(PAGE_SIZE);
27247 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
27248+ BYTE(0)
27249+
27250+#ifdef CONFIG_PAX_KERNEXEC
27251+ . = ALIGN(HPAGE_SIZE);
27252+#else
27253+ . = ALIGN(PAGE_SIZE);
27254+#endif
27255+
27256 __init_begin = .; /* paired with __init_end */
27257- }
27258+ } :init.begin
27259
27260-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
27261+#ifdef CONFIG_SMP
27262 /*
27263 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
27264 * output PHDR, so the next output section - .init.text - should
27265@@ -186,12 +251,27 @@ SECTIONS
27266 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
27267 #endif
27268
27269- INIT_TEXT_SECTION(PAGE_SIZE)
27270-#ifdef CONFIG_X86_64
27271- :init
27272-#endif
27273+ . = ALIGN(PAGE_SIZE);
27274+ init_begin = .;
27275+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
27276+ VMLINUX_SYMBOL(_sinittext) = .;
27277+ INIT_TEXT
27278+ VMLINUX_SYMBOL(_einittext) = .;
27279+ . = ALIGN(PAGE_SIZE);
27280+ } :text.init
27281
27282- INIT_DATA_SECTION(16)
27283+ /*
27284+ * .exit.text is discarded at runtime, not link time, to deal with
27285+ * references from .altinstructions and .eh_frame
27286+ */
27287+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
27288+ EXIT_TEXT
27289+ . = ALIGN(16);
27290+ } :text.exit
27291+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
27292+
27293+ . = ALIGN(PAGE_SIZE);
27294+ INIT_DATA_SECTION(16) :init
27295
27296 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
27297 __x86_cpu_dev_start = .;
27298@@ -262,19 +342,12 @@ SECTIONS
27299 }
27300
27301 . = ALIGN(8);
27302- /*
27303- * .exit.text is discard at runtime, not link time, to deal with
27304- * references from .altinstructions and .eh_frame
27305- */
27306- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
27307- EXIT_TEXT
27308- }
27309
27310 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
27311 EXIT_DATA
27312 }
27313
27314-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
27315+#ifndef CONFIG_SMP
27316 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
27317 #endif
27318
27319@@ -293,16 +366,10 @@ SECTIONS
27320 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
27321 __smp_locks = .;
27322 *(.smp_locks)
27323- . = ALIGN(PAGE_SIZE);
27324 __smp_locks_end = .;
27325+ . = ALIGN(PAGE_SIZE);
27326 }
27327
27328-#ifdef CONFIG_X86_64
27329- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
27330- NOSAVE_DATA
27331- }
27332-#endif
27333-
27334 /* BSS */
27335 . = ALIGN(PAGE_SIZE);
27336 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
27337@@ -318,6 +385,7 @@ SECTIONS
27338 __brk_base = .;
27339 . += 64 * 1024; /* 64k alignment slop space */
27340 *(.brk_reservation) /* areas brk users have reserved */
27341+ . = ALIGN(HPAGE_SIZE);
27342 __brk_limit = .;
27343 }
27344
27345@@ -344,13 +412,12 @@ SECTIONS
27346 * for the boot processor.
27347 */
27348 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
27349-INIT_PER_CPU(gdt_page);
27350 INIT_PER_CPU(irq_stack_union);
27351
27352 /*
27353 * Build-time check on the image size:
27354 */
27355-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
27356+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
27357 "kernel image bigger than KERNEL_IMAGE_SIZE");
27358
27359 #ifdef CONFIG_SMP
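The linker-script rework above leans heavily on ALIGN(PAGE_SIZE) and, under KERNEXEC, ALIGN(HPAGE_SIZE), so that section boundaries can be mapped with page- or hugepage-granular permissions. A quick C sketch of that alignment arithmetic; the 2 MiB HPAGE_SIZE value is an assumption for illustration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  4096UL
#define HPAGE_SIZE (2UL * 1024 * 1024)   /* assumed 2 MiB huge page */

/* a must be a power of two */
static uintptr_t align_up(uintptr_t x, uintptr_t a)
{
    return (x + a - 1) & ~(a - 1);
}

int main(void)
{
    uintptr_t dot = 0xc1234567UL;   /* an arbitrary "." location */
    printf("ALIGN(PAGE_SIZE):  %#lx\n", (unsigned long)align_up(dot, PAGE_SIZE));
    printf("ALIGN(HPAGE_SIZE): %#lx\n", (unsigned long)align_up(dot, HPAGE_SIZE));
    return 0;
}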
27360diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
27361index 1f96f93..d5c8f7a 100644
27362--- a/arch/x86/kernel/vsyscall_64.c
27363+++ b/arch/x86/kernel/vsyscall_64.c
27364@@ -56,15 +56,13 @@
27365 DEFINE_VVAR(int, vgetcpu_mode);
27366 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
27367
27368-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
27369+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
27370
27371 static int __init vsyscall_setup(char *str)
27372 {
27373 if (str) {
27374 if (!strcmp("emulate", str))
27375 vsyscall_mode = EMULATE;
27376- else if (!strcmp("native", str))
27377- vsyscall_mode = NATIVE;
27378 else if (!strcmp("none", str))
27379 vsyscall_mode = NONE;
27380 else
27381@@ -323,8 +321,7 @@ do_ret:
27382 return true;
27383
27384 sigsegv:
27385- force_sig(SIGSEGV, current);
27386- return true;
27387+ do_group_exit(SIGKILL);
27388 }
27389
27390 /*
27391@@ -377,10 +374,7 @@ void __init map_vsyscall(void)
27392 extern char __vvar_page;
27393 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
27394
27395- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
27396- vsyscall_mode == NATIVE
27397- ? PAGE_KERNEL_VSYSCALL
27398- : PAGE_KERNEL_VVAR);
27399+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
27400 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
27401 (unsigned long)VSYSCALL_START);
27402
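The vsyscall hunks remove NATIVE mode entirely: the page is always mapped non-executable (PAGE_KERNEL_VVAR) with accesses emulated, and a bogus jump into it now terminates the whole thread group instead of raising SIGSEGV. A userspace sketch of the reduced vsyscall= parser after this change (stand-in names, not the kernel's __setup machinery):

#include <stdio.h>
#include <string.h>

enum vsyscall_mode { EMULATE, NONE };   /* NATIVE is gone */

static int vsyscall_setup(const char *str, enum vsyscall_mode *mode)
{
    if (!strcmp(str, "emulate")) { *mode = EMULATE; return 0; }
    if (!strcmp(str, "none"))    { *mode = NONE;    return 0; }
    return -1;   /* "native" now takes the invalid-parameter path */
}

int main(void)
{
    enum vsyscall_mode m = EMULATE;
    printf("emulate: %d\n", vsyscall_setup("emulate", &m));
    printf("native:  %d\n", vsyscall_setup("native", &m));   /* rejected */
    return 0;
}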
27403diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
27404index 04068192..4d75aa6 100644
27405--- a/arch/x86/kernel/x8664_ksyms_64.c
27406+++ b/arch/x86/kernel/x8664_ksyms_64.c
27407@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
27408 EXPORT_SYMBOL(copy_user_generic_unrolled);
27409 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
27410 EXPORT_SYMBOL(__copy_user_nocache);
27411-EXPORT_SYMBOL(_copy_from_user);
27412-EXPORT_SYMBOL(_copy_to_user);
27413
27414 EXPORT_SYMBOL(copy_page);
27415 EXPORT_SYMBOL(clear_page);
27416@@ -73,3 +71,7 @@ EXPORT_SYMBOL(___preempt_schedule);
27417 EXPORT_SYMBOL(___preempt_schedule_context);
27418 #endif
27419 #endif
27420+
27421+#ifdef CONFIG_PAX_PER_CPU_PGD
27422+EXPORT_SYMBOL(cpu_pgd);
27423+#endif
27424diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
27425index 021783b..6511282 100644
27426--- a/arch/x86/kernel/x86_init.c
27427+++ b/arch/x86/kernel/x86_init.c
27428@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
27429 static void default_nmi_init(void) { };
27430 static int default_i8042_detect(void) { return 1; };
27431
27432-struct x86_platform_ops x86_platform = {
27433+struct x86_platform_ops x86_platform __read_only = {
27434 .calibrate_tsc = native_calibrate_tsc,
27435 .get_wallclock = mach_get_cmos_time,
27436 .set_wallclock = mach_set_rtc_mmss,
27437@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
27438 EXPORT_SYMBOL_GPL(x86_platform);
27439
27440 #if defined(CONFIG_PCI_MSI)
27441-struct x86_msi_ops x86_msi = {
27442+struct x86_msi_ops x86_msi __read_only = {
27443 .setup_msi_irqs = native_setup_msi_irqs,
27444 .compose_msi_msg = native_compose_msi_msg,
27445 .teardown_msi_irq = native_teardown_msi_irq,
27446@@ -150,7 +150,7 @@ u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag)
27447 }
27448 #endif
27449
27450-struct x86_io_apic_ops x86_io_apic_ops = {
27451+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
27452 .init = native_io_apic_init_mappings,
27453 .read = native_io_apic_read,
27454 .write = native_io_apic_write,
27455diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
27456index 422fd82..b2d262e 100644
27457--- a/arch/x86/kernel/xsave.c
27458+++ b/arch/x86/kernel/xsave.c
27459@@ -164,18 +164,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
27460
27461 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
27462 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
27463- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
27464+ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
27465
27466 if (!use_xsave())
27467 return err;
27468
27469- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
27470+ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
27471
27472 /*
27473 * Read the xstate_bv which we copied (directly from the cpu or
27474 * from the state in task struct) to the user buffers.
27475 */
27476- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
27477+ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
27478
27479 /*
27480 * For legacy compatible, we always set FP/SSE bits in the bit
27481@@ -190,7 +190,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
27482 */
27483 xstate_bv |= XSTATE_FPSSE;
27484
27485- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
27486+ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
27487
27488 return err;
27489 }
27490@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
27491 {
27492 int err;
27493
27494+ buf = (struct xsave_struct __user *)____m(buf);
27495 if (use_xsave())
27496 err = xsave_user(buf);
27497 else if (use_fxsr())
27498@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
27499 */
27500 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
27501 {
27502+ buf = (void __user *)____m(buf);
27503 if (use_xsave()) {
27504 if ((unsigned long)buf % 64 || fx_only) {
27505 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
27506diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
27507index c697625..a032162 100644
27508--- a/arch/x86/kvm/cpuid.c
27509+++ b/arch/x86/kvm/cpuid.c
27510@@ -156,15 +156,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
27511 struct kvm_cpuid2 *cpuid,
27512 struct kvm_cpuid_entry2 __user *entries)
27513 {
27514- int r;
27515+ int r, i;
27516
27517 r = -E2BIG;
27518 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
27519 goto out;
27520 r = -EFAULT;
27521- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
27522- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
27523+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
27524 goto out;
27525+ for (i = 0; i < cpuid->nent; ++i) {
27526+ struct kvm_cpuid_entry2 cpuid_entry;
27527+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
27528+ goto out;
27529+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
27530+ }
27531 vcpu->arch.cpuid_nent = cpuid->nent;
27532 kvm_apic_set_version(vcpu);
27533 kvm_x86_ops->cpuid_update(vcpu);
27534@@ -179,15 +184,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
27535 struct kvm_cpuid2 *cpuid,
27536 struct kvm_cpuid_entry2 __user *entries)
27537 {
27538- int r;
27539+ int r, i;
27540
27541 r = -E2BIG;
27542 if (cpuid->nent < vcpu->arch.cpuid_nent)
27543 goto out;
27544 r = -EFAULT;
27545- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
27546- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
27547+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
27548 goto out;
27549+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
27550+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
27551+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
27552+ goto out;
27553+ }
27554 return 0;
27555
27556 out:
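Both cpuid ioctl paths above swap one large copy_{from,to}_user() for an access_ok() check followed by a per-entry __copy loop, so each individual copy is bounded by sizeof(struct kvm_cpuid_entry2). A hedged userspace sketch of the pattern, with memcpy() and a trivial range_ok() standing in for the uaccess helpers:

#include <stdio.h>
#include <string.h>

struct cpuid_entry { unsigned function, index, flags; };

/* stand-in for access_ok(): userspace memory is always "ok" here */
static int range_ok(const void *p, size_t len) { (void)p; (void)len; return 1; }

static int set_entries(struct cpuid_entry *dst, unsigned dst_max,
                       const struct cpuid_entry *user_src, unsigned nent)
{
    if (nent > dst_max)
        return -1;                                   /* -E2BIG */
    if (!range_ok(user_src, nent * sizeof(*user_src)))
        return -2;                                   /* -EFAULT */
    for (unsigned i = 0; i < nent; i++) {
        struct cpuid_entry tmp;                      /* bounded staging copy */
        memcpy(&tmp, &user_src[i], sizeof(tmp));     /* __copy_from_user */
        dst[i] = tmp;
    }
    return 0;
}

int main(void)
{
    struct cpuid_entry src[2] = { {1, 0, 0}, {2, 0, 0} }, dst[4];
    printf("copy 2 of 4: %d\n", set_entries(dst, 4, src, 2));
    printf("copy 9 of 4: %d\n", set_entries(dst, 4, src, 9));   /* -E2BIG */
    return 0;
}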
27557diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
27558index d86ff15..e77b023 100644
27559--- a/arch/x86/kvm/lapic.c
27560+++ b/arch/x86/kvm/lapic.c
27561@@ -55,7 +55,7 @@
27562 #define APIC_BUS_CYCLE_NS 1
27563
27564 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
27565-#define apic_debug(fmt, arg...)
27566+#define apic_debug(fmt, arg...) do {} while (0)
27567
27568 #define APIC_LVT_NUM 6
27569 /* 14 is the version for Xeon and Pentium 8.4.8*/
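The one-liner above turns the disabled apic_debug() into "do {} while (0)". An empty expansion leaves a bare ";" behind an if and trips -Wempty-body-style warnings; the do/while form is a genuine statement that still requires its trailing semicolon. A minimal illustration (GNU-style named variadics, mirroring the original macro):

#include <stdio.h>

#define dbg_old(fmt, args...)                   /* expands to nothing */
#define dbg_new(fmt, args...) do {} while (0)   /* expands to one statement */

int main(void)
{
    int verbose = 0;

    if (verbose)
        dbg_old("old");    /* leaves "if (verbose) ;" -- -Wempty-body */

    if (verbose)
        dbg_new("new");    /* a real statement, warning-free */

    puts("done");
    return 0;
}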
27570diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
27571index ad75d77..a679d32 100644
27572--- a/arch/x86/kvm/paging_tmpl.h
27573+++ b/arch/x86/kvm/paging_tmpl.h
27574@@ -331,7 +331,7 @@ retry_walk:
27575 if (unlikely(kvm_is_error_hva(host_addr)))
27576 goto error;
27577
27578- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
27579+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
27580 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
27581 goto error;
27582 walker->ptep_user[walker->level - 1] = ptep_user;
27583diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
27584index c7168a5..09070fc 100644
27585--- a/arch/x86/kvm/svm.c
27586+++ b/arch/x86/kvm/svm.c
27587@@ -3497,7 +3497,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
27588 int cpu = raw_smp_processor_id();
27589
27590 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
27591+
27592+ pax_open_kernel();
27593 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
27594+ pax_close_kernel();
27595+
27596 load_TR_desc();
27597 }
27598
27599@@ -3898,6 +3902,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
27600 #endif
27601 #endif
27602
27603+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
27604+ __set_fs(current_thread_info()->addr_limit);
27605+#endif
27606+
27607 reload_tss(vcpu);
27608
27609 local_irq_disable();
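reload_tss() above (and again in vmx.c below) writes to a descriptor that KERNEXEC keeps read-only, so the store is bracketed with pax_open_kernel()/pax_close_kernel(). A userspace analogue using mprotect() to briefly lift write protection; this assumes a Linux/POSIX environment and only models the idea, not the kernel mechanism:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return 1;

    strcpy(p, "type=0");
    mprotect(p, pagesz, PROT_READ);               /* steady state: RO */

    mprotect(p, pagesz, PROT_READ | PROT_WRITE);  /* pax_open_kernel() */
    p[5] = '9';                                   /* ...->type = 9 */
    mprotect(p, pagesz, PROT_READ);               /* pax_close_kernel() */

    printf("%s\n", p);                            /* prints "type=9" */
    return 0;
}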
27610diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
27611index da7837e..86c6ebf 100644
27612--- a/arch/x86/kvm/vmx.c
27613+++ b/arch/x86/kvm/vmx.c
27614@@ -1316,12 +1316,12 @@ static void vmcs_write64(unsigned long field, u64 value)
27615 #endif
27616 }
27617
27618-static void vmcs_clear_bits(unsigned long field, u32 mask)
27619+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
27620 {
27621 vmcs_writel(field, vmcs_readl(field) & ~mask);
27622 }
27623
27624-static void vmcs_set_bits(unsigned long field, u32 mask)
27625+static void vmcs_set_bits(unsigned long field, unsigned long mask)
27626 {
27627 vmcs_writel(field, vmcs_readl(field) | mask);
27628 }
27629@@ -1522,7 +1522,11 @@ static void reload_tss(void)
27630 struct desc_struct *descs;
27631
27632 descs = (void *)gdt->address;
27633+
27634+ pax_open_kernel();
27635 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
27636+ pax_close_kernel();
27637+
27638 load_TR_desc();
27639 }
27640
27641@@ -1746,6 +1750,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
27642 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
27643 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
27644
27645+#ifdef CONFIG_PAX_PER_CPU_PGD
27646+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
27647+#endif
27648+
27649 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
27650 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
27651 vmx->loaded_vmcs->cpu = cpu;
27652@@ -2033,7 +2041,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
27653 * reads and returns guest's timestamp counter "register"
27654 * guest_tsc = host_tsc + tsc_offset -- 21.3
27655 */
27656-static u64 guest_read_tsc(void)
27657+static u64 __intentional_overflow(-1) guest_read_tsc(void)
27658 {
27659 u64 host_tsc, tsc_offset;
27660
27661@@ -2987,8 +2995,11 @@ static __init int hardware_setup(void)
27662 if (!cpu_has_vmx_flexpriority())
27663 flexpriority_enabled = 0;
27664
27665- if (!cpu_has_vmx_tpr_shadow())
27666- kvm_x86_ops->update_cr8_intercept = NULL;
27667+ if (!cpu_has_vmx_tpr_shadow()) {
27668+ pax_open_kernel();
27669+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
27670+ pax_close_kernel();
27671+ }
27672
27673 if (enable_ept && !cpu_has_vmx_ept_2m_page())
27674 kvm_disable_largepages();
27675@@ -2999,13 +3010,15 @@ static __init int hardware_setup(void)
27676 if (!cpu_has_vmx_apicv())
27677 enable_apicv = 0;
27678
27679+ pax_open_kernel();
27680 if (enable_apicv)
27681- kvm_x86_ops->update_cr8_intercept = NULL;
27682+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
27683 else {
27684- kvm_x86_ops->hwapic_irr_update = NULL;
27685- kvm_x86_ops->deliver_posted_interrupt = NULL;
27686- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
27687+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
27688+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
27689+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
27690 }
27691+ pax_close_kernel();
27692
27693 if (nested)
27694 nested_vmx_setup_ctls_msrs();
27695@@ -4134,7 +4147,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
27696
27697 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
27698 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
27699+
27700+#ifndef CONFIG_PAX_PER_CPU_PGD
27701 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
27702+#endif
27703
27704 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
27705 #ifdef CONFIG_X86_64
27706@@ -4156,7 +4172,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
27707 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
27708 vmx->host_idt_base = dt.address;
27709
27710- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
27711+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
27712
27713 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
27714 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
27715@@ -7219,6 +7235,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
27716 "jmp 2f \n\t"
27717 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
27718 "2: "
27719+
27720+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27721+ "ljmp %[cs],$3f\n\t"
27722+ "3: "
27723+#endif
27724+
27725 /* Save guest registers, load host registers, keep flags */
27726 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
27727 "pop %0 \n\t"
27728@@ -7271,6 +7293,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
27729 #endif
27730 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
27731 [wordsize]"i"(sizeof(ulong))
27732+
27733+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27734+ ,[cs]"i"(__KERNEL_CS)
27735+#endif
27736+
27737 : "cc", "memory"
27738 #ifdef CONFIG_X86_64
27739 , "rax", "rbx", "rdi", "rsi"
27740@@ -7284,7 +7311,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
27741 if (debugctlmsr)
27742 update_debugctlmsr(debugctlmsr);
27743
27744-#ifndef CONFIG_X86_64
27745+#ifdef CONFIG_X86_32
27746 /*
27747 * The sysexit path does not restore ds/es, so we must set them to
27748 * a reasonable value ourselves.
27749@@ -7293,8 +7320,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
27750 * may be executed in interrupt context, which saves and restore segments
27751 * around it, nullifying its effect.
27752 */
27753- loadsegment(ds, __USER_DS);
27754- loadsegment(es, __USER_DS);
27755+ loadsegment(ds, __KERNEL_DS);
27756+ loadsegment(es, __KERNEL_DS);
27757+ loadsegment(ss, __KERNEL_DS);
27758+
27759+#ifdef CONFIG_PAX_KERNEXEC
27760+ loadsegment(fs, __KERNEL_PERCPU);
27761+#endif
27762+
27763+#ifdef CONFIG_PAX_MEMORY_UDEREF
27764+ __set_fs(current_thread_info()->addr_limit);
27765+#endif
27766+
27767 #endif
27768
27769 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
27770diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
27771index d89d51b..f3c612a 100644
27772--- a/arch/x86/kvm/x86.c
27773+++ b/arch/x86/kvm/x86.c
27774@@ -1791,8 +1791,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
27775 {
27776 struct kvm *kvm = vcpu->kvm;
27777 int lm = is_long_mode(vcpu);
27778- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
27779- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
27780+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
27781+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
27782 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
27783 : kvm->arch.xen_hvm_config.blob_size_32;
27784 u32 page_num = data & ~PAGE_MASK;
27785@@ -2676,6 +2676,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
27786 if (n < msr_list.nmsrs)
27787 goto out;
27788 r = -EFAULT;
27789+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
27790+ goto out;
27791 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
27792 num_msrs_to_save * sizeof(u32)))
27793 goto out;
27794@@ -5485,7 +5487,7 @@ static struct notifier_block pvclock_gtod_notifier = {
27795 };
27796 #endif
27797
27798-int kvm_arch_init(void *opaque)
27799+int kvm_arch_init(const void *opaque)
27800 {
27801 int r;
27802 struct kvm_x86_ops *ops = opaque;
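The kvm_arch_dev_ioctl() hunk above adds a clamp of num_msrs_to_save against the fixed msrs_to_save[] array before the copy_to_user(). The defensive pattern, sketched standalone with illustrative sizes:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static unsigned msrs_to_save[16];
static unsigned num_msrs_to_save = 32;   /* deliberately oversized count */

static int export_msr_list(unsigned *out, unsigned out_len)
{
    /* the added hunk: refuse before copying anything out */
    if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
        return -1;                       /* -EFAULT in the kernel */
    for (unsigned i = 0; i < num_msrs_to_save && i < out_len; i++)
        out[i] = msrs_to_save[i];
    return 0;
}

int main(void)
{
    unsigned buf[32];
    printf("export: %d\n", export_msr_list(buf, 32));   /* rejected */
    return 0;
}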
27803diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
27804index bdf8532..f63c587 100644
27805--- a/arch/x86/lguest/boot.c
27806+++ b/arch/x86/lguest/boot.c
27807@@ -1206,9 +1206,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
27808 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
27809 * Launcher to reboot us.
27810 */
27811-static void lguest_restart(char *reason)
27812+static __noreturn void lguest_restart(char *reason)
27813 {
27814 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
27815+ BUG();
27816 }
27817
27818 /*G:050
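lguest_restart() above becomes __noreturn, and a BUG() is appended so control can never fall off the end if the shutdown hypercall unexpectedly returns. The same shape in C11, with abort() standing in for BUG() (the program intentionally aborts):

#include <stdio.h>
#include <stdlib.h>

static void shutdown_hypercall(void) { /* normally never returns */ }

static _Noreturn void lguest_style_restart(const char *reason)
{
    printf("restart: %s\n", reason);
    shutdown_hypercall();
    abort();   /* kernel uses BUG(): a noreturn fn must never return */
}

int main(void)
{
    lguest_style_restart("demo");
}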
27819diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
27820index 00933d5..3a64af9 100644
27821--- a/arch/x86/lib/atomic64_386_32.S
27822+++ b/arch/x86/lib/atomic64_386_32.S
27823@@ -48,6 +48,10 @@ BEGIN(read)
27824 movl (v), %eax
27825 movl 4(v), %edx
27826 RET_ENDP
27827+BEGIN(read_unchecked)
27828+ movl (v), %eax
27829+ movl 4(v), %edx
27830+RET_ENDP
27831 #undef v
27832
27833 #define v %esi
27834@@ -55,6 +59,10 @@ BEGIN(set)
27835 movl %ebx, (v)
27836 movl %ecx, 4(v)
27837 RET_ENDP
27838+BEGIN(set_unchecked)
27839+ movl %ebx, (v)
27840+ movl %ecx, 4(v)
27841+RET_ENDP
27842 #undef v
27843
27844 #define v %esi
27845@@ -70,6 +78,20 @@ RET_ENDP
27846 BEGIN(add)
27847 addl %eax, (v)
27848 adcl %edx, 4(v)
27849+
27850+#ifdef CONFIG_PAX_REFCOUNT
27851+ jno 0f
27852+ subl %eax, (v)
27853+ sbbl %edx, 4(v)
27854+ int $4
27855+0:
27856+ _ASM_EXTABLE(0b, 0b)
27857+#endif
27858+
27859+RET_ENDP
27860+BEGIN(add_unchecked)
27861+ addl %eax, (v)
27862+ adcl %edx, 4(v)
27863 RET_ENDP
27864 #undef v
27865
27866@@ -77,6 +99,24 @@ RET_ENDP
27867 BEGIN(add_return)
27868 addl (v), %eax
27869 adcl 4(v), %edx
27870+
27871+#ifdef CONFIG_PAX_REFCOUNT
27872+ into
27873+1234:
27874+ _ASM_EXTABLE(1234b, 2f)
27875+#endif
27876+
27877+ movl %eax, (v)
27878+ movl %edx, 4(v)
27879+
27880+#ifdef CONFIG_PAX_REFCOUNT
27881+2:
27882+#endif
27883+
27884+RET_ENDP
27885+BEGIN(add_return_unchecked)
27886+ addl (v), %eax
27887+ adcl 4(v), %edx
27888 movl %eax, (v)
27889 movl %edx, 4(v)
27890 RET_ENDP
27891@@ -86,6 +126,20 @@ RET_ENDP
27892 BEGIN(sub)
27893 subl %eax, (v)
27894 sbbl %edx, 4(v)
27895+
27896+#ifdef CONFIG_PAX_REFCOUNT
27897+ jno 0f
27898+ addl %eax, (v)
27899+ adcl %edx, 4(v)
27900+ int $4
27901+0:
27902+ _ASM_EXTABLE(0b, 0b)
27903+#endif
27904+
27905+RET_ENDP
27906+BEGIN(sub_unchecked)
27907+ subl %eax, (v)
27908+ sbbl %edx, 4(v)
27909 RET_ENDP
27910 #undef v
27911
27912@@ -96,6 +150,27 @@ BEGIN(sub_return)
27913 sbbl $0, %edx
27914 addl (v), %eax
27915 adcl 4(v), %edx
27916+
27917+#ifdef CONFIG_PAX_REFCOUNT
27918+ into
27919+1234:
27920+ _ASM_EXTABLE(1234b, 2f)
27921+#endif
27922+
27923+ movl %eax, (v)
27924+ movl %edx, 4(v)
27925+
27926+#ifdef CONFIG_PAX_REFCOUNT
27927+2:
27928+#endif
27929+
27930+RET_ENDP
27931+BEGIN(sub_return_unchecked)
27932+ negl %edx
27933+ negl %eax
27934+ sbbl $0, %edx
27935+ addl (v), %eax
27936+ adcl 4(v), %edx
27937 movl %eax, (v)
27938 movl %edx, 4(v)
27939 RET_ENDP
27940@@ -105,6 +180,20 @@ RET_ENDP
27941 BEGIN(inc)
27942 addl $1, (v)
27943 adcl $0, 4(v)
27944+
27945+#ifdef CONFIG_PAX_REFCOUNT
27946+ jno 0f
27947+ subl $1, (v)
27948+ sbbl $0, 4(v)
27949+ int $4
27950+0:
27951+ _ASM_EXTABLE(0b, 0b)
27952+#endif
27953+
27954+RET_ENDP
27955+BEGIN(inc_unchecked)
27956+ addl $1, (v)
27957+ adcl $0, 4(v)
27958 RET_ENDP
27959 #undef v
27960
27961@@ -114,6 +203,26 @@ BEGIN(inc_return)
27962 movl 4(v), %edx
27963 addl $1, %eax
27964 adcl $0, %edx
27965+
27966+#ifdef CONFIG_PAX_REFCOUNT
27967+ into
27968+1234:
27969+ _ASM_EXTABLE(1234b, 2f)
27970+#endif
27971+
27972+ movl %eax, (v)
27973+ movl %edx, 4(v)
27974+
27975+#ifdef CONFIG_PAX_REFCOUNT
27976+2:
27977+#endif
27978+
27979+RET_ENDP
27980+BEGIN(inc_return_unchecked)
27981+ movl (v), %eax
27982+ movl 4(v), %edx
27983+ addl $1, %eax
27984+ adcl $0, %edx
27985 movl %eax, (v)
27986 movl %edx, 4(v)
27987 RET_ENDP
27988@@ -123,6 +232,20 @@ RET_ENDP
27989 BEGIN(dec)
27990 subl $1, (v)
27991 sbbl $0, 4(v)
27992+
27993+#ifdef CONFIG_PAX_REFCOUNT
27994+ jno 0f
27995+ addl $1, (v)
27996+ adcl $0, 4(v)
27997+ int $4
27998+0:
27999+ _ASM_EXTABLE(0b, 0b)
28000+#endif
28001+
28002+RET_ENDP
28003+BEGIN(dec_unchecked)
28004+ subl $1, (v)
28005+ sbbl $0, 4(v)
28006 RET_ENDP
28007 #undef v
28008
28009@@ -132,6 +255,26 @@ BEGIN(dec_return)
28010 movl 4(v), %edx
28011 subl $1, %eax
28012 sbbl $0, %edx
28013+
28014+#ifdef CONFIG_PAX_REFCOUNT
28015+ into
28016+1234:
28017+ _ASM_EXTABLE(1234b, 2f)
28018+#endif
28019+
28020+ movl %eax, (v)
28021+ movl %edx, 4(v)
28022+
28023+#ifdef CONFIG_PAX_REFCOUNT
28024+2:
28025+#endif
28026+
28027+RET_ENDP
28028+BEGIN(dec_return_unchecked)
28029+ movl (v), %eax
28030+ movl 4(v), %edx
28031+ subl $1, %eax
28032+ sbbl $0, %edx
28033 movl %eax, (v)
28034 movl %edx, 4(v)
28035 RET_ENDP
28036@@ -143,6 +286,13 @@ BEGIN(add_unless)
28037 adcl %edx, %edi
28038 addl (v), %eax
28039 adcl 4(v), %edx
28040+
28041+#ifdef CONFIG_PAX_REFCOUNT
28042+ into
28043+1234:
28044+ _ASM_EXTABLE(1234b, 2f)
28045+#endif
28046+
28047 cmpl %eax, %ecx
28048 je 3f
28049 1:
28050@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
28051 1:
28052 addl $1, %eax
28053 adcl $0, %edx
28054+
28055+#ifdef CONFIG_PAX_REFCOUNT
28056+ into
28057+1234:
28058+ _ASM_EXTABLE(1234b, 2f)
28059+#endif
28060+
28061 movl %eax, (v)
28062 movl %edx, 4(v)
28063 movl $1, %eax
28064@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
28065 movl 4(v), %edx
28066 subl $1, %eax
28067 sbbl $0, %edx
28068+
28069+#ifdef CONFIG_PAX_REFCOUNT
28070+ into
28071+1234:
28072+ _ASM_EXTABLE(1234b, 1f)
28073+#endif
28074+
28075 js 1f
28076 movl %eax, (v)
28077 movl %edx, 4(v)
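Each checked atomic64 op above follows the same recipe: perform the add/sub, and on signed overflow either roll it back and raise trap 4 ("jno ... int $4") or, in the *_return variants, trap via "into" with an exception-table entry; the *_unchecked twins keep the raw semantics. Roughly equivalent C using the GCC/Clang overflow builtins (a sketch of the semantics only, not the lock-free asm):

#include <stdint.h>
#include <stdio.h>

/* sketch of atomic64_add() under PAX_REFCOUNT: on signed overflow the
 * value is left untouched (the asm adds, then rolls back with
 * subl/sbbl before raising trap 4) */
static int add64_checked(int64_t delta, int64_t *v)
{
    int64_t res;
    if (__builtin_add_overflow(*v, delta, &res)) {
        fprintf(stderr, "overflow: value stays at %lld\n", (long long)*v);
        return -1;   /* kernel: int $4 -> pax_report_refcount_overflow() */
    }
    *v = res;
    return 0;
}

int main(void)
{
    int64_t v = INT64_MAX - 1;
    add64_checked(1, &v);   /* fine */
    add64_checked(1, &v);   /* detected, v unchanged */
    return 0;
}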
28078diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
28079index f5cc9eb..51fa319 100644
28080--- a/arch/x86/lib/atomic64_cx8_32.S
28081+++ b/arch/x86/lib/atomic64_cx8_32.S
28082@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
28083 CFI_STARTPROC
28084
28085 read64 %ecx
28086+ pax_force_retaddr
28087 ret
28088 CFI_ENDPROC
28089 ENDPROC(atomic64_read_cx8)
28090
28091+ENTRY(atomic64_read_unchecked_cx8)
28092+ CFI_STARTPROC
28093+
28094+ read64 %ecx
28095+ pax_force_retaddr
28096+ ret
28097+ CFI_ENDPROC
28098+ENDPROC(atomic64_read_unchecked_cx8)
28099+
28100 ENTRY(atomic64_set_cx8)
28101 CFI_STARTPROC
28102
28103@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
28104 cmpxchg8b (%esi)
28105 jne 1b
28106
28107+ pax_force_retaddr
28108 ret
28109 CFI_ENDPROC
28110 ENDPROC(atomic64_set_cx8)
28111
28112+ENTRY(atomic64_set_unchecked_cx8)
28113+ CFI_STARTPROC
28114+
28115+1:
28116+/* we don't need LOCK_PREFIX since aligned 64-bit writes
28117+ * are atomic on 586 and newer */
28118+ cmpxchg8b (%esi)
28119+ jne 1b
28120+
28121+ pax_force_retaddr
28122+ ret
28123+ CFI_ENDPROC
28124+ENDPROC(atomic64_set_unchecked_cx8)
28125+
28126 ENTRY(atomic64_xchg_cx8)
28127 CFI_STARTPROC
28128
28129@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
28130 cmpxchg8b (%esi)
28131 jne 1b
28132
28133+ pax_force_retaddr
28134 ret
28135 CFI_ENDPROC
28136 ENDPROC(atomic64_xchg_cx8)
28137
28138-.macro addsub_return func ins insc
28139-ENTRY(atomic64_\func\()_return_cx8)
28140+.macro addsub_return func ins insc unchecked=""
28141+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
28142 CFI_STARTPROC
28143 SAVE ebp
28144 SAVE ebx
28145@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
28146 movl %edx, %ecx
28147 \ins\()l %esi, %ebx
28148 \insc\()l %edi, %ecx
28149+
28150+.ifb \unchecked
28151+#ifdef CONFIG_PAX_REFCOUNT
28152+ into
28153+2:
28154+ _ASM_EXTABLE(2b, 3f)
28155+#endif
28156+.endif
28157+
28158 LOCK_PREFIX
28159 cmpxchg8b (%ebp)
28160 jne 1b
28161-
28162-10:
28163 movl %ebx, %eax
28164 movl %ecx, %edx
28165+
28166+.ifb \unchecked
28167+#ifdef CONFIG_PAX_REFCOUNT
28168+3:
28169+#endif
28170+.endif
28171+
28172 RESTORE edi
28173 RESTORE esi
28174 RESTORE ebx
28175 RESTORE ebp
28176+ pax_force_retaddr
28177 ret
28178 CFI_ENDPROC
28179-ENDPROC(atomic64_\func\()_return_cx8)
28180+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
28181 .endm
28182
28183 addsub_return add add adc
28184 addsub_return sub sub sbb
28185+addsub_return add add adc _unchecked
28186+addsub_return sub sub sbb _unchecked
28187
28188-.macro incdec_return func ins insc
28189-ENTRY(atomic64_\func\()_return_cx8)
28190+.macro incdec_return func ins insc unchecked=""
28191+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
28192 CFI_STARTPROC
28193 SAVE ebx
28194
28195@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
28196 movl %edx, %ecx
28197 \ins\()l $1, %ebx
28198 \insc\()l $0, %ecx
28199+
28200+.ifb \unchecked
28201+#ifdef CONFIG_PAX_REFCOUNT
28202+ into
28203+2:
28204+ _ASM_EXTABLE(2b, 3f)
28205+#endif
28206+.endif
28207+
28208 LOCK_PREFIX
28209 cmpxchg8b (%esi)
28210 jne 1b
28211
28212-10:
28213 movl %ebx, %eax
28214 movl %ecx, %edx
28215+
28216+.ifb \unchecked
28217+#ifdef CONFIG_PAX_REFCOUNT
28218+3:
28219+#endif
28220+.endif
28221+
28222 RESTORE ebx
28223+ pax_force_retaddr
28224 ret
28225 CFI_ENDPROC
28226-ENDPROC(atomic64_\func\()_return_cx8)
28227+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
28228 .endm
28229
28230 incdec_return inc add adc
28231 incdec_return dec sub sbb
28232+incdec_return inc add adc _unchecked
28233+incdec_return dec sub sbb _unchecked
28234
28235 ENTRY(atomic64_dec_if_positive_cx8)
28236 CFI_STARTPROC
28237@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
28238 movl %edx, %ecx
28239 subl $1, %ebx
28240 sbb $0, %ecx
28241+
28242+#ifdef CONFIG_PAX_REFCOUNT
28243+ into
28244+1234:
28245+ _ASM_EXTABLE(1234b, 2f)
28246+#endif
28247+
28248 js 2f
28249 LOCK_PREFIX
28250 cmpxchg8b (%esi)
28251@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
28252 movl %ebx, %eax
28253 movl %ecx, %edx
28254 RESTORE ebx
28255+ pax_force_retaddr
28256 ret
28257 CFI_ENDPROC
28258 ENDPROC(atomic64_dec_if_positive_cx8)
28259@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
28260 movl %edx, %ecx
28261 addl %ebp, %ebx
28262 adcl %edi, %ecx
28263+
28264+#ifdef CONFIG_PAX_REFCOUNT
28265+ into
28266+1234:
28267+ _ASM_EXTABLE(1234b, 3f)
28268+#endif
28269+
28270 LOCK_PREFIX
28271 cmpxchg8b (%esi)
28272 jne 1b
28273@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
28274 CFI_ADJUST_CFA_OFFSET -8
28275 RESTORE ebx
28276 RESTORE ebp
28277+ pax_force_retaddr
28278 ret
28279 4:
28280 cmpl %edx, 4(%esp)
28281@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
28282 xorl %ecx, %ecx
28283 addl $1, %ebx
28284 adcl %edx, %ecx
28285+
28286+#ifdef CONFIG_PAX_REFCOUNT
28287+ into
28288+1234:
28289+ _ASM_EXTABLE(1234b, 3f)
28290+#endif
28291+
28292 LOCK_PREFIX
28293 cmpxchg8b (%esi)
28294 jne 1b
28295@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
28296 movl $1, %eax
28297 3:
28298 RESTORE ebx
28299+ pax_force_retaddr
28300 ret
28301 CFI_ENDPROC
28302 ENDPROC(atomic64_inc_not_zero_cx8)
28303diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
28304index e78b8ee..7e173a8 100644
28305--- a/arch/x86/lib/checksum_32.S
28306+++ b/arch/x86/lib/checksum_32.S
28307@@ -29,7 +29,8 @@
28308 #include <asm/dwarf2.h>
28309 #include <asm/errno.h>
28310 #include <asm/asm.h>
28311-
28312+#include <asm/segment.h>
28313+
28314 /*
28315 * computes a partial checksum, e.g. for TCP/UDP fragments
28316 */
28317@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
28318
28319 #define ARGBASE 16
28320 #define FP 12
28321-
28322-ENTRY(csum_partial_copy_generic)
28323+
28324+ENTRY(csum_partial_copy_generic_to_user)
28325 CFI_STARTPROC
28326+
28327+#ifdef CONFIG_PAX_MEMORY_UDEREF
28328+ pushl_cfi %gs
28329+ popl_cfi %es
28330+ jmp csum_partial_copy_generic
28331+#endif
28332+
28333+ENTRY(csum_partial_copy_generic_from_user)
28334+
28335+#ifdef CONFIG_PAX_MEMORY_UDEREF
28336+ pushl_cfi %gs
28337+ popl_cfi %ds
28338+#endif
28339+
28340+ENTRY(csum_partial_copy_generic)
28341 subl $4,%esp
28342 CFI_ADJUST_CFA_OFFSET 4
28343 pushl_cfi %edi
28344@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
28345 jmp 4f
28346 SRC(1: movw (%esi), %bx )
28347 addl $2, %esi
28348-DST( movw %bx, (%edi) )
28349+DST( movw %bx, %es:(%edi) )
28350 addl $2, %edi
28351 addw %bx, %ax
28352 adcl $0, %eax
28353@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
28354 SRC(1: movl (%esi), %ebx )
28355 SRC( movl 4(%esi), %edx )
28356 adcl %ebx, %eax
28357-DST( movl %ebx, (%edi) )
28358+DST( movl %ebx, %es:(%edi) )
28359 adcl %edx, %eax
28360-DST( movl %edx, 4(%edi) )
28361+DST( movl %edx, %es:4(%edi) )
28362
28363 SRC( movl 8(%esi), %ebx )
28364 SRC( movl 12(%esi), %edx )
28365 adcl %ebx, %eax
28366-DST( movl %ebx, 8(%edi) )
28367+DST( movl %ebx, %es:8(%edi) )
28368 adcl %edx, %eax
28369-DST( movl %edx, 12(%edi) )
28370+DST( movl %edx, %es:12(%edi) )
28371
28372 SRC( movl 16(%esi), %ebx )
28373 SRC( movl 20(%esi), %edx )
28374 adcl %ebx, %eax
28375-DST( movl %ebx, 16(%edi) )
28376+DST( movl %ebx, %es:16(%edi) )
28377 adcl %edx, %eax
28378-DST( movl %edx, 20(%edi) )
28379+DST( movl %edx, %es:20(%edi) )
28380
28381 SRC( movl 24(%esi), %ebx )
28382 SRC( movl 28(%esi), %edx )
28383 adcl %ebx, %eax
28384-DST( movl %ebx, 24(%edi) )
28385+DST( movl %ebx, %es:24(%edi) )
28386 adcl %edx, %eax
28387-DST( movl %edx, 28(%edi) )
28388+DST( movl %edx, %es:28(%edi) )
28389
28390 lea 32(%esi), %esi
28391 lea 32(%edi), %edi
28392@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
28393 shrl $2, %edx # This clears CF
28394 SRC(3: movl (%esi), %ebx )
28395 adcl %ebx, %eax
28396-DST( movl %ebx, (%edi) )
28397+DST( movl %ebx, %es:(%edi) )
28398 lea 4(%esi), %esi
28399 lea 4(%edi), %edi
28400 dec %edx
28401@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
28402 jb 5f
28403 SRC( movw (%esi), %cx )
28404 leal 2(%esi), %esi
28405-DST( movw %cx, (%edi) )
28406+DST( movw %cx, %es:(%edi) )
28407 leal 2(%edi), %edi
28408 je 6f
28409 shll $16,%ecx
28410 SRC(5: movb (%esi), %cl )
28411-DST( movb %cl, (%edi) )
28412+DST( movb %cl, %es:(%edi) )
28413 6: addl %ecx, %eax
28414 adcl $0, %eax
28415 7:
28416@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
28417
28418 6001:
28419 movl ARGBASE+20(%esp), %ebx # src_err_ptr
28420- movl $-EFAULT, (%ebx)
28421+ movl $-EFAULT, %ss:(%ebx)
28422
28423 # zero the complete destination - computing the rest
28424 # is too much work
28425@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
28426
28427 6002:
28428 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
28429- movl $-EFAULT,(%ebx)
28430+ movl $-EFAULT,%ss:(%ebx)
28431 jmp 5000b
28432
28433 .previous
28434
28435+ pushl_cfi %ss
28436+ popl_cfi %ds
28437+ pushl_cfi %ss
28438+ popl_cfi %es
28439 popl_cfi %ebx
28440 CFI_RESTORE ebx
28441 popl_cfi %esi
28442@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
28443 popl_cfi %ecx # equivalent to addl $4,%esp
28444 ret
28445 CFI_ENDPROC
28446-ENDPROC(csum_partial_copy_generic)
28447+ENDPROC(csum_partial_copy_generic_to_user)
28448
28449 #else
28450
28451 /* Version for PentiumII/PPro */
28452
28453 #define ROUND1(x) \
28454+ nop; nop; nop; \
28455 SRC(movl x(%esi), %ebx ) ; \
28456 addl %ebx, %eax ; \
28457- DST(movl %ebx, x(%edi) ) ;
28458+ DST(movl %ebx, %es:x(%edi)) ;
28459
28460 #define ROUND(x) \
28461+ nop; nop; nop; \
28462 SRC(movl x(%esi), %ebx ) ; \
28463 adcl %ebx, %eax ; \
28464- DST(movl %ebx, x(%edi) ) ;
28465+ DST(movl %ebx, %es:x(%edi)) ;
28466
28467 #define ARGBASE 12
28468-
28469-ENTRY(csum_partial_copy_generic)
28470+
28471+ENTRY(csum_partial_copy_generic_to_user)
28472 CFI_STARTPROC
28473+
28474+#ifdef CONFIG_PAX_MEMORY_UDEREF
28475+ pushl_cfi %gs
28476+ popl_cfi %es
28477+ jmp csum_partial_copy_generic
28478+#endif
28479+
28480+ENTRY(csum_partial_copy_generic_from_user)
28481+
28482+#ifdef CONFIG_PAX_MEMORY_UDEREF
28483+ pushl_cfi %gs
28484+ popl_cfi %ds
28485+#endif
28486+
28487+ENTRY(csum_partial_copy_generic)
28488 pushl_cfi %ebx
28489 CFI_REL_OFFSET ebx, 0
28490 pushl_cfi %edi
28491@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
28492 subl %ebx, %edi
28493 lea -1(%esi),%edx
28494 andl $-32,%edx
28495- lea 3f(%ebx,%ebx), %ebx
28496+ lea 3f(%ebx,%ebx,2), %ebx
28497 testl %esi, %esi
28498 jmp *%ebx
28499 1: addl $64,%esi
28500@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
28501 jb 5f
28502 SRC( movw (%esi), %dx )
28503 leal 2(%esi), %esi
28504-DST( movw %dx, (%edi) )
28505+DST( movw %dx, %es:(%edi) )
28506 leal 2(%edi), %edi
28507 je 6f
28508 shll $16,%edx
28509 5:
28510 SRC( movb (%esi), %dl )
28511-DST( movb %dl, (%edi) )
28512+DST( movb %dl, %es:(%edi) )
28513 6: addl %edx, %eax
28514 adcl $0, %eax
28515 7:
28516 .section .fixup, "ax"
28517 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
28518- movl $-EFAULT, (%ebx)
28519+ movl $-EFAULT, %ss:(%ebx)
28520 # zero the complete destination (computing the rest is too much work)
28521 movl ARGBASE+8(%esp),%edi # dst
28522 movl ARGBASE+12(%esp),%ecx # len
28523@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
28524 rep; stosb
28525 jmp 7b
28526 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
28527- movl $-EFAULT, (%ebx)
28528+ movl $-EFAULT, %ss:(%ebx)
28529 jmp 7b
28530 .previous
28531
28532+#ifdef CONFIG_PAX_MEMORY_UDEREF
28533+ pushl_cfi %ss
28534+ popl_cfi %ds
28535+ pushl_cfi %ss
28536+ popl_cfi %es
28537+#endif
28538+
28539 popl_cfi %esi
28540 CFI_RESTORE esi
28541 popl_cfi %edi
28542@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
28543 CFI_RESTORE ebx
28544 ret
28545 CFI_ENDPROC
28546-ENDPROC(csum_partial_copy_generic)
28547+ENDPROC(csum_partial_copy_generic_to_user)
28548
28549 #undef ROUND
28550 #undef ROUND1
28551diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
28552index f2145cf..cea889d 100644
28553--- a/arch/x86/lib/clear_page_64.S
28554+++ b/arch/x86/lib/clear_page_64.S
28555@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
28556 movl $4096/8,%ecx
28557 xorl %eax,%eax
28558 rep stosq
28559+ pax_force_retaddr
28560 ret
28561 CFI_ENDPROC
28562 ENDPROC(clear_page_c)
28563@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
28564 movl $4096,%ecx
28565 xorl %eax,%eax
28566 rep stosb
28567+ pax_force_retaddr
28568 ret
28569 CFI_ENDPROC
28570 ENDPROC(clear_page_c_e)
28571@@ -43,6 +45,7 @@ ENTRY(clear_page)
28572 leaq 64(%rdi),%rdi
28573 jnz .Lloop
28574 nop
28575+ pax_force_retaddr
28576 ret
28577 CFI_ENDPROC
28578 .Lclear_page_end:
28579@@ -58,7 +61,7 @@ ENDPROC(clear_page)
28580
28581 #include <asm/cpufeature.h>
28582
28583- .section .altinstr_replacement,"ax"
28584+ .section .altinstr_replacement,"a"
28585 1: .byte 0xeb /* jmp <disp8> */
28586 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
28587 2: .byte 0xeb /* jmp <disp8> */
28588diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
28589index 1e572c5..2a162cd 100644
28590--- a/arch/x86/lib/cmpxchg16b_emu.S
28591+++ b/arch/x86/lib/cmpxchg16b_emu.S
28592@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
28593
28594 popf
28595 mov $1, %al
28596+ pax_force_retaddr
28597 ret
28598
28599 not_same:
28600 popf
28601 xor %al,%al
28602+ pax_force_retaddr
28603 ret
28604
28605 CFI_ENDPROC
28606diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
28607index 176cca6..e0d658e 100644
28608--- a/arch/x86/lib/copy_page_64.S
28609+++ b/arch/x86/lib/copy_page_64.S
28610@@ -9,6 +9,7 @@ copy_page_rep:
28611 CFI_STARTPROC
28612 movl $4096/8, %ecx
28613 rep movsq
28614+ pax_force_retaddr
28615 ret
28616 CFI_ENDPROC
28617 ENDPROC(copy_page_rep)
28618@@ -24,8 +25,8 @@ ENTRY(copy_page)
28619 CFI_ADJUST_CFA_OFFSET 2*8
28620 movq %rbx, (%rsp)
28621 CFI_REL_OFFSET rbx, 0
28622- movq %r12, 1*8(%rsp)
28623- CFI_REL_OFFSET r12, 1*8
28624+ movq %r13, 1*8(%rsp)
28625+ CFI_REL_OFFSET r13, 1*8
28626
28627 movl $(4096/64)-5, %ecx
28628 .p2align 4
28629@@ -38,7 +39,7 @@ ENTRY(copy_page)
28630 movq 0x8*4(%rsi), %r9
28631 movq 0x8*5(%rsi), %r10
28632 movq 0x8*6(%rsi), %r11
28633- movq 0x8*7(%rsi), %r12
28634+ movq 0x8*7(%rsi), %r13
28635
28636 prefetcht0 5*64(%rsi)
28637
28638@@ -49,7 +50,7 @@ ENTRY(copy_page)
28639 movq %r9, 0x8*4(%rdi)
28640 movq %r10, 0x8*5(%rdi)
28641 movq %r11, 0x8*6(%rdi)
28642- movq %r12, 0x8*7(%rdi)
28643+ movq %r13, 0x8*7(%rdi)
28644
28645 leaq 64 (%rsi), %rsi
28646 leaq 64 (%rdi), %rdi
28647@@ -68,7 +69,7 @@ ENTRY(copy_page)
28648 movq 0x8*4(%rsi), %r9
28649 movq 0x8*5(%rsi), %r10
28650 movq 0x8*6(%rsi), %r11
28651- movq 0x8*7(%rsi), %r12
28652+ movq 0x8*7(%rsi), %r13
28653
28654 movq %rax, 0x8*0(%rdi)
28655 movq %rbx, 0x8*1(%rdi)
28656@@ -77,7 +78,7 @@ ENTRY(copy_page)
28657 movq %r9, 0x8*4(%rdi)
28658 movq %r10, 0x8*5(%rdi)
28659 movq %r11, 0x8*6(%rdi)
28660- movq %r12, 0x8*7(%rdi)
28661+ movq %r13, 0x8*7(%rdi)
28662
28663 leaq 64(%rdi), %rdi
28664 leaq 64(%rsi), %rsi
28665@@ -85,10 +86,11 @@ ENTRY(copy_page)
28666
28667 movq (%rsp), %rbx
28668 CFI_RESTORE rbx
28669- movq 1*8(%rsp), %r12
28670- CFI_RESTORE r12
28671+ movq 1*8(%rsp), %r13
28672+ CFI_RESTORE r13
28673 addq $2*8, %rsp
28674 CFI_ADJUST_CFA_OFFSET -2*8
28675+ pax_force_retaddr
28676 ret
28677 .Lcopy_page_end:
28678 CFI_ENDPROC
28679@@ -99,7 +101,7 @@ ENDPROC(copy_page)
28680
28681 #include <asm/cpufeature.h>
28682
28683- .section .altinstr_replacement,"ax"
28684+ .section .altinstr_replacement,"a"
28685 1: .byte 0xeb /* jmp <disp8> */
28686 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
28687 2:
28688diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
28689index a30ca15..407412b 100644
28690--- a/arch/x86/lib/copy_user_64.S
28691+++ b/arch/x86/lib/copy_user_64.S
28692@@ -18,31 +18,7 @@
28693 #include <asm/alternative-asm.h>
28694 #include <asm/asm.h>
28695 #include <asm/smap.h>
28696-
28697-/*
28698- * By placing feature2 after feature1 in altinstructions section, we logically
28699- * implement:
28700- * If CPU has feature2, jmp to alt2 is used
28701- * else if CPU has feature1, jmp to alt1 is used
28702- * else jmp to orig is used.
28703- */
28704- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
28705-0:
28706- .byte 0xe9 /* 32bit jump */
28707- .long \orig-1f /* by default jump to orig */
28708-1:
28709- .section .altinstr_replacement,"ax"
28710-2: .byte 0xe9 /* near jump with 32bit immediate */
28711- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
28712-3: .byte 0xe9 /* near jump with 32bit immediate */
28713- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
28714- .previous
28715-
28716- .section .altinstructions,"a"
28717- altinstruction_entry 0b,2b,\feature1,5,5
28718- altinstruction_entry 0b,3b,\feature2,5,5
28719- .previous
28720- .endm
28721+#include <asm/pgtable.h>
28722
28723 .macro ALIGN_DESTINATION
28724 #ifdef FIX_ALIGNMENT
28725@@ -70,52 +46,6 @@
28726 #endif
28727 .endm
28728
28729-/* Standard copy_to_user with segment limit checking */
28730-ENTRY(_copy_to_user)
28731- CFI_STARTPROC
28732- GET_THREAD_INFO(%rax)
28733- movq %rdi,%rcx
28734- addq %rdx,%rcx
28735- jc bad_to_user
28736- cmpq TI_addr_limit(%rax),%rcx
28737- ja bad_to_user
28738- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
28739- copy_user_generic_unrolled,copy_user_generic_string, \
28740- copy_user_enhanced_fast_string
28741- CFI_ENDPROC
28742-ENDPROC(_copy_to_user)
28743-
28744-/* Standard copy_from_user with segment limit checking */
28745-ENTRY(_copy_from_user)
28746- CFI_STARTPROC
28747- GET_THREAD_INFO(%rax)
28748- movq %rsi,%rcx
28749- addq %rdx,%rcx
28750- jc bad_from_user
28751- cmpq TI_addr_limit(%rax),%rcx
28752- ja bad_from_user
28753- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
28754- copy_user_generic_unrolled,copy_user_generic_string, \
28755- copy_user_enhanced_fast_string
28756- CFI_ENDPROC
28757-ENDPROC(_copy_from_user)
28758-
28759- .section .fixup,"ax"
28760- /* must zero dest */
28761-ENTRY(bad_from_user)
28762-bad_from_user:
28763- CFI_STARTPROC
28764- movl %edx,%ecx
28765- xorl %eax,%eax
28766- rep
28767- stosb
28768-bad_to_user:
28769- movl %edx,%eax
28770- ret
28771- CFI_ENDPROC
28772-ENDPROC(bad_from_user)
28773- .previous
28774-
28775 /*
28776 * copy_user_generic_unrolled - memory copy with exception handling.
28777 * This version is for CPUs like P4 that don't have efficient micro
28778@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
28779 */
28780 ENTRY(copy_user_generic_unrolled)
28781 CFI_STARTPROC
28782+ ASM_PAX_OPEN_USERLAND
28783 ASM_STAC
28784 cmpl $8,%edx
28785 jb 20f /* less than 8 bytes, go to byte copy loop */
28786@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
28787 jnz 21b
28788 23: xor %eax,%eax
28789 ASM_CLAC
28790+ ASM_PAX_CLOSE_USERLAND
28791+ pax_force_retaddr
28792 ret
28793
28794 .section .fixup,"ax"
28795@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
28796 */
28797 ENTRY(copy_user_generic_string)
28798 CFI_STARTPROC
28799+ ASM_PAX_OPEN_USERLAND
28800 ASM_STAC
28801 andl %edx,%edx
28802 jz 4f
28803@@ -251,6 +185,8 @@ ENTRY(copy_user_generic_string)
28804 movsb
28805 4: xorl %eax,%eax
28806 ASM_CLAC
28807+ ASM_PAX_CLOSE_USERLAND
28808+ pax_force_retaddr
28809 ret
28810
28811 .section .fixup,"ax"
28812@@ -278,6 +214,7 @@ ENDPROC(copy_user_generic_string)
28813 */
28814 ENTRY(copy_user_enhanced_fast_string)
28815 CFI_STARTPROC
28816+ ASM_PAX_OPEN_USERLAND
28817 ASM_STAC
28818 andl %edx,%edx
28819 jz 2f
28820@@ -286,6 +223,8 @@ ENTRY(copy_user_enhanced_fast_string)
28821 movsb
28822 2: xorl %eax,%eax
28823 ASM_CLAC
28824+ ASM_PAX_CLOSE_USERLAND
28825+ pax_force_retaddr
28826 ret
28827
28828 .section .fixup,"ax"
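The deleted _copy_to_user/_copy_from_user stubs above performed the classic limit check: a carry out of dst+len means the addition wrapped, and the sum must stay at or below TI_addr_limit. The patch relocates that policy into its own uaccess wrappers; the check itself in C, with an illustrative limit value:

#include <stdint.h>
#include <stdio.h>

#define ADDR_LIMIT 0x00007ffffffff000ULL   /* illustrative TI_addr_limit */

static int user_range_ok(uint64_t addr, uint64_t len)
{
    uint64_t end = addr + len;
    if (end < addr)           /* "jc bad_*_user": addition wrapped */
        return 0;
    if (end > ADDR_LIMIT)     /* "cmpq TI_addr_limit ... ja bad" */
        return 0;
    return 1;
}

int main(void)
{
    printf("%d\n", user_range_ok(0x1000, 0x100));        /* 1 */
    printf("%d\n", user_range_ok(~0ULL - 8, 0x100));     /* 0: wraps */
    printf("%d\n", user_range_ok(ADDR_LIMIT, 1));        /* 0: past limit */
    return 0;
}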
28829diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
28830index 6a4f43c..c70fb52 100644
28831--- a/arch/x86/lib/copy_user_nocache_64.S
28832+++ b/arch/x86/lib/copy_user_nocache_64.S
28833@@ -8,6 +8,7 @@
28834
28835 #include <linux/linkage.h>
28836 #include <asm/dwarf2.h>
28837+#include <asm/alternative-asm.h>
28838
28839 #define FIX_ALIGNMENT 1
28840
28841@@ -16,6 +17,7 @@
28842 #include <asm/thread_info.h>
28843 #include <asm/asm.h>
28844 #include <asm/smap.h>
28845+#include <asm/pgtable.h>
28846
28847 .macro ALIGN_DESTINATION
28848 #ifdef FIX_ALIGNMENT
28849@@ -49,6 +51,16 @@
28850 */
28851 ENTRY(__copy_user_nocache)
28852 CFI_STARTPROC
28853+
28854+#ifdef CONFIG_PAX_MEMORY_UDEREF
28855+ mov pax_user_shadow_base,%rcx
28856+ cmp %rcx,%rsi
28857+ jae 1f
28858+ add %rcx,%rsi
28859+1:
28860+#endif
28861+
28862+ ASM_PAX_OPEN_USERLAND
28863 ASM_STAC
28864 cmpl $8,%edx
28865 jb 20f /* less than 8 bytes, go to byte copy loop */
28866@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
28867 jnz 21b
28868 23: xorl %eax,%eax
28869 ASM_CLAC
28870+ ASM_PAX_CLOSE_USERLAND
28871 sfence
28872+ pax_force_retaddr
28873 ret
28874
28875 .section .fixup,"ax"
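Under UDEREF, __copy_user_nocache above first rebases a source pointer that lies below pax_user_shadow_base into the kernel's shadow mapping of userland; it is the same remapping that ____m() performs in the csum wrappers that follow. A sketch of that rebase; the base constant is purely illustrative:

#include <stdint.h>
#include <stdio.h>

#define PAX_USER_SHADOW_BASE 0x0000400000000000ULL   /* illustrative value */

static uint64_t shadow_addr(uint64_t uaddr)
{
    if (uaddr >= PAX_USER_SHADOW_BASE)   /* "cmp %rcx,%rsi; jae 1f" */
        return uaddr;
    return uaddr + PAX_USER_SHADOW_BASE; /* "add %rcx,%rsi" */
}

int main(void)
{
    printf("%#llx -> %#llx\n", 0x7fff12345678ULL,
           (unsigned long long)shadow_addr(0x7fff12345678ULL));
    return 0;
}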
28876diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
28877index 2419d5f..fe52d0e 100644
28878--- a/arch/x86/lib/csum-copy_64.S
28879+++ b/arch/x86/lib/csum-copy_64.S
28880@@ -9,6 +9,7 @@
28881 #include <asm/dwarf2.h>
28882 #include <asm/errno.h>
28883 #include <asm/asm.h>
28884+#include <asm/alternative-asm.h>
28885
28886 /*
28887 * Checksum copy with exception handling.
28888@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
28889 CFI_ADJUST_CFA_OFFSET 7*8
28890 movq %rbx, 2*8(%rsp)
28891 CFI_REL_OFFSET rbx, 2*8
28892- movq %r12, 3*8(%rsp)
28893- CFI_REL_OFFSET r12, 3*8
28894+ movq %r15, 3*8(%rsp)
28895+ CFI_REL_OFFSET r15, 3*8
28896 movq %r14, 4*8(%rsp)
28897 CFI_REL_OFFSET r14, 4*8
28898 movq %r13, 5*8(%rsp)
28899@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
28900 movl %edx, %ecx
28901
28902 xorl %r9d, %r9d
28903- movq %rcx, %r12
28904+ movq %rcx, %r15
28905
28906- shrq $6, %r12
28907+ shrq $6, %r15
28908 jz .Lhandle_tail /* < 64 */
28909
28910 clc
28911
28912 /* main loop. clear in 64 byte blocks */
28913 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
28914- /* r11: temp3, rdx: temp4, r12 loopcnt */
28915+ /* r11: temp3, rdx: temp4, r15 loopcnt */
28916 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
28917 .p2align 4
28918 .Lloop:
28919@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
28920 adcq %r14, %rax
28921 adcq %r13, %rax
28922
28923- decl %r12d
28924+ decl %r15d
28925
28926 dest
28927 movq %rbx, (%rsi)
28928@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
28929 .Lende:
28930 movq 2*8(%rsp), %rbx
28931 CFI_RESTORE rbx
28932- movq 3*8(%rsp), %r12
28933- CFI_RESTORE r12
28934+ movq 3*8(%rsp), %r15
28935+ CFI_RESTORE r15
28936 movq 4*8(%rsp), %r14
28937 CFI_RESTORE r14
28938 movq 5*8(%rsp), %r13
28939@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
28940 CFI_RESTORE rbp
28941 addq $7*8, %rsp
28942 CFI_ADJUST_CFA_OFFSET -7*8
28943+ pax_force_retaddr
28944 ret
28945 CFI_RESTORE_STATE
28946
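The register rename in csum_partial_copy_generic (r12 becomes r15 throughout, including the saved-register slots and the loop-count comment) frees up %r12, which PaX/KERNEXEC is assumed to reserve globally on amd64 for the return-address mask that pax_force_retaddr applies. A toy model of what forcing the return address accomplishes; the exact mask and mechanism depend on the configured KERNEXEC method:

	/* Hedged sketch: with the top address bit forced on, a return
	 * address redirected at a low (userland) target becomes
	 * non-canonical and faults instead of executing attacker-mapped
	 * code. */
	static inline unsigned long force_kernel_retaddr(unsigned long ret)
	{
		return ret | (1UL << 63);
	}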
28947diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
28948index 7609e0e..b449b98 100644
28949--- a/arch/x86/lib/csum-wrappers_64.c
28950+++ b/arch/x86/lib/csum-wrappers_64.c
28951@@ -53,10 +53,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
28952 len -= 2;
28953 }
28954 }
28955+ pax_open_userland();
28956 stac();
28957- isum = csum_partial_copy_generic((__force const void *)src,
28958+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
28959 dst, len, isum, errp, NULL);
28960 clac();
28961+ pax_close_userland();
28962 if (unlikely(*errp))
28963 goto out_err;
28964
28965@@ -110,10 +112,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
28966 }
28967
28968 *errp = 0;
28969+ pax_open_userland();
28970 stac();
28971- ret = csum_partial_copy_generic(src, (void __force *)dst,
28972+ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
28973 len, isum, NULL, errp);
28974 clac();
28975+ pax_close_userland();
28976 return ret;
28977 }
28978 EXPORT_SYMBOL(csum_partial_copy_to_user);
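In the C checksum wrappers the same open/close bracketing appears at source level, and the user pointer is laundered through ____m() with a __force_kernel cast (which also keeps sparse quiet about dropping the __user annotation). ____m() is not defined in this hunk; a guess at its UDEREF shape, reusing uderef_rebase() from the sketch above:

	/* Hedged guess, not the PaX definition. */
	#ifdef CONFIG_PAX_MEMORY_UDEREF
	# define ____m(x)	uderef_rebase((x), pax_user_shadow_base)
	#else
	# define ____m(x)	(x)
	#endif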
28979diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
28980index a451235..1daa956 100644
28981--- a/arch/x86/lib/getuser.S
28982+++ b/arch/x86/lib/getuser.S
28983@@ -33,17 +33,40 @@
28984 #include <asm/thread_info.h>
28985 #include <asm/asm.h>
28986 #include <asm/smap.h>
28987+#include <asm/segment.h>
28988+#include <asm/pgtable.h>
28989+#include <asm/alternative-asm.h>
28990+
28991+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28992+#define __copyuser_seg gs;
28993+#else
28994+#define __copyuser_seg
28995+#endif
28996
28997 .text
28998 ENTRY(__get_user_1)
28999 CFI_STARTPROC
29000+
29001+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
29002 GET_THREAD_INFO(%_ASM_DX)
29003 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
29004 jae bad_get_user
29005 ASM_STAC
29006-1: movzbl (%_ASM_AX),%edx
29007+
29008+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29009+ mov pax_user_shadow_base,%_ASM_DX
29010+ cmp %_ASM_DX,%_ASM_AX
29011+ jae 1234f
29012+ add %_ASM_DX,%_ASM_AX
29013+1234:
29014+#endif
29015+
29016+#endif
29017+
29018+1: __copyuser_seg movzbl (%_ASM_AX),%edx
29019 xor %eax,%eax
29020 ASM_CLAC
29021+ pax_force_retaddr
29022 ret
29023 CFI_ENDPROC
29024 ENDPROC(__get_user_1)
29025@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
29026 ENTRY(__get_user_2)
29027 CFI_STARTPROC
29028 add $1,%_ASM_AX
29029+
29030+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
29031 jc bad_get_user
29032 GET_THREAD_INFO(%_ASM_DX)
29033 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
29034 jae bad_get_user
29035 ASM_STAC
29036-2: movzwl -1(%_ASM_AX),%edx
29037+
29038+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29039+ mov pax_user_shadow_base,%_ASM_DX
29040+ cmp %_ASM_DX,%_ASM_AX
29041+ jae 1234f
29042+ add %_ASM_DX,%_ASM_AX
29043+1234:
29044+#endif
29045+
29046+#endif
29047+
29048+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
29049 xor %eax,%eax
29050 ASM_CLAC
29051+ pax_force_retaddr
29052 ret
29053 CFI_ENDPROC
29054 ENDPROC(__get_user_2)
29055@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
29056 ENTRY(__get_user_4)
29057 CFI_STARTPROC
29058 add $3,%_ASM_AX
29059+
29060+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
29061 jc bad_get_user
29062 GET_THREAD_INFO(%_ASM_DX)
29063 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
29064 jae bad_get_user
29065 ASM_STAC
29066-3: movl -3(%_ASM_AX),%edx
29067+
29068+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29069+ mov pax_user_shadow_base,%_ASM_DX
29070+ cmp %_ASM_DX,%_ASM_AX
29071+ jae 1234f
29072+ add %_ASM_DX,%_ASM_AX
29073+1234:
29074+#endif
29075+
29076+#endif
29077+
29078+3: __copyuser_seg movl -3(%_ASM_AX),%edx
29079 xor %eax,%eax
29080 ASM_CLAC
29081+ pax_force_retaddr
29082 ret
29083 CFI_ENDPROC
29084 ENDPROC(__get_user_4)
29085@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
29086 GET_THREAD_INFO(%_ASM_DX)
29087 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
29088 jae bad_get_user
29089+
29090+#ifdef CONFIG_PAX_MEMORY_UDEREF
29091+ mov pax_user_shadow_base,%_ASM_DX
29092+ cmp %_ASM_DX,%_ASM_AX
29093+ jae 1234f
29094+ add %_ASM_DX,%_ASM_AX
29095+1234:
29096+#endif
29097+
29098 ASM_STAC
29099 4: movq -7(%_ASM_AX),%rdx
29100 xor %eax,%eax
29101 ASM_CLAC
29102+ pax_force_retaddr
29103 ret
29104 #else
29105 add $7,%_ASM_AX
29106@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
29107 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
29108 jae bad_get_user_8
29109 ASM_STAC
29110-4: movl -7(%_ASM_AX),%edx
29111-5: movl -3(%_ASM_AX),%ecx
29112+4: __copyuser_seg movl -7(%_ASM_AX),%edx
29113+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
29114 xor %eax,%eax
29115 ASM_CLAC
29116+ pax_force_retaddr
29117 ret
29118 #endif
29119 CFI_ENDPROC
29120@@ -113,6 +175,7 @@ bad_get_user:
29121 xor %edx,%edx
29122 mov $(-EFAULT),%_ASM_AX
29123 ASM_CLAC
29124+ pax_force_retaddr
29125 ret
29126 CFI_ENDPROC
29127 END(bad_get_user)
29128@@ -124,6 +187,7 @@ bad_get_user_8:
29129 xor %ecx,%ecx
29130 mov $(-EFAULT),%_ASM_AX
29131 ASM_CLAC
29132+ pax_force_retaddr
29133 ret
29134 CFI_ENDPROC
29135 END(bad_get_user_8)
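In getuser.S the patch compiles the addr_limit check (and ASM_STAC) out entirely on i386+UDEREF, where the %gs segment selected by __copyuser_seg already fences userland, while on amd64 the limit check stays and UDEREF adds the shadow-base rebase. A hedged C model of the patched amd64 __get_user_1 fast path; get_user_shadow_base() is a hypothetical accessor for pax_user_shadow_base and the extable-driven fault path is reduced to a comment:

	/* Kernel context assumed for stac()/clac() and thread_info. */
	static int get_user_1_model(unsigned long uaddr, unsigned char *out)
	{
		unsigned long base;

		if (uaddr >= current_thread_info()->addr_limit.seg)
			return -EFAULT;			/* jae bad_get_user */
		stac();
		base = get_user_shadow_base();		/* mov pax_user_shadow_base,%rdx */
		if (uaddr < base)
			uaddr += base;			/* rebase into the shadow area */
		*out = *(const unsigned char *)uaddr;	/* a fault here hits the extable */
		clac();
		return 0;				/* xor %eax,%eax */
	}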
29136diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
29137index 54fcffe..7be149e 100644
29138--- a/arch/x86/lib/insn.c
29139+++ b/arch/x86/lib/insn.c
29140@@ -20,8 +20,10 @@
29141
29142 #ifdef __KERNEL__
29143 #include <linux/string.h>
29144+#include <asm/pgtable_types.h>
29145 #else
29146 #include <string.h>
29147+#define ktla_ktva(addr) addr
29148 #endif
29149 #include <asm/inat.h>
29150 #include <asm/insn.h>
29151@@ -53,8 +55,8 @@
29152 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
29153 {
29154 memset(insn, 0, sizeof(*insn));
29155- insn->kaddr = kaddr;
29156- insn->next_byte = kaddr;
29157+ insn->kaddr = ktla_ktva(kaddr);
29158+ insn->next_byte = ktla_ktva(kaddr);
29159 insn->x86_64 = x86_64 ? 1 : 0;
29160 insn->opnd_bytes = 4;
29161 if (x86_64)
29162diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
29163index 05a95e7..326f2fa 100644
29164--- a/arch/x86/lib/iomap_copy_64.S
29165+++ b/arch/x86/lib/iomap_copy_64.S
29166@@ -17,6 +17,7 @@
29167
29168 #include <linux/linkage.h>
29169 #include <asm/dwarf2.h>
29170+#include <asm/alternative-asm.h>
29171
29172 /*
29173 * override generic version in lib/iomap_copy.c
29174@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
29175 CFI_STARTPROC
29176 movl %edx,%ecx
29177 rep movsd
29178+ pax_force_retaddr
29179 ret
29180 CFI_ENDPROC
29181 ENDPROC(__iowrite32_copy)
29182diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
29183index 56313a3..0db417e 100644
29184--- a/arch/x86/lib/memcpy_64.S
29185+++ b/arch/x86/lib/memcpy_64.S
29186@@ -24,7 +24,7 @@
29187 * This gets patched over the unrolled variant (below) via the
29188 * alternative instructions framework:
29189 */
29190- .section .altinstr_replacement, "ax", @progbits
29191+ .section .altinstr_replacement, "a", @progbits
29192 .Lmemcpy_c:
29193 movq %rdi, %rax
29194 movq %rdx, %rcx
29195@@ -33,6 +33,7 @@
29196 rep movsq
29197 movl %edx, %ecx
29198 rep movsb
29199+ pax_force_retaddr
29200 ret
29201 .Lmemcpy_e:
29202 .previous
29203@@ -44,11 +45,12 @@
29204 * This gets patched over the unrolled variant (below) via the
29205 * alternative instructions framework:
29206 */
29207- .section .altinstr_replacement, "ax", @progbits
29208+ .section .altinstr_replacement, "a", @progbits
29209 .Lmemcpy_c_e:
29210 movq %rdi, %rax
29211 movq %rdx, %rcx
29212 rep movsb
29213+ pax_force_retaddr
29214 ret
29215 .Lmemcpy_e_e:
29216 .previous
29217@@ -136,6 +138,7 @@ ENTRY(memcpy)
29218 movq %r9, 1*8(%rdi)
29219 movq %r10, -2*8(%rdi, %rdx)
29220 movq %r11, -1*8(%rdi, %rdx)
29221+ pax_force_retaddr
29222 retq
29223 .p2align 4
29224 .Lless_16bytes:
29225@@ -148,6 +151,7 @@ ENTRY(memcpy)
29226 movq -1*8(%rsi, %rdx), %r9
29227 movq %r8, 0*8(%rdi)
29228 movq %r9, -1*8(%rdi, %rdx)
29229+ pax_force_retaddr
29230 retq
29231 .p2align 4
29232 .Lless_8bytes:
29233@@ -161,6 +165,7 @@ ENTRY(memcpy)
29234 movl -4(%rsi, %rdx), %r8d
29235 movl %ecx, (%rdi)
29236 movl %r8d, -4(%rdi, %rdx)
29237+ pax_force_retaddr
29238 retq
29239 .p2align 4
29240 .Lless_3bytes:
29241@@ -179,6 +184,7 @@ ENTRY(memcpy)
29242 movb %cl, (%rdi)
29243
29244 .Lend:
29245+ pax_force_retaddr
29246 retq
29247 CFI_ENDPROC
29248 ENDPROC(memcpy)
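Dropping the "x" flag turns .altinstr_replacement into non-executable data, which is sufficient because the alternatives framework only copies those bytes over the live call sites at boot; they never execute in place. Under KERNEXEC this keeps stray executable mappings to a minimum. A hedged sketch of that observation; apply_alternative() is a simplified stand-in for the real arch/x86/kernel/alternative.c logic:

	#include <string.h>

	/* The replacement section is only ever read, never run. */
	static void apply_alternative(unsigned char *live_site,
				      const unsigned char *replacement, size_t len)
	{
		memcpy(live_site, replacement, len);
	}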
29249diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
29250index 65268a6..dd1de11 100644
29251--- a/arch/x86/lib/memmove_64.S
29252+++ b/arch/x86/lib/memmove_64.S
29253@@ -202,14 +202,16 @@ ENTRY(memmove)
29254 movb (%rsi), %r11b
29255 movb %r11b, (%rdi)
29256 13:
29257+ pax_force_retaddr
29258 retq
29259 CFI_ENDPROC
29260
29261- .section .altinstr_replacement,"ax"
29262+ .section .altinstr_replacement,"a"
29263 .Lmemmove_begin_forward_efs:
29264 /* Forward moving data. */
29265 movq %rdx, %rcx
29266 rep movsb
29267+ pax_force_retaddr
29268 retq
29269 .Lmemmove_end_forward_efs:
29270 .previous
29271diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
29272index 2dcb380..2eb79fe 100644
29273--- a/arch/x86/lib/memset_64.S
29274+++ b/arch/x86/lib/memset_64.S
29275@@ -16,7 +16,7 @@
29276 *
29277 * rax original destination
29278 */
29279- .section .altinstr_replacement, "ax", @progbits
29280+ .section .altinstr_replacement, "a", @progbits
29281 .Lmemset_c:
29282 movq %rdi,%r9
29283 movq %rdx,%rcx
29284@@ -30,6 +30,7 @@
29285 movl %edx,%ecx
29286 rep stosb
29287 movq %r9,%rax
29288+ pax_force_retaddr
29289 ret
29290 .Lmemset_e:
29291 .previous
29292@@ -45,13 +46,14 @@
29293 *
29294 * rax original destination
29295 */
29296- .section .altinstr_replacement, "ax", @progbits
29297+ .section .altinstr_replacement, "a", @progbits
29298 .Lmemset_c_e:
29299 movq %rdi,%r9
29300 movb %sil,%al
29301 movq %rdx,%rcx
29302 rep stosb
29303 movq %r9,%rax
29304+ pax_force_retaddr
29305 ret
29306 .Lmemset_e_e:
29307 .previous
29308@@ -118,6 +120,7 @@ ENTRY(__memset)
29309
29310 .Lende:
29311 movq %r10,%rax
29312+ pax_force_retaddr
29313 ret
29314
29315 CFI_RESTORE_STATE
29316diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
29317index c9f2d9b..e7fd2c0 100644
29318--- a/arch/x86/lib/mmx_32.c
29319+++ b/arch/x86/lib/mmx_32.c
29320@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
29321 {
29322 void *p;
29323 int i;
29324+ unsigned long cr0;
29325
29326 if (unlikely(in_interrupt()))
29327 return __memcpy(to, from, len);
29328@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
29329 kernel_fpu_begin();
29330
29331 __asm__ __volatile__ (
29332- "1: prefetch (%0)\n" /* This set is 28 bytes */
29333- " prefetch 64(%0)\n"
29334- " prefetch 128(%0)\n"
29335- " prefetch 192(%0)\n"
29336- " prefetch 256(%0)\n"
29337+ "1: prefetch (%1)\n" /* This set is 28 bytes */
29338+ " prefetch 64(%1)\n"
29339+ " prefetch 128(%1)\n"
29340+ " prefetch 192(%1)\n"
29341+ " prefetch 256(%1)\n"
29342 "2: \n"
29343 ".section .fixup, \"ax\"\n"
29344- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29345+ "3: \n"
29346+
29347+#ifdef CONFIG_PAX_KERNEXEC
29348+ " movl %%cr0, %0\n"
29349+ " movl %0, %%eax\n"
29350+ " andl $0xFFFEFFFF, %%eax\n"
29351+ " movl %%eax, %%cr0\n"
29352+#endif
29353+
29354+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29355+
29356+#ifdef CONFIG_PAX_KERNEXEC
29357+ " movl %0, %%cr0\n"
29358+#endif
29359+
29360 " jmp 2b\n"
29361 ".previous\n"
29362 _ASM_EXTABLE(1b, 3b)
29363- : : "r" (from));
29364+ : "=&r" (cr0) : "r" (from) : "ax");
29365
29366 for ( ; i > 5; i--) {
29367 __asm__ __volatile__ (
29368- "1: prefetch 320(%0)\n"
29369- "2: movq (%0), %%mm0\n"
29370- " movq 8(%0), %%mm1\n"
29371- " movq 16(%0), %%mm2\n"
29372- " movq 24(%0), %%mm3\n"
29373- " movq %%mm0, (%1)\n"
29374- " movq %%mm1, 8(%1)\n"
29375- " movq %%mm2, 16(%1)\n"
29376- " movq %%mm3, 24(%1)\n"
29377- " movq 32(%0), %%mm0\n"
29378- " movq 40(%0), %%mm1\n"
29379- " movq 48(%0), %%mm2\n"
29380- " movq 56(%0), %%mm3\n"
29381- " movq %%mm0, 32(%1)\n"
29382- " movq %%mm1, 40(%1)\n"
29383- " movq %%mm2, 48(%1)\n"
29384- " movq %%mm3, 56(%1)\n"
29385+ "1: prefetch 320(%1)\n"
29386+ "2: movq (%1), %%mm0\n"
29387+ " movq 8(%1), %%mm1\n"
29388+ " movq 16(%1), %%mm2\n"
29389+ " movq 24(%1), %%mm3\n"
29390+ " movq %%mm0, (%2)\n"
29391+ " movq %%mm1, 8(%2)\n"
29392+ " movq %%mm2, 16(%2)\n"
29393+ " movq %%mm3, 24(%2)\n"
29394+ " movq 32(%1), %%mm0\n"
29395+ " movq 40(%1), %%mm1\n"
29396+ " movq 48(%1), %%mm2\n"
29397+ " movq 56(%1), %%mm3\n"
29398+ " movq %%mm0, 32(%2)\n"
29399+ " movq %%mm1, 40(%2)\n"
29400+ " movq %%mm2, 48(%2)\n"
29401+ " movq %%mm3, 56(%2)\n"
29402 ".section .fixup, \"ax\"\n"
29403- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
29404+ "3:\n"
29405+
29406+#ifdef CONFIG_PAX_KERNEXEC
29407+ " movl %%cr0, %0\n"
29408+ " movl %0, %%eax\n"
29409+ " andl $0xFFFEFFFF, %%eax\n"
29410+ " movl %%eax, %%cr0\n"
29411+#endif
29412+
29413+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
29414+
29415+#ifdef CONFIG_PAX_KERNEXEC
29416+ " movl %0, %%cr0\n"
29417+#endif
29418+
29419 " jmp 2b\n"
29420 ".previous\n"
29421 _ASM_EXTABLE(1b, 3b)
29422- : : "r" (from), "r" (to) : "memory");
29423+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
29424
29425 from += 64;
29426 to += 64;
29427@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
29428 static void fast_copy_page(void *to, void *from)
29429 {
29430 int i;
29431+ unsigned long cr0;
29432
29433 kernel_fpu_begin();
29434
29435@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
29436 * but that is for later. -AV
29437 */
29438 __asm__ __volatile__(
29439- "1: prefetch (%0)\n"
29440- " prefetch 64(%0)\n"
29441- " prefetch 128(%0)\n"
29442- " prefetch 192(%0)\n"
29443- " prefetch 256(%0)\n"
29444+ "1: prefetch (%1)\n"
29445+ " prefetch 64(%1)\n"
29446+ " prefetch 128(%1)\n"
29447+ " prefetch 192(%1)\n"
29448+ " prefetch 256(%1)\n"
29449 "2: \n"
29450 ".section .fixup, \"ax\"\n"
29451- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29452+ "3: \n"
29453+
29454+#ifdef CONFIG_PAX_KERNEXEC
29455+ " movl %%cr0, %0\n"
29456+ " movl %0, %%eax\n"
29457+ " andl $0xFFFEFFFF, %%eax\n"
29458+ " movl %%eax, %%cr0\n"
29459+#endif
29460+
29461+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29462+
29463+#ifdef CONFIG_PAX_KERNEXEC
29464+ " movl %0, %%cr0\n"
29465+#endif
29466+
29467 " jmp 2b\n"
29468 ".previous\n"
29469- _ASM_EXTABLE(1b, 3b) : : "r" (from));
29470+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
29471
29472 for (i = 0; i < (4096-320)/64; i++) {
29473 __asm__ __volatile__ (
29474- "1: prefetch 320(%0)\n"
29475- "2: movq (%0), %%mm0\n"
29476- " movntq %%mm0, (%1)\n"
29477- " movq 8(%0), %%mm1\n"
29478- " movntq %%mm1, 8(%1)\n"
29479- " movq 16(%0), %%mm2\n"
29480- " movntq %%mm2, 16(%1)\n"
29481- " movq 24(%0), %%mm3\n"
29482- " movntq %%mm3, 24(%1)\n"
29483- " movq 32(%0), %%mm4\n"
29484- " movntq %%mm4, 32(%1)\n"
29485- " movq 40(%0), %%mm5\n"
29486- " movntq %%mm5, 40(%1)\n"
29487- " movq 48(%0), %%mm6\n"
29488- " movntq %%mm6, 48(%1)\n"
29489- " movq 56(%0), %%mm7\n"
29490- " movntq %%mm7, 56(%1)\n"
29491+ "1: prefetch 320(%1)\n"
29492+ "2: movq (%1), %%mm0\n"
29493+ " movntq %%mm0, (%2)\n"
29494+ " movq 8(%1), %%mm1\n"
29495+ " movntq %%mm1, 8(%2)\n"
29496+ " movq 16(%1), %%mm2\n"
29497+ " movntq %%mm2, 16(%2)\n"
29498+ " movq 24(%1), %%mm3\n"
29499+ " movntq %%mm3, 24(%2)\n"
29500+ " movq 32(%1), %%mm4\n"
29501+ " movntq %%mm4, 32(%2)\n"
29502+ " movq 40(%1), %%mm5\n"
29503+ " movntq %%mm5, 40(%2)\n"
29504+ " movq 48(%1), %%mm6\n"
29505+ " movntq %%mm6, 48(%2)\n"
29506+ " movq 56(%1), %%mm7\n"
29507+ " movntq %%mm7, 56(%2)\n"
29508 ".section .fixup, \"ax\"\n"
29509- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
29510+ "3:\n"
29511+
29512+#ifdef CONFIG_PAX_KERNEXEC
29513+ " movl %%cr0, %0\n"
29514+ " movl %0, %%eax\n"
29515+ " andl $0xFFFEFFFF, %%eax\n"
29516+ " movl %%eax, %%cr0\n"
29517+#endif
29518+
29519+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
29520+
29521+#ifdef CONFIG_PAX_KERNEXEC
29522+ " movl %0, %%cr0\n"
29523+#endif
29524+
29525 " jmp 2b\n"
29526 ".previous\n"
29527- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
29528+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
29529
29530 from += 64;
29531 to += 64;
29532@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
29533 static void fast_copy_page(void *to, void *from)
29534 {
29535 int i;
29536+ unsigned long cr0;
29537
29538 kernel_fpu_begin();
29539
29540 __asm__ __volatile__ (
29541- "1: prefetch (%0)\n"
29542- " prefetch 64(%0)\n"
29543- " prefetch 128(%0)\n"
29544- " prefetch 192(%0)\n"
29545- " prefetch 256(%0)\n"
29546+ "1: prefetch (%1)\n"
29547+ " prefetch 64(%1)\n"
29548+ " prefetch 128(%1)\n"
29549+ " prefetch 192(%1)\n"
29550+ " prefetch 256(%1)\n"
29551 "2: \n"
29552 ".section .fixup, \"ax\"\n"
29553- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29554+ "3: \n"
29555+
29556+#ifdef CONFIG_PAX_KERNEXEC
29557+ " movl %%cr0, %0\n"
29558+ " movl %0, %%eax\n"
29559+ " andl $0xFFFEFFFF, %%eax\n"
29560+ " movl %%eax, %%cr0\n"
29561+#endif
29562+
29563+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29564+
29565+#ifdef CONFIG_PAX_KERNEXEC
29566+ " movl %0, %%cr0\n"
29567+#endif
29568+
29569 " jmp 2b\n"
29570 ".previous\n"
29571- _ASM_EXTABLE(1b, 3b) : : "r" (from));
29572+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
29573
29574 for (i = 0; i < 4096/64; i++) {
29575 __asm__ __volatile__ (
29576- "1: prefetch 320(%0)\n"
29577- "2: movq (%0), %%mm0\n"
29578- " movq 8(%0), %%mm1\n"
29579- " movq 16(%0), %%mm2\n"
29580- " movq 24(%0), %%mm3\n"
29581- " movq %%mm0, (%1)\n"
29582- " movq %%mm1, 8(%1)\n"
29583- " movq %%mm2, 16(%1)\n"
29584- " movq %%mm3, 24(%1)\n"
29585- " movq 32(%0), %%mm0\n"
29586- " movq 40(%0), %%mm1\n"
29587- " movq 48(%0), %%mm2\n"
29588- " movq 56(%0), %%mm3\n"
29589- " movq %%mm0, 32(%1)\n"
29590- " movq %%mm1, 40(%1)\n"
29591- " movq %%mm2, 48(%1)\n"
29592- " movq %%mm3, 56(%1)\n"
29593+ "1: prefetch 320(%1)\n"
29594+ "2: movq (%1), %%mm0\n"
29595+ " movq 8(%1), %%mm1\n"
29596+ " movq 16(%1), %%mm2\n"
29597+ " movq 24(%1), %%mm3\n"
29598+ " movq %%mm0, (%2)\n"
29599+ " movq %%mm1, 8(%2)\n"
29600+ " movq %%mm2, 16(%2)\n"
29601+ " movq %%mm3, 24(%2)\n"
29602+ " movq 32(%1), %%mm0\n"
29603+ " movq 40(%1), %%mm1\n"
29604+ " movq 48(%1), %%mm2\n"
29605+ " movq 56(%1), %%mm3\n"
29606+ " movq %%mm0, 32(%2)\n"
29607+ " movq %%mm1, 40(%2)\n"
29608+ " movq %%mm2, 48(%2)\n"
29609+ " movq %%mm3, 56(%2)\n"
29610 ".section .fixup, \"ax\"\n"
29611- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
29612+ "3:\n"
29613+
29614+#ifdef CONFIG_PAX_KERNEXEC
29615+ " movl %%cr0, %0\n"
29616+ " movl %0, %%eax\n"
29617+ " andl $0xFFFEFFFF, %%eax\n"
29618+ " movl %%eax, %%cr0\n"
29619+#endif
29620+
29621+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
29622+
29623+#ifdef CONFIG_PAX_KERNEXEC
29624+ " movl %0, %%cr0\n"
29625+#endif
29626+
29627 " jmp 2b\n"
29628 ".previous\n"
29629 _ASM_EXTABLE(1b, 3b)
29630- : : "r" (from), "r" (to) : "memory");
29631+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
29632
29633 from += 64;
29634 to += 64;
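The fixup paths in mmx_32.c self-patch: on a prefetch fault they overwrite the faulting instruction (label 1b) with a short jmp. Under KERNEXEC the kernel text is read-only, so each fixup is wrapped in a CR0.WP toggle, clearing bit 16 (the 0xFFFEFFFF mask in the asm) before the movw and restoring it after; the new "=&r"(cr0) output and "ax" clobber exist to support that. A hedged C equivalent; read_cr0()/write_cr0() and X86_CR0_WP are real kernel names, patch_site/jmp_insn are illustrative:

	/* Kernel context assumed. */
	static void selfpatch_model(unsigned short *patch_site,
				    unsigned short jmp_insn)
	{
		unsigned long cr0 = read_cr0();

		write_cr0(cr0 & ~X86_CR0_WP);	/* let the write through read-only text */
		*patch_site = jmp_insn;		/* e.g. 0x1AEB: EB 1A, short jmp over 26 bytes */
		write_cr0(cr0);			/* restore CR0.WP */
	}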
29635diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
29636index f6d13ee..d789440 100644
29637--- a/arch/x86/lib/msr-reg.S
29638+++ b/arch/x86/lib/msr-reg.S
29639@@ -3,6 +3,7 @@
29640 #include <asm/dwarf2.h>
29641 #include <asm/asm.h>
29642 #include <asm/msr.h>
29643+#include <asm/alternative-asm.h>
29644
29645 #ifdef CONFIG_X86_64
29646 /*
29647@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
29648 movl %edi, 28(%r10)
29649 popq_cfi %rbp
29650 popq_cfi %rbx
29651+ pax_force_retaddr
29652 ret
29653 3:
29654 CFI_RESTORE_STATE
29655diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
29656index fc6ba17..d4d989d 100644
29657--- a/arch/x86/lib/putuser.S
29658+++ b/arch/x86/lib/putuser.S
29659@@ -16,7 +16,9 @@
29660 #include <asm/errno.h>
29661 #include <asm/asm.h>
29662 #include <asm/smap.h>
29663-
29664+#include <asm/segment.h>
29665+#include <asm/pgtable.h>
29666+#include <asm/alternative-asm.h>
29667
29668 /*
29669 * __put_user_X
29670@@ -30,57 +32,125 @@
29671 * as they get called from within inline assembly.
29672 */
29673
29674-#define ENTER CFI_STARTPROC ; \
29675- GET_THREAD_INFO(%_ASM_BX)
29676-#define EXIT ASM_CLAC ; \
29677- ret ; \
29678+#define ENTER CFI_STARTPROC
29679+#define EXIT ASM_CLAC ; \
29680+ pax_force_retaddr ; \
29681+ ret ; \
29682 CFI_ENDPROC
29683
29684+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29685+#define _DEST %_ASM_CX,%_ASM_BX
29686+#else
29687+#define _DEST %_ASM_CX
29688+#endif
29689+
29690+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
29691+#define __copyuser_seg gs;
29692+#else
29693+#define __copyuser_seg
29694+#endif
29695+
29696 .text
29697 ENTRY(__put_user_1)
29698 ENTER
29699+
29700+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
29701+ GET_THREAD_INFO(%_ASM_BX)
29702 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
29703 jae bad_put_user
29704 ASM_STAC
29705-1: movb %al,(%_ASM_CX)
29706+
29707+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29708+ mov pax_user_shadow_base,%_ASM_BX
29709+ cmp %_ASM_BX,%_ASM_CX
29710+ jb 1234f
29711+ xor %ebx,%ebx
29712+1234:
29713+#endif
29714+
29715+#endif
29716+
29717+1: __copyuser_seg movb %al,(_DEST)
29718 xor %eax,%eax
29719 EXIT
29720 ENDPROC(__put_user_1)
29721
29722 ENTRY(__put_user_2)
29723 ENTER
29724+
29725+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
29726+ GET_THREAD_INFO(%_ASM_BX)
29727 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
29728 sub $1,%_ASM_BX
29729 cmp %_ASM_BX,%_ASM_CX
29730 jae bad_put_user
29731 ASM_STAC
29732-2: movw %ax,(%_ASM_CX)
29733+
29734+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29735+ mov pax_user_shadow_base,%_ASM_BX
29736+ cmp %_ASM_BX,%_ASM_CX
29737+ jb 1234f
29738+ xor %ebx,%ebx
29739+1234:
29740+#endif
29741+
29742+#endif
29743+
29744+2: __copyuser_seg movw %ax,(_DEST)
29745 xor %eax,%eax
29746 EXIT
29747 ENDPROC(__put_user_2)
29748
29749 ENTRY(__put_user_4)
29750 ENTER
29751+
29752+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
29753+ GET_THREAD_INFO(%_ASM_BX)
29754 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
29755 sub $3,%_ASM_BX
29756 cmp %_ASM_BX,%_ASM_CX
29757 jae bad_put_user
29758 ASM_STAC
29759-3: movl %eax,(%_ASM_CX)
29760+
29761+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29762+ mov pax_user_shadow_base,%_ASM_BX
29763+ cmp %_ASM_BX,%_ASM_CX
29764+ jb 1234f
29765+ xor %ebx,%ebx
29766+1234:
29767+#endif
29768+
29769+#endif
29770+
29771+3: __copyuser_seg movl %eax,(_DEST)
29772 xor %eax,%eax
29773 EXIT
29774 ENDPROC(__put_user_4)
29775
29776 ENTRY(__put_user_8)
29777 ENTER
29778+
29779+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
29780+ GET_THREAD_INFO(%_ASM_BX)
29781 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
29782 sub $7,%_ASM_BX
29783 cmp %_ASM_BX,%_ASM_CX
29784 jae bad_put_user
29785 ASM_STAC
29786-4: mov %_ASM_AX,(%_ASM_CX)
29787+
29788+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29789+ mov pax_user_shadow_base,%_ASM_BX
29790+ cmp %_ASM_BX,%_ASM_CX
29791+ jb 1234f
29792+ xor %ebx,%ebx
29793+1234:
29794+#endif
29795+
29796+#endif
29797+
29798+4: __copyuser_seg mov %_ASM_AX,(_DEST)
29799 #ifdef CONFIG_X86_32
29800-5: movl %edx,4(%_ASM_CX)
29801+5: __copyuser_seg movl %edx,4(_DEST)
29802 #endif
29803 xor %eax,%eax
29804 EXIT
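putuser.S gets the same treatment as getuser.S, with one twist: instead of rewriting the pointer register, the amd64 UDEREF path keeps the shadow-base bias in %_ASM_BX and folds it into the store via _DEST ("%_ASM_CX,%_ASM_BX"), zeroing %ebx when the pointer is already above the base. As a hedged C model:

	/* The store target is ptr + bias, with bias = shadow_base below the
	 * base and 0 above it, so a single (%rcx,%rbx) operand serves both
	 * cases with no branch around the store itself. */
	static inline void *uderef_dest(void *uptr, unsigned long shadow_base)
	{
		unsigned long bias =
			(unsigned long)uptr < shadow_base ? shadow_base : 0;

		return (char *)uptr + bias;
	}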
29805diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
29806index 1cad221..de671ee 100644
29807--- a/arch/x86/lib/rwlock.S
29808+++ b/arch/x86/lib/rwlock.S
29809@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
29810 FRAME
29811 0: LOCK_PREFIX
29812 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
29813+
29814+#ifdef CONFIG_PAX_REFCOUNT
29815+ jno 1234f
29816+ LOCK_PREFIX
29817+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
29818+ int $4
29819+1234:
29820+ _ASM_EXTABLE(1234b, 1234b)
29821+#endif
29822+
29823 1: rep; nop
29824 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
29825 jne 1b
29826 LOCK_PREFIX
29827 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
29828+
29829+#ifdef CONFIG_PAX_REFCOUNT
29830+ jno 1234f
29831+ LOCK_PREFIX
29832+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
29833+ int $4
29834+1234:
29835+ _ASM_EXTABLE(1234b, 1234b)
29836+#endif
29837+
29838 jnz 0b
29839 ENDFRAME
29840+ pax_force_retaddr
29841 ret
29842 CFI_ENDPROC
29843 END(__write_lock_failed)
29844@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
29845 FRAME
29846 0: LOCK_PREFIX
29847 READ_LOCK_SIZE(inc) (%__lock_ptr)
29848+
29849+#ifdef CONFIG_PAX_REFCOUNT
29850+ jno 1234f
29851+ LOCK_PREFIX
29852+ READ_LOCK_SIZE(dec) (%__lock_ptr)
29853+ int $4
29854+1234:
29855+ _ASM_EXTABLE(1234b, 1234b)
29856+#endif
29857+
29858 1: rep; nop
29859 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
29860 js 1b
29861 LOCK_PREFIX
29862 READ_LOCK_SIZE(dec) (%__lock_ptr)
29863+
29864+#ifdef CONFIG_PAX_REFCOUNT
29865+ jno 1234f
29866+ LOCK_PREFIX
29867+ READ_LOCK_SIZE(inc) (%__lock_ptr)
29868+ int $4
29869+1234:
29870+ _ASM_EXTABLE(1234b, 1234b)
29871+#endif
29872+
29873 js 0b
29874 ENDFRAME
29875+ pax_force_retaddr
29876 ret
29877 CFI_ENDPROC
29878 END(__read_lock_failed)
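The CONFIG_PAX_REFCOUNT hunks follow every locked add/inc with jno: if the operation set the overflow flag, it is undone with the mirror-image locked op and int $4 raises the overflow exception (#OF), whose handler reports the refcount overflow; the _ASM_EXTABLE entry makes the trap resumable. A hedged C model of the semantics (the asm applies the undo after the fact rather than checking first); __builtin_add_overflow is a real GCC/Clang builtin, report_and_trap() stands in for the int $4 handler:

	static void refcount_add_checked(int *ctr, int delta)
	{
		int sum;

		if (__builtin_add_overflow(*ctr, delta, &sum)) {
			/* asm: undo with the opposite locked op, then int $4 */
			report_and_trap();
			return;
		}
		*ctr = sum;
	}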
29879diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
29880index 5dff5f0..cadebf4 100644
29881--- a/arch/x86/lib/rwsem.S
29882+++ b/arch/x86/lib/rwsem.S
29883@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
29884 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
29885 CFI_RESTORE __ASM_REG(dx)
29886 restore_common_regs
29887+ pax_force_retaddr
29888 ret
29889 CFI_ENDPROC
29890 ENDPROC(call_rwsem_down_read_failed)
29891@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
29892 movq %rax,%rdi
29893 call rwsem_down_write_failed
29894 restore_common_regs
29895+ pax_force_retaddr
29896 ret
29897 CFI_ENDPROC
29898 ENDPROC(call_rwsem_down_write_failed)
29899@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
29900 movq %rax,%rdi
29901 call rwsem_wake
29902 restore_common_regs
29903-1: ret
29904+1: pax_force_retaddr
29905+ ret
29906 CFI_ENDPROC
29907 ENDPROC(call_rwsem_wake)
29908
29909@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
29910 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
29911 CFI_RESTORE __ASM_REG(dx)
29912 restore_common_regs
29913+ pax_force_retaddr
29914 ret
29915 CFI_ENDPROC
29916 ENDPROC(call_rwsem_downgrade_wake)
29917diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
29918index a63efd6..8149fbe 100644
29919--- a/arch/x86/lib/thunk_64.S
29920+++ b/arch/x86/lib/thunk_64.S
29921@@ -8,6 +8,7 @@
29922 #include <linux/linkage.h>
29923 #include <asm/dwarf2.h>
29924 #include <asm/calling.h>
29925+#include <asm/alternative-asm.h>
29926
29927 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
29928 .macro THUNK name, func, put_ret_addr_in_rdi=0
29929@@ -15,11 +16,11 @@
29930 \name:
29931 CFI_STARTPROC
29932
29933- /* this one pushes 9 elems, the next one would be %rIP */
29934- SAVE_ARGS
29935+ /* this one pushes 15+1 elems, the next one would be %rIP */
29936+ SAVE_ARGS 8
29937
29938 .if \put_ret_addr_in_rdi
29939- movq_cfi_restore 9*8, rdi
29940+ movq_cfi_restore RIP, rdi
29941 .endif
29942
29943 call \func
29944@@ -38,8 +39,9 @@
29945
29946 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
29947 CFI_STARTPROC
29948- SAVE_ARGS
29949+ SAVE_ARGS 8
29950 restore:
29951- RESTORE_ARGS
29952+ RESTORE_ARGS 1,8
29953+ pax_force_retaddr
29954 ret
29955 CFI_ENDPROC
29956diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
29957index e2f5e21..4b22130 100644
29958--- a/arch/x86/lib/usercopy_32.c
29959+++ b/arch/x86/lib/usercopy_32.c
29960@@ -42,11 +42,13 @@ do { \
29961 int __d0; \
29962 might_fault(); \
29963 __asm__ __volatile__( \
29964+ __COPYUSER_SET_ES \
29965 ASM_STAC "\n" \
29966 "0: rep; stosl\n" \
29967 " movl %2,%0\n" \
29968 "1: rep; stosb\n" \
29969 "2: " ASM_CLAC "\n" \
29970+ __COPYUSER_RESTORE_ES \
29971 ".section .fixup,\"ax\"\n" \
29972 "3: lea 0(%2,%0,4),%0\n" \
29973 " jmp 2b\n" \
29974@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
29975
29976 #ifdef CONFIG_X86_INTEL_USERCOPY
29977 static unsigned long
29978-__copy_user_intel(void __user *to, const void *from, unsigned long size)
29979+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
29980 {
29981 int d0, d1;
29982 __asm__ __volatile__(
29983@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
29984 " .align 2,0x90\n"
29985 "3: movl 0(%4), %%eax\n"
29986 "4: movl 4(%4), %%edx\n"
29987- "5: movl %%eax, 0(%3)\n"
29988- "6: movl %%edx, 4(%3)\n"
29989+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
29990+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
29991 "7: movl 8(%4), %%eax\n"
29992 "8: movl 12(%4),%%edx\n"
29993- "9: movl %%eax, 8(%3)\n"
29994- "10: movl %%edx, 12(%3)\n"
29995+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
29996+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
29997 "11: movl 16(%4), %%eax\n"
29998 "12: movl 20(%4), %%edx\n"
29999- "13: movl %%eax, 16(%3)\n"
30000- "14: movl %%edx, 20(%3)\n"
30001+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
30002+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
30003 "15: movl 24(%4), %%eax\n"
30004 "16: movl 28(%4), %%edx\n"
30005- "17: movl %%eax, 24(%3)\n"
30006- "18: movl %%edx, 28(%3)\n"
30007+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
30008+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
30009 "19: movl 32(%4), %%eax\n"
30010 "20: movl 36(%4), %%edx\n"
30011- "21: movl %%eax, 32(%3)\n"
30012- "22: movl %%edx, 36(%3)\n"
30013+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
30014+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
30015 "23: movl 40(%4), %%eax\n"
30016 "24: movl 44(%4), %%edx\n"
30017- "25: movl %%eax, 40(%3)\n"
30018- "26: movl %%edx, 44(%3)\n"
30019+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
30020+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
30021 "27: movl 48(%4), %%eax\n"
30022 "28: movl 52(%4), %%edx\n"
30023- "29: movl %%eax, 48(%3)\n"
30024- "30: movl %%edx, 52(%3)\n"
30025+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
30026+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
30027 "31: movl 56(%4), %%eax\n"
30028 "32: movl 60(%4), %%edx\n"
30029- "33: movl %%eax, 56(%3)\n"
30030- "34: movl %%edx, 60(%3)\n"
30031+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
30032+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
30033 " addl $-64, %0\n"
30034 " addl $64, %4\n"
30035 " addl $64, %3\n"
30036@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
30037 " shrl $2, %0\n"
30038 " andl $3, %%eax\n"
30039 " cld\n"
30040+ __COPYUSER_SET_ES
30041 "99: rep; movsl\n"
30042 "36: movl %%eax, %0\n"
30043 "37: rep; movsb\n"
30044 "100:\n"
30045+ __COPYUSER_RESTORE_ES
30046+ ".section .fixup,\"ax\"\n"
30047+ "101: lea 0(%%eax,%0,4),%0\n"
30048+ " jmp 100b\n"
30049+ ".previous\n"
30050+ _ASM_EXTABLE(1b,100b)
30051+ _ASM_EXTABLE(2b,100b)
30052+ _ASM_EXTABLE(3b,100b)
30053+ _ASM_EXTABLE(4b,100b)
30054+ _ASM_EXTABLE(5b,100b)
30055+ _ASM_EXTABLE(6b,100b)
30056+ _ASM_EXTABLE(7b,100b)
30057+ _ASM_EXTABLE(8b,100b)
30058+ _ASM_EXTABLE(9b,100b)
30059+ _ASM_EXTABLE(10b,100b)
30060+ _ASM_EXTABLE(11b,100b)
30061+ _ASM_EXTABLE(12b,100b)
30062+ _ASM_EXTABLE(13b,100b)
30063+ _ASM_EXTABLE(14b,100b)
30064+ _ASM_EXTABLE(15b,100b)
30065+ _ASM_EXTABLE(16b,100b)
30066+ _ASM_EXTABLE(17b,100b)
30067+ _ASM_EXTABLE(18b,100b)
30068+ _ASM_EXTABLE(19b,100b)
30069+ _ASM_EXTABLE(20b,100b)
30070+ _ASM_EXTABLE(21b,100b)
30071+ _ASM_EXTABLE(22b,100b)
30072+ _ASM_EXTABLE(23b,100b)
30073+ _ASM_EXTABLE(24b,100b)
30074+ _ASM_EXTABLE(25b,100b)
30075+ _ASM_EXTABLE(26b,100b)
30076+ _ASM_EXTABLE(27b,100b)
30077+ _ASM_EXTABLE(28b,100b)
30078+ _ASM_EXTABLE(29b,100b)
30079+ _ASM_EXTABLE(30b,100b)
30080+ _ASM_EXTABLE(31b,100b)
30081+ _ASM_EXTABLE(32b,100b)
30082+ _ASM_EXTABLE(33b,100b)
30083+ _ASM_EXTABLE(34b,100b)
30084+ _ASM_EXTABLE(35b,100b)
30085+ _ASM_EXTABLE(36b,100b)
30086+ _ASM_EXTABLE(37b,100b)
30087+ _ASM_EXTABLE(99b,101b)
30088+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
30089+ : "1"(to), "2"(from), "0"(size)
30090+ : "eax", "edx", "memory");
30091+ return size;
30092+}
30093+
30094+static unsigned long
30095+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
30096+{
30097+ int d0, d1;
30098+ __asm__ __volatile__(
30099+ " .align 2,0x90\n"
30100+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
30101+ " cmpl $67, %0\n"
30102+ " jbe 3f\n"
30103+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
30104+ " .align 2,0x90\n"
30105+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
30106+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
30107+ "5: movl %%eax, 0(%3)\n"
30108+ "6: movl %%edx, 4(%3)\n"
30109+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
30110+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
30111+ "9: movl %%eax, 8(%3)\n"
30112+ "10: movl %%edx, 12(%3)\n"
30113+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
30114+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
30115+ "13: movl %%eax, 16(%3)\n"
30116+ "14: movl %%edx, 20(%3)\n"
30117+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
30118+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
30119+ "17: movl %%eax, 24(%3)\n"
30120+ "18: movl %%edx, 28(%3)\n"
30121+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
30122+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
30123+ "21: movl %%eax, 32(%3)\n"
30124+ "22: movl %%edx, 36(%3)\n"
30125+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
30126+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
30127+ "25: movl %%eax, 40(%3)\n"
30128+ "26: movl %%edx, 44(%3)\n"
30129+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
30130+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
30131+ "29: movl %%eax, 48(%3)\n"
30132+ "30: movl %%edx, 52(%3)\n"
30133+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
30134+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
30135+ "33: movl %%eax, 56(%3)\n"
30136+ "34: movl %%edx, 60(%3)\n"
30137+ " addl $-64, %0\n"
30138+ " addl $64, %4\n"
30139+ " addl $64, %3\n"
30140+ " cmpl $63, %0\n"
30141+ " ja 1b\n"
30142+ "35: movl %0, %%eax\n"
30143+ " shrl $2, %0\n"
30144+ " andl $3, %%eax\n"
30145+ " cld\n"
30146+ "99: rep; "__copyuser_seg" movsl\n"
30147+ "36: movl %%eax, %0\n"
30148+ "37: rep; "__copyuser_seg" movsb\n"
30149+ "100:\n"
30150 ".section .fixup,\"ax\"\n"
30151 "101: lea 0(%%eax,%0,4),%0\n"
30152 " jmp 100b\n"
30153@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
30154 int d0, d1;
30155 __asm__ __volatile__(
30156 " .align 2,0x90\n"
30157- "0: movl 32(%4), %%eax\n"
30158+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
30159 " cmpl $67, %0\n"
30160 " jbe 2f\n"
30161- "1: movl 64(%4), %%eax\n"
30162+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
30163 " .align 2,0x90\n"
30164- "2: movl 0(%4), %%eax\n"
30165- "21: movl 4(%4), %%edx\n"
30166+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
30167+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
30168 " movl %%eax, 0(%3)\n"
30169 " movl %%edx, 4(%3)\n"
30170- "3: movl 8(%4), %%eax\n"
30171- "31: movl 12(%4),%%edx\n"
30172+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
30173+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
30174 " movl %%eax, 8(%3)\n"
30175 " movl %%edx, 12(%3)\n"
30176- "4: movl 16(%4), %%eax\n"
30177- "41: movl 20(%4), %%edx\n"
30178+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
30179+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
30180 " movl %%eax, 16(%3)\n"
30181 " movl %%edx, 20(%3)\n"
30182- "10: movl 24(%4), %%eax\n"
30183- "51: movl 28(%4), %%edx\n"
30184+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
30185+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
30186 " movl %%eax, 24(%3)\n"
30187 " movl %%edx, 28(%3)\n"
30188- "11: movl 32(%4), %%eax\n"
30189- "61: movl 36(%4), %%edx\n"
30190+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
30191+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
30192 " movl %%eax, 32(%3)\n"
30193 " movl %%edx, 36(%3)\n"
30194- "12: movl 40(%4), %%eax\n"
30195- "71: movl 44(%4), %%edx\n"
30196+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
30197+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
30198 " movl %%eax, 40(%3)\n"
30199 " movl %%edx, 44(%3)\n"
30200- "13: movl 48(%4), %%eax\n"
30201- "81: movl 52(%4), %%edx\n"
30202+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
30203+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
30204 " movl %%eax, 48(%3)\n"
30205 " movl %%edx, 52(%3)\n"
30206- "14: movl 56(%4), %%eax\n"
30207- "91: movl 60(%4), %%edx\n"
30208+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
30209+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
30210 " movl %%eax, 56(%3)\n"
30211 " movl %%edx, 60(%3)\n"
30212 " addl $-64, %0\n"
30213@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
30214 " shrl $2, %0\n"
30215 " andl $3, %%eax\n"
30216 " cld\n"
30217- "6: rep; movsl\n"
30218+ "6: rep; "__copyuser_seg" movsl\n"
30219 " movl %%eax,%0\n"
30220- "7: rep; movsb\n"
30221+ "7: rep; "__copyuser_seg" movsb\n"
30222 "8:\n"
30223 ".section .fixup,\"ax\"\n"
30224 "9: lea 0(%%eax,%0,4),%0\n"
30225@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
30226
30227 __asm__ __volatile__(
30228 " .align 2,0x90\n"
30229- "0: movl 32(%4), %%eax\n"
30230+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
30231 " cmpl $67, %0\n"
30232 " jbe 2f\n"
30233- "1: movl 64(%4), %%eax\n"
30234+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
30235 " .align 2,0x90\n"
30236- "2: movl 0(%4), %%eax\n"
30237- "21: movl 4(%4), %%edx\n"
30238+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
30239+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
30240 " movnti %%eax, 0(%3)\n"
30241 " movnti %%edx, 4(%3)\n"
30242- "3: movl 8(%4), %%eax\n"
30243- "31: movl 12(%4),%%edx\n"
30244+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
30245+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
30246 " movnti %%eax, 8(%3)\n"
30247 " movnti %%edx, 12(%3)\n"
30248- "4: movl 16(%4), %%eax\n"
30249- "41: movl 20(%4), %%edx\n"
30250+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
30251+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
30252 " movnti %%eax, 16(%3)\n"
30253 " movnti %%edx, 20(%3)\n"
30254- "10: movl 24(%4), %%eax\n"
30255- "51: movl 28(%4), %%edx\n"
30256+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
30257+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
30258 " movnti %%eax, 24(%3)\n"
30259 " movnti %%edx, 28(%3)\n"
30260- "11: movl 32(%4), %%eax\n"
30261- "61: movl 36(%4), %%edx\n"
30262+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
30263+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
30264 " movnti %%eax, 32(%3)\n"
30265 " movnti %%edx, 36(%3)\n"
30266- "12: movl 40(%4), %%eax\n"
30267- "71: movl 44(%4), %%edx\n"
30268+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
30269+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
30270 " movnti %%eax, 40(%3)\n"
30271 " movnti %%edx, 44(%3)\n"
30272- "13: movl 48(%4), %%eax\n"
30273- "81: movl 52(%4), %%edx\n"
30274+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
30275+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
30276 " movnti %%eax, 48(%3)\n"
30277 " movnti %%edx, 52(%3)\n"
30278- "14: movl 56(%4), %%eax\n"
30279- "91: movl 60(%4), %%edx\n"
30280+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
30281+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
30282 " movnti %%eax, 56(%3)\n"
30283 " movnti %%edx, 60(%3)\n"
30284 " addl $-64, %0\n"
30285@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
30286 " shrl $2, %0\n"
30287 " andl $3, %%eax\n"
30288 " cld\n"
30289- "6: rep; movsl\n"
30290+ "6: rep; "__copyuser_seg" movsl\n"
30291 " movl %%eax,%0\n"
30292- "7: rep; movsb\n"
30293+ "7: rep; "__copyuser_seg" movsb\n"
30294 "8:\n"
30295 ".section .fixup,\"ax\"\n"
30296 "9: lea 0(%%eax,%0,4),%0\n"
30297@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
30298
30299 __asm__ __volatile__(
30300 " .align 2,0x90\n"
30301- "0: movl 32(%4), %%eax\n"
30302+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
30303 " cmpl $67, %0\n"
30304 " jbe 2f\n"
30305- "1: movl 64(%4), %%eax\n"
30306+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
30307 " .align 2,0x90\n"
30308- "2: movl 0(%4), %%eax\n"
30309- "21: movl 4(%4), %%edx\n"
30310+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
30311+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
30312 " movnti %%eax, 0(%3)\n"
30313 " movnti %%edx, 4(%3)\n"
30314- "3: movl 8(%4), %%eax\n"
30315- "31: movl 12(%4),%%edx\n"
30316+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
30317+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
30318 " movnti %%eax, 8(%3)\n"
30319 " movnti %%edx, 12(%3)\n"
30320- "4: movl 16(%4), %%eax\n"
30321- "41: movl 20(%4), %%edx\n"
30322+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
30323+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
30324 " movnti %%eax, 16(%3)\n"
30325 " movnti %%edx, 20(%3)\n"
30326- "10: movl 24(%4), %%eax\n"
30327- "51: movl 28(%4), %%edx\n"
30328+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
30329+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
30330 " movnti %%eax, 24(%3)\n"
30331 " movnti %%edx, 28(%3)\n"
30332- "11: movl 32(%4), %%eax\n"
30333- "61: movl 36(%4), %%edx\n"
30334+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
30335+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
30336 " movnti %%eax, 32(%3)\n"
30337 " movnti %%edx, 36(%3)\n"
30338- "12: movl 40(%4), %%eax\n"
30339- "71: movl 44(%4), %%edx\n"
30340+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
30341+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
30342 " movnti %%eax, 40(%3)\n"
30343 " movnti %%edx, 44(%3)\n"
30344- "13: movl 48(%4), %%eax\n"
30345- "81: movl 52(%4), %%edx\n"
30346+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
30347+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
30348 " movnti %%eax, 48(%3)\n"
30349 " movnti %%edx, 52(%3)\n"
30350- "14: movl 56(%4), %%eax\n"
30351- "91: movl 60(%4), %%edx\n"
30352+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
30353+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
30354 " movnti %%eax, 56(%3)\n"
30355 " movnti %%edx, 60(%3)\n"
30356 " addl $-64, %0\n"
30357@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
30358 " shrl $2, %0\n"
30359 " andl $3, %%eax\n"
30360 " cld\n"
30361- "6: rep; movsl\n"
30362+ "6: rep; "__copyuser_seg" movsl\n"
30363 " movl %%eax,%0\n"
30364- "7: rep; movsb\n"
30365+ "7: rep; "__copyuser_seg" movsb\n"
30366 "8:\n"
30367 ".section .fixup,\"ax\"\n"
30368 "9: lea 0(%%eax,%0,4),%0\n"
30369@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
30370 */
30371 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
30372 unsigned long size);
30373-unsigned long __copy_user_intel(void __user *to, const void *from,
30374+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
30375+ unsigned long size);
30376+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
30377 unsigned long size);
30378 unsigned long __copy_user_zeroing_intel_nocache(void *to,
30379 const void __user *from, unsigned long size);
30380 #endif /* CONFIG_X86_INTEL_USERCOPY */
30381
30382 /* Generic arbitrary sized copy. */
30383-#define __copy_user(to, from, size) \
30384+#define __copy_user(to, from, size, prefix, set, restore) \
30385 do { \
30386 int __d0, __d1, __d2; \
30387 __asm__ __volatile__( \
30388+ set \
30389 " cmp $7,%0\n" \
30390 " jbe 1f\n" \
30391 " movl %1,%0\n" \
30392 " negl %0\n" \
30393 " andl $7,%0\n" \
30394 " subl %0,%3\n" \
30395- "4: rep; movsb\n" \
30396+ "4: rep; "prefix"movsb\n" \
30397 " movl %3,%0\n" \
30398 " shrl $2,%0\n" \
30399 " andl $3,%3\n" \
30400 " .align 2,0x90\n" \
30401- "0: rep; movsl\n" \
30402+ "0: rep; "prefix"movsl\n" \
30403 " movl %3,%0\n" \
30404- "1: rep; movsb\n" \
30405+ "1: rep; "prefix"movsb\n" \
30406 "2:\n" \
30407+ restore \
30408 ".section .fixup,\"ax\"\n" \
30409 "5: addl %3,%0\n" \
30410 " jmp 2b\n" \
30411@@ -538,14 +650,14 @@ do { \
30412 " negl %0\n" \
30413 " andl $7,%0\n" \
30414 " subl %0,%3\n" \
30415- "4: rep; movsb\n" \
30416+ "4: rep; "__copyuser_seg"movsb\n" \
30417 " movl %3,%0\n" \
30418 " shrl $2,%0\n" \
30419 " andl $3,%3\n" \
30420 " .align 2,0x90\n" \
30421- "0: rep; movsl\n" \
30422+ "0: rep; "__copyuser_seg"movsl\n" \
30423 " movl %3,%0\n" \
30424- "1: rep; movsb\n" \
30425+ "1: rep; "__copyuser_seg"movsb\n" \
30426 "2:\n" \
30427 ".section .fixup,\"ax\"\n" \
30428 "5: addl %3,%0\n" \
30429@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
30430 {
30431 stac();
30432 if (movsl_is_ok(to, from, n))
30433- __copy_user(to, from, n);
30434+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
30435 else
30436- n = __copy_user_intel(to, from, n);
30437+ n = __generic_copy_to_user_intel(to, from, n);
30438 clac();
30439 return n;
30440 }
30441@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
30442 {
30443 stac();
30444 if (movsl_is_ok(to, from, n))
30445- __copy_user(to, from, n);
30446+ __copy_user(to, from, n, __copyuser_seg, "", "");
30447 else
30448- n = __copy_user_intel((void __user *)to,
30449- (const void *)from, n);
30450+ n = __generic_copy_from_user_intel(to, from, n);
30451 clac();
30452 return n;
30453 }
30454@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
30455 if (n > 64 && cpu_has_xmm2)
30456 n = __copy_user_intel_nocache(to, from, n);
30457 else
30458- __copy_user(to, from, n);
30459+ __copy_user(to, from, n, __copyuser_seg, "", "");
30460 #else
30461- __copy_user(to, from, n);
30462+ __copy_user(to, from, n, __copyuser_seg, "", "");
30463 #endif
30464 clac();
30465 return n;
30466 }
30467 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
30468
30469-/**
30470- * copy_to_user: - Copy a block of data into user space.
30471- * @to: Destination address, in user space.
30472- * @from: Source address, in kernel space.
30473- * @n: Number of bytes to copy.
30474- *
30475- * Context: User context only. This function may sleep.
30476- *
30477- * Copy data from kernel space to user space.
30478- *
30479- * Returns number of bytes that could not be copied.
30480- * On success, this will be zero.
30481- */
30482-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
30483+#ifdef CONFIG_PAX_MEMORY_UDEREF
30484+void __set_fs(mm_segment_t x)
30485 {
30486- if (access_ok(VERIFY_WRITE, to, n))
30487- n = __copy_to_user(to, from, n);
30488- return n;
30489+ switch (x.seg) {
30490+ case 0:
30491+ loadsegment(gs, 0);
30492+ break;
30493+ case TASK_SIZE_MAX:
30494+ loadsegment(gs, __USER_DS);
30495+ break;
30496+ case -1UL:
30497+ loadsegment(gs, __KERNEL_DS);
30498+ break;
30499+ default:
30500+ BUG();
30501+ }
30502 }
30503-EXPORT_SYMBOL(_copy_to_user);
30504+EXPORT_SYMBOL(__set_fs);
30505
30506-/**
30507- * copy_from_user: - Copy a block of data from user space.
30508- * @to: Destination address, in kernel space.
30509- * @from: Source address, in user space.
30510- * @n: Number of bytes to copy.
30511- *
30512- * Context: User context only. This function may sleep.
30513- *
30514- * Copy data from user space to kernel space.
30515- *
30516- * Returns number of bytes that could not be copied.
30517- * On success, this will be zero.
30518- *
30519- * If some data could not be copied, this function will pad the copied
30520- * data to the requested size using zero bytes.
30521- */
30522-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
30523+void set_fs(mm_segment_t x)
30524 {
30525- if (access_ok(VERIFY_READ, from, n))
30526- n = __copy_from_user(to, from, n);
30527- else
30528- memset(to, 0, n);
30529- return n;
30530+ current_thread_info()->addr_limit = x;
30531+ __set_fs(x);
30532 }
30533-EXPORT_SYMBOL(_copy_from_user);
30534+EXPORT_SYMBOL(set_fs);
30535+#endif
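In usercopy_32.c the copy templates grow segment parameters (a gs prefix for the from-user direction, an %es swap for to-user), the Intel fast paths are split into explicit to-user/from-user variants, and the inlined _copy_to_user/_copy_from_user wrappers give way to set_fs()/__set_fs(): under i386 UDEREF, changing addr_limit now also reloads %gs (USER_DS selects the userland segment, KERNEL_DS the kernel one) so the gs-prefixed accessors honor the current fs mode. Callers keep the classic pattern; a hedged usage sketch:

	/* get_fs()/set_fs()/KERNEL_DS are real (3.13-era) kernel names. */
	static void with_kernel_ds_model(void (*fn)(void))
	{
		mm_segment_t old_fs = get_fs();

		set_fs(KERNEL_DS);	/* under UDEREF: also loadsegment(gs, __KERNEL_DS) */
		fn();			/* user accessors may now reach kernel addresses */
		set_fs(old_fs);		/* restore the userland segment and addr_limit */
	}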
30536diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
30537index c905e89..01ab928 100644
30538--- a/arch/x86/lib/usercopy_64.c
30539+++ b/arch/x86/lib/usercopy_64.c
30540@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
30541 might_fault();
30542 /* no memory constraint because it doesn't change any memory gcc knows
30543 about */
30544+ pax_open_userland();
30545 stac();
30546 asm volatile(
30547 " testq %[size8],%[size8]\n"
30548@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
30549 _ASM_EXTABLE(0b,3b)
30550 _ASM_EXTABLE(1b,2b)
30551 : [size8] "=&c"(size), [dst] "=&D" (__d0)
30552- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
30553+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
30554 [zero] "r" (0UL), [eight] "r" (8UL));
30555 clac();
30556+ pax_close_userland();
30557 return size;
30558 }
30559 EXPORT_SYMBOL(__clear_user);
30560@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
30561 }
30562 EXPORT_SYMBOL(clear_user);
30563
30564-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
30565+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
30566 {
30567- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
30568- return copy_user_generic((__force void *)to, (__force void *)from, len);
30569- }
30570- return len;
30571+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
30572+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
30573+ return len;
30574 }
30575 EXPORT_SYMBOL(copy_in_user);
30576
30577@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
30578 * it is not necessary to optimize tail handling.
30579 */
30580 __visible unsigned long
30581-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
30582+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
30583 {
30584 char c;
30585 unsigned zero_len;
30586
30587+ clac();
30588+ pax_close_userland();
30589 for (; len; --len, to++) {
30590 if (__get_user_nocheck(c, from++, sizeof(char)))
30591 break;
30592@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
30593 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
30594 if (__put_user_nocheck(c, to++, sizeof(char)))
30595 break;
30596- clac();
30597 return len;
30598 }
30599diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
30600index 6a19ad9..1c48f9a 100644
30601--- a/arch/x86/mm/Makefile
30602+++ b/arch/x86/mm/Makefile
30603@@ -30,3 +30,7 @@ obj-$(CONFIG_ACPI_NUMA) += srat.o
30604 obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
30605
30606 obj-$(CONFIG_MEMTEST) += memtest.o
30607+
30608+quote:="
30609+obj-$(CONFIG_X86_64) += uderef_64.o
30610+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
30611diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
30612index 903ec1e..c4166b2 100644
30613--- a/arch/x86/mm/extable.c
30614+++ b/arch/x86/mm/extable.c
30615@@ -6,12 +6,24 @@
30616 static inline unsigned long
30617 ex_insn_addr(const struct exception_table_entry *x)
30618 {
30619- return (unsigned long)&x->insn + x->insn;
30620+ unsigned long reloc = 0;
30621+
30622+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30623+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
30624+#endif
30625+
30626+ return (unsigned long)&x->insn + x->insn + reloc;
30627 }
30628 static inline unsigned long
30629 ex_fixup_addr(const struct exception_table_entry *x)
30630 {
30631- return (unsigned long)&x->fixup + x->fixup;
30632+ unsigned long reloc = 0;
30633+
30634+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30635+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
30636+#endif
30637+
30638+ return (unsigned long)&x->fixup + x->fixup + reloc;
30639 }
30640
30641 int fixup_exception(struct pt_regs *regs)
30642@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
30643 unsigned long new_ip;
30644
30645 #ifdef CONFIG_PNPBIOS
30646- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
30647+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
30648 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
30649 extern u32 pnp_bios_is_utter_crap;
30650 pnp_bios_is_utter_crap = 1;
30651@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
30652 i += 4;
30653 p->fixup -= i;
30654 i += 4;
30655+
30656+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30657+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
30658+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
30659+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
30660+#endif
30661+
30662 }
30663 }
30664
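x86 exception-table entries are 32-bit self-relative offsets. With 32-bit KERNEXEC the kernel runs at a different virtual base than it was linked at, so the decoded absolute addresses must be biased by (____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR); sort_extable() pre-subtracts the same delta so the biased decode round-trips, and the BUILD_BUG_ON pins this to build-time-sorted tables. A hedged toy decoder mirroring ex_insn_addr()/ex_fixup_addr():

	/* Field layout mirrors struct exception_table_entry; kernexec_reloc
	 * is the bias above on i386+KERNEXEC and 0 otherwise. */
	struct extable_entry_model {
		int insn;	/* offset of the faulting insn, relative to this field */
		int fixup;	/* offset of the fixup code, relative to this field */
	};

	static unsigned long decode_rel(const int *field, long kernexec_reloc)
	{
		return (unsigned long)field + *field + kernexec_reloc;
	}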
30665diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
30666index 9d591c8..2e61790 100644
30667--- a/arch/x86/mm/fault.c
30668+++ b/arch/x86/mm/fault.c
30669@@ -14,11 +14,18 @@
30670 #include <linux/hugetlb.h> /* hstate_index_to_shift */
30671 #include <linux/prefetch.h> /* prefetchw */
30672 #include <linux/context_tracking.h> /* exception_enter(), ... */
30673+#include <linux/unistd.h>
30674+#include <linux/compiler.h>
30675
30676 #include <asm/traps.h> /* dotraplinkage, ... */
30677 #include <asm/pgalloc.h> /* pgd_*(), ... */
30678 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
30679 #include <asm/fixmap.h> /* VSYSCALL_START */
30680+#include <asm/tlbflush.h>
30681+
30682+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30683+#include <asm/stacktrace.h>
30684+#endif
30685
30686 #define CREATE_TRACE_POINTS
30687 #include <asm/trace/exceptions.h>
30688@@ -59,7 +66,7 @@ static inline int __kprobes kprobes_fault(struct pt_regs *regs)
30689 int ret = 0;
30690
30691 /* kprobe_running() needs smp_processor_id() */
30692- if (kprobes_built_in() && !user_mode_vm(regs)) {
30693+ if (kprobes_built_in() && !user_mode(regs)) {
30694 preempt_disable();
30695 if (kprobe_running() && kprobe_fault_handler(regs, 14))
30696 ret = 1;
30697@@ -120,7 +127,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
30698 return !instr_lo || (instr_lo>>1) == 1;
30699 case 0x00:
30700 /* Prefetch instruction is 0x0F0D or 0x0F18 */
30701- if (probe_kernel_address(instr, opcode))
30702+ if (user_mode(regs)) {
30703+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
30704+ return 0;
30705+ } else if (probe_kernel_address(instr, opcode))
30706 return 0;
30707
30708 *prefetch = (instr_lo == 0xF) &&
30709@@ -154,7 +164,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
30710 while (instr < max_instr) {
30711 unsigned char opcode;
30712
30713- if (probe_kernel_address(instr, opcode))
30714+ if (user_mode(regs)) {
30715+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
30716+ break;
30717+ } else if (probe_kernel_address(instr, opcode))
30718 break;
30719
30720 instr++;
30721@@ -185,6 +198,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
30722 force_sig_info(si_signo, &info, tsk);
30723 }
30724
30725+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
30726+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
30727+#endif
30728+
30729+#ifdef CONFIG_PAX_EMUTRAMP
30730+static int pax_handle_fetch_fault(struct pt_regs *regs);
30731+#endif
30732+
30733+#ifdef CONFIG_PAX_PAGEEXEC
30734+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
30735+{
30736+ pgd_t *pgd;
30737+ pud_t *pud;
30738+ pmd_t *pmd;
30739+
30740+ pgd = pgd_offset(mm, address);
30741+ if (!pgd_present(*pgd))
30742+ return NULL;
30743+ pud = pud_offset(pgd, address);
30744+ if (!pud_present(*pud))
30745+ return NULL;
30746+ pmd = pmd_offset(pud, address);
30747+ if (!pmd_present(*pmd))
30748+ return NULL;
30749+ return pmd;
30750+}
30751+#endif
30752+
30753 DEFINE_SPINLOCK(pgd_lock);
30754 LIST_HEAD(pgd_list);
30755
30756@@ -235,10 +276,27 @@ void vmalloc_sync_all(void)
30757 for (address = VMALLOC_START & PMD_MASK;
30758 address >= TASK_SIZE && address < FIXADDR_TOP;
30759 address += PMD_SIZE) {
30760+
30761+#ifdef CONFIG_PAX_PER_CPU_PGD
30762+ unsigned long cpu;
30763+#else
30764 struct page *page;
30765+#endif
30766
30767 spin_lock(&pgd_lock);
30768+
30769+#ifdef CONFIG_PAX_PER_CPU_PGD
30770+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
30771+ pgd_t *pgd = get_cpu_pgd(cpu, user);
30772+ pmd_t *ret;
30773+
30774+ ret = vmalloc_sync_one(pgd, address);
30775+ if (!ret)
30776+ break;
30777+ pgd = get_cpu_pgd(cpu, kernel);
30778+#else
30779 list_for_each_entry(page, &pgd_list, lru) {
30780+ pgd_t *pgd;
30781 spinlock_t *pgt_lock;
30782 pmd_t *ret;
30783
30784@@ -246,8 +304,14 @@ void vmalloc_sync_all(void)
30785 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
30786
30787 spin_lock(pgt_lock);
30788- ret = vmalloc_sync_one(page_address(page), address);
30789+ pgd = page_address(page);
30790+#endif
30791+
30792+ ret = vmalloc_sync_one(pgd, address);
30793+
30794+#ifndef CONFIG_PAX_PER_CPU_PGD
30795 spin_unlock(pgt_lock);
30796+#endif
30797
30798 if (!ret)
30799 break;
30800@@ -281,6 +345,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
30801 * an interrupt in the middle of a task switch..
30802 */
30803 pgd_paddr = read_cr3();
30804+
30805+#ifdef CONFIG_PAX_PER_CPU_PGD
30806+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
30807+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
30808+#endif
30809+
30810 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
30811 if (!pmd_k)
30812 return -1;
30813@@ -376,11 +446,25 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
30814 * happen within a race in page table update. In the later
30815 * case just flush:
30816 */
30817- pgd = pgd_offset(current->active_mm, address);
30818+
30819 pgd_ref = pgd_offset_k(address);
30820 if (pgd_none(*pgd_ref))
30821 return -1;
30822
30823+#ifdef CONFIG_PAX_PER_CPU_PGD
30824+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
30825+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
30826+ if (pgd_none(*pgd)) {
30827+ set_pgd(pgd, *pgd_ref);
30828+ arch_flush_lazy_mmu_mode();
30829+ } else {
30830+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
30831+ }
30832+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
30833+#else
30834+ pgd = pgd_offset(current->active_mm, address);
30835+#endif
30836+
30837 if (pgd_none(*pgd)) {
30838 set_pgd(pgd, *pgd_ref);
30839 arch_flush_lazy_mmu_mode();
30840@@ -546,7 +630,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
30841 static int is_errata100(struct pt_regs *regs, unsigned long address)
30842 {
30843 #ifdef CONFIG_X86_64
30844- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
30845+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
30846 return 1;
30847 #endif
30848 return 0;
30849@@ -573,7 +657,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
30850 }
30851
30852 static const char nx_warning[] = KERN_CRIT
30853-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
30854+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
30855
30856 static void
30857 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
30858@@ -582,15 +666,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
30859 if (!oops_may_print())
30860 return;
30861
30862- if (error_code & PF_INSTR) {
30863+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
30864 unsigned int level;
30865
30866 pte_t *pte = lookup_address(address, &level);
30867
30868 if (pte && pte_present(*pte) && !pte_exec(*pte))
30869- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
30870+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
30871 }
30872
30873+#ifdef CONFIG_PAX_KERNEXEC
30874+ if (init_mm.start_code <= address && address < init_mm.end_code) {
30875+ if (current->signal->curr_ip)
30876+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
30877+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
30878+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
30879+ else
30880+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
30881+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
30882+ }
30883+#endif
30884+
30885 printk(KERN_ALERT "BUG: unable to handle kernel ");
30886 if (address < PAGE_SIZE)
30887 printk(KERN_CONT "NULL pointer dereference");
30888@@ -771,6 +867,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
30889 return;
30890 }
30891 #endif
30892+
30893+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
30894+ if (pax_is_fetch_fault(regs, error_code, address)) {
30895+
30896+#ifdef CONFIG_PAX_EMUTRAMP
30897+ switch (pax_handle_fetch_fault(regs)) {
30898+ case 2:
30899+ return;
30900+ }
30901+#endif
30902+
30903+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
30904+ do_group_exit(SIGKILL);
30905+ }
30906+#endif
30907+
30908 /* Kernel addresses are always protection faults: */
30909 if (address >= TASK_SIZE)
30910 error_code |= PF_PROT;
30911@@ -856,7 +968,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
30912 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
30913 printk(KERN_ERR
30914 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
30915- tsk->comm, tsk->pid, address);
30916+ tsk->comm, task_pid_nr(tsk), address);
30917 code = BUS_MCEERR_AR;
30918 }
30919 #endif
30920@@ -910,6 +1022,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
30921 return 1;
30922 }
30923
30924+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
30925+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
30926+{
30927+ pte_t *pte;
30928+ pmd_t *pmd;
30929+ spinlock_t *ptl;
30930+ unsigned char pte_mask;
30931+
30932+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
30933+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
30934+ return 0;
30935+
30936+ /* PaX: it's our fault, let's handle it if we can */
30937+
30938+ /* PaX: take a look at read faults before acquiring any locks */
30939+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
30940+ /* instruction fetch attempt from a protected page in user mode */
30941+ up_read(&mm->mmap_sem);
30942+
30943+#ifdef CONFIG_PAX_EMUTRAMP
30944+ switch (pax_handle_fetch_fault(regs)) {
30945+ case 2:
30946+ return 1;
30947+ }
30948+#endif
30949+
30950+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
30951+ do_group_exit(SIGKILL);
30952+ }
30953+
30954+ pmd = pax_get_pmd(mm, address);
30955+ if (unlikely(!pmd))
30956+ return 0;
30957+
30958+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
30959+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
30960+ pte_unmap_unlock(pte, ptl);
30961+ return 0;
30962+ }
30963+
30964+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
30965+ /* write attempt to a protected page in user mode */
30966+ pte_unmap_unlock(pte, ptl);
30967+ return 0;
30968+ }
30969+
30970+#ifdef CONFIG_SMP
30971+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
30972+#else
30973+ if (likely(address > get_limit(regs->cs)))
30974+#endif
30975+ {
30976+ set_pte(pte, pte_mkread(*pte));
30977+ __flush_tlb_one(address);
30978+ pte_unmap_unlock(pte, ptl);
30979+ up_read(&mm->mmap_sem);
30980+ return 1;
30981+ }
30982+
30983+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
30984+
30985+ /*
30986+ * PaX: fill DTLB with user rights and retry
30987+ */
30988+ __asm__ __volatile__ (
30989+ "orb %2,(%1)\n"
30990+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
30991+/*
30992+ * PaX: let this uncommented 'invlpg' remind us of the behaviour of Intel's
30993+ * (and AMD's) TLBs: namely, they do not cache PTEs that would raise *any*
30994+ * page fault when examined during a TLB load attempt. This is true not only
30995+ * for PTEs holding a non-present entry but also for present entries that will
30996+ * raise a page fault (such as those set up by PaX, or the copy-on-write
30997+ * mechanism). In effect it means that we do *not* need to flush the TLBs
30998+ * for our target pages since their PTEs are simply not in the TLBs at all.
30999+ *
31000+ * The best thing about omitting it is that we gain around 15-20% speed in the
31001+ * fast path of the page fault handler and can get rid of tracing since we
31002+ * can no longer flush unintended entries.
31003+ */
31004+ "invlpg (%0)\n"
31005+#endif
31006+ __copyuser_seg"testb $0,(%0)\n"
31007+ "xorb %3,(%1)\n"
31008+ :
31009+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
31010+ : "memory", "cc");
31011+ pte_unmap_unlock(pte, ptl);
31012+ up_read(&mm->mmap_sem);
31013+ return 1;
31014+}
31015+#endif
31016+
31017 /*
31018 * Handle a spurious fault caused by a stale TLB entry.
31019 *
31020@@ -976,6 +1181,9 @@ int show_unhandled_signals = 1;
31021 static inline int
31022 access_error(unsigned long error_code, struct vm_area_struct *vma)
31023 {
31024+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
31025+ return 1;
31026+
31027 if (error_code & PF_WRITE) {
31028 /* write, present and write, not present: */
31029 if (unlikely(!(vma->vm_flags & VM_WRITE)))
31030@@ -1004,7 +1212,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
31031 if (error_code & PF_USER)
31032 return false;
31033
31034- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
31035+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
31036 return false;
31037
31038 return true;
31039@@ -1031,6 +1239,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
31040 /* Get the faulting address: */
31041 address = read_cr2();
31042
31043+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31044+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
31045+ if (!search_exception_tables(regs->ip)) {
31046+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
31047+ bad_area_nosemaphore(regs, error_code, address);
31048+ return;
31049+ }
31050+ if (address < pax_user_shadow_base) {
31051+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
31052+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
31053+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
31054+ } else
31055+ address -= pax_user_shadow_base;
31056+ }
31057+#endif
31058+
31059 /*
31060 * Detect and handle instructions that would cause a page fault for
31061 * both a tracked kernel page and a userspace page.
31062@@ -1110,7 +1334,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
31063 * User-mode registers count as a user access even for any
31064 * potential system fault or CPU buglet:
31065 */
31066- if (user_mode_vm(regs)) {
31067+ if (user_mode(regs)) {
31068 local_irq_enable();
31069 error_code |= PF_USER;
31070 flags |= FAULT_FLAG_USER;
31071@@ -1157,6 +1381,11 @@ retry:
31072 might_sleep();
31073 }
31074
31075+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
31076+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
31077+ return;
31078+#endif
31079+
31080 vma = find_vma(mm, address);
31081 if (unlikely(!vma)) {
31082 bad_area(regs, error_code, address);
31083@@ -1168,18 +1397,24 @@ retry:
31084 bad_area(regs, error_code, address);
31085 return;
31086 }
31087- if (error_code & PF_USER) {
31088- /*
31089- * Accessing the stack below %sp is always a bug.
31090- * The large cushion allows instructions like enter
31091- * and pusha to work. ("enter $65535, $31" pushes
31092- * 32 pointers and then decrements %sp by 65535.)
31093- */
31094- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
31095- bad_area(regs, error_code, address);
31096- return;
31097- }
31098+ /*
31099+ * Accessing the stack below %sp is always a bug.
31100+ * The large cushion allows instructions like enter
31101+ * and pusha to work. ("enter $65535, $31" pushes
31102+ * 32 pointers and then decrements %sp by 65535.)
31103+ */
31104+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
31105+ bad_area(regs, error_code, address);
31106+ return;
31107 }
31108+
31109+#ifdef CONFIG_PAX_SEGMEXEC
31110+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
31111+ bad_area(regs, error_code, address);
31112+ return;
31113+ }
31114+#endif
31115+
31116 if (unlikely(expand_stack(vma, address))) {
31117 bad_area(regs, error_code, address);
31118 return;
31119@@ -1273,3 +1508,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
31120 __do_page_fault(regs, error_code);
31121 exception_exit(prev_state);
31122 }
31123+
31124+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31125+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
31126+{
31127+ struct mm_struct *mm = current->mm;
31128+ unsigned long ip = regs->ip;
31129+
31130+ if (v8086_mode(regs))
31131+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
31132+
31133+#ifdef CONFIG_PAX_PAGEEXEC
31134+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
31135+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
31136+ return true;
31137+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
31138+ return true;
31139+ return false;
31140+ }
31141+#endif
31142+
31143+#ifdef CONFIG_PAX_SEGMEXEC
31144+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
31145+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
31146+ return true;
31147+ return false;
31148+ }
31149+#endif
31150+
31151+ return false;
31152+}
31153+#endif
31154+
31155+#ifdef CONFIG_PAX_EMUTRAMP
31156+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
31157+{
31158+ int err;
31159+
31160+ do { /* PaX: libffi trampoline emulation */
31161+ unsigned char mov, jmp;
31162+ unsigned int addr1, addr2;
31163+
31164+#ifdef CONFIG_X86_64
31165+ if ((regs->ip + 9) >> 32)
31166+ break;
31167+#endif
31168+
31169+ err = get_user(mov, (unsigned char __user *)regs->ip);
31170+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
31171+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
31172+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
31173+
31174+ if (err)
31175+ break;
31176+
31177+ if (mov == 0xB8 && jmp == 0xE9) {
31178+ regs->ax = addr1;
31179+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
31180+ return 2;
31181+ }
31182+ } while (0);
31183+
31184+ do { /* PaX: gcc trampoline emulation #1 */
31185+ unsigned char mov1, mov2;
31186+ unsigned short jmp;
31187+ unsigned int addr1, addr2;
31188+
31189+#ifdef CONFIG_X86_64
31190+ if ((regs->ip + 11) >> 32)
31191+ break;
31192+#endif
31193+
31194+ err = get_user(mov1, (unsigned char __user *)regs->ip);
31195+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
31196+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
31197+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
31198+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
31199+
31200+ if (err)
31201+ break;
31202+
31203+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
31204+ regs->cx = addr1;
31205+ regs->ax = addr2;
31206+ regs->ip = addr2;
31207+ return 2;
31208+ }
31209+ } while (0);
31210+
31211+ do { /* PaX: gcc trampoline emulation #2 */
31212+ unsigned char mov, jmp;
31213+ unsigned int addr1, addr2;
31214+
31215+#ifdef CONFIG_X86_64
31216+ if ((regs->ip + 9) >> 32)
31217+ break;
31218+#endif
31219+
31220+ err = get_user(mov, (unsigned char __user *)regs->ip);
31221+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
31222+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
31223+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
31224+
31225+ if (err)
31226+ break;
31227+
31228+ if (mov == 0xB9 && jmp == 0xE9) {
31229+ regs->cx = addr1;
31230+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
31231+ return 2;
31232+ }
31233+ } while (0);
31234+
31235+ return 1; /* PaX in action */
31236+}
31237+
31238+#ifdef CONFIG_X86_64
31239+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
31240+{
31241+ int err;
31242+
31243+ do { /* PaX: libffi trampoline emulation */
31244+ unsigned short mov1, mov2, jmp1;
31245+ unsigned char stcclc, jmp2;
31246+ unsigned long addr1, addr2;
31247+
31248+ err = get_user(mov1, (unsigned short __user *)regs->ip);
31249+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
31250+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
31251+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
31252+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
31253+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
31254+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
31255+
31256+ if (err)
31257+ break;
31258+
31259+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
31260+ regs->r11 = addr1;
31261+ regs->r10 = addr2;
31262+ if (stcclc == 0xF8)
31263+ regs->flags &= ~X86_EFLAGS_CF;
31264+ else
31265+ regs->flags |= X86_EFLAGS_CF;
31266+ regs->ip = addr1;
31267+ return 2;
31268+ }
31269+ } while (0);
31270+
31271+ do { /* PaX: gcc trampoline emulation #1 */
31272+ unsigned short mov1, mov2, jmp1;
31273+ unsigned char jmp2;
31274+ unsigned int addr1;
31275+ unsigned long addr2;
31276+
31277+ err = get_user(mov1, (unsigned short __user *)regs->ip);
31278+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
31279+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
31280+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
31281+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
31282+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
31283+
31284+ if (err)
31285+ break;
31286+
31287+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
31288+ regs->r11 = addr1;
31289+ regs->r10 = addr2;
31290+ regs->ip = addr1;
31291+ return 2;
31292+ }
31293+ } while (0);
31294+
31295+ do { /* PaX: gcc trampoline emulation #2 */
31296+ unsigned short mov1, mov2, jmp1;
31297+ unsigned char jmp2;
31298+ unsigned long addr1, addr2;
31299+
31300+ err = get_user(mov1, (unsigned short __user *)regs->ip);
31301+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
31302+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
31303+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
31304+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
31305+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
31306+
31307+ if (err)
31308+ break;
31309+
31310+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
31311+ regs->r11 = addr1;
31312+ regs->r10 = addr2;
31313+ regs->ip = addr1;
31314+ return 2;
31315+ }
31316+ } while (0);
31317+
31318+ return 1; /* PaX in action */
31319+}
31320+#endif
31321+
31322+/*
31323+ * PaX: decide what to do with offenders (regs->ip = fault address)
31324+ *
31325+ * returns 1 when task should be killed
31326+ * 2 when gcc trampoline was detected
31327+ */
31328+static int pax_handle_fetch_fault(struct pt_regs *regs)
31329+{
31330+ if (v8086_mode(regs))
31331+ return 1;
31332+
31333+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
31334+ return 1;
31335+
31336+#ifdef CONFIG_X86_32
31337+ return pax_handle_fetch_fault_32(regs);
31338+#else
31339+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
31340+ return pax_handle_fetch_fault_32(regs);
31341+ else
31342+ return pax_handle_fetch_fault_64(regs);
31343+#endif
31344+}
31345+#endif
31346+
31347+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31348+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
31349+{
31350+ long i;
31351+
31352+ printk(KERN_ERR "PAX: bytes at PC: ");
31353+ for (i = 0; i < 20; i++) {
31354+ unsigned char c;
31355+ if (get_user(c, (unsigned char __force_user *)pc+i))
31356+ printk(KERN_CONT "?? ");
31357+ else
31358+ printk(KERN_CONT "%02x ", c);
31359+ }
31360+ printk("\n");
31361+
31362+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
31363+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
31364+ unsigned long c;
31365+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
31366+#ifdef CONFIG_X86_32
31367+ printk(KERN_CONT "???????? ");
31368+#else
31369+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
31370+ printk(KERN_CONT "???????? ???????? ");
31371+ else
31372+ printk(KERN_CONT "???????????????? ");
31373+#endif
31374+ } else {
31375+#ifdef CONFIG_X86_64
31376+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
31377+ printk(KERN_CONT "%08x ", (unsigned int)c);
31378+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
31379+ } else
31380+#endif
31381+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
31382+ }
31383+ }
31384+ printk("\n");
31385+}
31386+#endif
31387+
31388+/**
31389+ * probe_kernel_write(): safely attempt to write to a location
31390+ * @dst: address to write to
31391+ * @src: pointer to the data that shall be written
31392+ * @size: size of the data chunk
31393+ *
31394+ * Safely write to address @dst from the buffer at @src. If a kernel fault
31395+ * happens, handle that and return -EFAULT.
31396+ */
31397+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
31398+{
31399+ long ret;
31400+ mm_segment_t old_fs = get_fs();
31401+
31402+ set_fs(KERNEL_DS);
31403+ pagefault_disable();
31404+ pax_open_kernel();
31405+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
31406+ pax_close_kernel();
31407+ pagefault_enable();
31408+ set_fs(old_fs);
31409+
31410+ return ret ? -EFAULT : 0;
31411+}
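The fetch-fault emulation above pattern-matches the exact byte sequences gcc and libffi emit for stack trampolines; for instance, pax_handle_fetch_fault_32() recognizes 'b8 imm32' (mov $imm32,%eax) followed by 'e9 rel32' (jmp rel32), loads imm32 into regs->ax, and retargets regs->ip instead of executing from the non-executable page. A user-space sketch of that decode step; the trampoline bytes are fabricated for illustration:

#include <stdio.h>
#include <string.h>

/* Fabricated 10-byte libffi-style trampoline:
 *   b8 imm32    mov $imm32, %eax
 *   e9 rel32    jmp rel32
 */
int main(void)
{
	unsigned char tramp[10];
	unsigned int imm = 0xdeadbeef, rel = 0x100;
	unsigned int addr1, addr2;

	tramp[0] = 0xB8; memcpy(&tramp[1], &imm, 4);
	tramp[5] = 0xE9; memcpy(&tramp[6], &rel, 4);

	/* the same decode the fault handler performs via get_user() */
	memcpy(&addr1, &tramp[1], 4);
	memcpy(&addr2, &tramp[6], 4);
	if (tramp[0] == 0xB8 && tramp[5] == 0xE9)
		printf("emulate: ax=%#x, ip=ip+%#x+10\n", addr1, addr2);
	return 0;
}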
31412diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
31413index 0596e8e..9de0b1c 100644
31414--- a/arch/x86/mm/gup.c
31415+++ b/arch/x86/mm/gup.c
31416@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
31417 addr = start;
31418 len = (unsigned long) nr_pages << PAGE_SHIFT;
31419 end = start + len;
31420- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
31421+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
31422 (void __user *)start, len)))
31423 return 0;
31424
31425@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
31426 goto slow_irqon;
31427 #endif
31428
31429+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
31430+ (void __user *)start, len)))
31431+ return 0;
31432+
31433 /*
31434 * XXX: batch / limit 'nr', to avoid large irq off latency
31435 * needs some instrumenting to determine the common sizes used by
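The extra __access_ok() check added to get_user_pages_fast() above makes sure the requested range lies entirely below the user/kernel split before the lockless page-table walk starts. A hedged stand-alone model of such a range check; the limit value is illustrative and the kernel's real check also handles segment limits omitted here:

#include <stdio.h>

static int access_ok_model(unsigned long addr, unsigned long len,
			   unsigned long limit)
{
	return len <= limit && addr <= limit - len;	/* no addr+len wrap */
}

int main(void)
{
	unsigned long limit = 0xc0000000UL;	/* 3GB split, illustrative */

	printf("%d %d\n",
	       access_ok_model(0x10000000UL, 0x1000UL, limit),	/* 1 */
	       access_ok_model(0xbfffffffUL, 0x1000UL, limit));	/* 0: crosses limit */
	return 0;
}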
31436diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
31437index 4500142..53a363c 100644
31438--- a/arch/x86/mm/highmem_32.c
31439+++ b/arch/x86/mm/highmem_32.c
31440@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
31441 idx = type + KM_TYPE_NR*smp_processor_id();
31442 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
31443 BUG_ON(!pte_none(*(kmap_pte-idx)));
31444+
31445+ pax_open_kernel();
31446 set_pte(kmap_pte-idx, mk_pte(page, prot));
31447+ pax_close_kernel();
31448+
31449 arch_flush_lazy_mmu_mode();
31450
31451 return (void *)vaddr;
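pax_open_kernel()/pax_close_kernel(), as wrapped around set_pte() above, briefly lift kernel write protection for a single store and restore it afterwards (on x86 with KERNEXEC this toggles CR0.WP). A rough user-space analogy, with mprotect() standing in for the CR0 toggle and an assumed 4K page size:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t pagesz = 4096;	/* assumed page size for the sketch */
	char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "initial");
	mprotect(p, pagesz, PROT_READ);			/* now "kernel ro" */

	mprotect(p, pagesz, PROT_READ | PROT_WRITE);	/* pax_open_kernel() */
	p[0] = 'I';					/* the one protected store */
	mprotect(p, pagesz, PROT_READ);			/* pax_close_kernel() */

	printf("%s\n", p);
	return 0;
}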
31452diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
31453index 9d980d8..6bbfacb 100644
31454--- a/arch/x86/mm/hugetlbpage.c
31455+++ b/arch/x86/mm/hugetlbpage.c
31456@@ -92,23 +92,30 @@ int pmd_huge_support(void)
31457 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
31458 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
31459 unsigned long addr, unsigned long len,
31460- unsigned long pgoff, unsigned long flags)
31461+ unsigned long pgoff, unsigned long flags, unsigned long offset)
31462 {
31463 struct hstate *h = hstate_file(file);
31464 struct vm_unmapped_area_info info;
31465-
31466+
31467 info.flags = 0;
31468 info.length = len;
31469 info.low_limit = TASK_UNMAPPED_BASE;
31470+
31471+#ifdef CONFIG_PAX_RANDMMAP
31472+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
31473+ info.low_limit += current->mm->delta_mmap;
31474+#endif
31475+
31476 info.high_limit = TASK_SIZE;
31477 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
31478 info.align_offset = 0;
31479+ info.threadstack_offset = offset;
31480 return vm_unmapped_area(&info);
31481 }
31482
31483 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
31484 unsigned long addr0, unsigned long len,
31485- unsigned long pgoff, unsigned long flags)
31486+ unsigned long pgoff, unsigned long flags, unsigned long offset)
31487 {
31488 struct hstate *h = hstate_file(file);
31489 struct vm_unmapped_area_info info;
31490@@ -120,6 +127,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
31491 info.high_limit = current->mm->mmap_base;
31492 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
31493 info.align_offset = 0;
31494+ info.threadstack_offset = offset;
31495 addr = vm_unmapped_area(&info);
31496
31497 /*
31498@@ -132,6 +140,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
31499 VM_BUG_ON(addr != -ENOMEM);
31500 info.flags = 0;
31501 info.low_limit = TASK_UNMAPPED_BASE;
31502+
31503+#ifdef CONFIG_PAX_RANDMMAP
31504+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
31505+ info.low_limit += current->mm->delta_mmap;
31506+#endif
31507+
31508 info.high_limit = TASK_SIZE;
31509 addr = vm_unmapped_area(&info);
31510 }
31511@@ -146,10 +160,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
31512 struct hstate *h = hstate_file(file);
31513 struct mm_struct *mm = current->mm;
31514 struct vm_area_struct *vma;
31515+ unsigned long pax_task_size = TASK_SIZE;
31516+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
31517
31518 if (len & ~huge_page_mask(h))
31519 return -EINVAL;
31520- if (len > TASK_SIZE)
31521+
31522+#ifdef CONFIG_PAX_SEGMEXEC
31523+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
31524+ pax_task_size = SEGMEXEC_TASK_SIZE;
31525+#endif
31526+
31527+ pax_task_size -= PAGE_SIZE;
31528+
31529+ if (len > pax_task_size)
31530 return -ENOMEM;
31531
31532 if (flags & MAP_FIXED) {
31533@@ -158,19 +182,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
31534 return addr;
31535 }
31536
31537+#ifdef CONFIG_PAX_RANDMMAP
31538+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
31539+#endif
31540+
31541 if (addr) {
31542 addr = ALIGN(addr, huge_page_size(h));
31543 vma = find_vma(mm, addr);
31544- if (TASK_SIZE - len >= addr &&
31545- (!vma || addr + len <= vma->vm_start))
31546+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
31547 return addr;
31548 }
31549 if (mm->get_unmapped_area == arch_get_unmapped_area)
31550 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
31551- pgoff, flags);
31552+ pgoff, flags, offset);
31553 else
31554 return hugetlb_get_unmapped_area_topdown(file, addr, len,
31555- pgoff, flags);
31556+ pgoff, flags, offset);
31557 }
31558
31559 #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
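With MF_PAX_RANDMMAP set, the hunks above raise the bottom-up search floor by the per-mm delta_mmap, so even hugetlb mappings start at a randomized base. A tiny sketch of that adjustment; both constants are illustrative, not the kernel's real values:

#include <stdio.h>

#define TASK_UNMAPPED_BASE 0x40000000UL	/* illustrative i386-like value */

int main(void)
{
	unsigned long delta_mmap = 0x00512000UL;	/* per-mm random, page-aligned */
	unsigned long low_limit = TASK_UNMAPPED_BASE;

	low_limit += delta_mmap;	/* the MF_PAX_RANDMMAP adjustment */
	printf("bottom-up search starts at %#lx instead of %#lx\n",
	       low_limit, TASK_UNMAPPED_BASE);
	return 0;
}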
31560diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
31561index f971306..e83e0f6 100644
31562--- a/arch/x86/mm/init.c
31563+++ b/arch/x86/mm/init.c
31564@@ -4,6 +4,7 @@
31565 #include <linux/swap.h>
31566 #include <linux/memblock.h>
31567 #include <linux/bootmem.h> /* for max_low_pfn */
31568+#include <linux/tboot.h>
31569
31570 #include <asm/cacheflush.h>
31571 #include <asm/e820.h>
31572@@ -17,6 +18,8 @@
31573 #include <asm/proto.h>
31574 #include <asm/dma.h> /* for MAX_DMA_PFN */
31575 #include <asm/microcode.h>
31576+#include <asm/desc.h>
31577+#include <asm/bios_ebda.h>
31578
31579 #include "mm_internal.h"
31580
31581@@ -563,7 +566,18 @@ void __init init_mem_mapping(void)
31582 early_ioremap_page_table_range_init();
31583 #endif
31584
31585+#ifdef CONFIG_PAX_PER_CPU_PGD
31586+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
31587+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
31588+ KERNEL_PGD_PTRS);
31589+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
31590+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
31591+ KERNEL_PGD_PTRS);
31592+ load_cr3(get_cpu_pgd(0, kernel));
31593+#else
31594 load_cr3(swapper_pg_dir);
31595+#endif
31596+
31597 __flush_tlb_all();
31598
31599 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
31600@@ -579,10 +593,40 @@ void __init init_mem_mapping(void)
31601 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
31602 * mmio resources as well as potential bios/acpi data regions.
31603 */
31604+
31605+#ifdef CONFIG_GRKERNSEC_KMEM
31606+static unsigned int ebda_start __read_only;
31607+static unsigned int ebda_end __read_only;
31608+#endif
31609+
31610 int devmem_is_allowed(unsigned long pagenr)
31611 {
31612- if (pagenr < 256)
31613+#ifdef CONFIG_GRKERNSEC_KMEM
31614+ /* allow BDA */
31615+ if (!pagenr)
31616 return 1;
31617+ /* allow EBDA */
31618+ if (pagenr >= ebda_start && pagenr < ebda_end)
31619+ return 1;
31620+ /* if tboot is in use, allow access to its hardcoded serial log range */
31621+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
31622+ return 1;
31623+#else
31624+ if (!pagenr)
31625+ return 1;
31626+#ifdef CONFIG_VM86
31627+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
31628+ return 1;
31629+#endif
31630+#endif
31631+
31632+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
31633+ return 1;
31634+#ifdef CONFIG_GRKERNSEC_KMEM
31635+ /* throw out everything else below 1MB */
31636+ if (pagenr <= 256)
31637+ return 0;
31638+#endif
31639 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
31640 return 0;
31641 if (!page_is_ram(pagenr))
31642@@ -628,8 +672,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
31643 #endif
31644 }
31645
31646+#ifdef CONFIG_GRKERNSEC_KMEM
31647+static inline void gr_init_ebda(void)
31648+{
31649+ unsigned int ebda_addr;
31650+ unsigned int ebda_size = 0;
31651+
31652+ ebda_addr = get_bios_ebda();
31653+ if (ebda_addr) {
31654+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
31655+ ebda_size <<= 10;
31656+ }
31657+ if (ebda_addr && ebda_size) {
31658+ ebda_start = ebda_addr >> PAGE_SHIFT;
31659+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
31660+ } else {
31661+ ebda_start = 0x9f000 >> PAGE_SHIFT;
31662+ ebda_end = 0xa0000 >> PAGE_SHIFT;
31663+ }
31664+}
31665+#else
31666+static inline void gr_init_ebda(void) { }
31667+#endif
31668+
31669 void free_initmem(void)
31670 {
31671+#ifdef CONFIG_PAX_KERNEXEC
31672+#ifdef CONFIG_X86_32
31673+ /* PaX: limit KERNEL_CS to actual size */
31674+ unsigned long addr, limit;
31675+ struct desc_struct d;
31676+ int cpu;
31677+#else
31678+ pgd_t *pgd;
31679+ pud_t *pud;
31680+ pmd_t *pmd;
31681+ unsigned long addr, end;
31682+#endif
31683+#endif
31684+
31685+ gr_init_ebda();
31686+
31687+#ifdef CONFIG_PAX_KERNEXEC
31688+#ifdef CONFIG_X86_32
31689+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
31690+ limit = (limit - 1UL) >> PAGE_SHIFT;
31691+
31692+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
31693+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
31694+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
31695+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
31696+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
31697+ }
31698+
31699+ /* PaX: make KERNEL_CS read-only */
31700+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
31701+ if (!paravirt_enabled())
31702+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
31703+/*
31704+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
31705+ pgd = pgd_offset_k(addr);
31706+ pud = pud_offset(pgd, addr);
31707+ pmd = pmd_offset(pud, addr);
31708+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
31709+ }
31710+*/
31711+#ifdef CONFIG_X86_PAE
31712+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
31713+/*
31714+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
31715+ pgd = pgd_offset_k(addr);
31716+ pud = pud_offset(pgd, addr);
31717+ pmd = pmd_offset(pud, addr);
31718+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
31719+ }
31720+*/
31721+#endif
31722+
31723+#ifdef CONFIG_MODULES
31724+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
31725+#endif
31726+
31727+#else
31728+ /* PaX: make kernel code/rodata read-only, rest non-executable */
31729+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
31730+ pgd = pgd_offset_k(addr);
31731+ pud = pud_offset(pgd, addr);
31732+ pmd = pmd_offset(pud, addr);
31733+ if (!pmd_present(*pmd))
31734+ continue;
31735+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
31736+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
31737+ else
31738+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
31739+ }
31740+
31741+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
31742+ end = addr + KERNEL_IMAGE_SIZE;
31743+ for (; addr < end; addr += PMD_SIZE) {
31744+ pgd = pgd_offset_k(addr);
31745+ pud = pud_offset(pgd, addr);
31746+ pmd = pmd_offset(pud, addr);
31747+ if (!pmd_present(*pmd))
31748+ continue;
31749+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
31750+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
31751+ }
31752+#endif
31753+
31754+ flush_tlb_all();
31755+#endif
31756+
31757 free_init_pages("unused kernel",
31758 (unsigned long)(&__init_begin),
31759 (unsigned long)(&__init_end));
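The reworked devmem_is_allowed() above narrows /dev/mem access under GRKERNSEC_KMEM to the BDA, the detected EBDA window, and the ISA hole, rejecting everything else below 1MB. A user-space model of that decision, with the tboot range and the iomem_is_exclusive()/page_is_ram() checks elided and the fallback EBDA bounds standing in for gr_init_ebda()'s result:

#include <stdio.h>

#define PAGE_SHIFT 12
#define ISA_START_PFN (0xa0000UL >> PAGE_SHIFT)
#define ISA_END_PFN   (0x100000UL >> PAGE_SHIFT)

static unsigned long ebda_start = 0x9f000UL >> PAGE_SHIFT;	/* fallback bounds */
static unsigned long ebda_end   = 0xa0000UL >> PAGE_SHIFT;

static int devmem_allowed_model(unsigned long pagenr)
{
	if (!pagenr)						/* BDA */
		return 1;
	if (pagenr >= ebda_start && pagenr < ebda_end)		/* EBDA */
		return 1;
	if (pagenr >= ISA_START_PFN && pagenr < ISA_END_PFN)	/* ISA hole */
		return 1;
	if (pagenr <= 256)					/* rest below 1MB */
		return 0;
	return 1;	/* iomem_is_exclusive()/page_is_ram() checks elided */
}

int main(void)
{
	printf("page 0x0: %d, page 0x9f: %d, page 0x50: %d\n",
	       devmem_allowed_model(0),
	       devmem_allowed_model(0x9f),
	       devmem_allowed_model(0x50));
	return 0;
}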
31760diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
31761index 4287f1f..3b99c71 100644
31762--- a/arch/x86/mm/init_32.c
31763+++ b/arch/x86/mm/init_32.c
31764@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
31765 bool __read_mostly __vmalloc_start_set = false;
31766
31767 /*
31768- * Creates a middle page table and puts a pointer to it in the
31769- * given global directory entry. This only returns the gd entry
31770- * in non-PAE compilation mode, since the middle layer is folded.
31771- */
31772-static pmd_t * __init one_md_table_init(pgd_t *pgd)
31773-{
31774- pud_t *pud;
31775- pmd_t *pmd_table;
31776-
31777-#ifdef CONFIG_X86_PAE
31778- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
31779- pmd_table = (pmd_t *)alloc_low_page();
31780- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
31781- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
31782- pud = pud_offset(pgd, 0);
31783- BUG_ON(pmd_table != pmd_offset(pud, 0));
31784-
31785- return pmd_table;
31786- }
31787-#endif
31788- pud = pud_offset(pgd, 0);
31789- pmd_table = pmd_offset(pud, 0);
31790-
31791- return pmd_table;
31792-}
31793-
31794-/*
31795 * Create a page table and place a pointer to it in a middle page
31796 * directory entry:
31797 */
31798@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
31799 pte_t *page_table = (pte_t *)alloc_low_page();
31800
31801 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
31802+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31803+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
31804+#else
31805 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
31806+#endif
31807 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
31808 }
31809
31810 return pte_offset_kernel(pmd, 0);
31811 }
31812
31813+static pmd_t * __init one_md_table_init(pgd_t *pgd)
31814+{
31815+ pud_t *pud;
31816+ pmd_t *pmd_table;
31817+
31818+ pud = pud_offset(pgd, 0);
31819+ pmd_table = pmd_offset(pud, 0);
31820+
31821+ return pmd_table;
31822+}
31823+
31824 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
31825 {
31826 int pgd_idx = pgd_index(vaddr);
31827@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
31828 int pgd_idx, pmd_idx;
31829 unsigned long vaddr;
31830 pgd_t *pgd;
31831+ pud_t *pud;
31832 pmd_t *pmd;
31833 pte_t *pte = NULL;
31834 unsigned long count = page_table_range_init_count(start, end);
31835@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
31836 pgd = pgd_base + pgd_idx;
31837
31838 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
31839- pmd = one_md_table_init(pgd);
31840- pmd = pmd + pmd_index(vaddr);
31841+ pud = pud_offset(pgd, vaddr);
31842+ pmd = pmd_offset(pud, vaddr);
31843+
31844+#ifdef CONFIG_X86_PAE
31845+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
31846+#endif
31847+
31848 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
31849 pmd++, pmd_idx++) {
31850 pte = page_table_kmap_check(one_page_table_init(pmd),
31851@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
31852 }
31853 }
31854
31855-static inline int is_kernel_text(unsigned long addr)
31856+static inline int is_kernel_text(unsigned long start, unsigned long end)
31857 {
31858- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
31859- return 1;
31860- return 0;
31861+ if ((start > ktla_ktva((unsigned long)_etext) ||
31862+ end <= ktla_ktva((unsigned long)_stext)) &&
31863+ (start > ktla_ktva((unsigned long)_einittext) ||
31864+ end <= ktla_ktva((unsigned long)_sinittext)) &&
31865+
31866+#ifdef CONFIG_ACPI_SLEEP
31867+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
31868+#endif
31869+
31870+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
31871+ return 0;
31872+ return 1;
31873 }
31874
31875 /*
31876@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
31877 unsigned long last_map_addr = end;
31878 unsigned long start_pfn, end_pfn;
31879 pgd_t *pgd_base = swapper_pg_dir;
31880- int pgd_idx, pmd_idx, pte_ofs;
31881+ unsigned int pgd_idx, pmd_idx, pte_ofs;
31882 unsigned long pfn;
31883 pgd_t *pgd;
31884+ pud_t *pud;
31885 pmd_t *pmd;
31886 pte_t *pte;
31887 unsigned pages_2m, pages_4k;
31888@@ -291,8 +295,13 @@ repeat:
31889 pfn = start_pfn;
31890 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
31891 pgd = pgd_base + pgd_idx;
31892- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
31893- pmd = one_md_table_init(pgd);
31894+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
31895+ pud = pud_offset(pgd, 0);
31896+ pmd = pmd_offset(pud, 0);
31897+
31898+#ifdef CONFIG_X86_PAE
31899+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
31900+#endif
31901
31902 if (pfn >= end_pfn)
31903 continue;
31904@@ -304,14 +313,13 @@ repeat:
31905 #endif
31906 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
31907 pmd++, pmd_idx++) {
31908- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
31909+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
31910
31911 /*
31912 * Map with big pages if possible, otherwise
31913 * create normal page tables:
31914 */
31915 if (use_pse) {
31916- unsigned int addr2;
31917 pgprot_t prot = PAGE_KERNEL_LARGE;
31918 /*
31919 * first pass will use the same initial
31920@@ -322,11 +330,7 @@ repeat:
31921 _PAGE_PSE);
31922
31923 pfn &= PMD_MASK >> PAGE_SHIFT;
31924- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
31925- PAGE_OFFSET + PAGE_SIZE-1;
31926-
31927- if (is_kernel_text(addr) ||
31928- is_kernel_text(addr2))
31929+ if (is_kernel_text(address, address + PMD_SIZE))
31930 prot = PAGE_KERNEL_LARGE_EXEC;
31931
31932 pages_2m++;
31933@@ -343,7 +347,7 @@ repeat:
31934 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
31935 pte += pte_ofs;
31936 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
31937- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
31938+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
31939 pgprot_t prot = PAGE_KERNEL;
31940 /*
31941 * first pass will use the same initial
31942@@ -351,7 +355,7 @@ repeat:
31943 */
31944 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
31945
31946- if (is_kernel_text(addr))
31947+ if (is_kernel_text(address, address + PAGE_SIZE))
31948 prot = PAGE_KERNEL_EXEC;
31949
31950 pages_4k++;
31951@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
31952
31953 pud = pud_offset(pgd, va);
31954 pmd = pmd_offset(pud, va);
31955- if (!pmd_present(*pmd))
31956+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
31957 break;
31958
31959 /* should not be large page here */
31960@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
31961
31962 static void __init pagetable_init(void)
31963 {
31964- pgd_t *pgd_base = swapper_pg_dir;
31965-
31966- permanent_kmaps_init(pgd_base);
31967+ permanent_kmaps_init(swapper_pg_dir);
31968 }
31969
31970-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
31971+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
31972 EXPORT_SYMBOL_GPL(__supported_pte_mask);
31973
31974 /* user-defined highmem size */
31975@@ -787,10 +789,10 @@ void __init mem_init(void)
31976 ((unsigned long)&__init_end -
31977 (unsigned long)&__init_begin) >> 10,
31978
31979- (unsigned long)&_etext, (unsigned long)&_edata,
31980- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
31981+ (unsigned long)&_sdata, (unsigned long)&_edata,
31982+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
31983
31984- (unsigned long)&_text, (unsigned long)&_etext,
31985+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
31986 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
31987
31988 /*
31989@@ -880,6 +882,7 @@ void set_kernel_text_rw(void)
31990 if (!kernel_set_to_readonly)
31991 return;
31992
31993+ start = ktla_ktva(start);
31994 pr_debug("Set kernel text: %lx - %lx for read write\n",
31995 start, start+size);
31996
31997@@ -894,6 +897,7 @@ void set_kernel_text_ro(void)
31998 if (!kernel_set_to_readonly)
31999 return;
32000
32001+ start = ktla_ktva(start);
32002 pr_debug("Set kernel text: %lx - %lx for read only\n",
32003 start, start+size);
32004
32005@@ -922,6 +926,7 @@ void mark_rodata_ro(void)
32006 unsigned long start = PFN_ALIGN(_text);
32007 unsigned long size = PFN_ALIGN(_etext) - start;
32008
32009+ start = ktla_ktva(start);
32010 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
32011 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
32012 size >> 10);
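The new range-based is_kernel_text(start, end) above treats a mapping as kernel text when it intersects any protected interval (text, inittext, ACPI wakeup code, the sub-1MB BIOS window), using the usual not-disjoint test. A compact sketch of that interval check with illustrative bounds:

#include <stdio.h>

static int overlaps(unsigned long start, unsigned long end,
		    unsigned long lo, unsigned long hi)
{
	return !(start > hi || end <= lo);	/* the not-disjoint form used above */
}

int main(void)
{
	unsigned long stext = 0xc1000000UL, etext = 0xc1400000UL;	/* illustrative */

	printf("inside: %d, outside: %d\n",
	       overlaps(0xc1200000UL, 0xc1201000UL, stext, etext),
	       overlaps(0xc2000000UL, 0xc2001000UL, stext, etext));
	return 0;
}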
32013diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
32014index 104d56a..62ba13f1 100644
32015--- a/arch/x86/mm/init_64.c
32016+++ b/arch/x86/mm/init_64.c
32017@@ -151,7 +151,7 @@ early_param("gbpages", parse_direct_gbpages_on);
32018 * around without checking the pgd every time.
32019 */
32020
32021-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
32022+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
32023 EXPORT_SYMBOL_GPL(__supported_pte_mask);
32024
32025 int force_personality32;
32026@@ -184,12 +184,29 @@ void sync_global_pgds(unsigned long start, unsigned long end)
32027
32028 for (address = start; address <= end; address += PGDIR_SIZE) {
32029 const pgd_t *pgd_ref = pgd_offset_k(address);
32030+
32031+#ifdef CONFIG_PAX_PER_CPU_PGD
32032+ unsigned long cpu;
32033+#else
32034 struct page *page;
32035+#endif
32036
32037 if (pgd_none(*pgd_ref))
32038 continue;
32039
32040 spin_lock(&pgd_lock);
32041+
32042+#ifdef CONFIG_PAX_PER_CPU_PGD
32043+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
32044+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
32045+
32046+ if (pgd_none(*pgd))
32047+ set_pgd(pgd, *pgd_ref);
32048+ else
32049+ BUG_ON(pgd_page_vaddr(*pgd)
32050+ != pgd_page_vaddr(*pgd_ref));
32051+ pgd = pgd_offset_cpu(cpu, kernel, address);
32052+#else
32053 list_for_each_entry(page, &pgd_list, lru) {
32054 pgd_t *pgd;
32055 spinlock_t *pgt_lock;
32056@@ -198,6 +215,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
32057 /* the pgt_lock only for Xen */
32058 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
32059 spin_lock(pgt_lock);
32060+#endif
32061
32062 if (pgd_none(*pgd))
32063 set_pgd(pgd, *pgd_ref);
32064@@ -205,7 +223,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
32065 BUG_ON(pgd_page_vaddr(*pgd)
32066 != pgd_page_vaddr(*pgd_ref));
32067
32068+#ifndef CONFIG_PAX_PER_CPU_PGD
32069 spin_unlock(pgt_lock);
32070+#endif
32071+
32072 }
32073 spin_unlock(&pgd_lock);
32074 }
32075@@ -238,7 +259,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
32076 {
32077 if (pgd_none(*pgd)) {
32078 pud_t *pud = (pud_t *)spp_getpage();
32079- pgd_populate(&init_mm, pgd, pud);
32080+ pgd_populate_kernel(&init_mm, pgd, pud);
32081 if (pud != pud_offset(pgd, 0))
32082 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
32083 pud, pud_offset(pgd, 0));
32084@@ -250,7 +271,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
32085 {
32086 if (pud_none(*pud)) {
32087 pmd_t *pmd = (pmd_t *) spp_getpage();
32088- pud_populate(&init_mm, pud, pmd);
32089+ pud_populate_kernel(&init_mm, pud, pmd);
32090 if (pmd != pmd_offset(pud, 0))
32091 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
32092 pmd, pmd_offset(pud, 0));
32093@@ -279,7 +300,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
32094 pmd = fill_pmd(pud, vaddr);
32095 pte = fill_pte(pmd, vaddr);
32096
32097+ pax_open_kernel();
32098 set_pte(pte, new_pte);
32099+ pax_close_kernel();
32100
32101 /*
32102 * It's enough to flush this one mapping.
32103@@ -338,14 +361,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
32104 pgd = pgd_offset_k((unsigned long)__va(phys));
32105 if (pgd_none(*pgd)) {
32106 pud = (pud_t *) spp_getpage();
32107- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
32108- _PAGE_USER));
32109+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
32110 }
32111 pud = pud_offset(pgd, (unsigned long)__va(phys));
32112 if (pud_none(*pud)) {
32113 pmd = (pmd_t *) spp_getpage();
32114- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
32115- _PAGE_USER));
32116+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
32117 }
32118 pmd = pmd_offset(pud, phys);
32119 BUG_ON(!pmd_none(*pmd));
32120@@ -586,7 +607,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
32121 prot);
32122
32123 spin_lock(&init_mm.page_table_lock);
32124- pud_populate(&init_mm, pud, pmd);
32125+ pud_populate_kernel(&init_mm, pud, pmd);
32126 spin_unlock(&init_mm.page_table_lock);
32127 }
32128 __flush_tlb_all();
32129@@ -627,7 +648,7 @@ kernel_physical_mapping_init(unsigned long start,
32130 page_size_mask);
32131
32132 spin_lock(&init_mm.page_table_lock);
32133- pgd_populate(&init_mm, pgd, pud);
32134+ pgd_populate_kernel(&init_mm, pgd, pud);
32135 spin_unlock(&init_mm.page_table_lock);
32136 pgd_changed = true;
32137 }
32138@@ -1188,8 +1209,8 @@ int kern_addr_valid(unsigned long addr)
32139 static struct vm_area_struct gate_vma = {
32140 .vm_start = VSYSCALL_START,
32141 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
32142- .vm_page_prot = PAGE_READONLY_EXEC,
32143- .vm_flags = VM_READ | VM_EXEC
32144+ .vm_page_prot = PAGE_READONLY,
32145+ .vm_flags = VM_READ
32146 };
32147
32148 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
32149@@ -1223,7 +1244,7 @@ int in_gate_area_no_mm(unsigned long addr)
32150
32151 const char *arch_vma_name(struct vm_area_struct *vma)
32152 {
32153- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
32154+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
32155 return "[vdso]";
32156 if (vma == &gate_vma)
32157 return "[vsyscall]";
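Under PAX_PER_CPU_PGD, sync_global_pgds() above no longer walks pgd_list; it copies the reference entry into each CPU's private user and kernel PGD whenever the slot is still empty. A simplified model of that propagation; types and dimensions are stand-ins, not the kernel's:

#include <stdio.h>

#define NR_CPUS 4

typedef unsigned long pgd_t;	/* simplified stand-in */

static pgd_t cpu_pgd[NR_CPUS][2];	/* [cpu][0=user, 1=kernel] */

static void sync_entry(pgd_t ref)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		for (int kind = 0; kind < 2; kind++)
			if (!cpu_pgd[cpu][kind])	/* pgd_none() analogue */
				cpu_pgd[cpu][kind] = ref;
}

int main(void)
{
	sync_entry(0x1234000UL);
	printf("cpu0: user=%#lx kernel=%#lx\n", cpu_pgd[0][0], cpu_pgd[0][1]);
	return 0;
}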
32158diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
32159index 7b179b4..6bd17777 100644
32160--- a/arch/x86/mm/iomap_32.c
32161+++ b/arch/x86/mm/iomap_32.c
32162@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
32163 type = kmap_atomic_idx_push();
32164 idx = type + KM_TYPE_NR * smp_processor_id();
32165 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
32166+
32167+ pax_open_kernel();
32168 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
32169+ pax_close_kernel();
32170+
32171 arch_flush_lazy_mmu_mode();
32172
32173 return (void *)vaddr;
32174diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
32175index 799580c..72f9fe0 100644
32176--- a/arch/x86/mm/ioremap.c
32177+++ b/arch/x86/mm/ioremap.c
32178@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
32179 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
32180 int is_ram = page_is_ram(pfn);
32181
32182- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
32183+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
32184 return NULL;
32185 WARN_ON_ONCE(is_ram);
32186 }
32187@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
32188 *
32189 * Caller must ensure there is only one unmapping for the same pointer.
32190 */
32191-void iounmap(volatile void __iomem *addr)
32192+void iounmap(const volatile void __iomem *addr)
32193 {
32194 struct vm_struct *p, *o;
32195
32196@@ -310,6 +310,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
32197
32198 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
32199 if (page_is_ram(start >> PAGE_SHIFT))
32200+#ifdef CONFIG_HIGHMEM
32201+ if ((start >> PAGE_SHIFT) < max_low_pfn)
32202+#endif
32203 return __va(phys);
32204
32205 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
32206@@ -322,6 +325,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
32207 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
32208 {
32209 if (page_is_ram(phys >> PAGE_SHIFT))
32210+#ifdef CONFIG_HIGHMEM
32211+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
32212+#endif
32213 return;
32214
32215 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
32216@@ -339,7 +345,7 @@ static int __init early_ioremap_debug_setup(char *str)
32217 early_param("early_ioremap_debug", early_ioremap_debug_setup);
32218
32219 static __initdata int after_paging_init;
32220-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
32221+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
32222
32223 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
32224 {
32225@@ -376,8 +382,7 @@ void __init early_ioremap_init(void)
32226 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
32227
32228 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
32229- memset(bm_pte, 0, sizeof(bm_pte));
32230- pmd_populate_kernel(&init_mm, pmd, bm_pte);
32231+ pmd_populate_user(&init_mm, pmd, bm_pte);
32232
32233 /*
32234 * The boot-ioremap range spans multiple pmds, for which
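The CONFIG_HIGHMEM guards added to xlate_dev_mem_ptr() and unxlate_dev_mem_ptr() above ensure __va() is only used for pages in the direct map; highmem RAM has no linear mapping and must go through ioremap_cache(). A sketch of the pfn test with an illustrative lowmem boundary:

#include <stdio.h>

#define PAGE_SHIFT 12

static const unsigned long max_low_pfn = 0x38000;	/* ~896MB lowmem, illustrative */

static const char *xlate_path(unsigned long phys)
{
	return (phys >> PAGE_SHIFT) < max_low_pfn
		? "__va() direct map" : "ioremap_cache()";
}

int main(void)
{
	printf("%#lx -> %s\n", 0x01000000UL, xlate_path(0x01000000UL));
	printf("%#lx -> %s\n", 0x40000000UL, xlate_path(0x40000000UL));
	return 0;
}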
32235diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
32236index d87dd6d..bf3fa66 100644
32237--- a/arch/x86/mm/kmemcheck/kmemcheck.c
32238+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
32239@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
32240 * memory (e.g. tracked pages)? For now, we need this to avoid
32241 * invoking kmemcheck for PnP BIOS calls.
32242 */
32243- if (regs->flags & X86_VM_MASK)
32244+ if (v8086_mode(regs))
32245 return false;
32246- if (regs->cs != __KERNEL_CS)
32247+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
32248 return false;
32249
32250 pte = kmemcheck_pte_lookup(address);
32251diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
32252index 25e7e13..1964579 100644
32253--- a/arch/x86/mm/mmap.c
32254+++ b/arch/x86/mm/mmap.c
32255@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
32256 * Leave an at least ~128 MB hole with possible stack randomization.
32257 */
32258 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
32259-#define MAX_GAP (TASK_SIZE/6*5)
32260+#define MAX_GAP (pax_task_size/6*5)
32261
32262 static int mmap_is_legacy(void)
32263 {
32264@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
32265 return rnd << PAGE_SHIFT;
32266 }
32267
32268-static unsigned long mmap_base(void)
32269+static unsigned long mmap_base(struct mm_struct *mm)
32270 {
32271 unsigned long gap = rlimit(RLIMIT_STACK);
32272+ unsigned long pax_task_size = TASK_SIZE;
32273+
32274+#ifdef CONFIG_PAX_SEGMEXEC
32275+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32276+ pax_task_size = SEGMEXEC_TASK_SIZE;
32277+#endif
32278
32279 if (gap < MIN_GAP)
32280 gap = MIN_GAP;
32281 else if (gap > MAX_GAP)
32282 gap = MAX_GAP;
32283
32284- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
32285+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
32286 }
32287
32288 /*
32289 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
32290 * does, but not when emulating X86_32
32291 */
32292-static unsigned long mmap_legacy_base(void)
32293+static unsigned long mmap_legacy_base(struct mm_struct *mm)
32294 {
32295- if (mmap_is_ia32())
32296+ if (mmap_is_ia32()) {
32297+
32298+#ifdef CONFIG_PAX_SEGMEXEC
32299+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32300+ return SEGMEXEC_TASK_UNMAPPED_BASE;
32301+ else
32302+#endif
32303+
32304 return TASK_UNMAPPED_BASE;
32305- else
32306+ } else
32307 return TASK_UNMAPPED_BASE + mmap_rnd();
32308 }
32309
32310@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
32311 */
32312 void arch_pick_mmap_layout(struct mm_struct *mm)
32313 {
32314- mm->mmap_legacy_base = mmap_legacy_base();
32315- mm->mmap_base = mmap_base();
32316+ mm->mmap_legacy_base = mmap_legacy_base(mm);
32317+ mm->mmap_base = mmap_base(mm);
32318+
32319+#ifdef CONFIG_PAX_RANDMMAP
32320+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
32321+ mm->mmap_legacy_base += mm->delta_mmap;
32322+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
32323+ }
32324+#endif
32325
32326 if (mmap_is_legacy()) {
32327 mm->mmap_base = mm->mmap_legacy_base;
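mmap_base() above clamps the stack gap to [MIN_GAP, MAX_GAP] and subtracts it, along with the mmap_rnd() offset, from the (possibly SEGMEXEC-reduced) task size. A stand-alone model of that arithmetic; the 32-bit values are illustrative and the stack-randomization term of MIN_GAP is omitted:

#include <stdio.h>

#define PAGE_ALIGN(x) (((x) + 0xfffUL) & ~0xfffUL)

int main(void)
{
	unsigned long task_size = 0xc0000000UL;		/* i386 TASK_SIZE, illustrative */
	unsigned long min_gap = 128UL << 20;
	unsigned long max_gap = task_size / 6 * 5;
	unsigned long gap = 8UL << 20;			/* RLIMIT_STACK = 8MB */
	unsigned long rnd = 0x00345000UL;		/* mmap_rnd(), page-aligned */

	if (gap < min_gap)
		gap = min_gap;
	else if (gap > max_gap)
		gap = max_gap;

	printf("mmap_base = %#lx\n", PAGE_ALIGN(task_size - gap - rnd));
	return 0;
}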
32328diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
32329index 0057a7a..95c7edd 100644
32330--- a/arch/x86/mm/mmio-mod.c
32331+++ b/arch/x86/mm/mmio-mod.c
32332@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
32333 break;
32334 default:
32335 {
32336- unsigned char *ip = (unsigned char *)instptr;
32337+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
32338 my_trace->opcode = MMIO_UNKNOWN_OP;
32339 my_trace->width = 0;
32340 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
32341@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
32342 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
32343 void __iomem *addr)
32344 {
32345- static atomic_t next_id;
32346+ static atomic_unchecked_t next_id;
32347 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
32348 /* These are page-unaligned. */
32349 struct mmiotrace_map map = {
32350@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
32351 .private = trace
32352 },
32353 .phys = offset,
32354- .id = atomic_inc_return(&next_id)
32355+ .id = atomic_inc_return_unchecked(&next_id)
32356 };
32357 map.map_id = trace->id;
32358
32359@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
32360 ioremap_trace_core(offset, size, addr);
32361 }
32362
32363-static void iounmap_trace_core(volatile void __iomem *addr)
32364+static void iounmap_trace_core(const volatile void __iomem *addr)
32365 {
32366 struct mmiotrace_map map = {
32367 .phys = 0,
32368@@ -328,7 +328,7 @@ not_enabled:
32369 }
32370 }
32371
32372-void mmiotrace_iounmap(volatile void __iomem *addr)
32373+void mmiotrace_iounmap(const volatile void __iomem *addr)
32374 {
32375 might_sleep();
32376 if (is_enabled()) /* recheck and proper locking in *_core() */
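
The ioremap trace id above becomes an atomic_unchecked_t, PaX's escape hatch from PAX_REFCOUNT: ordinary atomic_t arithmetic is instrumented to trap on overflow (defeating refcount-overflow exploits), so counters that are mere IDs or statistics and may legitimately wrap must use the unchecked variant. A rough user-space analogue of the split (the refusal-to-wrap below stands in for PaX's actual overflow trap):

#include <limits.h>
#include <stdio.h>

typedef struct { int counter; } atomic_t;
typedef struct { int counter; } atomic_unchecked_t;

/* Checked increment: never wraps (PaX would raise a trap and kill the
 * offending task instead of merely refusing). */
static int atomic_inc_return(atomic_t *v)
{
	if (v->counter == INT_MAX) {
		fprintf(stderr, "refcount saturation\n");
		return v->counter;
	}
	return ++v->counter;
}

/* Unchecked increment: wrapping is acceptable for ID counters. */
static int atomic_inc_return_unchecked(atomic_unchecked_t *v)
{
	v->counter = (int)((unsigned int)v->counter + 1);	/* wrap OK */
	return v->counter;
}

int main(void)
{
	atomic_unchecked_t next_id = { 0 };
	atomic_t ref = { INT_MAX };

	printf("id=%d\n", atomic_inc_return_unchecked(&next_id));
	printf("ref=%d\n", atomic_inc_return(&ref));
	return 0;
}
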
32377diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
32378index 24aec58..c39fe8b 100644
32379--- a/arch/x86/mm/numa.c
32380+++ b/arch/x86/mm/numa.c
32381@@ -474,7 +474,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
32382 return true;
32383 }
32384
32385-static int __init numa_register_memblks(struct numa_meminfo *mi)
32386+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
32387 {
32388 unsigned long uninitialized_var(pfn_align);
32389 int i, nid;
32390diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
32391index d0b1773..4c3327c 100644
32392--- a/arch/x86/mm/pageattr-test.c
32393+++ b/arch/x86/mm/pageattr-test.c
32394@@ -36,7 +36,7 @@ enum {
32395
32396 static int pte_testbit(pte_t pte)
32397 {
32398- return pte_flags(pte) & _PAGE_UNUSED1;
32399+ return pte_flags(pte) & _PAGE_CPA_TEST;
32400 }
32401
32402 struct split_state {
32403diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
32404index bb32480..75f2f5e 100644
32405--- a/arch/x86/mm/pageattr.c
32406+++ b/arch/x86/mm/pageattr.c
32407@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
32408 */
32409 #ifdef CONFIG_PCI_BIOS
32410 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
32411- pgprot_val(forbidden) |= _PAGE_NX;
32412+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
32413 #endif
32414
32415 /*
32416@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
32417 * Does not cover __inittext since that is gone later on. On
32418 * 64bit we do not enforce !NX on the low mapping
32419 */
32420- if (within(address, (unsigned long)_text, (unsigned long)_etext))
32421- pgprot_val(forbidden) |= _PAGE_NX;
32422+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
32423+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
32424
32425+#ifdef CONFIG_DEBUG_RODATA
32426 /*
32427 * The .rodata section needs to be read-only. Using the pfn
32428 * catches all aliases.
32429@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
32430 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
32431 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
32432 pgprot_val(forbidden) |= _PAGE_RW;
32433+#endif
32434
32435 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
32436 /*
32437@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
32438 }
32439 #endif
32440
32441+#ifdef CONFIG_PAX_KERNEXEC
32442+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
32443+ pgprot_val(forbidden) |= _PAGE_RW;
32444+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
32445+ }
32446+#endif
32447+
32448 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
32449
32450 return prot;
32451@@ -400,23 +409,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
32452 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
32453 {
32454 /* change init_mm */
32455+ pax_open_kernel();
32456 set_pte_atomic(kpte, pte);
32457+
32458 #ifdef CONFIG_X86_32
32459 if (!SHARED_KERNEL_PMD) {
32460+
32461+#ifdef CONFIG_PAX_PER_CPU_PGD
32462+ unsigned long cpu;
32463+#else
32464 struct page *page;
32465+#endif
32466
32467+#ifdef CONFIG_PAX_PER_CPU_PGD
32468+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
32469+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
32470+#else
32471 list_for_each_entry(page, &pgd_list, lru) {
32472- pgd_t *pgd;
32473+ pgd_t *pgd = (pgd_t *)page_address(page);
32474+#endif
32475+
32476 pud_t *pud;
32477 pmd_t *pmd;
32478
32479- pgd = (pgd_t *)page_address(page) + pgd_index(address);
32480+ pgd += pgd_index(address);
32481 pud = pud_offset(pgd, address);
32482 pmd = pmd_offset(pud, address);
32483 set_pte_atomic((pte_t *)pmd, pte);
32484 }
32485 }
32486 #endif
32487+ pax_close_kernel();
32488 }
32489
32490 static int
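
pax_open_kernel()/pax_close_kernel(), wrapped around the init_mm page-table store above, open a brief window in which otherwise read-only kernel data may be written. On non-paravirt x86 this is done, to the best of my understanding, by toggling CR0.WP with preemption disabled; the kernel-style sketch below shows the idea only, not the patch's exact implementation (which also covers paravirt and PCID details):

/* Sketch only: CR0.WP=0 lets ring 0 write through read-only mappings. */
#define X86_CR0_WP	0x00010000UL

static inline void sketch_pax_open_kernel(void)
{
	preempt_disable();
	barrier();
	write_cr0(read_cr0() & ~X86_CR0_WP);	/* clear WP */
}

static inline void sketch_pax_close_kernel(void)
{
	write_cr0(read_cr0() | X86_CR0_WP);	/* restore WP */
	barrier();
	preempt_enable_no_resched();
}
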
32491diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
32492index 6574388..87e9bef 100644
32493--- a/arch/x86/mm/pat.c
32494+++ b/arch/x86/mm/pat.c
32495@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
32496
32497 if (!entry) {
32498 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
32499- current->comm, current->pid, start, end - 1);
32500+ current->comm, task_pid_nr(current), start, end - 1);
32501 return -EINVAL;
32502 }
32503
32504@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
32505
32506 while (cursor < to) {
32507 if (!devmem_is_allowed(pfn)) {
32508- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
32509- current->comm, from, to - 1);
32510+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
32511+ current->comm, from, to - 1, cursor);
32512 return 0;
32513 }
32514 cursor += PAGE_SIZE;
32515@@ -577,7 +577,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
32516 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
32517 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
32518 "for [mem %#010Lx-%#010Lx]\n",
32519- current->comm, current->pid,
32520+ current->comm, task_pid_nr(current),
32521 cattr_name(flags),
32522 base, (unsigned long long)(base + size-1));
32523 return -EINVAL;
32524@@ -612,7 +612,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
32525 flags = lookup_memtype(paddr);
32526 if (want_flags != flags) {
32527 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
32528- current->comm, current->pid,
32529+ current->comm, task_pid_nr(current),
32530 cattr_name(want_flags),
32531 (unsigned long long)paddr,
32532 (unsigned long long)(paddr + size - 1),
32533@@ -634,7 +634,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
32534 free_memtype(paddr, paddr + size);
32535 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
32536 " for [mem %#010Lx-%#010Lx], got %s\n",
32537- current->comm, current->pid,
32538+ current->comm, task_pid_nr(current),
32539 cattr_name(want_flags),
32540 (unsigned long long)paddr,
32541 (unsigned long long)(paddr + size - 1),
32542diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
32543index 415f6c4..d319983 100644
32544--- a/arch/x86/mm/pat_rbtree.c
32545+++ b/arch/x86/mm/pat_rbtree.c
32546@@ -160,7 +160,7 @@ success:
32547
32548 failure:
32549 printk(KERN_INFO "%s:%d conflicting memory types "
32550- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
32551+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
32552 end, cattr_name(found_type), cattr_name(match->type));
32553 return -EBUSY;
32554 }
32555diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
32556index 9f0614d..92ae64a 100644
32557--- a/arch/x86/mm/pf_in.c
32558+++ b/arch/x86/mm/pf_in.c
32559@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
32560 int i;
32561 enum reason_type rv = OTHERS;
32562
32563- p = (unsigned char *)ins_addr;
32564+ p = (unsigned char *)ktla_ktva(ins_addr);
32565 p += skip_prefix(p, &prf);
32566 p += get_opcode(p, &opcode);
32567
32568@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
32569 struct prefix_bits prf;
32570 int i;
32571
32572- p = (unsigned char *)ins_addr;
32573+ p = (unsigned char *)ktla_ktva(ins_addr);
32574 p += skip_prefix(p, &prf);
32575 p += get_opcode(p, &opcode);
32576
32577@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
32578 struct prefix_bits prf;
32579 int i;
32580
32581- p = (unsigned char *)ins_addr;
32582+ p = (unsigned char *)ktla_ktva(ins_addr);
32583 p += skip_prefix(p, &prf);
32584 p += get_opcode(p, &opcode);
32585
32586@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
32587 struct prefix_bits prf;
32588 int i;
32589
32590- p = (unsigned char *)ins_addr;
32591+ p = (unsigned char *)ktla_ktva(ins_addr);
32592 p += skip_prefix(p, &prf);
32593 p += get_opcode(p, &opcode);
32594 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
32595@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
32596 struct prefix_bits prf;
32597 int i;
32598
32599- p = (unsigned char *)ins_addr;
32600+ p = (unsigned char *)ktla_ktva(ins_addr);
32601 p += skip_prefix(p, &prf);
32602 p += get_opcode(p, &opcode);
32603 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
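
Every instruction-decoder path above now reads code bytes through ktla_ktva() ("kernel text linear address to kernel text virtual address"). Under KERNEXEC on i386 the kernel image is visible at two addresses and must be decoded via the readable alias; on every other configuration the macro has to collapse to the identity, which is why the change is safe tree-wide. A hedged sketch of that shape (the KERNEXEC arithmetic shown is my reading of the i386 mapping, not authoritative):

#ifdef CONFIG_PAX_KERNEXEC
#define ktla_ktva(addr)	((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#else
#define ktla_ktva(addr)	(addr)
#endif

static unsigned char read_insn_byte(unsigned long ins_addr)
{
	const unsigned char *p = (const unsigned char *)ktla_ktva(ins_addr);

	return *p;	/* decode through the readable alias of kernel text */
}
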
32604diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
32605index c96314a..433b127 100644
32606--- a/arch/x86/mm/pgtable.c
32607+++ b/arch/x86/mm/pgtable.c
32608@@ -97,10 +97,71 @@ static inline void pgd_list_del(pgd_t *pgd)
32609 list_del(&page->lru);
32610 }
32611
32612-#define UNSHARED_PTRS_PER_PGD \
32613- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
32614+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32615+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
32616
32617+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
32618+{
32619+ unsigned int count = USER_PGD_PTRS;
32620
32621+ if (!pax_user_shadow_base)
32622+ return;
32623+
32624+ while (count--)
32625+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
32626+}
32627+#endif
32628+
32629+#ifdef CONFIG_PAX_PER_CPU_PGD
32630+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
32631+{
32632+ unsigned int count = USER_PGD_PTRS;
32633+
32634+ while (count--) {
32635+ pgd_t pgd;
32636+
32637+#ifdef CONFIG_X86_64
32638+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
32639+#else
32640+ pgd = *src++;
32641+#endif
32642+
32643+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32644+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
32645+#endif
32646+
32647+ *dst++ = pgd;
32648+ }
32649+
32650+}
32651+#endif
32652+
32653+#ifdef CONFIG_X86_64
32654+#define pxd_t pud_t
32655+#define pyd_t pgd_t
32656+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
32657+#define pgtable_pxd_page_ctor(page) true
32658+#define pgtable_pxd_page_dtor(page)
32659+#define pxd_free(mm, pud) pud_free((mm), (pud))
32660+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
32661+#define pyd_offset(mm, address) pgd_offset((mm), (address))
32662+#define PYD_SIZE PGDIR_SIZE
32663+#else
32664+#define pxd_t pmd_t
32665+#define pyd_t pud_t
32666+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
32667+#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
32668+#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
32669+#define pxd_free(mm, pud) pmd_free((mm), (pud))
32670+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
32671+#define pyd_offset(mm, address) pud_offset((mm), (address))
32672+#define PYD_SIZE PUD_SIZE
32673+#endif
32674+
32675+#ifdef CONFIG_PAX_PER_CPU_PGD
32676+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
32677+static inline void pgd_dtor(pgd_t *pgd) {}
32678+#else
32679 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
32680 {
32681 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
32682@@ -141,6 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
32683 pgd_list_del(pgd);
32684 spin_unlock(&pgd_lock);
32685 }
32686+#endif
32687
32688 /*
32689 * List of all pgd's needed for non-PAE so it can invalidate entries
32690@@ -153,7 +215,7 @@ static void pgd_dtor(pgd_t *pgd)
32691 * -- nyc
32692 */
32693
32694-#ifdef CONFIG_X86_PAE
32695+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
32696 /*
32697 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
32698 * updating the top-level pagetable entries to guarantee the
32699@@ -165,7 +227,7 @@ static void pgd_dtor(pgd_t *pgd)
32700 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
32701 * and initialize the kernel pmds here.
32702 */
32703-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
32704+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
32705
32706 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
32707 {
32708@@ -183,43 +245,45 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
32709 */
32710 flush_tlb_mm(mm);
32711 }
32712+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
32713+#define PREALLOCATED_PXDS USER_PGD_PTRS
32714 #else /* !CONFIG_X86_PAE */
32715
32716 /* No need to prepopulate any pagetable entries in non-PAE modes. */
32717-#define PREALLOCATED_PMDS 0
32718+#define PREALLOCATED_PXDS 0
32719
32720 #endif /* CONFIG_X86_PAE */
32721
32722-static void free_pmds(pmd_t *pmds[])
32723+static void free_pxds(pxd_t *pxds[])
32724 {
32725 int i;
32726
32727- for(i = 0; i < PREALLOCATED_PMDS; i++)
32728- if (pmds[i]) {
32729- pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
32730- free_page((unsigned long)pmds[i]);
32731+ for(i = 0; i < PREALLOCATED_PXDS; i++)
32732+ if (pxds[i]) {
32733+ pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
32734+ free_page((unsigned long)pxds[i]);
32735 }
32736 }
32737
32738-static int preallocate_pmds(pmd_t *pmds[])
32739+static int preallocate_pxds(pxd_t *pxds[])
32740 {
32741 int i;
32742 bool failed = false;
32743
32744- for(i = 0; i < PREALLOCATED_PMDS; i++) {
32745- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
32746- if (!pmd)
32747+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
32748+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
32749+ if (!pxd)
32750 failed = true;
32751- if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
32752- free_page((unsigned long)pmd);
32753- pmd = NULL;
32754+ if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
32755+ free_page((unsigned long)pxd);
32756+ pxd = NULL;
32757 failed = true;
32758 }
32759- pmds[i] = pmd;
32760+ pxds[i] = pxd;
32761 }
32762
32763 if (failed) {
32764- free_pmds(pmds);
32765+ free_pxds(pxds);
32766 return -ENOMEM;
32767 }
32768
32769@@ -232,49 +296,52 @@ static int preallocate_pmds(pmd_t *pmds[])
32770 * preallocate which never got a corresponding vma will need to be
32771 * freed manually.
32772 */
32773-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
32774+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
32775 {
32776 int i;
32777
32778- for(i = 0; i < PREALLOCATED_PMDS; i++) {
32779+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
32780 pgd_t pgd = pgdp[i];
32781
32782 if (pgd_val(pgd) != 0) {
32783- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
32784+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
32785
32786- pgdp[i] = native_make_pgd(0);
32787+ set_pgd(pgdp + i, native_make_pgd(0));
32788
32789- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
32790- pmd_free(mm, pmd);
32791+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
32792+ pxd_free(mm, pxd);
32793 }
32794 }
32795 }
32796
32797-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
32798+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
32799 {
32800- pud_t *pud;
32801+ pyd_t *pyd;
32802 int i;
32803
32804- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
32805+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
32806 return;
32807
32808- pud = pud_offset(pgd, 0);
32809-
32810- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
32811- pmd_t *pmd = pmds[i];
32812+#ifdef CONFIG_X86_64
32813+ pyd = pyd_offset(mm, 0L);
32814+#else
32815+ pyd = pyd_offset(pgd, 0L);
32816+#endif
32817
32818+ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
32819+ pxd_t *pxd = pxds[i];
32820 if (i >= KERNEL_PGD_BOUNDARY)
32821- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
32822- sizeof(pmd_t) * PTRS_PER_PMD);
32823+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
32824+ sizeof(pxd_t) * PTRS_PER_PMD);
32825
32826- pud_populate(mm, pud, pmd);
32827+ pyd_populate(mm, pyd, pxd);
32828 }
32829 }
32830
32831 pgd_t *pgd_alloc(struct mm_struct *mm)
32832 {
32833 pgd_t *pgd;
32834- pmd_t *pmds[PREALLOCATED_PMDS];
32835+ pxd_t *pxds[PREALLOCATED_PXDS];
32836
32837 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
32838
32839@@ -283,11 +350,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
32840
32841 mm->pgd = pgd;
32842
32843- if (preallocate_pmds(pmds) != 0)
32844+ if (preallocate_pxds(pxds) != 0)
32845 goto out_free_pgd;
32846
32847 if (paravirt_pgd_alloc(mm) != 0)
32848- goto out_free_pmds;
32849+ goto out_free_pxds;
32850
32851 /*
32852 * Make sure that pre-populating the pmds is atomic with
32853@@ -297,14 +364,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
32854 spin_lock(&pgd_lock);
32855
32856 pgd_ctor(mm, pgd);
32857- pgd_prepopulate_pmd(mm, pgd, pmds);
32858+ pgd_prepopulate_pxd(mm, pgd, pxds);
32859
32860 spin_unlock(&pgd_lock);
32861
32862 return pgd;
32863
32864-out_free_pmds:
32865- free_pmds(pmds);
32866+out_free_pxds:
32867+ free_pxds(pxds);
32868 out_free_pgd:
32869 free_page((unsigned long)pgd);
32870 out:
32871@@ -313,7 +380,7 @@ out:
32872
32873 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
32874 {
32875- pgd_mop_up_pmds(mm, pgd);
32876+ pgd_mop_up_pxds(mm, pgd);
32877 pgd_dtor(pgd);
32878 paravirt_pgd_free(mm, pgd);
32879 free_page((unsigned long)pgd);
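
The pxd_t/pyd_t macro layer introduced at the top of this file lets a single copy of the preallocation code walk the correct page-table level on both word sizes; per the #ifdef block the mapping is:

	level macro	x86_64		i386/PAE
	pxd_t		pud_t		pmd_t
	pyd_t		pgd_t		pud_t
	pyd_offset	pgd_offset	pud_offset
	PYD_SIZE	PGDIR_SIZE	PUD_SIZE

so preallocate_pxds()/pgd_prepopulate_pxd() preallocate PUDs under the PGD on 64-bit (where PAX_PER_CPU_PGD pre-populates USER_PGD_PTRS entries) and keep the old PMD-under-PUD behaviour on 32-bit PAE.
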
32880diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
32881index a69bcb8..19068ab 100644
32882--- a/arch/x86/mm/pgtable_32.c
32883+++ b/arch/x86/mm/pgtable_32.c
32884@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
32885 return;
32886 }
32887 pte = pte_offset_kernel(pmd, vaddr);
32888+
32889+ pax_open_kernel();
32890 if (pte_val(pteval))
32891 set_pte_at(&init_mm, vaddr, pte, pteval);
32892 else
32893 pte_clear(&init_mm, vaddr, pte);
32894+ pax_close_kernel();
32895
32896 /*
32897 * It's enough to flush this one mapping.
32898diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
32899index e666cbb..61788c45 100644
32900--- a/arch/x86/mm/physaddr.c
32901+++ b/arch/x86/mm/physaddr.c
32902@@ -10,7 +10,7 @@
32903 #ifdef CONFIG_X86_64
32904
32905 #ifdef CONFIG_DEBUG_VIRTUAL
32906-unsigned long __phys_addr(unsigned long x)
32907+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
32908 {
32909 unsigned long y = x - __START_KERNEL_map;
32910
32911@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
32912 #else
32913
32914 #ifdef CONFIG_DEBUG_VIRTUAL
32915-unsigned long __phys_addr(unsigned long x)
32916+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
32917 {
32918 unsigned long phys_addr = x - PAGE_OFFSET;
32919 /* VMALLOC_* aren't constants */
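
The __intentional_overflow(-1) marker added to __phys_addr() belongs to grsecurity's size_overflow GCC plugin, which instruments integer arithmetic and traps on unexpected wraparound; the attribute exempts functions whose arithmetic wraps by design (the -1 designating the return value). On builds without the plugin the marker must expand to nothing so these declarations still compile. A hedged sketch of that fallback (the guard macro below is a stand-in, not the patch's actual config symbol):

#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...) \
	__attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)
#endif

unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x);
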
32920diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
32921index 90555bf..f5f1828 100644
32922--- a/arch/x86/mm/setup_nx.c
32923+++ b/arch/x86/mm/setup_nx.c
32924@@ -5,8 +5,10 @@
32925 #include <asm/pgtable.h>
32926 #include <asm/proto.h>
32927
32928+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
32929 static int disable_nx;
32930
32931+#ifndef CONFIG_PAX_PAGEEXEC
32932 /*
32933 * noexec = on|off
32934 *
32935@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
32936 return 0;
32937 }
32938 early_param("noexec", noexec_setup);
32939+#endif
32940+
32941+#endif
32942
32943 void x86_configure_nx(void)
32944 {
32945+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
32946 if (cpu_has_nx && !disable_nx)
32947 __supported_pte_mask |= _PAGE_NX;
32948 else
32949+#endif
32950 __supported_pte_mask &= ~_PAGE_NX;
32951 }
32952
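
With PAX_PAGEEXEC enabled the hunk above compiles the noexec= parameter out entirely, so NX enforcement can no longer be disabled from the kernel command line, and it confines the disable_nx machinery to builds that can express NX at all (x86_64 or 32-bit PAE). The `_PAGE_NX & __supported_pte_mask` idiom recurring throughout this patch exists for the same reason; a small sketch of why the mask matters:

/* Sketch: _PAGE_NX is bit 63 of a PTE, which exists only with 64-bit
 * page-table entries (x86_64 or PAE) and only when the CPU advertises
 * NX.  Masking with the supported-bits mask turns the OR into a no-op
 * on NX-less hardware instead of corrupting the entry.
 */
typedef unsigned long long pteval_t;

#define _PAGE_NX	(1ULL << 63)

static pteval_t forbid_exec(pteval_t forbidden, pteval_t supported_pte_mask)
{
	return forbidden | (_PAGE_NX & supported_pte_mask);
}
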
32953diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
32954index ae699b3..f1b2ad2 100644
32955--- a/arch/x86/mm/tlb.c
32956+++ b/arch/x86/mm/tlb.c
32957@@ -48,7 +48,11 @@ void leave_mm(int cpu)
32958 BUG();
32959 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
32960 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
32961+
32962+#ifndef CONFIG_PAX_PER_CPU_PGD
32963 load_cr3(swapper_pg_dir);
32964+#endif
32965+
32966 }
32967 }
32968 EXPORT_SYMBOL_GPL(leave_mm);
32969diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
32970new file mode 100644
32971index 0000000..dace51c
32972--- /dev/null
32973+++ b/arch/x86/mm/uderef_64.c
32974@@ -0,0 +1,37 @@
32975+#include <linux/mm.h>
32976+#include <asm/pgtable.h>
32977+#include <asm/uaccess.h>
32978+
32979+#ifdef CONFIG_PAX_MEMORY_UDEREF
32980+/* PaX: due to the special call convention these functions must
32981+ * - remain leaf functions under all configurations,
32982+ * - never be called directly, only dereferenced from the wrappers.
32983+ */
32984+void __pax_open_userland(void)
32985+{
32986+ unsigned int cpu;
32987+
32988+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
32989+ return;
32990+
32991+ cpu = raw_get_cpu();
32992+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
32993+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
32994+ raw_put_cpu_no_resched();
32995+}
32996+EXPORT_SYMBOL(__pax_open_userland);
32997+
32998+void __pax_close_userland(void)
32999+{
33000+ unsigned int cpu;
33001+
33002+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
33003+ return;
33004+
33005+ cpu = raw_get_cpu();
33006+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
33007+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
33008+ raw_put_cpu_no_resched();
33009+}
33010+EXPORT_SYMBOL(__pax_close_userland);
33011+#endif
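
The new uderef_64.c supplies the PAX_MEMORY_UDEREF open/close pair for amd64: each CPU carries two page-table roots, a kernel PGD with user mappings stripped (see __shadow_user_pgds in pgtable.c above) and a user PGD, and a legitimate user-space access temporarily switches to the latter with a tagged CR3 write. PCID_NOFLUSH keeps the switch cheap because each PCID retains its own TLB entries. A sketch of the CR3 encoding being relied on (bit positions per the Intel SDM; the PCID values are assumptions consistent with the BUG_ON checks above):

/* Sketch: with CR4.PCIDE set, CR3 bits 11:0 carry the PCID and bit 63
 * asks the CPU not to flush that PCID's TLB entries on the write.
 */
#define PCID_KERNEL	0x0UL
#define PCID_USER	0x1UL
#define PCID_NOFLUSH	(1UL << 63)

static unsigned long make_cr3(unsigned long pgd_pa, unsigned long pcid,
			      int noflush)
{
	return pgd_pa | pcid | (noflush ? PCID_NOFLUSH : 0);
}
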
33012diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
33013index 877b9a1..a8ecf42 100644
33014--- a/arch/x86/net/bpf_jit.S
33015+++ b/arch/x86/net/bpf_jit.S
33016@@ -9,6 +9,7 @@
33017 */
33018 #include <linux/linkage.h>
33019 #include <asm/dwarf2.h>
33020+#include <asm/alternative-asm.h>
33021
33022 /*
33023 * Calling convention :
33024@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
33025 jle bpf_slow_path_word
33026 mov (SKBDATA,%rsi),%eax
33027 bswap %eax /* ntohl() */
33028+ pax_force_retaddr
33029 ret
33030
33031 sk_load_half:
33032@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
33033 jle bpf_slow_path_half
33034 movzwl (SKBDATA,%rsi),%eax
33035 rol $8,%ax # ntohs()
33036+ pax_force_retaddr
33037 ret
33038
33039 sk_load_byte:
33040@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
33041 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
33042 jle bpf_slow_path_byte
33043 movzbl (SKBDATA,%rsi),%eax
33044+ pax_force_retaddr
33045 ret
33046
33047 /**
33048@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
33049 movzbl (SKBDATA,%rsi),%ebx
33050 and $15,%bl
33051 shl $2,%bl
33052+ pax_force_retaddr
33053 ret
33054
33055 /* rsi contains offset and can be scratched */
33056@@ -109,6 +114,7 @@ bpf_slow_path_word:
33057 js bpf_error
33058 mov -12(%rbp),%eax
33059 bswap %eax
33060+ pax_force_retaddr
33061 ret
33062
33063 bpf_slow_path_half:
33064@@ -117,12 +123,14 @@ bpf_slow_path_half:
33065 mov -12(%rbp),%ax
33066 rol $8,%ax
33067 movzwl %ax,%eax
33068+ pax_force_retaddr
33069 ret
33070
33071 bpf_slow_path_byte:
33072 bpf_slow_path_common(1)
33073 js bpf_error
33074 movzbl -12(%rbp),%eax
33075+ pax_force_retaddr
33076 ret
33077
33078 bpf_slow_path_byte_msh:
33079@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
33080 and $15,%al
33081 shl $2,%al
33082 xchg %eax,%ebx
33083+ pax_force_retaddr
33084 ret
33085
33086 #define sk_negative_common(SIZE) \
33087@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
33088 sk_negative_common(4)
33089 mov (%rax), %eax
33090 bswap %eax
33091+ pax_force_retaddr
33092 ret
33093
33094 bpf_slow_path_half_neg:
33095@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
33096 mov (%rax),%ax
33097 rol $8,%ax
33098 movzwl %ax,%eax
33099+ pax_force_retaddr
33100 ret
33101
33102 bpf_slow_path_byte_neg:
33103@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
33104 .globl sk_load_byte_negative_offset
33105 sk_negative_common(1)
33106 movzbl (%rax), %eax
33107+ pax_force_retaddr
33108 ret
33109
33110 bpf_slow_path_byte_msh_neg:
33111@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
33112 and $15,%al
33113 shl $2,%al
33114 xchg %eax,%ebx
33115+ pax_force_retaddr
33116 ret
33117
33118 bpf_error:
33119@@ -197,4 +210,5 @@ bpf_error:
33120 xor %eax,%eax
33121 mov -8(%rbp),%rbx
33122 leaveq
33123+ pax_force_retaddr
33124 ret
33125diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
33126index 4ed75dd..8dfe0d5 100644
33127--- a/arch/x86/net/bpf_jit_comp.c
33128+++ b/arch/x86/net/bpf_jit_comp.c
33129@@ -50,13 +50,90 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
33130 return ptr + len;
33131 }
33132
33133+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33134+#define MAX_INSTR_CODE_SIZE 96
33135+#else
33136+#define MAX_INSTR_CODE_SIZE 64
33137+#endif
33138+
33139 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
33140
33141 #define EMIT1(b1) EMIT(b1, 1)
33142 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
33143 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
33144 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
33145+
33146+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33147+/* original constant will appear in ecx */
33148+#define DILUTE_CONST_SEQUENCE(_off, _key) \
33149+do { \
33150+ /* mov ecx, randkey */ \
33151+ EMIT1(0xb9); \
33152+ EMIT(_key, 4); \
33153+ /* xor ecx, randkey ^ off */ \
33154+ EMIT2(0x81, 0xf1); \
33155+ EMIT((_key) ^ (_off), 4); \
33156+} while (0)
33157+
33158+#define EMIT1_off32(b1, _off) \
33159+do { \
33160+ switch (b1) { \
33161+ case 0x05: /* add eax, imm32 */ \
33162+ case 0x2d: /* sub eax, imm32 */ \
33163+ case 0x25: /* and eax, imm32 */ \
33164+ case 0x0d: /* or eax, imm32 */ \
33165+ case 0xb8: /* mov eax, imm32 */ \
33166+ case 0x35: /* xor eax, imm32 */ \
33167+ case 0x3d: /* cmp eax, imm32 */ \
33168+ case 0xa9: /* test eax, imm32 */ \
33169+ DILUTE_CONST_SEQUENCE(_off, randkey); \
33170+ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
33171+ break; \
33172+ case 0xbb: /* mov ebx, imm32 */ \
33173+ DILUTE_CONST_SEQUENCE(_off, randkey); \
33174+ /* mov ebx, ecx */ \
33175+ EMIT2(0x89, 0xcb); \
33176+ break; \
33177+ case 0xbe: /* mov esi, imm32 */ \
33178+ DILUTE_CONST_SEQUENCE(_off, randkey); \
33179+ /* mov esi, ecx */ \
33180+ EMIT2(0x89, 0xce); \
33181+ break; \
33182+ case 0xe8: /* call rel imm32, always to known funcs */ \
33183+ EMIT1(b1); \
33184+ EMIT(_off, 4); \
33185+ break; \
33186+ case 0xe9: /* jmp rel imm32 */ \
33187+ EMIT1(b1); \
33188+ EMIT(_off, 4); \
33189+ /* prevent fall-through, we're not called if off = 0 */ \
33190+ EMIT(0xcccccccc, 4); \
33191+ EMIT(0xcccccccc, 4); \
33192+ break; \
33193+ default: \
33194+ BUILD_BUG(); \
33195+ } \
33196+} while (0)
33197+
33198+#define EMIT2_off32(b1, b2, _off) \
33199+do { \
33200+ if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
33201+ EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
33202+ EMIT(randkey, 4); \
33203+ EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
33204+ EMIT((_off) - randkey, 4); \
33205+ } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
33206+ DILUTE_CONST_SEQUENCE(_off, randkey); \
33207+ /* imul eax, ecx */ \
33208+ EMIT3(0x0f, 0xaf, 0xc1); \
33209+ } else { \
33210+ BUILD_BUG(); \
33211+ } \
33212+} while (0)
33213+#else
33214 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
33215+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
33216+#endif
33217
33218 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
33219 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
33220@@ -91,6 +168,24 @@ do { \
33221 #define X86_JBE 0x76
33222 #define X86_JA 0x77
33223
33224+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33225+#define APPEND_FLOW_VERIFY() \
33226+do { \
33227+ /* mov ecx, randkey */ \
33228+ EMIT1(0xb9); \
33229+ EMIT(randkey, 4); \
33230+ /* cmp ecx, randkey */ \
33231+ EMIT2(0x81, 0xf9); \
33232+ EMIT(randkey, 4); \
33233+ /* jz after 8 int 3s */ \
33234+ EMIT2(0x74, 0x08); \
33235+ EMIT(0xcccccccc, 4); \
33236+ EMIT(0xcccccccc, 4); \
33237+} while (0)
33238+#else
33239+#define APPEND_FLOW_VERIFY() do { } while (0)
33240+#endif
33241+
33242 #define EMIT_COND_JMP(op, offset) \
33243 do { \
33244 if (is_near(offset)) \
33245@@ -98,6 +193,7 @@ do { \
33246 else { \
33247 EMIT2(0x0f, op + 0x10); \
33248 EMIT(offset, 4); /* jxx .+off32 */ \
33249+ APPEND_FLOW_VERIFY(); \
33250 } \
33251 } while (0)
33252
33253@@ -145,55 +241,54 @@ static int pkt_type_offset(void)
33254 return -1;
33255 }
33256
33257-struct bpf_binary_header {
33258- unsigned int pages;
33259- /* Note : for security reasons, bpf code will follow a randomly
33260- * sized amount of int3 instructions
33261- */
33262- u8 image[];
33263-};
33264-
33265-static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
33266+/* Note : for security reasons, bpf code will follow a randomly
33267+ * sized amount of int3 instructions
33268+ */
33269+static u8 *bpf_alloc_binary(unsigned int proglen,
33270 u8 **image_ptr)
33271 {
33272 unsigned int sz, hole;
33273- struct bpf_binary_header *header;
33274+ u8 *header;
33275
33276 /* Most of BPF filters are really small,
33277 * but if some of them fill a page, allow at least
33278 * 128 extra bytes to insert a random section of int3
33279 */
33280- sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE);
33281- header = module_alloc(sz);
33282+ sz = round_up(proglen + 128, PAGE_SIZE);
33283+ header = module_alloc_exec(sz);
33284 if (!header)
33285 return NULL;
33286
33287+ pax_open_kernel();
33288 memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
33289+ pax_close_kernel();
33290
33291- header->pages = sz / PAGE_SIZE;
33292- hole = sz - (proglen + sizeof(*header));
33293+ hole = PAGE_SIZE - (proglen & ~PAGE_MASK);
33294
33295 /* insert a random number of int3 instructions before BPF code */
33296- *image_ptr = &header->image[prandom_u32() % hole];
33297+ *image_ptr = &header[prandom_u32() % hole];
33298 return header;
33299 }
33300
33301 void bpf_jit_compile(struct sk_filter *fp)
33302 {
33303- u8 temp[64];
33304+ u8 temp[MAX_INSTR_CODE_SIZE];
33305 u8 *prog;
33306 unsigned int proglen, oldproglen = 0;
33307 int ilen, i;
33308 int t_offset, f_offset;
33309 u8 t_op, f_op, seen = 0, pass;
33310 u8 *image = NULL;
33311- struct bpf_binary_header *header = NULL;
33312+ u8 *header = NULL;
33313 u8 *func;
33314 int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
33315 unsigned int cleanup_addr; /* epilogue code offset */
33316 unsigned int *addrs;
33317 const struct sock_filter *filter = fp->insns;
33318 int flen = fp->len;
33319+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33320+ unsigned int randkey;
33321+#endif
33322
33323 if (!bpf_jit_enable)
33324 return;
33325@@ -202,11 +297,15 @@ void bpf_jit_compile(struct sk_filter *fp)
33326 if (addrs == NULL)
33327 return;
33328
33329+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33330+ randkey = get_random_int();
33331+#endif
33332+
33333 /* Before first pass, make a rough estimation of addrs[]
33334- * each bpf instruction is translated to less than 64 bytes
33335+ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
33336 */
33337 for (proglen = 0, i = 0; i < flen; i++) {
33338- proglen += 64;
33339+ proglen += MAX_INSTR_CODE_SIZE;
33340 addrs[i] = proglen;
33341 }
33342 cleanup_addr = proglen; /* epilogue address */
33343@@ -317,10 +416,8 @@ void bpf_jit_compile(struct sk_filter *fp)
33344 case BPF_S_ALU_MUL_K: /* A *= K */
33345 if (is_imm8(K))
33346 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
33347- else {
33348- EMIT2(0x69, 0xc0); /* imul imm32,%eax */
33349- EMIT(K, 4);
33350- }
33351+ else
33352+ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
33353 break;
33354 case BPF_S_ALU_DIV_X: /* A /= X; */
33355 seen |= SEEN_XREG;
33356@@ -364,7 +461,11 @@ void bpf_jit_compile(struct sk_filter *fp)
33357 break;
33358 }
33359 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
33360+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33361+ DILUTE_CONST_SEQUENCE(K, randkey);
33362+#else
33363 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
33364+#endif
33365 EMIT2(0xf7, 0xf1); /* div %ecx */
33366 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
33367 break;
33368@@ -372,7 +473,11 @@ void bpf_jit_compile(struct sk_filter *fp)
33369 if (K == 1)
33370 break;
33371 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
33372+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33373+ DILUTE_CONST_SEQUENCE(K, randkey);
33374+#else
33375 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
33376+#endif
33377 EMIT2(0xf7, 0xf1); /* div %ecx */
33378 break;
33379 case BPF_S_ALU_AND_X:
33380@@ -643,8 +748,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
33381 if (is_imm8(K)) {
33382 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
33383 } else {
33384- EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
33385- EMIT(K, 4);
33386+ EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
33387 }
33388 } else {
33389 EMIT2(0x89,0xde); /* mov %ebx,%esi */
33390@@ -734,10 +838,12 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
33391 if (unlikely(proglen + ilen > oldproglen)) {
33392 pr_err("bpb_jit_compile fatal error\n");
33393 kfree(addrs);
33394- module_free(NULL, header);
33395+ module_free_exec(NULL, image);
33396 return;
33397 }
33398+ pax_open_kernel();
33399 memcpy(image + proglen, temp, ilen);
33400+ pax_close_kernel();
33401 }
33402 proglen += ilen;
33403 addrs[i] = proglen;
33404@@ -770,7 +876,6 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
33405
33406 if (image) {
33407 bpf_flush_icache(header, image + proglen);
33408- set_memory_ro((unsigned long)header, header->pages);
33409 fp->bpf_func = (void *)image;
33410 }
33411 out:
33412@@ -782,10 +887,9 @@ static void bpf_jit_free_deferred(struct work_struct *work)
33413 {
33414 struct sk_filter *fp = container_of(work, struct sk_filter, work);
33415 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
33416- struct bpf_binary_header *header = (void *)addr;
33417
33418- set_memory_rw(addr, header->pages);
33419- module_free(NULL, header);
33420+ set_memory_rw(addr, 1);
33421+ module_free_exec(NULL, (void *)addr);
33422 kfree(fp);
33423 }
33424
33425diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
33426index 5d04be5..2beeaa2 100644
33427--- a/arch/x86/oprofile/backtrace.c
33428+++ b/arch/x86/oprofile/backtrace.c
33429@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
33430 struct stack_frame_ia32 *fp;
33431 unsigned long bytes;
33432
33433- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
33434+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
33435 if (bytes != 0)
33436 return NULL;
33437
33438- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
33439+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
33440
33441 oprofile_add_trace(bufhead[0].return_address);
33442
33443@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
33444 struct stack_frame bufhead[2];
33445 unsigned long bytes;
33446
33447- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
33448+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
33449 if (bytes != 0)
33450 return NULL;
33451
33452@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
33453 {
33454 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
33455
33456- if (!user_mode_vm(regs)) {
33457+ if (!user_mode(regs)) {
33458 unsigned long stack = kernel_stack_pointer(regs);
33459 if (depth)
33460 dump_trace(NULL, regs, (unsigned long *)stack, 0,
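
The backtrace fixes cast user frame pointers with __force_user/__force_kernel before handing them to copy_from_user_nmi(). To my reading these are grsecurity's shorthand for sparse's `__force __user`/`__force __kernel` combinations, making deliberate address-space crossings explicit to static checking while compiling away under gcc; user_mode() replaces user_mode_vm() because this patch set folds VM86 handling into the former. A hedged sketch of how such annotations degrade outside sparse:

/* Sketch: visible to sparse (__CHECKER__), invisible to gcc. */
#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
# define __force_user	__force __user
#else
# define __user
# define __force
# define __force_user
#endif

/* A deliberate kernel->user pointer reinterpretation, checker-clean: */
static const char __user *as_user_ptr(const void *p)
{
	return (const char __force_user *)p;
}
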
33461diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
33462index 6890d84..1dad1f1 100644
33463--- a/arch/x86/oprofile/nmi_int.c
33464+++ b/arch/x86/oprofile/nmi_int.c
33465@@ -23,6 +23,7 @@
33466 #include <asm/nmi.h>
33467 #include <asm/msr.h>
33468 #include <asm/apic.h>
33469+#include <asm/pgtable.h>
33470
33471 #include "op_counter.h"
33472 #include "op_x86_model.h"
33473@@ -774,8 +775,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
33474 if (ret)
33475 return ret;
33476
33477- if (!model->num_virt_counters)
33478- model->num_virt_counters = model->num_counters;
33479+ if (!model->num_virt_counters) {
33480+ pax_open_kernel();
33481+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
33482+ pax_close_kernel();
33483+ }
33484
33485 mux_init(ops);
33486
33487diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
33488index 50d86c0..7985318 100644
33489--- a/arch/x86/oprofile/op_model_amd.c
33490+++ b/arch/x86/oprofile/op_model_amd.c
33491@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
33492 num_counters = AMD64_NUM_COUNTERS;
33493 }
33494
33495- op_amd_spec.num_counters = num_counters;
33496- op_amd_spec.num_controls = num_counters;
33497- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
33498+ pax_open_kernel();
33499+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
33500+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
33501+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
33502+ pax_close_kernel();
33503
33504 return 0;
33505 }
33506diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
33507index d90528e..0127e2b 100644
33508--- a/arch/x86/oprofile/op_model_ppro.c
33509+++ b/arch/x86/oprofile/op_model_ppro.c
33510@@ -19,6 +19,7 @@
33511 #include <asm/msr.h>
33512 #include <asm/apic.h>
33513 #include <asm/nmi.h>
33514+#include <asm/pgtable.h>
33515
33516 #include "op_x86_model.h"
33517 #include "op_counter.h"
33518@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
33519
33520 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
33521
33522- op_arch_perfmon_spec.num_counters = num_counters;
33523- op_arch_perfmon_spec.num_controls = num_counters;
33524+ pax_open_kernel();
33525+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
33526+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
33527+ pax_close_kernel();
33528 }
33529
33530 static int arch_perfmon_init(struct oprofile_operations *ignore)
33531diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
33532index 71e8a67..6a313bb 100644
33533--- a/arch/x86/oprofile/op_x86_model.h
33534+++ b/arch/x86/oprofile/op_x86_model.h
33535@@ -52,7 +52,7 @@ struct op_x86_model_spec {
33536 void (*switch_ctrl)(struct op_x86_model_spec const *model,
33537 struct op_msrs const * const msrs);
33538 #endif
33539-};
33540+} __do_const;
33541
33542 struct op_counter_config;
33543
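
op_x86_model_spec joins the structures tagged __do_const, the constify-plugin marker that forces function-pointer-bearing structs into read-only memory so they cannot be rewritten at run time. The few legitimate writers, op_amd_init() and arch_perfmon_setup_counters() above, then cast the member non-const inside a pax_open_kernel()/pax_close_kernel() window. A sketch of the pattern from the writer's side (names abbreviated from the hunks):

struct op_spec {
	unsigned int num_counters;
	int (*init)(void);
} __do_const;			/* constify plugin: placed in .rodata */

static struct op_spec op_spec = { .num_counters = 4 };

static void late_fixup(unsigned int n)
{
	pax_open_kernel();
	*(unsigned int *)&op_spec.num_counters = n;	/* single audited store */
	pax_close_kernel();
}
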
33544diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
33545index 51384ca..a25f51e 100644
33546--- a/arch/x86/pci/intel_mid_pci.c
33547+++ b/arch/x86/pci/intel_mid_pci.c
33548@@ -241,7 +241,7 @@ int __init intel_mid_pci_init(void)
33549 pr_info("Intel MID platform detected, using MID PCI ops\n");
33550 pci_mmcfg_late_init();
33551 pcibios_enable_irq = intel_mid_pci_irq_enable;
33552- pci_root_ops = intel_mid_pci_ops;
33553+ memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
33554 pci_soc_mode = 1;
33555 /* Continue with standard init */
33556 return 1;
33557diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
33558index 372e9b8..e775a6c 100644
33559--- a/arch/x86/pci/irq.c
33560+++ b/arch/x86/pci/irq.c
33561@@ -50,7 +50,7 @@ struct irq_router {
33562 struct irq_router_handler {
33563 u16 vendor;
33564 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
33565-};
33566+} __do_const;
33567
33568 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
33569 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
33570@@ -794,7 +794,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
33571 return 0;
33572 }
33573
33574-static __initdata struct irq_router_handler pirq_routers[] = {
33575+static __initconst const struct irq_router_handler pirq_routers[] = {
33576 { PCI_VENDOR_ID_INTEL, intel_router_probe },
33577 { PCI_VENDOR_ID_AL, ali_router_probe },
33578 { PCI_VENDOR_ID_ITE, ite_router_probe },
33579@@ -821,7 +821,7 @@ static struct pci_dev *pirq_router_dev;
33580 static void __init pirq_find_router(struct irq_router *r)
33581 {
33582 struct irq_routing_table *rt = pirq_table;
33583- struct irq_router_handler *h;
33584+ const struct irq_router_handler *h;
33585
33586 #ifdef CONFIG_PCI_BIOS
33587 if (!rt->signature) {
33588@@ -1094,7 +1094,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
33589 return 0;
33590 }
33591
33592-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
33593+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
33594 {
33595 .callback = fix_broken_hp_bios_irq9,
33596 .ident = "HP Pavilion N5400 Series Laptop",
33597diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
33598index c77b24a..c979855 100644
33599--- a/arch/x86/pci/pcbios.c
33600+++ b/arch/x86/pci/pcbios.c
33601@@ -79,7 +79,7 @@ union bios32 {
33602 static struct {
33603 unsigned long address;
33604 unsigned short segment;
33605-} bios32_indirect = { 0, __KERNEL_CS };
33606+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
33607
33608 /*
33609 * Returns the entry point for the given service, NULL on error
33610@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
33611 unsigned long length; /* %ecx */
33612 unsigned long entry; /* %edx */
33613 unsigned long flags;
33614+ struct desc_struct d, *gdt;
33615
33616 local_irq_save(flags);
33617- __asm__("lcall *(%%edi); cld"
33618+
33619+ gdt = get_cpu_gdt_table(smp_processor_id());
33620+
33621+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
33622+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
33623+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
33624+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
33625+
33626+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
33627 : "=a" (return_code),
33628 "=b" (address),
33629 "=c" (length),
33630 "=d" (entry)
33631 : "0" (service),
33632 "1" (0),
33633- "D" (&bios32_indirect));
33634+ "D" (&bios32_indirect),
33635+ "r"(__PCIBIOS_DS)
33636+ : "memory");
33637+
33638+ pax_open_kernel();
33639+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
33640+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
33641+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
33642+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
33643+ pax_close_kernel();
33644+
33645 local_irq_restore(flags);
33646
33647 switch (return_code) {
33648- case 0:
33649- return address + entry;
33650- case 0x80: /* Not present */
33651- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
33652- return 0;
33653- default: /* Shouldn't happen */
33654- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
33655- service, return_code);
33656+ case 0: {
33657+ int cpu;
33658+ unsigned char flags;
33659+
33660+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
33661+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
33662+ printk(KERN_WARNING "bios32_service: not valid\n");
33663 return 0;
33664+ }
33665+ address = address + PAGE_OFFSET;
33666+ length += 16UL; /* some BIOSs underreport this... */
33667+ flags = 4;
33668+ if (length >= 64*1024*1024) {
33669+ length >>= PAGE_SHIFT;
33670+ flags |= 8;
33671+ }
33672+
33673+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
33674+ gdt = get_cpu_gdt_table(cpu);
33675+ pack_descriptor(&d, address, length, 0x9b, flags);
33676+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
33677+ pack_descriptor(&d, address, length, 0x93, flags);
33678+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
33679+ }
33680+ return entry;
33681+ }
33682+ case 0x80: /* Not present */
33683+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
33684+ return 0;
33685+ default: /* Shouldn't happen */
33686+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
33687+ service, return_code);
33688+ return 0;
33689 }
33690 }
33691
33692 static struct {
33693 unsigned long address;
33694 unsigned short segment;
33695-} pci_indirect = { 0, __KERNEL_CS };
33696+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
33697
33698-static int pci_bios_present;
33699+static int pci_bios_present __read_only;
33700
33701 static int check_pcibios(void)
33702 {
33703@@ -131,11 +174,13 @@ static int check_pcibios(void)
33704 unsigned long flags, pcibios_entry;
33705
33706 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
33707- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
33708+ pci_indirect.address = pcibios_entry;
33709
33710 local_irq_save(flags);
33711- __asm__(
33712- "lcall *(%%edi); cld\n\t"
33713+ __asm__("movw %w6, %%ds\n\t"
33714+ "lcall *%%ss:(%%edi); cld\n\t"
33715+ "push %%ss\n\t"
33716+ "pop %%ds\n\t"
33717 "jc 1f\n\t"
33718 "xor %%ah, %%ah\n"
33719 "1:"
33720@@ -144,7 +189,8 @@ static int check_pcibios(void)
33721 "=b" (ebx),
33722 "=c" (ecx)
33723 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
33724- "D" (&pci_indirect)
33725+ "D" (&pci_indirect),
33726+ "r" (__PCIBIOS_DS)
33727 : "memory");
33728 local_irq_restore(flags);
33729
33730@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33731
33732 switch (len) {
33733 case 1:
33734- __asm__("lcall *(%%esi); cld\n\t"
33735+ __asm__("movw %w6, %%ds\n\t"
33736+ "lcall *%%ss:(%%esi); cld\n\t"
33737+ "push %%ss\n\t"
33738+ "pop %%ds\n\t"
33739 "jc 1f\n\t"
33740 "xor %%ah, %%ah\n"
33741 "1:"
33742@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33743 : "1" (PCIBIOS_READ_CONFIG_BYTE),
33744 "b" (bx),
33745 "D" ((long)reg),
33746- "S" (&pci_indirect));
33747+ "S" (&pci_indirect),
33748+ "r" (__PCIBIOS_DS));
33749 /*
33750 * Zero-extend the result beyond 8 bits, do not trust the
33751 * BIOS having done it:
33752@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33753 *value &= 0xff;
33754 break;
33755 case 2:
33756- __asm__("lcall *(%%esi); cld\n\t"
33757+ __asm__("movw %w6, %%ds\n\t"
33758+ "lcall *%%ss:(%%esi); cld\n\t"
33759+ "push %%ss\n\t"
33760+ "pop %%ds\n\t"
33761 "jc 1f\n\t"
33762 "xor %%ah, %%ah\n"
33763 "1:"
33764@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33765 : "1" (PCIBIOS_READ_CONFIG_WORD),
33766 "b" (bx),
33767 "D" ((long)reg),
33768- "S" (&pci_indirect));
33769+ "S" (&pci_indirect),
33770+ "r" (__PCIBIOS_DS));
33771 /*
33772 * Zero-extend the result beyond 16 bits, do not trust the
33773 * BIOS having done it:
33774@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33775 *value &= 0xffff;
33776 break;
33777 case 4:
33778- __asm__("lcall *(%%esi); cld\n\t"
33779+ __asm__("movw %w6, %%ds\n\t"
33780+ "lcall *%%ss:(%%esi); cld\n\t"
33781+ "push %%ss\n\t"
33782+ "pop %%ds\n\t"
33783 "jc 1f\n\t"
33784 "xor %%ah, %%ah\n"
33785 "1:"
33786@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33787 : "1" (PCIBIOS_READ_CONFIG_DWORD),
33788 "b" (bx),
33789 "D" ((long)reg),
33790- "S" (&pci_indirect));
33791+ "S" (&pci_indirect),
33792+ "r" (__PCIBIOS_DS));
33793 break;
33794 }
33795
33796@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
33797
33798 switch (len) {
33799 case 1:
33800- __asm__("lcall *(%%esi); cld\n\t"
33801+ __asm__("movw %w6, %%ds\n\t"
33802+ "lcall *%%ss:(%%esi); cld\n\t"
33803+ "push %%ss\n\t"
33804+ "pop %%ds\n\t"
33805 "jc 1f\n\t"
33806 "xor %%ah, %%ah\n"
33807 "1:"
33808@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
33809 "c" (value),
33810 "b" (bx),
33811 "D" ((long)reg),
33812- "S" (&pci_indirect));
33813+ "S" (&pci_indirect),
33814+ "r" (__PCIBIOS_DS));
33815 break;
33816 case 2:
33817- __asm__("lcall *(%%esi); cld\n\t"
33818+ __asm__("movw %w6, %%ds\n\t"
33819+ "lcall *%%ss:(%%esi); cld\n\t"
33820+ "push %%ss\n\t"
33821+ "pop %%ds\n\t"
33822 "jc 1f\n\t"
33823 "xor %%ah, %%ah\n"
33824 "1:"
33825@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
33826 "c" (value),
33827 "b" (bx),
33828 "D" ((long)reg),
33829- "S" (&pci_indirect));
33830+ "S" (&pci_indirect),
33831+ "r" (__PCIBIOS_DS));
33832 break;
33833 case 4:
33834- __asm__("lcall *(%%esi); cld\n\t"
33835+ __asm__("movw %w6, %%ds\n\t"
33836+ "lcall *%%ss:(%%esi); cld\n\t"
33837+ "push %%ss\n\t"
33838+ "pop %%ds\n\t"
33839 "jc 1f\n\t"
33840 "xor %%ah, %%ah\n"
33841 "1:"
33842@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
33843 "c" (value),
33844 "b" (bx),
33845 "D" ((long)reg),
33846- "S" (&pci_indirect));
33847+ "S" (&pci_indirect),
33848+ "r" (__PCIBIOS_DS));
33849 break;
33850 }
33851
33852@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
33853
33854 DBG("PCI: Fetching IRQ routing table... ");
33855 __asm__("push %%es\n\t"
33856+ "movw %w8, %%ds\n\t"
33857 "push %%ds\n\t"
33858 "pop %%es\n\t"
33859- "lcall *(%%esi); cld\n\t"
33860+ "lcall *%%ss:(%%esi); cld\n\t"
33861 "pop %%es\n\t"
33862+ "push %%ss\n\t"
33863+ "pop %%ds\n"
33864 "jc 1f\n\t"
33865 "xor %%ah, %%ah\n"
33866 "1:"
33867@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
33868 "1" (0),
33869 "D" ((long) &opt),
33870 "S" (&pci_indirect),
33871- "m" (opt)
33872+ "m" (opt),
33873+ "r" (__PCIBIOS_DS)
33874 : "memory");
33875 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
33876 if (ret & 0xff00)
33877@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
33878 {
33879 int ret;
33880
33881- __asm__("lcall *(%%esi); cld\n\t"
33882+ __asm__("movw %w5, %%ds\n\t"
33883+ "lcall *%%ss:(%%esi); cld\n\t"
33884+ "push %%ss\n\t"
33885+ "pop %%ds\n"
33886 "jc 1f\n\t"
33887 "xor %%ah, %%ah\n"
33888 "1:"
33889@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
33890 : "0" (PCIBIOS_SET_PCI_HW_INT),
33891 "b" ((dev->bus->number << 8) | dev->devfn),
33892 "c" ((irq << 8) | (pin + 10)),
33893- "S" (&pci_indirect));
33894+ "S" (&pci_indirect),
33895+ "r" (__PCIBIOS_DS));
33896 return !(ret & 0xff00);
33897 }
33898 EXPORT_SYMBOL(pcibios_set_irq_routing);
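
The pcbios.c rework closes one of the larger KERNEXEC/UDEREF holes: calls into the 32-bit PCI BIOS used to run on the flat __KERNEL_CS/__KERNEL_DS selectors, giving BIOS code the run of the whole kernel. The patch instead builds dedicated __PCIBIOS_CS/__PCIBIOS_DS descriptors whose base and limit cover only the BIOS image reported by bios32_service(), reloads %ds around every lcall, and returns the entry point as an offset into that segment rather than a flat address. A user-space sketch of the 8-byte descriptor packing performed by the pack_descriptor() calls above (field layout per the Intel SDM; the base value below is hypothetical, the type/flag bytes are the ones used in the hunk):

#include <stdint.h>
#include <stdio.h>

/* limit 15:0 | base 23:0 | access | limit 19:16 + flags | base 31:24 */
static uint64_t pack_desc(uint32_t base, uint32_t limit,
			  uint8_t type, uint8_t flags)
{
	uint64_t d = 0;

	d |= (uint64_t)(limit & 0xffff);
	d |= (uint64_t)(base & 0xffffff) << 16;
	d |= (uint64_t)type << 40;			/* 0x9B code, 0x93 data */
	d |= (uint64_t)((limit >> 16) & 0xf) << 48;
	d |= (uint64_t)(flags & 0xf) << 52;		/* 0x4 byte, 0xC 4K gran */
	d |= (uint64_t)(base >> 24) << 56;
	return d;
}

int main(void)
{
	/* Ring-0 code segment confined to a (hypothetical) BIOS image. */
	printf("%#018llx\n", (unsigned long long)
	       pack_desc(0xc00f0000u, 0xffff, 0x9b, 0x4));
	return 0;
}
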
33899diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
33900index 40e4469..d915bf9 100644
33901--- a/arch/x86/platform/efi/efi_32.c
33902+++ b/arch/x86/platform/efi/efi_32.c
33903@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
33904 {
33905 struct desc_ptr gdt_descr;
33906
33907+#ifdef CONFIG_PAX_KERNEXEC
33908+ struct desc_struct d;
33909+#endif
33910+
33911 local_irq_save(efi_rt_eflags);
33912
33913 load_cr3(initial_page_table);
33914 __flush_tlb_all();
33915
33916+#ifdef CONFIG_PAX_KERNEXEC
33917+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
33918+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
33919+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
33920+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
33921+#endif
33922+
33923 gdt_descr.address = __pa(get_cpu_gdt_table(0));
33924 gdt_descr.size = GDT_SIZE - 1;
33925 load_gdt(&gdt_descr);
33926@@ -58,11 +69,24 @@ void efi_call_phys_epilog(void)
33927 {
33928 struct desc_ptr gdt_descr;
33929
33930+#ifdef CONFIG_PAX_KERNEXEC
33931+ struct desc_struct d;
33932+
33933+ memset(&d, 0, sizeof d);
33934+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
33935+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
33936+#endif
33937+
33938 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
33939 gdt_descr.size = GDT_SIZE - 1;
33940 load_gdt(&gdt_descr);
33941
33942+#ifdef CONFIG_PAX_PER_CPU_PGD
33943+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
33944+#else
33945 load_cr3(swapper_pg_dir);
33946+#endif
33947+
33948 __flush_tlb_all();
33949
33950 local_irq_restore(efi_rt_eflags);
33951diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
33952index 39a0e7f1..872396e 100644
33953--- a/arch/x86/platform/efi/efi_64.c
33954+++ b/arch/x86/platform/efi/efi_64.c
33955@@ -76,6 +76,11 @@ void __init efi_call_phys_prelog(void)
33956 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
33957 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
33958 }
33959+
33960+#ifdef CONFIG_PAX_PER_CPU_PGD
33961+ load_cr3(swapper_pg_dir);
33962+#endif
33963+
33964 __flush_tlb_all();
33965 }
33966
33967@@ -89,6 +94,11 @@ void __init efi_call_phys_epilog(void)
33968 for (pgd = 0; pgd < n_pgds; pgd++)
33969 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
33970 kfree(save_pgd);
33971+
33972+#ifdef CONFIG_PAX_PER_CPU_PGD
33973+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
33974+#endif
33975+
33976 __flush_tlb_all();
33977 local_irq_restore(efi_flags);
33978 early_code_mapping_set_exec(0);
33979diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
33980index fbe66e6..eae5e38 100644
33981--- a/arch/x86/platform/efi/efi_stub_32.S
33982+++ b/arch/x86/platform/efi/efi_stub_32.S
33983@@ -6,7 +6,9 @@
33984 */
33985
33986 #include <linux/linkage.h>
33987+#include <linux/init.h>
33988 #include <asm/page_types.h>
33989+#include <asm/segment.h>
33990
33991 /*
33992 * efi_call_phys(void *, ...) is a function with variable parameters.
33993@@ -20,7 +22,7 @@
33994 * service functions will comply with gcc calling convention, too.
33995 */
33996
33997-.text
33998+__INIT
33999 ENTRY(efi_call_phys)
34000 /*
34001 * 0. The function can only be called in Linux kernel. So CS has been
34002@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
34003 * The mapping of lower virtual memory has been created in prelog and
34004 * epilog.
34005 */
34006- movl $1f, %edx
34007- subl $__PAGE_OFFSET, %edx
34008- jmp *%edx
34009+#ifdef CONFIG_PAX_KERNEXEC
34010+ movl $(__KERNEXEC_EFI_DS), %edx
34011+ mov %edx, %ds
34012+ mov %edx, %es
34013+ mov %edx, %ss
34014+ addl $2f,(1f)
34015+ ljmp *(1f)
34016+
34017+__INITDATA
34018+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
34019+.previous
34020+
34021+2:
34022+ subl $2b,(1b)
34023+#else
34024+ jmp 1f-__PAGE_OFFSET
34025 1:
34026+#endif
34027
34028 /*
34029 * 2. Now on the top of stack is the return
34030@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
34031 * parameter 2, ..., param n. To make things easy, we save the return
34032 * address of efi_call_phys in a global variable.
34033 */
34034- popl %edx
34035- movl %edx, saved_return_addr
34036- /* get the function pointer into ECX*/
34037- popl %ecx
34038- movl %ecx, efi_rt_function_ptr
34039- movl $2f, %edx
34040- subl $__PAGE_OFFSET, %edx
34041- pushl %edx
34042+ popl (saved_return_addr)
34043+ popl (efi_rt_function_ptr)
34044
34045 /*
34046 * 3. Clear PG bit in %CR0.
34047@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
34048 /*
34049 * 5. Call the physical function.
34050 */
34051- jmp *%ecx
34052+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
34053
34054-2:
34055 /*
34056 * 6. After EFI runtime service returns, control will return to
34057 * following instruction. We'd better readjust stack pointer first.
34058@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
34059 movl %cr0, %edx
34060 orl $0x80000000, %edx
34061 movl %edx, %cr0
34062- jmp 1f
34063-1:
34064+
34065 /*
34066 * 8. Now restore the virtual mode from flat mode by
34067 * adding EIP with PAGE_OFFSET.
34068 */
34069- movl $1f, %edx
34070- jmp *%edx
34071+#ifdef CONFIG_PAX_KERNEXEC
34072+ movl $(__KERNEL_DS), %edx
34073+ mov %edx, %ds
34074+ mov %edx, %es
34075+ mov %edx, %ss
34076+ ljmp $(__KERNEL_CS),$1f
34077+#else
34078+ jmp 1f+__PAGE_OFFSET
34079+#endif
34080 1:
34081
34082 /*
34083 * 9. Balance the stack. And because EAX contain the return value,
34084 * we'd better not clobber it.
34085 */
34086- leal efi_rt_function_ptr, %edx
34087- movl (%edx), %ecx
34088- pushl %ecx
34089+ pushl (efi_rt_function_ptr)
34090
34091 /*
34092- * 10. Push the saved return address onto the stack and return.
34093+ * 10. Return to the saved return address.
34094 */
34095- leal saved_return_addr, %edx
34096- movl (%edx), %ecx
34097- pushl %ecx
34098- ret
34099+ jmpl *(saved_return_addr)
34100 ENDPROC(efi_call_phys)
34101 .previous
34102
34103-.data
34104+__INITDATA
34105 saved_return_addr:
34106 .long 0
34107 efi_rt_function_ptr:
34108diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
34109index 4c07cca..2c8427d 100644
34110--- a/arch/x86/platform/efi/efi_stub_64.S
34111+++ b/arch/x86/platform/efi/efi_stub_64.S
34112@@ -7,6 +7,7 @@
34113 */
34114
34115 #include <linux/linkage.h>
34116+#include <asm/alternative-asm.h>
34117
34118 #define SAVE_XMM \
34119 mov %rsp, %rax; \
34120@@ -40,6 +41,7 @@ ENTRY(efi_call0)
34121 call *%rdi
34122 addq $32, %rsp
34123 RESTORE_XMM
34124+ pax_force_retaddr 0, 1
34125 ret
34126 ENDPROC(efi_call0)
34127
34128@@ -50,6 +52,7 @@ ENTRY(efi_call1)
34129 call *%rdi
34130 addq $32, %rsp
34131 RESTORE_XMM
34132+ pax_force_retaddr 0, 1
34133 ret
34134 ENDPROC(efi_call1)
34135
34136@@ -60,6 +63,7 @@ ENTRY(efi_call2)
34137 call *%rdi
34138 addq $32, %rsp
34139 RESTORE_XMM
34140+ pax_force_retaddr 0, 1
34141 ret
34142 ENDPROC(efi_call2)
34143
34144@@ -71,6 +75,7 @@ ENTRY(efi_call3)
34145 call *%rdi
34146 addq $32, %rsp
34147 RESTORE_XMM
34148+ pax_force_retaddr 0, 1
34149 ret
34150 ENDPROC(efi_call3)
34151
34152@@ -83,6 +88,7 @@ ENTRY(efi_call4)
34153 call *%rdi
34154 addq $32, %rsp
34155 RESTORE_XMM
34156+ pax_force_retaddr 0, 1
34157 ret
34158 ENDPROC(efi_call4)
34159
34160@@ -96,6 +102,7 @@ ENTRY(efi_call5)
34161 call *%rdi
34162 addq $48, %rsp
34163 RESTORE_XMM
34164+ pax_force_retaddr 0, 1
34165 ret
34166 ENDPROC(efi_call5)
34167
34168@@ -112,5 +119,6 @@ ENTRY(efi_call6)
34169 call *%rdi
34170 addq $48, %rsp
34171 RESTORE_XMM
34172+ pax_force_retaddr 0, 1
34173 ret
34174 ENDPROC(efi_call6)
34175diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
34176index f90e290..435f0dd 100644
34177--- a/arch/x86/platform/intel-mid/intel-mid.c
34178+++ b/arch/x86/platform/intel-mid/intel-mid.c
34179@@ -65,9 +65,10 @@ static void intel_mid_power_off(void)
34180 {
34181 }
34182
34183-static void intel_mid_reboot(void)
34184+static void __noreturn intel_mid_reboot(void)
34185 {
34186 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
34187+ BUG();
34188 }
34189
34190 static unsigned long __init intel_mid_calibrate_tsc(void)
34191diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
34192index d6ee929..3637cb5 100644
34193--- a/arch/x86/platform/olpc/olpc_dt.c
34194+++ b/arch/x86/platform/olpc/olpc_dt.c
34195@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
34196 return res;
34197 }
34198
34199-static struct of_pdt_ops prom_olpc_ops __initdata = {
34200+static struct of_pdt_ops prom_olpc_ops __initconst = {
34201 .nextprop = olpc_dt_nextprop,
34202 .getproplen = olpc_dt_getproplen,
34203 .getproperty = olpc_dt_getproperty,
34204diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
34205index 424f4c9..f2a2988 100644
34206--- a/arch/x86/power/cpu.c
34207+++ b/arch/x86/power/cpu.c
34208@@ -137,11 +137,8 @@ static void do_fpu_end(void)
34209 static void fix_processor_context(void)
34210 {
34211 int cpu = smp_processor_id();
34212- struct tss_struct *t = &per_cpu(init_tss, cpu);
34213-#ifdef CONFIG_X86_64
34214- struct desc_struct *desc = get_cpu_gdt_table(cpu);
34215- tss_desc tss;
34216-#endif
34217+ struct tss_struct *t = init_tss + cpu;
34218+
34219 set_tss_desc(cpu, t); /*
34220 * This just modifies memory; should not be
34221 * necessary. But... This is necessary, because
34222@@ -150,10 +147,6 @@ static void fix_processor_context(void)
34223 */
34224
34225 #ifdef CONFIG_X86_64
34226- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
34227- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
34228- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
34229-
34230 syscall_init(); /* This sets MSR_*STAR and related */
34231 #endif
34232 load_TR_desc(); /* This does ltr */
34233diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
34234index a44f457..9140171 100644
34235--- a/arch/x86/realmode/init.c
34236+++ b/arch/x86/realmode/init.c
34237@@ -70,7 +70,13 @@ void __init setup_real_mode(void)
34238 __va(real_mode_header->trampoline_header);
34239
34240 #ifdef CONFIG_X86_32
34241- trampoline_header->start = __pa_symbol(startup_32_smp);
34242+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
34243+
34244+#ifdef CONFIG_PAX_KERNEXEC
34245+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
34246+#endif
34247+
34248+ trampoline_header->boot_cs = __BOOT_CS;
34249 trampoline_header->gdt_limit = __BOOT_DS + 7;
34250 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
34251 #else
34252@@ -86,7 +92,7 @@ void __init setup_real_mode(void)
34253 *trampoline_cr4_features = read_cr4();
34254
34255 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
34256- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
34257+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
34258 trampoline_pgd[511] = init_level4_pgt[511].pgd;
34259 #endif
34260 }
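The `& ~_PAGE_NX` added above is load-bearing: the trampoline fetches instructions through this PGD entry, and an entry copied from init_level4_pgt may carry the execute-disable bit. A minimal sketch of the masking, with _PAGE_NX at its architectural position (bit 63 of a 64-bit entry):

#include <stdint.h>

#define _PAGE_NX (1ULL << 63)	/* x86-64 execute-disable bit */

/* reuse the kernel's low mapping but keep it executable for the trampoline */
static inline uint64_t trampoline_pgd_entry(uint64_t kernel_pgd_entry)
{
	return kernel_pgd_entry & ~_PAGE_NX;
}
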
34261diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
34262index 9cac825..4890b25 100644
34263--- a/arch/x86/realmode/rm/Makefile
34264+++ b/arch/x86/realmode/rm/Makefile
34265@@ -79,5 +79,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
34266 $(call cc-option, -fno-unit-at-a-time)) \
34267 $(call cc-option, -fno-stack-protector) \
34268 $(call cc-option, -mpreferred-stack-boundary=2)
34269+ifdef CONSTIFY_PLUGIN
34270+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
34271+endif
34272 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
34273 GCOV_PROFILE := n
34274diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
34275index a28221d..93c40f1 100644
34276--- a/arch/x86/realmode/rm/header.S
34277+++ b/arch/x86/realmode/rm/header.S
34278@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
34279 #endif
34280 /* APM/BIOS reboot */
34281 .long pa_machine_real_restart_asm
34282-#ifdef CONFIG_X86_64
34283+#ifdef CONFIG_X86_32
34284+ .long __KERNEL_CS
34285+#else
34286 .long __KERNEL32_CS
34287 #endif
34288 END(real_mode_header)
34289diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
34290index c1b2791..f9e31c7 100644
34291--- a/arch/x86/realmode/rm/trampoline_32.S
34292+++ b/arch/x86/realmode/rm/trampoline_32.S
34293@@ -25,6 +25,12 @@
34294 #include <asm/page_types.h>
34295 #include "realmode.h"
34296
34297+#ifdef CONFIG_PAX_KERNEXEC
34298+#define ta(X) (X)
34299+#else
34300+#define ta(X) (pa_ ## X)
34301+#endif
34302+
34303 .text
34304 .code16
34305
34306@@ -39,8 +45,6 @@ ENTRY(trampoline_start)
34307
34308 cli # We should be safe anyway
34309
34310- movl tr_start, %eax # where we need to go
34311-
34312 movl $0xA5A5A5A5, trampoline_status
34313 # write marker so the master knows we're running
34314
34315@@ -56,7 +60,7 @@ ENTRY(trampoline_start)
34316 movw $1, %dx # protected mode (PE) bit
34317 lmsw %dx # into protected mode
34318
34319- ljmpl $__BOOT_CS, $pa_startup_32
34320+ ljmpl *(trampoline_header)
34321
34322 .section ".text32","ax"
34323 .code32
34324@@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
34325 .balign 8
34326 GLOBAL(trampoline_header)
34327 tr_start: .space 4
34328- tr_gdt_pad: .space 2
34329+ tr_boot_cs: .space 2
34330 tr_gdt: .space 6
34331 END(trampoline_header)
34332
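The switch from `ljmpl $__BOOT_CS, $pa_startup_32` to `ljmpl *(trampoline_header)` works because the header now has exactly the in-memory layout of a 32-bit far pointer: a 4-byte offset (tr_start) immediately followed by a 2-byte selector (tr_boot_cs, replacing the old padding). A C sketch of that layout:

#include <stdint.h>

/* matches tr_start (.space 4) followed by tr_boot_cs (.space 2) */
struct far_ptr32 {
	uint32_t offset;	/* tr_start: where to jump */
	uint16_t selector;	/* tr_boot_cs: code segment to jump through */
} __attribute__((packed));
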
34333diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
34334index bb360dc..d0fd8f8 100644
34335--- a/arch/x86/realmode/rm/trampoline_64.S
34336+++ b/arch/x86/realmode/rm/trampoline_64.S
34337@@ -94,6 +94,7 @@ ENTRY(startup_32)
34338 movl %edx, %gs
34339
34340 movl pa_tr_cr4, %eax
34341+ andl $~X86_CR4_PCIDE, %eax
34342 movl %eax, %cr4 # Enable PAE mode
34343
34344 # Setup trampoline 4 level pagetables
34345@@ -107,7 +108,7 @@ ENTRY(startup_32)
34346 wrmsr
34347
34348 # Enable paging and in turn activate Long Mode
34349- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
34350+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
34351 movl %eax, %cr0
34352
34353 /*
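The new `andl $~X86_CR4_PCIDE, %eax` matters because this code still runs in 32-bit protected mode: the CPU only permits CR4.PCIDE to be set once long mode is active, so a PCIDE bit inherited from the running kernel's CR4 image would fault here. A sketch with the bit at its architectural position:

#include <stdint.h>

#define X86_CR4_PCIDE (1UL << 17)	/* process-context identifiers enable */

/* PCIDE may only be set in long mode; drop it for the 32-bit phase */
static inline uint32_t sanitize_trampoline_cr4(uint32_t cr4)
{
	return cr4 & ~X86_CR4_PCIDE;
}
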
34354diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
34355index e812034..c747134 100644
34356--- a/arch/x86/tools/Makefile
34357+++ b/arch/x86/tools/Makefile
34358@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
34359
34360 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
34361
34362-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
34363+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
34364 hostprogs-y += relocs
34365 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
34366 relocs: $(obj)/relocs
34367diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
34368index f7bab68..b6d9886 100644
34369--- a/arch/x86/tools/relocs.c
34370+++ b/arch/x86/tools/relocs.c
34371@@ -1,5 +1,7 @@
34372 /* This is included from relocs_32/64.c */
34373
34374+#include "../../../include/generated/autoconf.h"
34375+
34376 #define ElfW(type) _ElfW(ELF_BITS, type)
34377 #define _ElfW(bits, type) __ElfW(bits, type)
34378 #define __ElfW(bits, type) Elf##bits##_##type
34379@@ -11,6 +13,7 @@
34380 #define Elf_Sym ElfW(Sym)
34381
34382 static Elf_Ehdr ehdr;
34383+static Elf_Phdr *phdr;
34384
34385 struct relocs {
34386 uint32_t *offset;
34387@@ -383,9 +386,39 @@ static void read_ehdr(FILE *fp)
34388 }
34389 }
34390
34391+static void read_phdrs(FILE *fp)
34392+{
34393+ unsigned int i;
34394+
34395+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
34396+ if (!phdr) {
34397+ die("Unable to allocate %d program headers\n",
34398+ ehdr.e_phnum);
34399+ }
34400+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
34401+ die("Seek to %d failed: %s\n",
34402+ ehdr.e_phoff, strerror(errno));
34403+ }
34404+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
34405+ die("Cannot read ELF program headers: %s\n",
34406+ strerror(errno));
34407+ }
34408+ for(i = 0; i < ehdr.e_phnum; i++) {
34409+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
34410+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
34411+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
34412+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
34413+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
34414+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
34415+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
34416+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
34417+ }
34418+
34419+}
34420+
34421 static void read_shdrs(FILE *fp)
34422 {
34423- int i;
34424+ unsigned int i;
34425 Elf_Shdr shdr;
34426
34427 secs = calloc(ehdr.e_shnum, sizeof(struct section));
34428@@ -420,7 +453,7 @@ static void read_shdrs(FILE *fp)
34429
34430 static void read_strtabs(FILE *fp)
34431 {
34432- int i;
34433+ unsigned int i;
34434 for (i = 0; i < ehdr.e_shnum; i++) {
34435 struct section *sec = &secs[i];
34436 if (sec->shdr.sh_type != SHT_STRTAB) {
34437@@ -445,7 +478,7 @@ static void read_strtabs(FILE *fp)
34438
34439 static void read_symtabs(FILE *fp)
34440 {
34441- int i,j;
34442+ unsigned int i,j;
34443 for (i = 0; i < ehdr.e_shnum; i++) {
34444 struct section *sec = &secs[i];
34445 if (sec->shdr.sh_type != SHT_SYMTAB) {
34446@@ -476,9 +509,11 @@ static void read_symtabs(FILE *fp)
34447 }
34448
34449
34450-static void read_relocs(FILE *fp)
34451+static void read_relocs(FILE *fp, int use_real_mode)
34452 {
34453- int i,j;
34454+ unsigned int i,j;
34455+ uint32_t base;
34456+
34457 for (i = 0; i < ehdr.e_shnum; i++) {
34458 struct section *sec = &secs[i];
34459 if (sec->shdr.sh_type != SHT_REL_TYPE) {
34460@@ -498,9 +533,22 @@ static void read_relocs(FILE *fp)
34461 die("Cannot read symbol table: %s\n",
34462 strerror(errno));
34463 }
34464+ base = 0;
34465+
34466+#ifdef CONFIG_X86_32
34467+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
34468+ if (phdr[j].p_type != PT_LOAD )
34469+ continue;
34470+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
34471+ continue;
34472+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
34473+ break;
34474+ }
34475+#endif
34476+
34477 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
34478 Elf_Rel *rel = &sec->reltab[j];
34479- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
34480+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
34481 rel->r_info = elf_xword_to_cpu(rel->r_info);
34482 #if (SHT_REL_TYPE == SHT_RELA)
34483 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
34484@@ -512,7 +560,7 @@ static void read_relocs(FILE *fp)
34485
34486 static void print_absolute_symbols(void)
34487 {
34488- int i;
34489+ unsigned int i;
34490 const char *format;
34491
34492 if (ELF_BITS == 64)
34493@@ -525,7 +573,7 @@ static void print_absolute_symbols(void)
34494 for (i = 0; i < ehdr.e_shnum; i++) {
34495 struct section *sec = &secs[i];
34496 char *sym_strtab;
34497- int j;
34498+ unsigned int j;
34499
34500 if (sec->shdr.sh_type != SHT_SYMTAB) {
34501 continue;
34502@@ -552,7 +600,7 @@ static void print_absolute_symbols(void)
34503
34504 static void print_absolute_relocs(void)
34505 {
34506- int i, printed = 0;
34507+ unsigned int i, printed = 0;
34508 const char *format;
34509
34510 if (ELF_BITS == 64)
34511@@ -565,7 +613,7 @@ static void print_absolute_relocs(void)
34512 struct section *sec_applies, *sec_symtab;
34513 char *sym_strtab;
34514 Elf_Sym *sh_symtab;
34515- int j;
34516+ unsigned int j;
34517 if (sec->shdr.sh_type != SHT_REL_TYPE) {
34518 continue;
34519 }
34520@@ -642,13 +690,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
34521 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
34522 Elf_Sym *sym, const char *symname))
34523 {
34524- int i;
34525+ unsigned int i;
34526 /* Walk through the relocations */
34527 for (i = 0; i < ehdr.e_shnum; i++) {
34528 char *sym_strtab;
34529 Elf_Sym *sh_symtab;
34530 struct section *sec_applies, *sec_symtab;
34531- int j;
34532+ unsigned int j;
34533 struct section *sec = &secs[i];
34534
34535 if (sec->shdr.sh_type != SHT_REL_TYPE) {
34536@@ -812,6 +860,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
34537 {
34538 unsigned r_type = ELF32_R_TYPE(rel->r_info);
34539 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
34540+ char *sym_strtab = sec->link->link->strtab;
34541+
34542+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
34543+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
34544+ return 0;
34545+
34546+#ifdef CONFIG_PAX_KERNEXEC
34547+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
34548+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
34549+ return 0;
34550+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
34551+ return 0;
34552+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
34553+ return 0;
34554+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
34555+ return 0;
34556+#endif
34557
34558 switch (r_type) {
34559 case R_386_NONE:
34560@@ -950,7 +1015,7 @@ static int write32_as_text(uint32_t v, FILE *f)
34561
34562 static void emit_relocs(int as_text, int use_real_mode)
34563 {
34564- int i;
34565+ unsigned int i;
34566 int (*write_reloc)(uint32_t, FILE *) = write32;
34567 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
34568 const char *symname);
34569@@ -1026,10 +1091,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
34570 {
34571 regex_init(use_real_mode);
34572 read_ehdr(fp);
34573+ read_phdrs(fp);
34574 read_shdrs(fp);
34575 read_strtabs(fp);
34576 read_symtabs(fp);
34577- read_relocs(fp);
34578+ read_relocs(fp, use_real_mode);
34579 if (ELF_BITS == 64)
34580 percpu_init();
34581 if (show_absolute_syms) {
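The base computed by the new read_phdrs()/read_relocs() pair answers one question: given a section's file offset, which PT_LOAD segment covers it, and what bias turns its link-time address into the CONFIG_PAGE_OFFSET-relative address the relocations should use. A standalone C restatement of that lookup (simplified to 32-bit ELF types):

#include <elf.h>
#include <stdint.h>

/* bias applied to r_offset for a section at file offset sh_offset */
static uint32_t reloc_base(const Elf32_Phdr *phdr, unsigned int phnum,
			   uint32_t sh_offset, uint32_t page_offset)
{
	unsigned int j;

	for (j = 0; j < phnum; j++) {
		if (phdr[j].p_type != PT_LOAD)
			continue;
		if (sh_offset < phdr[j].p_offset ||
		    sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
			continue;
		return page_offset + phdr[j].p_paddr - phdr[j].p_vaddr;
	}
	return 0;	/* section not covered by any PT_LOAD segment */
}
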
34582diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
34583index 80ffa5b..a33bd15 100644
34584--- a/arch/x86/um/tls_32.c
34585+++ b/arch/x86/um/tls_32.c
34586@@ -260,7 +260,7 @@ out:
34587 if (unlikely(task == current &&
34588 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
34589 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
34590- "without flushed TLS.", current->pid);
34591+ "without flushed TLS.", task_pid_nr(current));
34592 }
34593
34594 return 0;
34595diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
34596index fd14be1..e3c79c0 100644
34597--- a/arch/x86/vdso/Makefile
34598+++ b/arch/x86/vdso/Makefile
34599@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
34600 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
34601 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
34602
34603-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
34604+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
34605 GCOV_PROFILE := n
34606
34607 #
34608diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
34609index d6bfb87..876ee18 100644
34610--- a/arch/x86/vdso/vdso32-setup.c
34611+++ b/arch/x86/vdso/vdso32-setup.c
34612@@ -25,6 +25,7 @@
34613 #include <asm/tlbflush.h>
34614 #include <asm/vdso.h>
34615 #include <asm/proto.h>
34616+#include <asm/mman.h>
34617
34618 enum {
34619 VDSO_DISABLED = 0,
34620@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
34621 void enable_sep_cpu(void)
34622 {
34623 int cpu = get_cpu();
34624- struct tss_struct *tss = &per_cpu(init_tss, cpu);
34625+ struct tss_struct *tss = init_tss + cpu;
34626
34627 if (!boot_cpu_has(X86_FEATURE_SEP)) {
34628 put_cpu();
34629@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
34630 gate_vma.vm_start = FIXADDR_USER_START;
34631 gate_vma.vm_end = FIXADDR_USER_END;
34632 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
34633- gate_vma.vm_page_prot = __P101;
34634+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
34635
34636 return 0;
34637 }
34638@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
34639 if (compat)
34640 addr = VDSO_HIGH_BASE;
34641 else {
34642- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
34643+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
34644 if (IS_ERR_VALUE(addr)) {
34645 ret = addr;
34646 goto up_fail;
34647 }
34648 }
34649
34650- current->mm->context.vdso = (void *)addr;
34651+ current->mm->context.vdso = addr;
34652
34653 if (compat_uses_vma || !compat) {
34654 /*
34655@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
34656 }
34657
34658 current_thread_info()->sysenter_return =
34659- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
34660+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
34661
34662 up_fail:
34663 if (ret)
34664- current->mm->context.vdso = NULL;
34665+ current->mm->context.vdso = 0;
34666
34667 up_write(&mm->mmap_sem);
34668
34669@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
34670
34671 const char *arch_vma_name(struct vm_area_struct *vma)
34672 {
34673- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
34674+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
34675 return "[vdso]";
34676+
34677+#ifdef CONFIG_PAX_SEGMEXEC
34678+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
34679+ return "[vdso]";
34680+#endif
34681+
34682 return NULL;
34683 }
34684
34685@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
34686 * Check to see if the corresponding task was created in compat vdso
34687 * mode.
34688 */
34689- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
34690+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
34691 return &gate_vma;
34692 return NULL;
34693 }
34694diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
34695index 431e875..cbb23f3 100644
34696--- a/arch/x86/vdso/vma.c
34697+++ b/arch/x86/vdso/vma.c
34698@@ -16,8 +16,6 @@
34699 #include <asm/vdso.h>
34700 #include <asm/page.h>
34701
34702-unsigned int __read_mostly vdso_enabled = 1;
34703-
34704 extern char vdso_start[], vdso_end[];
34705 extern unsigned short vdso_sync_cpuid;
34706
34707@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
34708 * unaligned here as a result of stack start randomization.
34709 */
34710 addr = PAGE_ALIGN(addr);
34711- addr = align_vdso_addr(addr);
34712
34713 return addr;
34714 }
34715@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
34716 unsigned size)
34717 {
34718 struct mm_struct *mm = current->mm;
34719- unsigned long addr;
34720+ unsigned long addr = 0;
34721 int ret;
34722
34723- if (!vdso_enabled)
34724- return 0;
34725-
34726 down_write(&mm->mmap_sem);
34727+
34728+#ifdef CONFIG_PAX_RANDMMAP
34729+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
34730+#endif
34731+
34732 addr = vdso_addr(mm->start_stack, size);
34733+ addr = align_vdso_addr(addr);
34734 addr = get_unmapped_area(NULL, addr, size, 0, 0);
34735 if (IS_ERR_VALUE(addr)) {
34736 ret = addr;
34737 goto up_fail;
34738 }
34739
34740- current->mm->context.vdso = (void *)addr;
34741+ mm->context.vdso = addr;
34742
34743 ret = install_special_mapping(mm, addr, size,
34744 VM_READ|VM_EXEC|
34745 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
34746 pages);
34747- if (ret) {
34748- current->mm->context.vdso = NULL;
34749- goto up_fail;
34750- }
34751+ if (ret)
34752+ mm->context.vdso = 0;
34753
34754 up_fail:
34755 up_write(&mm->mmap_sem);
34756@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
34757 vdsox32_size);
34758 }
34759 #endif
34760-
34761-static __init int vdso_setup(char *s)
34762-{
34763- vdso_enabled = simple_strtoul(s, NULL, 0);
34764- return 0;
34765-}
34766-__setup("vdso=", vdso_setup);
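Note what the #ifdef in setup_additional_pages() produces: the guard covers only the single vdso_addr() assignment, so with MF_PAX_RANDMMAP set the hint stays 0 and get_unmapped_area() is free to pick a fully randomized slot. A minimal sketch of that decision (the flag value is hypothetical, for illustration only):

#define MF_PAX_RANDMMAP 0x1UL	/* hypothetical value */

/* returns the address hint later passed to get_unmapped_area() */
static unsigned long vdso_hint(unsigned long pax_flags,
			       unsigned long stack_based_hint)
{
	unsigned long addr = 0;	/* 0 = let the allocator randomize */

	if (!(pax_flags & MF_PAX_RANDMMAP))	/* guards one statement only */
		addr = stack_based_hint;
	return addr;
}
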
34767diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
34768index fa6ade7..73da73a5 100644
34769--- a/arch/x86/xen/enlighten.c
34770+++ b/arch/x86/xen/enlighten.c
34771@@ -123,8 +123,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
34772
34773 struct shared_info xen_dummy_shared_info;
34774
34775-void *xen_initial_gdt;
34776-
34777 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
34778 __read_mostly int xen_have_vector_callback;
34779 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
34780@@ -541,8 +539,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
34781 {
34782 unsigned long va = dtr->address;
34783 unsigned int size = dtr->size + 1;
34784- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
34785- unsigned long frames[pages];
34786+ unsigned long frames[65536 / PAGE_SIZE];
34787 int f;
34788
34789 /*
34790@@ -590,8 +587,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
34791 {
34792 unsigned long va = dtr->address;
34793 unsigned int size = dtr->size + 1;
34794- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
34795- unsigned long frames[pages];
34796+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
34797 int f;
34798
34799 /*
34800@@ -599,7 +595,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
34801 * 8-byte entries, or 16 4k pages..
34802 */
34803
34804- BUG_ON(size > 65536);
34805+ BUG_ON(size > GDT_SIZE);
34806 BUG_ON(va & ~PAGE_MASK);
34807
34808 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
34809@@ -988,7 +984,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
34810 return 0;
34811 }
34812
34813-static void set_xen_basic_apic_ops(void)
34814+static void __init set_xen_basic_apic_ops(void)
34815 {
34816 apic->read = xen_apic_read;
34817 apic->write = xen_apic_write;
34818@@ -1293,30 +1289,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
34819 #endif
34820 };
34821
34822-static void xen_reboot(int reason)
34823+static __noreturn void xen_reboot(int reason)
34824 {
34825 struct sched_shutdown r = { .reason = reason };
34826
34827- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
34828- BUG();
34829+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
34830+ BUG();
34831 }
34832
34833-static void xen_restart(char *msg)
34834+static __noreturn void xen_restart(char *msg)
34835 {
34836 xen_reboot(SHUTDOWN_reboot);
34837 }
34838
34839-static void xen_emergency_restart(void)
34840+static __noreturn void xen_emergency_restart(void)
34841 {
34842 xen_reboot(SHUTDOWN_reboot);
34843 }
34844
34845-static void xen_machine_halt(void)
34846+static __noreturn void xen_machine_halt(void)
34847 {
34848 xen_reboot(SHUTDOWN_poweroff);
34849 }
34850
34851-static void xen_machine_power_off(void)
34852+static __noreturn void xen_machine_power_off(void)
34853 {
34854 if (pm_power_off)
34855 pm_power_off();
34856@@ -1467,7 +1463,17 @@ asmlinkage void __init xen_start_kernel(void)
34857 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
34858
34859 /* Work out if we support NX */
34860- x86_configure_nx();
34861+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34862+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
34863+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
34864+ unsigned l, h;
34865+
34866+ __supported_pte_mask |= _PAGE_NX;
34867+ rdmsr(MSR_EFER, l, h);
34868+ l |= EFER_NX;
34869+ wrmsr(MSR_EFER, l, h);
34870+ }
34871+#endif
34872
34873 xen_setup_features();
34874
34875@@ -1498,13 +1504,6 @@ asmlinkage void __init xen_start_kernel(void)
34876
34877 machine_ops = xen_machine_ops;
34878
34879- /*
34880- * The only reliable way to retain the initial address of the
34881- * percpu gdt_page is to remember it here, so we can go and
34882- * mark it RW later, when the initial percpu area is freed.
34883- */
34884- xen_initial_gdt = &per_cpu(gdt_page, 0);
34885-
34886 xen_smp_init();
34887
34888 #ifdef CONFIG_ACPI_NUMA
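Both xen_load_gdt hunks above replace a variable-length array (`unsigned long frames[pages]`, sized from dtr->size at runtime) with a fixed bound, so the stack footprint is known at compile time; the existing BUG_ON then enforces the bound instead of merely documenting it. A sketch of the sizing:

#include <stdint.h>

#define PAGE_SIZE	4096UL
#define GDT_MAX_BYTES	65536UL	/* 8192 descriptors of 8 bytes each */

/* fixed-size frame list instead of a stack VLA */
static unsigned long frames[GDT_MAX_BYTES / PAGE_SIZE];	/* 16 entries */

static int gdt_size_ok(unsigned long size)
{
	return size <= GDT_MAX_BYTES;	/* mirrors BUG_ON(size > GDT_SIZE) */
}
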
34889diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
34890index ce563be..7327d91 100644
34891--- a/arch/x86/xen/mmu.c
34892+++ b/arch/x86/xen/mmu.c
34893@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
34894 return val;
34895 }
34896
34897-static pteval_t pte_pfn_to_mfn(pteval_t val)
34898+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
34899 {
34900 if (val & _PAGE_PRESENT) {
34901 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
34902@@ -1894,6 +1894,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
34903 /* L3_k[510] -> level2_kernel_pgt
34904 * L3_i[511] -> level2_fixmap_pgt */
34905 convert_pfn_mfn(level3_kernel_pgt);
34906+ convert_pfn_mfn(level3_vmalloc_start_pgt);
34907+ convert_pfn_mfn(level3_vmalloc_end_pgt);
34908+ convert_pfn_mfn(level3_vmemmap_pgt);
34909
34910 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
34911 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
34912@@ -1923,8 +1926,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
34913 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
34914 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
34915 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
34916+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
34917+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
34918+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
34919 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
34920 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
34921+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
34922 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
34923 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
34924
34925@@ -2108,6 +2115,7 @@ static void __init xen_post_allocator_init(void)
34926 pv_mmu_ops.set_pud = xen_set_pud;
34927 #if PAGETABLE_LEVELS == 4
34928 pv_mmu_ops.set_pgd = xen_set_pgd;
34929+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
34930 #endif
34931
34932 /* This will work as long as patching hasn't happened yet
34933@@ -2186,6 +2194,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
34934 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
34935 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
34936 .set_pgd = xen_set_pgd_hyper,
34937+ .set_pgd_batched = xen_set_pgd_hyper,
34938
34939 .alloc_pud = xen_alloc_pmd_init,
34940 .release_pud = xen_release_pmd_init,
34941diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
34942index c36b325..b0f1518 100644
34943--- a/arch/x86/xen/smp.c
34944+++ b/arch/x86/xen/smp.c
34945@@ -274,17 +274,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
34946 native_smp_prepare_boot_cpu();
34947
34948 if (xen_pv_domain()) {
34949- /* We've switched to the "real" per-cpu gdt, so make sure the
34950- old memory can be recycled */
34951- make_lowmem_page_readwrite(xen_initial_gdt);
34952-
34953 #ifdef CONFIG_X86_32
34954 /*
34955 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
34956 * expects __USER_DS
34957 */
34958- loadsegment(ds, __USER_DS);
34959- loadsegment(es, __USER_DS);
34960+ loadsegment(ds, __KERNEL_DS);
34961+ loadsegment(es, __KERNEL_DS);
34962 #endif
34963
34964 xen_filter_cpu_maps();
34965@@ -364,7 +360,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
34966 ctxt->user_regs.ss = __KERNEL_DS;
34967 #ifdef CONFIG_X86_32
34968 ctxt->user_regs.fs = __KERNEL_PERCPU;
34969- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
34970+ savesegment(gs, ctxt->user_regs.gs);
34971 #else
34972 ctxt->gs_base_kernel = per_cpu_offset(cpu);
34973 #endif
34974@@ -374,8 +370,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
34975
34976 {
34977 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
34978- ctxt->user_regs.ds = __USER_DS;
34979- ctxt->user_regs.es = __USER_DS;
34980+ ctxt->user_regs.ds = __KERNEL_DS;
34981+ ctxt->user_regs.es = __KERNEL_DS;
34982
34983 xen_copy_trap_info(ctxt->trap_ctxt);
34984
34985@@ -420,13 +416,12 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
34986 int rc;
34987
34988 per_cpu(current_task, cpu) = idle;
34989+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
34990 #ifdef CONFIG_X86_32
34991 irq_ctx_init(cpu);
34992 #else
34993 clear_tsk_thread_flag(idle, TIF_FORK);
34994- per_cpu(kernel_stack, cpu) =
34995- (unsigned long)task_stack_page(idle) -
34996- KERNEL_STACK_OFFSET + THREAD_SIZE;
34997+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
34998 #endif
34999 xen_setup_runstate_info(cpu);
35000 xen_setup_timer(cpu);
35001@@ -702,7 +697,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
35002
35003 void __init xen_smp_init(void)
35004 {
35005- smp_ops = xen_smp_ops;
35006+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
35007 xen_fill_possible_map();
35008 }
35009
35010diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
35011index 33ca6e4..0ded929 100644
35012--- a/arch/x86/xen/xen-asm_32.S
35013+++ b/arch/x86/xen/xen-asm_32.S
35014@@ -84,14 +84,14 @@ ENTRY(xen_iret)
35015 ESP_OFFSET=4 # bytes pushed onto stack
35016
35017 /*
35018- * Store vcpu_info pointer for easy access. Do it this way to
35019- * avoid having to reload %fs
35020+ * Store vcpu_info pointer for easy access.
35021 */
35022 #ifdef CONFIG_SMP
35023- GET_THREAD_INFO(%eax)
35024- movl %ss:TI_cpu(%eax), %eax
35025- movl %ss:__per_cpu_offset(,%eax,4), %eax
35026- mov %ss:xen_vcpu(%eax), %eax
35027+ push %fs
35028+ mov $(__KERNEL_PERCPU), %eax
35029+ mov %eax, %fs
35030+ mov PER_CPU_VAR(xen_vcpu), %eax
35031+ pop %fs
35032 #else
35033 movl %ss:xen_vcpu, %eax
35034 #endif
35035diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
35036index 7faed58..ba4427c 100644
35037--- a/arch/x86/xen/xen-head.S
35038+++ b/arch/x86/xen/xen-head.S
35039@@ -19,6 +19,17 @@ ENTRY(startup_xen)
35040 #ifdef CONFIG_X86_32
35041 mov %esi,xen_start_info
35042 mov $init_thread_union+THREAD_SIZE,%esp
35043+#ifdef CONFIG_SMP
35044+ movl $cpu_gdt_table,%edi
35045+ movl $__per_cpu_load,%eax
35046+ movw %ax,__KERNEL_PERCPU + 2(%edi)
35047+ rorl $16,%eax
35048+ movb %al,__KERNEL_PERCPU + 4(%edi)
35049+ movb %ah,__KERNEL_PERCPU + 7(%edi)
35050+ movl $__per_cpu_end - 1,%eax
35051+ subl $__per_cpu_start,%eax
35052+ movw %ax,__KERNEL_PERCPU + 0(%edi)
35053+#endif
35054 #else
35055 mov %rsi,xen_start_info
35056 mov $init_thread_union+THREAD_SIZE,%rsp
35057diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
35058index 95f8c61..611d6e8 100644
35059--- a/arch/x86/xen/xen-ops.h
35060+++ b/arch/x86/xen/xen-ops.h
35061@@ -10,8 +10,6 @@
35062 extern const char xen_hypervisor_callback[];
35063 extern const char xen_failsafe_callback[];
35064
35065-extern void *xen_initial_gdt;
35066-
35067 struct trap_info;
35068 void xen_copy_trap_info(struct trap_info *traps);
35069
35070diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
35071index 525bd3d..ef888b1 100644
35072--- a/arch/xtensa/variants/dc232b/include/variant/core.h
35073+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
35074@@ -119,9 +119,9 @@
35075 ----------------------------------------------------------------------*/
35076
35077 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
35078-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
35079 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
35080 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
35081+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
35082
35083 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
35084 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
35085diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
35086index 2f33760..835e50a 100644
35087--- a/arch/xtensa/variants/fsf/include/variant/core.h
35088+++ b/arch/xtensa/variants/fsf/include/variant/core.h
35089@@ -11,6 +11,7 @@
35090 #ifndef _XTENSA_CORE_H
35091 #define _XTENSA_CORE_H
35092
35093+#include <linux/const.h>
35094
35095 /****************************************************************************
35096 Parameters Useful for Any Code, USER or PRIVILEGED
35097@@ -112,9 +113,9 @@
35098 ----------------------------------------------------------------------*/
35099
35100 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
35101-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
35102 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
35103 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
35104+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
35105
35106 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
35107 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
35108diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
35109index af00795..2bb8105 100644
35110--- a/arch/xtensa/variants/s6000/include/variant/core.h
35111+++ b/arch/xtensa/variants/s6000/include/variant/core.h
35112@@ -11,6 +11,7 @@
35113 #ifndef _XTENSA_CORE_CONFIGURATION_H
35114 #define _XTENSA_CORE_CONFIGURATION_H
35115
35116+#include <linux/const.h>
35117
35118 /****************************************************************************
35119 Parameters Useful for Any Code, USER or PRIVILEGED
35120@@ -118,9 +119,9 @@
35121 ----------------------------------------------------------------------*/
35122
35123 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
35124-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
35125 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
35126 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
35127+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
35128
35129 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
35130 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
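All three xtensa variant headers get the same treatment: the D-cache line size is derived from its own log2 instead of being stated twice, so the two constants cannot drift apart, and _AC() from linux/const.h keeps the literal usable from both C and assembly. The arithmetic, spelled out for the fsf/s6000 variants:

#define XCHAL_DCACHE_LINEWIDTH	4				/* log2(bytes) */
#define XCHAL_DCACHE_LINESIZE	(1UL << XCHAL_DCACHE_LINEWIDTH)	/* = 16 */
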
35131diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
35132index 4e491d9..c8e18e4 100644
35133--- a/block/blk-cgroup.c
35134+++ b/block/blk-cgroup.c
35135@@ -812,7 +812,7 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
35136 static struct cgroup_subsys_state *
35137 blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
35138 {
35139- static atomic64_t id_seq = ATOMIC64_INIT(0);
35140+ static atomic64_unchecked_t id_seq = ATOMIC64_INIT(0);
35141 struct blkcg *blkcg;
35142
35143 if (!parent_css) {
35144@@ -826,7 +826,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
35145
35146 blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
35147 blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
35148- blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
35149+ blkcg->id = atomic64_inc_return_unchecked(&id_seq); /* root is 0, start from 1 */
35150 done:
35151 spin_lock_init(&blkcg->lock);
35152 INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
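The atomic64_unchecked_t swap above is a recurring PaX/grsecurity pattern throughout this patch: under REFCOUNT hardening, plain atomic operations trap on overflow, so counters that are mere IDs or statistics and may legitimately wrap are moved to a distinct unchecked type. A sketch of the idea, not the patch's actual definitions:

/* a separate wrapper type lets the hardening tell protected reference
 * counts apart from IDs/statistics that are allowed to wrap */
typedef struct { long long counter; } atomic64_unchecked_t;

static inline long long
atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
{
	/* GCC builtin; deliberately no overflow check */
	return __atomic_add_fetch(&v->counter, 1, __ATOMIC_SEQ_CST);
}
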
35153diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
35154index 1855bf5..af12b06 100644
35155--- a/block/blk-iopoll.c
35156+++ b/block/blk-iopoll.c
35157@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
35158 }
35159 EXPORT_SYMBOL(blk_iopoll_complete);
35160
35161-static void blk_iopoll_softirq(struct softirq_action *h)
35162+static __latent_entropy void blk_iopoll_softirq(void)
35163 {
35164 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
35165 int rearm = 0, budget = blk_iopoll_budget;
35166diff --git a/block/blk-map.c b/block/blk-map.c
35167index 623e1cd..ca1e109 100644
35168--- a/block/blk-map.c
35169+++ b/block/blk-map.c
35170@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
35171 if (!len || !kbuf)
35172 return -EINVAL;
35173
35174- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
35175+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
35176 if (do_copy)
35177 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
35178 else
35179diff --git a/block/blk-softirq.c b/block/blk-softirq.c
35180index 57790c1..5e988dd 100644
35181--- a/block/blk-softirq.c
35182+++ b/block/blk-softirq.c
35183@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
35184 * Softirq action handler - move entries to local list and loop over them
35185 * while passing them to the queue registered handler.
35186 */
35187-static void blk_done_softirq(struct softirq_action *h)
35188+static __latent_entropy void blk_done_softirq(void)
35189 {
35190 struct list_head *cpu_list, local_list;
35191
35192diff --git a/block/bsg.c b/block/bsg.c
35193index 420a5a9..23834aa 100644
35194--- a/block/bsg.c
35195+++ b/block/bsg.c
35196@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
35197 struct sg_io_v4 *hdr, struct bsg_device *bd,
35198 fmode_t has_write_perm)
35199 {
35200+ unsigned char tmpcmd[sizeof(rq->__cmd)];
35201+ unsigned char *cmdptr;
35202+
35203 if (hdr->request_len > BLK_MAX_CDB) {
35204 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
35205 if (!rq->cmd)
35206 return -ENOMEM;
35207- }
35208+ cmdptr = rq->cmd;
35209+ } else
35210+ cmdptr = tmpcmd;
35211
35212- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
35213+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
35214 hdr->request_len))
35215 return -EFAULT;
35216
35217+ if (cmdptr != rq->cmd)
35218+ memcpy(rq->cmd, cmdptr, hdr->request_len);
35219+
35220 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
35221 if (blk_verify_command(rq->cmd, has_write_perm))
35222 return -EPERM;
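This bsg.c hunk, and the scsi_ioctl.c hunks further down, apply one bounce-buffer pattern: copy_from_user() writes into a stack temporary first, and the bytes are committed to the request's embedded command buffer only after the whole copy succeeds, so a faulting partial copy can never leave a half-written command next to live request fields. A standalone sketch (copy_in stands in for copy_from_user):

#include <string.h>

#define CMD_MAX 32	/* stand-in for sizeof(rq->__cmd) */

/* 0 on success; on failure the destination is untouched when bounced */
static int fill_cmd(unsigned char *dst, const void *user_src, size_t len,
		    int (*copy_in)(void *, const void *, size_t))
{
	unsigned char tmp[CMD_MAX];
	unsigned char *p = (len <= sizeof(tmp)) ? tmp : dst;

	if (copy_in(p, user_src, len))
		return -1;		/* -EFAULT in the kernel */
	if (p != dst)
		memcpy(dst, p, len);	/* commit only the complete copy */
	return 0;
}
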
35223diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
35224index fbd5a67..f24fd95 100644
35225--- a/block/compat_ioctl.c
35226+++ b/block/compat_ioctl.c
35227@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
35228 cgc = compat_alloc_user_space(sizeof(*cgc));
35229 cgc32 = compat_ptr(arg);
35230
35231- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
35232+ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
35233 get_user(data, &cgc32->buffer) ||
35234 put_user(compat_ptr(data), &cgc->buffer) ||
35235 copy_in_user(&cgc->buflen, &cgc32->buflen,
35236@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
35237 err |= __get_user(f->spec1, &uf->spec1);
35238 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
35239 err |= __get_user(name, &uf->name);
35240- f->name = compat_ptr(name);
35241+ f->name = (void __force_kernel *)compat_ptr(name);
35242 if (err) {
35243 err = -EFAULT;
35244 goto out;
35245diff --git a/block/genhd.c b/block/genhd.c
35246index 791f419..89f21c4 100644
35247--- a/block/genhd.c
35248+++ b/block/genhd.c
35249@@ -467,21 +467,24 @@ static char *bdevt_str(dev_t devt, char *buf)
35250
35251 /*
35252 * Register device numbers dev..(dev+range-1)
35253- * range must be nonzero
35254+ * Noop if @range is zero.
35255 * The hash chain is sorted on range, so that subranges can override.
35256 */
35257 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
35258 struct kobject *(*probe)(dev_t, int *, void *),
35259 int (*lock)(dev_t, void *), void *data)
35260 {
35261- kobj_map(bdev_map, devt, range, module, probe, lock, data);
35262+ if (range)
35263+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
35264 }
35265
35266 EXPORT_SYMBOL(blk_register_region);
35267
35268+/* undo blk_register_region(), noop if @range is zero */
35269 void blk_unregister_region(dev_t devt, unsigned long range)
35270 {
35271- kobj_unmap(bdev_map, devt, range);
35272+ if (range)
35273+ kobj_unmap(bdev_map, devt, range);
35274 }
35275
35276 EXPORT_SYMBOL(blk_unregister_region);
35277diff --git a/block/partitions/efi.c b/block/partitions/efi.c
35278index dc51f46..d5446a8 100644
35279--- a/block/partitions/efi.c
35280+++ b/block/partitions/efi.c
35281@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
35282 if (!gpt)
35283 return NULL;
35284
35285+ if (!le32_to_cpu(gpt->num_partition_entries))
35286+ return NULL;
35287+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
35288+ if (!pte)
35289+ return NULL;
35290+
35291 count = le32_to_cpu(gpt->num_partition_entries) *
35292 le32_to_cpu(gpt->sizeof_partition_entry);
35293- if (!count)
35294- return NULL;
35295- pte = kmalloc(count, GFP_KERNEL);
35296- if (!pte)
35297- return NULL;
35298-
35299 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
35300 (u8 *) pte, count) < count) {
35301 kfree(pte);
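The efi.c change above is an integer-overflow fix: `count = n * entry_size` is a 32-bit multiplication that can wrap, after which kmalloc(count) allocates less than the loop later reads. kcalloc(n, size) refuses such requests instead of wrapping. A userspace sketch of the check a kcalloc-style allocator performs:

#include <stdint.h>
#include <stdlib.h>

/* overflow-checked array allocation, kcalloc-style */
static void *calloc_checked(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;		/* n * size would wrap */
	return calloc(n, size);		/* zeroed, like kcalloc */
}
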
35302diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
35303index 625e3e4..b5339f9 100644
35304--- a/block/scsi_ioctl.c
35305+++ b/block/scsi_ioctl.c
35306@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
35307 return put_user(0, p);
35308 }
35309
35310-static int sg_get_timeout(struct request_queue *q)
35311+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
35312 {
35313 return jiffies_to_clock_t(q->sg_timeout);
35314 }
35315@@ -224,8 +224,20 @@ EXPORT_SYMBOL(blk_verify_command);
35316 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
35317 struct sg_io_hdr *hdr, fmode_t mode)
35318 {
35319- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
35320+ unsigned char tmpcmd[sizeof(rq->__cmd)];
35321+ unsigned char *cmdptr;
35322+
35323+ if (rq->cmd != rq->__cmd)
35324+ cmdptr = rq->cmd;
35325+ else
35326+ cmdptr = tmpcmd;
35327+
35328+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
35329 return -EFAULT;
35330+
35331+ if (cmdptr != rq->cmd)
35332+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
35333+
35334 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
35335 return -EPERM;
35336
35337@@ -415,6 +427,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
35338 int err;
35339 unsigned int in_len, out_len, bytes, opcode, cmdlen;
35340 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
35341+ unsigned char tmpcmd[sizeof(rq->__cmd)];
35342+ unsigned char *cmdptr;
35343
35344 if (!sic)
35345 return -EINVAL;
35346@@ -448,9 +462,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
35347 */
35348 err = -EFAULT;
35349 rq->cmd_len = cmdlen;
35350- if (copy_from_user(rq->cmd, sic->data, cmdlen))
35351+
35352+ if (rq->cmd != rq->__cmd)
35353+ cmdptr = rq->cmd;
35354+ else
35355+ cmdptr = tmpcmd;
35356+
35357+ if (copy_from_user(cmdptr, sic->data, cmdlen))
35358 goto error;
35359
35360+ if (rq->cmd != cmdptr)
35361+ memcpy(rq->cmd, cmdptr, cmdlen);
35362+
35363 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
35364 goto error;
35365
35366diff --git a/crypto/cryptd.c b/crypto/cryptd.c
35367index 7bdd61b..afec999 100644
35368--- a/crypto/cryptd.c
35369+++ b/crypto/cryptd.c
35370@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
35371
35372 struct cryptd_blkcipher_request_ctx {
35373 crypto_completion_t complete;
35374-};
35375+} __no_const;
35376
35377 struct cryptd_hash_ctx {
35378 struct crypto_shash *child;
35379@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
35380
35381 struct cryptd_aead_request_ctx {
35382 crypto_completion_t complete;
35383-};
35384+} __no_const;
35385
35386 static void cryptd_queue_worker(struct work_struct *work);
35387
35388diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
35389index f8c920c..ab2cb5a 100644
35390--- a/crypto/pcrypt.c
35391+++ b/crypto/pcrypt.c
35392@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
35393 int ret;
35394
35395 pinst->kobj.kset = pcrypt_kset;
35396- ret = kobject_add(&pinst->kobj, NULL, name);
35397+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
35398 if (!ret)
35399 kobject_uevent(&pinst->kobj, KOBJ_ADD);
35400
35401diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
35402index 15dddc1..b61cf0c 100644
35403--- a/drivers/acpi/acpica/hwxfsleep.c
35404+++ b/drivers/acpi/acpica/hwxfsleep.c
35405@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
35406 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
35407
35408 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
35409- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
35410- acpi_hw_extended_sleep},
35411- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
35412- acpi_hw_extended_wake_prep},
35413- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
35414+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
35415+ .extended_function = acpi_hw_extended_sleep},
35416+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
35417+ .extended_function = acpi_hw_extended_wake_prep},
35418+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
35419+ .extended_function = acpi_hw_extended_wake}
35420 };
35421
35422 /*
35423diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
35424index 21ba34a..cb05966 100644
35425--- a/drivers/acpi/apei/apei-internal.h
35426+++ b/drivers/acpi/apei/apei-internal.h
35427@@ -20,7 +20,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
35428 struct apei_exec_ins_type {
35429 u32 flags;
35430 apei_exec_ins_func_t run;
35431-};
35432+} __do_const;
35433
35434 struct apei_exec_context {
35435 u32 ip;
35436diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
35437index a30bc31..b91c4d5 100644
35438--- a/drivers/acpi/apei/ghes.c
35439+++ b/drivers/acpi/apei/ghes.c
35440@@ -498,7 +498,7 @@ static void __ghes_print_estatus(const char *pfx,
35441 const struct acpi_hest_generic *generic,
35442 const struct acpi_generic_status *estatus)
35443 {
35444- static atomic_t seqno;
35445+ static atomic_unchecked_t seqno;
35446 unsigned int curr_seqno;
35447 char pfx_seq[64];
35448
35449@@ -509,7 +509,7 @@ static void __ghes_print_estatus(const char *pfx,
35450 else
35451 pfx = KERN_ERR;
35452 }
35453- curr_seqno = atomic_inc_return(&seqno);
35454+ curr_seqno = atomic_inc_return_unchecked(&seqno);
35455 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
35456 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
35457 pfx_seq, generic->header.source_id);
35458diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
35459index a83e3c6..c3d617f 100644
35460--- a/drivers/acpi/bgrt.c
35461+++ b/drivers/acpi/bgrt.c
35462@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
35463 if (!bgrt_image)
35464 return -ENODEV;
35465
35466- bin_attr_image.private = bgrt_image;
35467- bin_attr_image.size = bgrt_image_size;
35468+ pax_open_kernel();
35469+ *(void **)&bin_attr_image.private = bgrt_image;
35470+ *(size_t *)&bin_attr_image.size = bgrt_image_size;
35471+ pax_close_kernel();
35472
35473 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
35474 if (!bgrt_kobj)
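The cast-plus-window dance above is how this patch writes to structures its constify plugin has made read-only: pax_open_kernel()/pax_close_kernel() temporarily permit kernel writes (on x86 typically by toggling CR0.WP), and `*(void **)&` strips the injected const so the assignment compiles. A shape-only sketch; the window functions here are placeholders:

#include <stddef.h>

struct bin_attr { const void *private; size_t size; };	/* const-ified */

static void open_window(void)  { /* e.g. clear CR0.WP */ }
static void close_window(void) { /* restore CR0.WP */ }

static void set_image(struct bin_attr *attr, void *image, size_t len)
{
	open_window();
	*(void **)&attr->private = image;	/* cast away enforced const */
	*(size_t *)&attr->size = len;
	close_window();
}
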
35475diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
35476index 078c4f7..410e272 100644
35477--- a/drivers/acpi/blacklist.c
35478+++ b/drivers/acpi/blacklist.c
35479@@ -52,7 +52,7 @@ struct acpi_blacklist_item {
35480 u32 is_critical_error;
35481 };
35482
35483-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
35484+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
35485
35486 /*
35487 * POLICY: If *anything* doesn't work, put it on the blacklist.
35488@@ -164,7 +164,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
35489 return 0;
35490 }
35491
35492-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
35493+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
35494 {
35495 .callback = dmi_disable_osi_vista,
35496 .ident = "Fujitsu Siemens",
35497diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
35498index 12b62f2..dc2aac8 100644
35499--- a/drivers/acpi/custom_method.c
35500+++ b/drivers/acpi/custom_method.c
35501@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
35502 struct acpi_table_header table;
35503 acpi_status status;
35504
35505+#ifdef CONFIG_GRKERNSEC_KMEM
35506+ return -EPERM;
35507+#endif
35508+
35509 if (!(*ppos)) {
35510 /* parse the table header to get the table length */
35511 if (count <= sizeof(struct acpi_table_header))
35512diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
35513index 644516d..643937e 100644
35514--- a/drivers/acpi/processor_idle.c
35515+++ b/drivers/acpi/processor_idle.c
35516@@ -963,7 +963,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
35517 {
35518 int i, count = CPUIDLE_DRIVER_STATE_START;
35519 struct acpi_processor_cx *cx;
35520- struct cpuidle_state *state;
35521+ cpuidle_state_no_const *state;
35522 struct cpuidle_driver *drv = &acpi_idle_driver;
35523
35524 if (!pr->flags.power_setup_done)
35525diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
35526index 6dbc3ca..b8b59a0 100644
35527--- a/drivers/acpi/sysfs.c
35528+++ b/drivers/acpi/sysfs.c
35529@@ -425,11 +425,11 @@ static u32 num_counters;
35530 static struct attribute **all_attrs;
35531 static u32 acpi_gpe_count;
35532
35533-static struct attribute_group interrupt_stats_attr_group = {
35534+static attribute_group_no_const interrupt_stats_attr_group = {
35535 .name = "interrupts",
35536 };
35537
35538-static struct kobj_attribute *counter_attrs;
35539+static kobj_attribute_no_const *counter_attrs;
35540
35541 static void delete_gpe_attr_array(void)
35542 {
35543diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
35544index c482f8c..c832240 100644
35545--- a/drivers/ata/libahci.c
35546+++ b/drivers/ata/libahci.c
35547@@ -1239,7 +1239,7 @@ int ahci_kick_engine(struct ata_port *ap)
35548 }
35549 EXPORT_SYMBOL_GPL(ahci_kick_engine);
35550
35551-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
35552+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
35553 struct ata_taskfile *tf, int is_cmd, u16 flags,
35554 unsigned long timeout_msec)
35555 {
35556diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
35557index 1a3dbd1..dfc6e5c 100644
35558--- a/drivers/ata/libata-core.c
35559+++ b/drivers/ata/libata-core.c
35560@@ -98,7 +98,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
35561 static void ata_dev_xfermask(struct ata_device *dev);
35562 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
35563
35564-atomic_t ata_print_id = ATOMIC_INIT(0);
35565+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
35566
35567 struct ata_force_param {
35568 const char *name;
35569@@ -4850,7 +4850,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
35570 struct ata_port *ap;
35571 unsigned int tag;
35572
35573- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
35574+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
35575 ap = qc->ap;
35576
35577 qc->flags = 0;
35578@@ -4866,7 +4866,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
35579 struct ata_port *ap;
35580 struct ata_link *link;
35581
35582- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
35583+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
35584 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
35585 ap = qc->ap;
35586 link = qc->dev->link;
35587@@ -5985,6 +5985,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
35588 return;
35589
35590 spin_lock(&lock);
35591+ pax_open_kernel();
35592
35593 for (cur = ops->inherits; cur; cur = cur->inherits) {
35594 void **inherit = (void **)cur;
35595@@ -5998,8 +5999,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
35596 if (IS_ERR(*pp))
35597 *pp = NULL;
35598
35599- ops->inherits = NULL;
35600+ *(struct ata_port_operations **)&ops->inherits = NULL;
35601
35602+ pax_close_kernel();
35603 spin_unlock(&lock);
35604 }
35605
35606@@ -6192,7 +6194,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
35607
35608 /* give ports names and add SCSI hosts */
35609 for (i = 0; i < host->n_ports; i++) {
35610- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
35611+ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
35612 host->ports[i]->local_port_no = i + 1;
35613 }
35614
35615diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
35616index ef8567d..8bdbd03 100644
35617--- a/drivers/ata/libata-scsi.c
35618+++ b/drivers/ata/libata-scsi.c
35619@@ -4147,7 +4147,7 @@ int ata_sas_port_init(struct ata_port *ap)
35620
35621 if (rc)
35622 return rc;
35623- ap->print_id = atomic_inc_return(&ata_print_id);
35624+ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
35625 return 0;
35626 }
35627 EXPORT_SYMBOL_GPL(ata_sas_port_init);
35628diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
35629index 45b5ab3..98446b8 100644
35630--- a/drivers/ata/libata.h
35631+++ b/drivers/ata/libata.h
35632@@ -53,7 +53,7 @@ enum {
35633 ATA_DNXFER_QUIET = (1 << 31),
35634 };
35635
35636-extern atomic_t ata_print_id;
35637+extern atomic_unchecked_t ata_print_id;
35638 extern int atapi_passthru16;
35639 extern int libata_fua;
35640 extern int libata_noacpi;
35641diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
35642index 73492dd..ca2bff5 100644
35643--- a/drivers/ata/pata_arasan_cf.c
35644+++ b/drivers/ata/pata_arasan_cf.c
35645@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
35646 /* Handle platform specific quirks */
35647 if (quirk) {
35648 if (quirk & CF_BROKEN_PIO) {
35649- ap->ops->set_piomode = NULL;
35650+ pax_open_kernel();
35651+ *(void **)&ap->ops->set_piomode = NULL;
35652+ pax_close_kernel();
35653 ap->pio_mask = 0;
35654 }
35655 if (quirk & CF_BROKEN_MWDMA)
35656diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
35657index f9b983a..887b9d8 100644
35658--- a/drivers/atm/adummy.c
35659+++ b/drivers/atm/adummy.c
35660@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
35661 vcc->pop(vcc, skb);
35662 else
35663 dev_kfree_skb_any(skb);
35664- atomic_inc(&vcc->stats->tx);
35665+ atomic_inc_unchecked(&vcc->stats->tx);
35666
35667 return 0;
35668 }
35669diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
35670index 62a7607..cc4be104 100644
35671--- a/drivers/atm/ambassador.c
35672+++ b/drivers/atm/ambassador.c
35673@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
35674 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
35675
35676 // VC layer stats
35677- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
35678+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
35679
35680 // free the descriptor
35681 kfree (tx_descr);
35682@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
35683 dump_skb ("<<<", vc, skb);
35684
35685 // VC layer stats
35686- atomic_inc(&atm_vcc->stats->rx);
35687+ atomic_inc_unchecked(&atm_vcc->stats->rx);
35688 __net_timestamp(skb);
35689 // end of our responsibility
35690 atm_vcc->push (atm_vcc, skb);
35691@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
35692 } else {
35693 PRINTK (KERN_INFO, "dropped over-size frame");
35694 // should we count this?
35695- atomic_inc(&atm_vcc->stats->rx_drop);
35696+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
35697 }
35698
35699 } else {
35700@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
35701 }
35702
35703 if (check_area (skb->data, skb->len)) {
35704- atomic_inc(&atm_vcc->stats->tx_err);
35705+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
35706 return -ENOMEM; // ?
35707 }
35708
35709diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
35710index 0e3f8f9..765a7a5 100644
35711--- a/drivers/atm/atmtcp.c
35712+++ b/drivers/atm/atmtcp.c
35713@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
35714 if (vcc->pop) vcc->pop(vcc,skb);
35715 else dev_kfree_skb(skb);
35716 if (dev_data) return 0;
35717- atomic_inc(&vcc->stats->tx_err);
35718+ atomic_inc_unchecked(&vcc->stats->tx_err);
35719 return -ENOLINK;
35720 }
35721 size = skb->len+sizeof(struct atmtcp_hdr);
35722@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
35723 if (!new_skb) {
35724 if (vcc->pop) vcc->pop(vcc,skb);
35725 else dev_kfree_skb(skb);
35726- atomic_inc(&vcc->stats->tx_err);
35727+ atomic_inc_unchecked(&vcc->stats->tx_err);
35728 return -ENOBUFS;
35729 }
35730 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
35731@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
35732 if (vcc->pop) vcc->pop(vcc,skb);
35733 else dev_kfree_skb(skb);
35734 out_vcc->push(out_vcc,new_skb);
35735- atomic_inc(&vcc->stats->tx);
35736- atomic_inc(&out_vcc->stats->rx);
35737+ atomic_inc_unchecked(&vcc->stats->tx);
35738+ atomic_inc_unchecked(&out_vcc->stats->rx);
35739 return 0;
35740 }
35741
35742@@ -299,7 +299,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
35743 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
35744 read_unlock(&vcc_sklist_lock);
35745 if (!out_vcc) {
35746- atomic_inc(&vcc->stats->tx_err);
35747+ atomic_inc_unchecked(&vcc->stats->tx_err);
35748 goto done;
35749 }
35750 skb_pull(skb,sizeof(struct atmtcp_hdr));
35751@@ -311,8 +311,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
35752 __net_timestamp(new_skb);
35753 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
35754 out_vcc->push(out_vcc,new_skb);
35755- atomic_inc(&vcc->stats->tx);
35756- atomic_inc(&out_vcc->stats->rx);
35757+ atomic_inc_unchecked(&vcc->stats->tx);
35758+ atomic_inc_unchecked(&out_vcc->stats->rx);
35759 done:
35760 if (vcc->pop) vcc->pop(vcc,skb);
35761 else dev_kfree_skb(skb);
35762diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
35763index b1955ba..b179940 100644
35764--- a/drivers/atm/eni.c
35765+++ b/drivers/atm/eni.c
35766@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
35767 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
35768 vcc->dev->number);
35769 length = 0;
35770- atomic_inc(&vcc->stats->rx_err);
35771+ atomic_inc_unchecked(&vcc->stats->rx_err);
35772 }
35773 else {
35774 length = ATM_CELL_SIZE-1; /* no HEC */
35775@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
35776 size);
35777 }
35778 eff = length = 0;
35779- atomic_inc(&vcc->stats->rx_err);
35780+ atomic_inc_unchecked(&vcc->stats->rx_err);
35781 }
35782 else {
35783 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
35784@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
35785 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
35786 vcc->dev->number,vcc->vci,length,size << 2,descr);
35787 length = eff = 0;
35788- atomic_inc(&vcc->stats->rx_err);
35789+ atomic_inc_unchecked(&vcc->stats->rx_err);
35790 }
35791 }
35792 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
35793@@ -767,7 +767,7 @@ rx_dequeued++;
35794 vcc->push(vcc,skb);
35795 pushed++;
35796 }
35797- atomic_inc(&vcc->stats->rx);
35798+ atomic_inc_unchecked(&vcc->stats->rx);
35799 }
35800 wake_up(&eni_dev->rx_wait);
35801 }
35802@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
35803 PCI_DMA_TODEVICE);
35804 if (vcc->pop) vcc->pop(vcc,skb);
35805 else dev_kfree_skb_irq(skb);
35806- atomic_inc(&vcc->stats->tx);
35807+ atomic_inc_unchecked(&vcc->stats->tx);
35808 wake_up(&eni_dev->tx_wait);
35809 dma_complete++;
35810 }
35811diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
35812index b41c948..a002b17 100644
35813--- a/drivers/atm/firestream.c
35814+++ b/drivers/atm/firestream.c
35815@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
35816 }
35817 }
35818
35819- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
35820+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
35821
35822 fs_dprintk (FS_DEBUG_TXMEM, "i");
35823 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
35824@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
35825 #endif
35826 skb_put (skb, qe->p1 & 0xffff);
35827 ATM_SKB(skb)->vcc = atm_vcc;
35828- atomic_inc(&atm_vcc->stats->rx);
35829+ atomic_inc_unchecked(&atm_vcc->stats->rx);
35830 __net_timestamp(skb);
35831 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
35832 atm_vcc->push (atm_vcc, skb);
35833@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
35834 kfree (pe);
35835 }
35836 if (atm_vcc)
35837- atomic_inc(&atm_vcc->stats->rx_drop);
35838+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
35839 break;
35840 case 0x1f: /* Reassembly abort: no buffers. */
35841 /* Silently increment error counter. */
35842 if (atm_vcc)
35843- atomic_inc(&atm_vcc->stats->rx_drop);
35844+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
35845 break;
35846 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
35847 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
35848diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
35849index 204814e..cede831 100644
35850--- a/drivers/atm/fore200e.c
35851+++ b/drivers/atm/fore200e.c
35852@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
35853 #endif
35854 /* check error condition */
35855 if (*entry->status & STATUS_ERROR)
35856- atomic_inc(&vcc->stats->tx_err);
35857+ atomic_inc_unchecked(&vcc->stats->tx_err);
35858 else
35859- atomic_inc(&vcc->stats->tx);
35860+ atomic_inc_unchecked(&vcc->stats->tx);
35861 }
35862 }
35863
35864@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
35865 if (skb == NULL) {
35866 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
35867
35868- atomic_inc(&vcc->stats->rx_drop);
35869+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35870 return -ENOMEM;
35871 }
35872
35873@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
35874
35875 dev_kfree_skb_any(skb);
35876
35877- atomic_inc(&vcc->stats->rx_drop);
35878+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35879 return -ENOMEM;
35880 }
35881
35882 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
35883
35884 vcc->push(vcc, skb);
35885- atomic_inc(&vcc->stats->rx);
35886+ atomic_inc_unchecked(&vcc->stats->rx);
35887
35888 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
35889
35890@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
35891 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
35892 fore200e->atm_dev->number,
35893 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
35894- atomic_inc(&vcc->stats->rx_err);
35895+ atomic_inc_unchecked(&vcc->stats->rx_err);
35896 }
35897 }
35898
35899@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
35900 goto retry_here;
35901 }
35902
35903- atomic_inc(&vcc->stats->tx_err);
35904+ atomic_inc_unchecked(&vcc->stats->tx_err);
35905
35906 fore200e->tx_sat++;
35907 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
35908diff --git a/drivers/atm/he.c b/drivers/atm/he.c
35909index 8557adc..3fb5d55 100644
35910--- a/drivers/atm/he.c
35911+++ b/drivers/atm/he.c
35912@@ -1691,7 +1691,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
35913
35914 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
35915 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
35916- atomic_inc(&vcc->stats->rx_drop);
35917+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35918 goto return_host_buffers;
35919 }
35920
35921@@ -1718,7 +1718,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
35922 RBRQ_LEN_ERR(he_dev->rbrq_head)
35923 ? "LEN_ERR" : "",
35924 vcc->vpi, vcc->vci);
35925- atomic_inc(&vcc->stats->rx_err);
35926+ atomic_inc_unchecked(&vcc->stats->rx_err);
35927 goto return_host_buffers;
35928 }
35929
35930@@ -1770,7 +1770,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
35931 vcc->push(vcc, skb);
35932 spin_lock(&he_dev->global_lock);
35933
35934- atomic_inc(&vcc->stats->rx);
35935+ atomic_inc_unchecked(&vcc->stats->rx);
35936
35937 return_host_buffers:
35938 ++pdus_assembled;
35939@@ -2096,7 +2096,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
35940 tpd->vcc->pop(tpd->vcc, tpd->skb);
35941 else
35942 dev_kfree_skb_any(tpd->skb);
35943- atomic_inc(&tpd->vcc->stats->tx_err);
35944+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
35945 }
35946 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
35947 return;
35948@@ -2508,7 +2508,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35949 vcc->pop(vcc, skb);
35950 else
35951 dev_kfree_skb_any(skb);
35952- atomic_inc(&vcc->stats->tx_err);
35953+ atomic_inc_unchecked(&vcc->stats->tx_err);
35954 return -EINVAL;
35955 }
35956
35957@@ -2519,7 +2519,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35958 vcc->pop(vcc, skb);
35959 else
35960 dev_kfree_skb_any(skb);
35961- atomic_inc(&vcc->stats->tx_err);
35962+ atomic_inc_unchecked(&vcc->stats->tx_err);
35963 return -EINVAL;
35964 }
35965 #endif
35966@@ -2531,7 +2531,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35967 vcc->pop(vcc, skb);
35968 else
35969 dev_kfree_skb_any(skb);
35970- atomic_inc(&vcc->stats->tx_err);
35971+ atomic_inc_unchecked(&vcc->stats->tx_err);
35972 spin_unlock_irqrestore(&he_dev->global_lock, flags);
35973 return -ENOMEM;
35974 }
35975@@ -2573,7 +2573,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35976 vcc->pop(vcc, skb);
35977 else
35978 dev_kfree_skb_any(skb);
35979- atomic_inc(&vcc->stats->tx_err);
35980+ atomic_inc_unchecked(&vcc->stats->tx_err);
35981 spin_unlock_irqrestore(&he_dev->global_lock, flags);
35982 return -ENOMEM;
35983 }
35984@@ -2604,7 +2604,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35985 __enqueue_tpd(he_dev, tpd, cid);
35986 spin_unlock_irqrestore(&he_dev->global_lock, flags);
35987
35988- atomic_inc(&vcc->stats->tx);
35989+ atomic_inc_unchecked(&vcc->stats->tx);
35990
35991 return 0;
35992 }
35993diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
35994index 1dc0519..1aadaf7 100644
35995--- a/drivers/atm/horizon.c
35996+++ b/drivers/atm/horizon.c
35997@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
35998 {
35999 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
36000 // VC layer stats
36001- atomic_inc(&vcc->stats->rx);
36002+ atomic_inc_unchecked(&vcc->stats->rx);
36003 __net_timestamp(skb);
36004 // end of our responsibility
36005 vcc->push (vcc, skb);
36006@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
36007 dev->tx_iovec = NULL;
36008
36009 // VC layer stats
36010- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36011+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36012
36013 // free the skb
36014 hrz_kfree_skb (skb);
36015diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
36016index 1bdf104..9dc44b1 100644
36017--- a/drivers/atm/idt77252.c
36018+++ b/drivers/atm/idt77252.c
36019@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
36020 else
36021 dev_kfree_skb(skb);
36022
36023- atomic_inc(&vcc->stats->tx);
36024+ atomic_inc_unchecked(&vcc->stats->tx);
36025 }
36026
36027 atomic_dec(&scq->used);
36028@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36029 if ((sb = dev_alloc_skb(64)) == NULL) {
36030 printk("%s: Can't allocate buffers for aal0.\n",
36031 card->name);
36032- atomic_add(i, &vcc->stats->rx_drop);
36033+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
36034 break;
36035 }
36036 if (!atm_charge(vcc, sb->truesize)) {
36037 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
36038 card->name);
36039- atomic_add(i - 1, &vcc->stats->rx_drop);
36040+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
36041 dev_kfree_skb(sb);
36042 break;
36043 }
36044@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36045 ATM_SKB(sb)->vcc = vcc;
36046 __net_timestamp(sb);
36047 vcc->push(vcc, sb);
36048- atomic_inc(&vcc->stats->rx);
36049+ atomic_inc_unchecked(&vcc->stats->rx);
36050
36051 cell += ATM_CELL_PAYLOAD;
36052 }
36053@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36054 "(CDC: %08x)\n",
36055 card->name, len, rpp->len, readl(SAR_REG_CDC));
36056 recycle_rx_pool_skb(card, rpp);
36057- atomic_inc(&vcc->stats->rx_err);
36058+ atomic_inc_unchecked(&vcc->stats->rx_err);
36059 return;
36060 }
36061 if (stat & SAR_RSQE_CRC) {
36062 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
36063 recycle_rx_pool_skb(card, rpp);
36064- atomic_inc(&vcc->stats->rx_err);
36065+ atomic_inc_unchecked(&vcc->stats->rx_err);
36066 return;
36067 }
36068 if (skb_queue_len(&rpp->queue) > 1) {
36069@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36070 RXPRINTK("%s: Can't alloc RX skb.\n",
36071 card->name);
36072 recycle_rx_pool_skb(card, rpp);
36073- atomic_inc(&vcc->stats->rx_err);
36074+ atomic_inc_unchecked(&vcc->stats->rx_err);
36075 return;
36076 }
36077 if (!atm_charge(vcc, skb->truesize)) {
36078@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36079 __net_timestamp(skb);
36080
36081 vcc->push(vcc, skb);
36082- atomic_inc(&vcc->stats->rx);
36083+ atomic_inc_unchecked(&vcc->stats->rx);
36084
36085 return;
36086 }
36087@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36088 __net_timestamp(skb);
36089
36090 vcc->push(vcc, skb);
36091- atomic_inc(&vcc->stats->rx);
36092+ atomic_inc_unchecked(&vcc->stats->rx);
36093
36094 if (skb->truesize > SAR_FB_SIZE_3)
36095 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
36096@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
36097 if (vcc->qos.aal != ATM_AAL0) {
36098 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
36099 card->name, vpi, vci);
36100- atomic_inc(&vcc->stats->rx_drop);
36101+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36102 goto drop;
36103 }
36104
36105 if ((sb = dev_alloc_skb(64)) == NULL) {
36106 printk("%s: Can't allocate buffers for AAL0.\n",
36107 card->name);
36108- atomic_inc(&vcc->stats->rx_err);
36109+ atomic_inc_unchecked(&vcc->stats->rx_err);
36110 goto drop;
36111 }
36112
36113@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
36114 ATM_SKB(sb)->vcc = vcc;
36115 __net_timestamp(sb);
36116 vcc->push(vcc, sb);
36117- atomic_inc(&vcc->stats->rx);
36118+ atomic_inc_unchecked(&vcc->stats->rx);
36119
36120 drop:
36121 skb_pull(queue, 64);
36122@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
36123
36124 if (vc == NULL) {
36125 printk("%s: NULL connection in send().\n", card->name);
36126- atomic_inc(&vcc->stats->tx_err);
36127+ atomic_inc_unchecked(&vcc->stats->tx_err);
36128 dev_kfree_skb(skb);
36129 return -EINVAL;
36130 }
36131 if (!test_bit(VCF_TX, &vc->flags)) {
36132 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
36133- atomic_inc(&vcc->stats->tx_err);
36134+ atomic_inc_unchecked(&vcc->stats->tx_err);
36135 dev_kfree_skb(skb);
36136 return -EINVAL;
36137 }
36138@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
36139 break;
36140 default:
36141 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
36142- atomic_inc(&vcc->stats->tx_err);
36143+ atomic_inc_unchecked(&vcc->stats->tx_err);
36144 dev_kfree_skb(skb);
36145 return -EINVAL;
36146 }
36147
36148 if (skb_shinfo(skb)->nr_frags != 0) {
36149 printk("%s: No scatter-gather yet.\n", card->name);
36150- atomic_inc(&vcc->stats->tx_err);
36151+ atomic_inc_unchecked(&vcc->stats->tx_err);
36152 dev_kfree_skb(skb);
36153 return -EINVAL;
36154 }
36155@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
36156
36157 err = queue_skb(card, vc, skb, oam);
36158 if (err) {
36159- atomic_inc(&vcc->stats->tx_err);
36160+ atomic_inc_unchecked(&vcc->stats->tx_err);
36161 dev_kfree_skb(skb);
36162 return err;
36163 }
36164@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
36165 skb = dev_alloc_skb(64);
36166 if (!skb) {
36167 printk("%s: Out of memory in send_oam().\n", card->name);
36168- atomic_inc(&vcc->stats->tx_err);
36169+ atomic_inc_unchecked(&vcc->stats->tx_err);
36170 return -ENOMEM;
36171 }
36172 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
36173diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
36174index 4217f29..88f547a 100644
36175--- a/drivers/atm/iphase.c
36176+++ b/drivers/atm/iphase.c
36177@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
36178 status = (u_short) (buf_desc_ptr->desc_mode);
36179 if (status & (RX_CER | RX_PTE | RX_OFL))
36180 {
36181- atomic_inc(&vcc->stats->rx_err);
36182+ atomic_inc_unchecked(&vcc->stats->rx_err);
36183 IF_ERR(printk("IA: bad packet, dropping it");)
36184 if (status & RX_CER) {
36185 IF_ERR(printk(" cause: packet CRC error\n");)
36186@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
36187 len = dma_addr - buf_addr;
36188 if (len > iadev->rx_buf_sz) {
36189 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
36190- atomic_inc(&vcc->stats->rx_err);
36191+ atomic_inc_unchecked(&vcc->stats->rx_err);
36192 goto out_free_desc;
36193 }
36194
36195@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
36196 ia_vcc = INPH_IA_VCC(vcc);
36197 if (ia_vcc == NULL)
36198 {
36199- atomic_inc(&vcc->stats->rx_err);
36200+ atomic_inc_unchecked(&vcc->stats->rx_err);
36201 atm_return(vcc, skb->truesize);
36202 dev_kfree_skb_any(skb);
36203 goto INCR_DLE;
36204@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
36205 if ((length > iadev->rx_buf_sz) || (length >
36206 (skb->len - sizeof(struct cpcs_trailer))))
36207 {
36208- atomic_inc(&vcc->stats->rx_err);
36209+ atomic_inc_unchecked(&vcc->stats->rx_err);
36210 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
36211 length, skb->len);)
36212 atm_return(vcc, skb->truesize);
36213@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
36214
36215 IF_RX(printk("rx_dle_intr: skb push");)
36216 vcc->push(vcc,skb);
36217- atomic_inc(&vcc->stats->rx);
36218+ atomic_inc_unchecked(&vcc->stats->rx);
36219 iadev->rx_pkt_cnt++;
36220 }
36221 INCR_DLE:
36222@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
36223 {
36224 struct k_sonet_stats *stats;
36225 stats = &PRIV(_ia_dev[board])->sonet_stats;
36226- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
36227- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
36228- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
36229- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
36230- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
36231- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
36232- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
36233- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
36234- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
36235+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
36236+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
36237+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
36238+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
36239+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
36240+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
36241+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
36242+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
36243+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
36244 }
36245 ia_cmds.status = 0;
36246 break;
36247@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
36248 if ((desc == 0) || (desc > iadev->num_tx_desc))
36249 {
36250 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
36251- atomic_inc(&vcc->stats->tx);
36252+ atomic_inc_unchecked(&vcc->stats->tx);
36253 if (vcc->pop)
36254 vcc->pop(vcc, skb);
36255 else
36256@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
36257 ATM_DESC(skb) = vcc->vci;
36258 skb_queue_tail(&iadev->tx_dma_q, skb);
36259
36260- atomic_inc(&vcc->stats->tx);
36261+ atomic_inc_unchecked(&vcc->stats->tx);
36262 iadev->tx_pkt_cnt++;
36263 /* Increment transaction counter */
36264 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
36265
36266 #if 0
36267 /* add flow control logic */
36268- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
36269+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
36270 if (iavcc->vc_desc_cnt > 10) {
36271 vcc->tx_quota = vcc->tx_quota * 3 / 4;
36272 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
36273diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
36274index fa7d701..1e404c7 100644
36275--- a/drivers/atm/lanai.c
36276+++ b/drivers/atm/lanai.c
36277@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
36278 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
36279 lanai_endtx(lanai, lvcc);
36280 lanai_free_skb(lvcc->tx.atmvcc, skb);
36281- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
36282+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
36283 }
36284
36285 /* Try to fill the buffer - don't call unless there is backlog */
36286@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
36287 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
36288 __net_timestamp(skb);
36289 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
36290- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
36291+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
36292 out:
36293 lvcc->rx.buf.ptr = end;
36294 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
36295@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
36296 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
36297 "vcc %d\n", lanai->number, (unsigned int) s, vci);
36298 lanai->stats.service_rxnotaal5++;
36299- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
36300+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
36301 return 0;
36302 }
36303 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
36304@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
36305 int bytes;
36306 read_unlock(&vcc_sklist_lock);
36307 DPRINTK("got trashed rx pdu on vci %d\n", vci);
36308- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
36309+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
36310 lvcc->stats.x.aal5.service_trash++;
36311 bytes = (SERVICE_GET_END(s) * 16) -
36312 (((unsigned long) lvcc->rx.buf.ptr) -
36313@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
36314 }
36315 if (s & SERVICE_STREAM) {
36316 read_unlock(&vcc_sklist_lock);
36317- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
36318+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
36319 lvcc->stats.x.aal5.service_stream++;
36320 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
36321 "PDU on VCI %d!\n", lanai->number, vci);
36322@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
36323 return 0;
36324 }
36325 DPRINTK("got rx crc error on vci %d\n", vci);
36326- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
36327+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
36328 lvcc->stats.x.aal5.service_rxcrc++;
36329 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
36330 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
36331diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
36332index 5aca5f4..ce3a6b0 100644
36333--- a/drivers/atm/nicstar.c
36334+++ b/drivers/atm/nicstar.c
36335@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
36336 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
36337 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
36338 card->index);
36339- atomic_inc(&vcc->stats->tx_err);
36340+ atomic_inc_unchecked(&vcc->stats->tx_err);
36341 dev_kfree_skb_any(skb);
36342 return -EINVAL;
36343 }
36344@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
36345 if (!vc->tx) {
36346 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
36347 card->index);
36348- atomic_inc(&vcc->stats->tx_err);
36349+ atomic_inc_unchecked(&vcc->stats->tx_err);
36350 dev_kfree_skb_any(skb);
36351 return -EINVAL;
36352 }
36353@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
36354 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
36355 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
36356 card->index);
36357- atomic_inc(&vcc->stats->tx_err);
36358+ atomic_inc_unchecked(&vcc->stats->tx_err);
36359 dev_kfree_skb_any(skb);
36360 return -EINVAL;
36361 }
36362
36363 if (skb_shinfo(skb)->nr_frags != 0) {
36364 printk("nicstar%d: No scatter-gather yet.\n", card->index);
36365- atomic_inc(&vcc->stats->tx_err);
36366+ atomic_inc_unchecked(&vcc->stats->tx_err);
36367 dev_kfree_skb_any(skb);
36368 return -EINVAL;
36369 }
36370@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
36371 }
36372
36373 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
36374- atomic_inc(&vcc->stats->tx_err);
36375+ atomic_inc_unchecked(&vcc->stats->tx_err);
36376 dev_kfree_skb_any(skb);
36377 return -EIO;
36378 }
36379- atomic_inc(&vcc->stats->tx);
36380+ atomic_inc_unchecked(&vcc->stats->tx);
36381
36382 return 0;
36383 }
36384@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36385 printk
36386 ("nicstar%d: Can't allocate buffers for aal0.\n",
36387 card->index);
36388- atomic_add(i, &vcc->stats->rx_drop);
36389+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
36390 break;
36391 }
36392 if (!atm_charge(vcc, sb->truesize)) {
36393 RXPRINTK
36394 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
36395 card->index);
36396- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
36397+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
36398 dev_kfree_skb_any(sb);
36399 break;
36400 }
36401@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36402 ATM_SKB(sb)->vcc = vcc;
36403 __net_timestamp(sb);
36404 vcc->push(vcc, sb);
36405- atomic_inc(&vcc->stats->rx);
36406+ atomic_inc_unchecked(&vcc->stats->rx);
36407 cell += ATM_CELL_PAYLOAD;
36408 }
36409
36410@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36411 if (iovb == NULL) {
36412 printk("nicstar%d: Out of iovec buffers.\n",
36413 card->index);
36414- atomic_inc(&vcc->stats->rx_drop);
36415+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36416 recycle_rx_buf(card, skb);
36417 return;
36418 }
36419@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36420 small or large buffer itself. */
36421 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
36422 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
36423- atomic_inc(&vcc->stats->rx_err);
36424+ atomic_inc_unchecked(&vcc->stats->rx_err);
36425 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
36426 NS_MAX_IOVECS);
36427 NS_PRV_IOVCNT(iovb) = 0;
36428@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36429 ("nicstar%d: Expected a small buffer, and this is not one.\n",
36430 card->index);
36431 which_list(card, skb);
36432- atomic_inc(&vcc->stats->rx_err);
36433+ atomic_inc_unchecked(&vcc->stats->rx_err);
36434 recycle_rx_buf(card, skb);
36435 vc->rx_iov = NULL;
36436 recycle_iov_buf(card, iovb);
36437@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36438 ("nicstar%d: Expected a large buffer, and this is not one.\n",
36439 card->index);
36440 which_list(card, skb);
36441- atomic_inc(&vcc->stats->rx_err);
36442+ atomic_inc_unchecked(&vcc->stats->rx_err);
36443 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
36444 NS_PRV_IOVCNT(iovb));
36445 vc->rx_iov = NULL;
36446@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36447 printk(" - PDU size mismatch.\n");
36448 else
36449 printk(".\n");
36450- atomic_inc(&vcc->stats->rx_err);
36451+ atomic_inc_unchecked(&vcc->stats->rx_err);
36452 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
36453 NS_PRV_IOVCNT(iovb));
36454 vc->rx_iov = NULL;
36455@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36456 /* skb points to a small buffer */
36457 if (!atm_charge(vcc, skb->truesize)) {
36458 push_rxbufs(card, skb);
36459- atomic_inc(&vcc->stats->rx_drop);
36460+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36461 } else {
36462 skb_put(skb, len);
36463 dequeue_sm_buf(card, skb);
36464@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36465 ATM_SKB(skb)->vcc = vcc;
36466 __net_timestamp(skb);
36467 vcc->push(vcc, skb);
36468- atomic_inc(&vcc->stats->rx);
36469+ atomic_inc_unchecked(&vcc->stats->rx);
36470 }
36471 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
36472 struct sk_buff *sb;
36473@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36474 if (len <= NS_SMBUFSIZE) {
36475 if (!atm_charge(vcc, sb->truesize)) {
36476 push_rxbufs(card, sb);
36477- atomic_inc(&vcc->stats->rx_drop);
36478+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36479 } else {
36480 skb_put(sb, len);
36481 dequeue_sm_buf(card, sb);
36482@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36483 ATM_SKB(sb)->vcc = vcc;
36484 __net_timestamp(sb);
36485 vcc->push(vcc, sb);
36486- atomic_inc(&vcc->stats->rx);
36487+ atomic_inc_unchecked(&vcc->stats->rx);
36488 }
36489
36490 push_rxbufs(card, skb);
36491@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36492
36493 if (!atm_charge(vcc, skb->truesize)) {
36494 push_rxbufs(card, skb);
36495- atomic_inc(&vcc->stats->rx_drop);
36496+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36497 } else {
36498 dequeue_lg_buf(card, skb);
36499 #ifdef NS_USE_DESTRUCTORS
36500@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36501 ATM_SKB(skb)->vcc = vcc;
36502 __net_timestamp(skb);
36503 vcc->push(vcc, skb);
36504- atomic_inc(&vcc->stats->rx);
36505+ atomic_inc_unchecked(&vcc->stats->rx);
36506 }
36507
36508 push_rxbufs(card, sb);
36509@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36510 printk
36511 ("nicstar%d: Out of huge buffers.\n",
36512 card->index);
36513- atomic_inc(&vcc->stats->rx_drop);
36514+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36515 recycle_iovec_rx_bufs(card,
36516 (struct iovec *)
36517 iovb->data,
36518@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36519 card->hbpool.count++;
36520 } else
36521 dev_kfree_skb_any(hb);
36522- atomic_inc(&vcc->stats->rx_drop);
36523+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36524 } else {
36525 /* Copy the small buffer to the huge buffer */
36526 sb = (struct sk_buff *)iov->iov_base;
36527@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36528 #endif /* NS_USE_DESTRUCTORS */
36529 __net_timestamp(hb);
36530 vcc->push(vcc, hb);
36531- atomic_inc(&vcc->stats->rx);
36532+ atomic_inc_unchecked(&vcc->stats->rx);
36533 }
36534 }
36535
36536diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
36537index 32784d1..4a8434a 100644
36538--- a/drivers/atm/solos-pci.c
36539+++ b/drivers/atm/solos-pci.c
36540@@ -838,7 +838,7 @@ void solos_bh(unsigned long card_arg)
36541 }
36542 atm_charge(vcc, skb->truesize);
36543 vcc->push(vcc, skb);
36544- atomic_inc(&vcc->stats->rx);
36545+ atomic_inc_unchecked(&vcc->stats->rx);
36546 break;
36547
36548 case PKT_STATUS:
36549@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
36550 vcc = SKB_CB(oldskb)->vcc;
36551
36552 if (vcc) {
36553- atomic_inc(&vcc->stats->tx);
36554+ atomic_inc_unchecked(&vcc->stats->tx);
36555 solos_pop(vcc, oldskb);
36556 } else {
36557 dev_kfree_skb_irq(oldskb);
36558diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
36559index 0215934..ce9f5b1 100644
36560--- a/drivers/atm/suni.c
36561+++ b/drivers/atm/suni.c
36562@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
36563
36564
36565 #define ADD_LIMITED(s,v) \
36566- atomic_add((v),&stats->s); \
36567- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
36568+ atomic_add_unchecked((v),&stats->s); \
36569+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
36570
36571
36572 static void suni_hz(unsigned long from_timer)
36573diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
36574index 5120a96..e2572bd 100644
36575--- a/drivers/atm/uPD98402.c
36576+++ b/drivers/atm/uPD98402.c
36577@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
36578 struct sonet_stats tmp;
36579 int error = 0;
36580
36581- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
36582+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
36583 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
36584 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
36585 if (zero && !error) {
36586@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
36587
36588
36589 #define ADD_LIMITED(s,v) \
36590- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
36591- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
36592- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
36593+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
36594+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
36595+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
36596
36597
36598 static void stat_event(struct atm_dev *dev)
36599@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
36600 if (reason & uPD98402_INT_PFM) stat_event(dev);
36601 if (reason & uPD98402_INT_PCO) {
36602 (void) GET(PCOCR); /* clear interrupt cause */
36603- atomic_add(GET(HECCT),
36604+ atomic_add_unchecked(GET(HECCT),
36605 &PRIV(dev)->sonet_stats.uncorr_hcs);
36606 }
36607 if ((reason & uPD98402_INT_RFO) &&
36608@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
36609 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
36610 uPD98402_INT_LOS),PIMR); /* enable them */
36611 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
36612- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
36613- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
36614- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
36615+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
36616+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
36617+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
36618 return 0;
36619 }
36620
36621diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
36622index 969c3c2..9b72956 100644
36623--- a/drivers/atm/zatm.c
36624+++ b/drivers/atm/zatm.c
36625@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
36626 }
36627 if (!size) {
36628 dev_kfree_skb_irq(skb);
36629- if (vcc) atomic_inc(&vcc->stats->rx_err);
36630+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
36631 continue;
36632 }
36633 if (!atm_charge(vcc,skb->truesize)) {
36634@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
36635 skb->len = size;
36636 ATM_SKB(skb)->vcc = vcc;
36637 vcc->push(vcc,skb);
36638- atomic_inc(&vcc->stats->rx);
36639+ atomic_inc_unchecked(&vcc->stats->rx);
36640 }
36641 zout(pos & 0xffff,MTA(mbx));
36642 #if 0 /* probably a stupid idea */
36643@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
36644 skb_queue_head(&zatm_vcc->backlog,skb);
36645 break;
36646 }
36647- atomic_inc(&vcc->stats->tx);
36648+ atomic_inc_unchecked(&vcc->stats->tx);
36649 wake_up(&zatm_vcc->tx_wait);
36650 }
36651
36652diff --git a/drivers/base/bus.c b/drivers/base/bus.c
36653index 73f6c29..b0c0e13 100644
36654--- a/drivers/base/bus.c
36655+++ b/drivers/base/bus.c
36656@@ -1115,7 +1115,7 @@ int subsys_interface_register(struct subsys_interface *sif)
36657 return -EINVAL;
36658
36659 mutex_lock(&subsys->p->mutex);
36660- list_add_tail(&sif->node, &subsys->p->interfaces);
36661+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
36662 if (sif->add_dev) {
36663 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
36664 while ((dev = subsys_dev_iter_next(&iter)))
36665@@ -1140,7 +1140,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
36666 subsys = sif->subsys;
36667
36668 mutex_lock(&subsys->p->mutex);
36669- list_del_init(&sif->node);
36670+ pax_list_del_init((struct list_head *)&sif->node);
36671 if (sif->remove_dev) {
36672 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
36673 while ((dev = subsys_dev_iter_next(&iter)))
36674diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
36675index 0f38201..6c2b444 100644
36676--- a/drivers/base/devtmpfs.c
36677+++ b/drivers/base/devtmpfs.c
36678@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
36679 if (!thread)
36680 return 0;
36681
36682- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
36683+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
36684 if (err)
36685 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
36686 else
36687@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
36688 *err = sys_unshare(CLONE_NEWNS);
36689 if (*err)
36690 goto out;
36691- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
36692+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
36693 if (*err)
36694 goto out;
36695- sys_chdir("/.."); /* will traverse into overmounted root */
36696- sys_chroot(".");
36697+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
36698+ sys_chroot((char __force_user *)".");
36699 complete(&setup_done);
36700 while (1) {
36701 spin_lock(&req_lock);
36702diff --git a/drivers/base/node.c b/drivers/base/node.c
36703index bc9f43b..29703b8 100644
36704--- a/drivers/base/node.c
36705+++ b/drivers/base/node.c
36706@@ -620,7 +620,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
36707 struct node_attr {
36708 struct device_attribute attr;
36709 enum node_states state;
36710-};
36711+} __do_const;
36712
36713 static ssize_t show_node_state(struct device *dev,
36714 struct device_attribute *attr, char *buf)
36715diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
36716index bfb8955..42c9b9a 100644
36717--- a/drivers/base/power/domain.c
36718+++ b/drivers/base/power/domain.c
36719@@ -1850,7 +1850,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
36720 {
36721 struct cpuidle_driver *cpuidle_drv;
36722 struct gpd_cpu_data *cpu_data;
36723- struct cpuidle_state *idle_state;
36724+ cpuidle_state_no_const *idle_state;
36725 int ret = 0;
36726
36727 if (IS_ERR_OR_NULL(genpd) || state < 0)
36728@@ -1918,7 +1918,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
36729 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
36730 {
36731 struct gpd_cpu_data *cpu_data;
36732- struct cpuidle_state *idle_state;
36733+ cpuidle_state_no_const *idle_state;
36734 int ret = 0;
36735
36736 if (IS_ERR_OR_NULL(genpd))
36737diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
36738index 03e089a..0e9560c 100644
36739--- a/drivers/base/power/sysfs.c
36740+++ b/drivers/base/power/sysfs.c
36741@@ -185,7 +185,7 @@ static ssize_t rtpm_status_show(struct device *dev,
36742 return -EIO;
36743 }
36744 }
36745- return sprintf(buf, p);
36746+ return sprintf(buf, "%s", p);
36747 }
36748
36749 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
36750diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
36751index 2d56f41..8830f19 100644
36752--- a/drivers/base/power/wakeup.c
36753+++ b/drivers/base/power/wakeup.c
36754@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
36755 * They need to be modified together atomically, so it's better to use one
36756 * atomic variable to hold them both.
36757 */
36758-static atomic_t combined_event_count = ATOMIC_INIT(0);
36759+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
36760
36761 #define IN_PROGRESS_BITS (sizeof(int) * 4)
36762 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
36763
36764 static void split_counters(unsigned int *cnt, unsigned int *inpr)
36765 {
36766- unsigned int comb = atomic_read(&combined_event_count);
36767+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
36768
36769 *cnt = (comb >> IN_PROGRESS_BITS);
36770 *inpr = comb & MAX_IN_PROGRESS;
36771@@ -395,7 +395,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
36772 ws->start_prevent_time = ws->last_time;
36773
36774 /* Increment the counter of events in progress. */
36775- cec = atomic_inc_return(&combined_event_count);
36776+ cec = atomic_inc_return_unchecked(&combined_event_count);
36777
36778 trace_wakeup_source_activate(ws->name, cec);
36779 }
36780@@ -521,7 +521,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
36781 * Increment the counter of registered wakeup events and decrement the
36782 * couter of wakeup events in progress simultaneously.
36783 */
36784- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
36785+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
36786 trace_wakeup_source_deactivate(ws->name, cec);
36787
36788 split_counters(&cnt, &inpr);
36789diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
36790index e8d11b6..7b1b36f 100644
36791--- a/drivers/base/syscore.c
36792+++ b/drivers/base/syscore.c
36793@@ -21,7 +21,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
36794 void register_syscore_ops(struct syscore_ops *ops)
36795 {
36796 mutex_lock(&syscore_ops_lock);
36797- list_add_tail(&ops->node, &syscore_ops_list);
36798+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
36799 mutex_unlock(&syscore_ops_lock);
36800 }
36801 EXPORT_SYMBOL_GPL(register_syscore_ops);
36802@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
36803 void unregister_syscore_ops(struct syscore_ops *ops)
36804 {
36805 mutex_lock(&syscore_ops_lock);
36806- list_del(&ops->node);
36807+ pax_list_del((struct list_head *)&ops->node);
36808 mutex_unlock(&syscore_ops_lock);
36809 }
36810 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
36811diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
36812index b35fc4f..c902870 100644
36813--- a/drivers/block/cciss.c
36814+++ b/drivers/block/cciss.c
36815@@ -3011,7 +3011,7 @@ static void start_io(ctlr_info_t *h)
36816 while (!list_empty(&h->reqQ)) {
36817 c = list_entry(h->reqQ.next, CommandList_struct, list);
36818 /* can't do anything if fifo is full */
36819- if ((h->access.fifo_full(h))) {
36820+ if ((h->access->fifo_full(h))) {
36821 dev_warn(&h->pdev->dev, "fifo full\n");
36822 break;
36823 }
36824@@ -3021,7 +3021,7 @@ static void start_io(ctlr_info_t *h)
36825 h->Qdepth--;
36826
36827 /* Tell the controller execute command */
36828- h->access.submit_command(h, c);
36829+ h->access->submit_command(h, c);
36830
36831 /* Put job onto the completed Q */
36832 addQ(&h->cmpQ, c);
36833@@ -3447,17 +3447,17 @@ startio:
36834
36835 static inline unsigned long get_next_completion(ctlr_info_t *h)
36836 {
36837- return h->access.command_completed(h);
36838+ return h->access->command_completed(h);
36839 }
36840
36841 static inline int interrupt_pending(ctlr_info_t *h)
36842 {
36843- return h->access.intr_pending(h);
36844+ return h->access->intr_pending(h);
36845 }
36846
36847 static inline long interrupt_not_for_us(ctlr_info_t *h)
36848 {
36849- return ((h->access.intr_pending(h) == 0) ||
36850+ return ((h->access->intr_pending(h) == 0) ||
36851 (h->interrupts_enabled == 0));
36852 }
36853
36854@@ -3490,7 +3490,7 @@ static inline u32 next_command(ctlr_info_t *h)
36855 u32 a;
36856
36857 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
36858- return h->access.command_completed(h);
36859+ return h->access->command_completed(h);
36860
36861 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
36862 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
36863@@ -4047,7 +4047,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
36864 trans_support & CFGTBL_Trans_use_short_tags);
36865
36866 /* Change the access methods to the performant access methods */
36867- h->access = SA5_performant_access;
36868+ h->access = &SA5_performant_access;
36869 h->transMethod = CFGTBL_Trans_Performant;
36870
36871 return;
36872@@ -4327,7 +4327,7 @@ static int cciss_pci_init(ctlr_info_t *h)
36873 if (prod_index < 0)
36874 return -ENODEV;
36875 h->product_name = products[prod_index].product_name;
36876- h->access = *(products[prod_index].access);
36877+ h->access = products[prod_index].access;
36878
36879 if (cciss_board_disabled(h)) {
36880 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
36881@@ -5059,7 +5059,7 @@ reinit_after_soft_reset:
36882 }
36883
36884 /* make sure the board interrupts are off */
36885- h->access.set_intr_mask(h, CCISS_INTR_OFF);
36886+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
36887 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
36888 if (rc)
36889 goto clean2;
36890@@ -5109,7 +5109,7 @@ reinit_after_soft_reset:
36891 * fake ones to scoop up any residual completions.
36892 */
36893 spin_lock_irqsave(&h->lock, flags);
36894- h->access.set_intr_mask(h, CCISS_INTR_OFF);
36895+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
36896 spin_unlock_irqrestore(&h->lock, flags);
36897 free_irq(h->intr[h->intr_mode], h);
36898 rc = cciss_request_irq(h, cciss_msix_discard_completions,
36899@@ -5129,9 +5129,9 @@ reinit_after_soft_reset:
36900 dev_info(&h->pdev->dev, "Board READY.\n");
36901 dev_info(&h->pdev->dev,
36902 "Waiting for stale completions to drain.\n");
36903- h->access.set_intr_mask(h, CCISS_INTR_ON);
36904+ h->access->set_intr_mask(h, CCISS_INTR_ON);
36905 msleep(10000);
36906- h->access.set_intr_mask(h, CCISS_INTR_OFF);
36907+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
36908
36909 rc = controller_reset_failed(h->cfgtable);
36910 if (rc)
36911@@ -5154,7 +5154,7 @@ reinit_after_soft_reset:
36912 cciss_scsi_setup(h);
36913
36914 /* Turn the interrupts on so we can service requests */
36915- h->access.set_intr_mask(h, CCISS_INTR_ON);
36916+ h->access->set_intr_mask(h, CCISS_INTR_ON);
36917
36918 /* Get the firmware version */
36919 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
36920@@ -5226,7 +5226,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
36921 kfree(flush_buf);
36922 if (return_code != IO_OK)
36923 dev_warn(&h->pdev->dev, "Error flushing cache\n");
36924- h->access.set_intr_mask(h, CCISS_INTR_OFF);
36925+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
36926 free_irq(h->intr[h->intr_mode], h);
36927 }
36928
36929diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
36930index 7fda30e..2f27946 100644
36931--- a/drivers/block/cciss.h
36932+++ b/drivers/block/cciss.h
36933@@ -101,7 +101,7 @@ struct ctlr_info
36934 /* information about each logical volume */
36935 drive_info_struct *drv[CISS_MAX_LUN];
36936
36937- struct access_method access;
36938+ struct access_method *access;
36939
36940 /* queue and queue Info */
36941 struct list_head reqQ;
36942@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
36943 }
36944
36945 static struct access_method SA5_access = {
36946- SA5_submit_command,
36947- SA5_intr_mask,
36948- SA5_fifo_full,
36949- SA5_intr_pending,
36950- SA5_completed,
36951+ .submit_command = SA5_submit_command,
36952+ .set_intr_mask = SA5_intr_mask,
36953+ .fifo_full = SA5_fifo_full,
36954+ .intr_pending = SA5_intr_pending,
36955+ .command_completed = SA5_completed,
36956 };
36957
36958 static struct access_method SA5B_access = {
36959- SA5_submit_command,
36960- SA5B_intr_mask,
36961- SA5_fifo_full,
36962- SA5B_intr_pending,
36963- SA5_completed,
36964+ .submit_command = SA5_submit_command,
36965+ .set_intr_mask = SA5B_intr_mask,
36966+ .fifo_full = SA5_fifo_full,
36967+ .intr_pending = SA5B_intr_pending,
36968+ .command_completed = SA5_completed,
36969 };
36970
36971 static struct access_method SA5_performant_access = {
36972- SA5_submit_command,
36973- SA5_performant_intr_mask,
36974- SA5_fifo_full,
36975- SA5_performant_intr_pending,
36976- SA5_performant_completed,
36977+ .submit_command = SA5_submit_command,
36978+ .set_intr_mask = SA5_performant_intr_mask,
36979+ .fifo_full = SA5_fifo_full,
36980+ .intr_pending = SA5_performant_intr_pending,
36981+ .command_completed = SA5_performant_completed,
36982 };
36983
36984 struct board_type {
36985diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
36986index 2b94403..fd6ad1f 100644
36987--- a/drivers/block/cpqarray.c
36988+++ b/drivers/block/cpqarray.c
36989@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
36990 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
36991 goto Enomem4;
36992 }
36993- hba[i]->access.set_intr_mask(hba[i], 0);
36994+ hba[i]->access->set_intr_mask(hba[i], 0);
36995 if (request_irq(hba[i]->intr, do_ida_intr,
36996 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
36997 {
36998@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
36999 add_timer(&hba[i]->timer);
37000
37001 /* Enable IRQ now that spinlock and rate limit timer are set up */
37002- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
37003+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
37004
37005 for(j=0; j<NWD; j++) {
37006 struct gendisk *disk = ida_gendisk[i][j];
37007@@ -694,7 +694,7 @@ DBGINFO(
37008 for(i=0; i<NR_PRODUCTS; i++) {
37009 if (board_id == products[i].board_id) {
37010 c->product_name = products[i].product_name;
37011- c->access = *(products[i].access);
37012+ c->access = products[i].access;
37013 break;
37014 }
37015 }
37016@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
37017 hba[ctlr]->intr = intr;
37018 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
37019 hba[ctlr]->product_name = products[j].product_name;
37020- hba[ctlr]->access = *(products[j].access);
37021+ hba[ctlr]->access = products[j].access;
37022 hba[ctlr]->ctlr = ctlr;
37023 hba[ctlr]->board_id = board_id;
37024 hba[ctlr]->pci_dev = NULL; /* not PCI */
37025@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
37026
37027 while((c = h->reqQ) != NULL) {
37028 /* Can't do anything if we're busy */
37029- if (h->access.fifo_full(h) == 0)
37030+ if (h->access->fifo_full(h) == 0)
37031 return;
37032
37033 /* Get the first entry from the request Q */
37034@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
37035 h->Qdepth--;
37036
37037 /* Tell the controller to do our bidding */
37038- h->access.submit_command(h, c);
37039+ h->access->submit_command(h, c);
37040
37041 /* Get onto the completion Q */
37042 addQ(&h->cmpQ, c);
37043@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
37044 unsigned long flags;
37045 __u32 a,a1;
37046
37047- istat = h->access.intr_pending(h);
37048+ istat = h->access->intr_pending(h);
37049 /* Is this interrupt for us? */
37050 if (istat == 0)
37051 return IRQ_NONE;
37052@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
37053 */
37054 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
37055 if (istat & FIFO_NOT_EMPTY) {
37056- while((a = h->access.command_completed(h))) {
37057+ while((a = h->access->command_completed(h))) {
37058 a1 = a; a &= ~3;
37059 if ((c = h->cmpQ) == NULL)
37060 {
37061@@ -1448,11 +1448,11 @@ static int sendcmd(
37062 /*
37063 * Disable interrupt
37064 */
37065- info_p->access.set_intr_mask(info_p, 0);
37066+ info_p->access->set_intr_mask(info_p, 0);
37067 /* Make sure there is room in the command FIFO */
37068 /* Actually it should be completely empty at this time. */
37069 for (i = 200000; i > 0; i--) {
37070- temp = info_p->access.fifo_full(info_p);
37071+ temp = info_p->access->fifo_full(info_p);
37072 if (temp != 0) {
37073 break;
37074 }
37075@@ -1465,7 +1465,7 @@ DBG(
37076 /*
37077 * Send the cmd
37078 */
37079- info_p->access.submit_command(info_p, c);
37080+ info_p->access->submit_command(info_p, c);
37081 complete = pollcomplete(ctlr);
37082
37083 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
37084@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
37085 * we check the new geometry. Then turn interrupts back on when
37086 * we're done.
37087 */
37088- host->access.set_intr_mask(host, 0);
37089+ host->access->set_intr_mask(host, 0);
37090 getgeometry(ctlr);
37091- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
37092+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
37093
37094 for(i=0; i<NWD; i++) {
37095 struct gendisk *disk = ida_gendisk[ctlr][i];
37096@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
37097 /* Wait (up to 2 seconds) for a command to complete */
37098
37099 for (i = 200000; i > 0; i--) {
37100- done = hba[ctlr]->access.command_completed(hba[ctlr]);
37101+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
37102 if (done == 0) {
37103 udelay(10); /* a short fixed delay */
37104 } else
37105diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
37106index be73e9d..7fbf140 100644
37107--- a/drivers/block/cpqarray.h
37108+++ b/drivers/block/cpqarray.h
37109@@ -99,7 +99,7 @@ struct ctlr_info {
37110 drv_info_t drv[NWD];
37111 struct proc_dir_entry *proc;
37112
37113- struct access_method access;
37114+ struct access_method *access;
37115
37116 cmdlist_t *reqQ;
37117 cmdlist_t *cmpQ;
37118diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
37119index 0e06f0c..c47b81d 100644
37120--- a/drivers/block/drbd/drbd_int.h
37121+++ b/drivers/block/drbd/drbd_int.h
37122@@ -582,7 +582,7 @@ struct drbd_epoch {
37123 struct drbd_tconn *tconn;
37124 struct list_head list;
37125 unsigned int barrier_nr;
37126- atomic_t epoch_size; /* increased on every request added. */
37127+ atomic_unchecked_t epoch_size; /* increased on every request added. */
37128 atomic_t active; /* increased on every req. added, and dec on every finished. */
37129 unsigned long flags;
37130 };
37131@@ -1022,7 +1022,7 @@ struct drbd_conf {
37132 unsigned int al_tr_number;
37133 int al_tr_cycle;
37134 wait_queue_head_t seq_wait;
37135- atomic_t packet_seq;
37136+ atomic_unchecked_t packet_seq;
37137 unsigned int peer_seq;
37138 spinlock_t peer_seq_lock;
37139 unsigned int minor;
37140@@ -1573,7 +1573,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
37141 char __user *uoptval;
37142 int err;
37143
37144- uoptval = (char __user __force *)optval;
37145+ uoptval = (char __force_user *)optval;
37146
37147 set_fs(KERNEL_DS);
37148 if (level == SOL_SOCKET)
37149diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
37150index 89c497c..9c736ae 100644
37151--- a/drivers/block/drbd/drbd_interval.c
37152+++ b/drivers/block/drbd/drbd_interval.c
37153@@ -67,9 +67,9 @@ static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new)
37154 }
37155
37156 static const struct rb_augment_callbacks augment_callbacks = {
37157- augment_propagate,
37158- augment_copy,
37159- augment_rotate,
37160+ .propagate = augment_propagate,
37161+ .copy = augment_copy,
37162+ .rotate = augment_rotate,
37163 };
37164
37165 /**
37166diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
37167index 9e3818b..7b64c92 100644
37168--- a/drivers/block/drbd/drbd_main.c
37169+++ b/drivers/block/drbd/drbd_main.c
37170@@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
37171 p->sector = sector;
37172 p->block_id = block_id;
37173 p->blksize = blksize;
37174- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
37175+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
37176 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
37177 }
37178
37179@@ -1619,7 +1619,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
37180 return -EIO;
37181 p->sector = cpu_to_be64(req->i.sector);
37182 p->block_id = (unsigned long)req;
37183- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
37184+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
37185 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
37186 if (mdev->state.conn >= C_SYNC_SOURCE &&
37187 mdev->state.conn <= C_PAUSED_SYNC_T)
37188@@ -2574,8 +2574,8 @@ void conn_destroy(struct kref *kref)
37189 {
37190 struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
37191
37192- if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
37193- conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
37194+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size) != 0)
37195+ conn_err(tconn, "epoch_size:%d\n", atomic_read_unchecked(&tconn->current_epoch->epoch_size));
37196 kfree(tconn->current_epoch);
37197
37198 idr_destroy(&tconn->volumes);
37199diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
37200index c706d50..5e1b472 100644
37201--- a/drivers/block/drbd/drbd_nl.c
37202+++ b/drivers/block/drbd/drbd_nl.c
37203@@ -3440,7 +3440,7 @@ out:
37204
37205 void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
37206 {
37207- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
37208+ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
37209 struct sk_buff *msg;
37210 struct drbd_genlmsghdr *d_out;
37211 unsigned seq;
37212@@ -3453,7 +3453,7 @@ void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
37213 return;
37214 }
37215
37216- seq = atomic_inc_return(&drbd_genl_seq);
37217+ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
37218 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
37219 if (!msg)
37220 goto failed;
37221diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
37222index 6fa6673..b7f97e9 100644
37223--- a/drivers/block/drbd/drbd_receiver.c
37224+++ b/drivers/block/drbd/drbd_receiver.c
37225@@ -834,7 +834,7 @@ int drbd_connected(struct drbd_conf *mdev)
37226 {
37227 int err;
37228
37229- atomic_set(&mdev->packet_seq, 0);
37230+ atomic_set_unchecked(&mdev->packet_seq, 0);
37231 mdev->peer_seq = 0;
37232
37233 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
37234@@ -1193,7 +1193,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
37235 do {
37236 next_epoch = NULL;
37237
37238- epoch_size = atomic_read(&epoch->epoch_size);
37239+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
37240
37241 switch (ev & ~EV_CLEANUP) {
37242 case EV_PUT:
37243@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
37244 rv = FE_DESTROYED;
37245 } else {
37246 epoch->flags = 0;
37247- atomic_set(&epoch->epoch_size, 0);
37248+ atomic_set_unchecked(&epoch->epoch_size, 0);
37249 /* atomic_set(&epoch->active, 0); is already zero */
37250 if (rv == FE_STILL_LIVE)
37251 rv = FE_RECYCLED;
37252@@ -1451,7 +1451,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
37253 conn_wait_active_ee_empty(tconn);
37254 drbd_flush(tconn);
37255
37256- if (atomic_read(&tconn->current_epoch->epoch_size)) {
37257+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
37258 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
37259 if (epoch)
37260 break;
37261@@ -1464,11 +1464,11 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
37262 }
37263
37264 epoch->flags = 0;
37265- atomic_set(&epoch->epoch_size, 0);
37266+ atomic_set_unchecked(&epoch->epoch_size, 0);
37267 atomic_set(&epoch->active, 0);
37268
37269 spin_lock(&tconn->epoch_lock);
37270- if (atomic_read(&tconn->current_epoch->epoch_size)) {
37271+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
37272 list_add(&epoch->list, &tconn->current_epoch->list);
37273 tconn->current_epoch = epoch;
37274 tconn->epochs++;
37275@@ -2163,7 +2163,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
37276
37277 err = wait_for_and_update_peer_seq(mdev, peer_seq);
37278 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
37279- atomic_inc(&tconn->current_epoch->epoch_size);
37280+ atomic_inc_unchecked(&tconn->current_epoch->epoch_size);
37281 err2 = drbd_drain_block(mdev, pi->size);
37282 if (!err)
37283 err = err2;
37284@@ -2197,7 +2197,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
37285
37286 spin_lock(&tconn->epoch_lock);
37287 peer_req->epoch = tconn->current_epoch;
37288- atomic_inc(&peer_req->epoch->epoch_size);
37289+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
37290 atomic_inc(&peer_req->epoch->active);
37291 spin_unlock(&tconn->epoch_lock);
37292
37293@@ -4344,7 +4344,7 @@ struct data_cmd {
37294 int expect_payload;
37295 size_t pkt_size;
37296 int (*fn)(struct drbd_tconn *, struct packet_info *);
37297-};
37298+} __do_const;
37299
37300 static struct data_cmd drbd_cmd_handler[] = {
37301 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
37302@@ -4464,7 +4464,7 @@ static void conn_disconnect(struct drbd_tconn *tconn)
37303 if (!list_empty(&tconn->current_epoch->list))
37304 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
37305 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
37306- atomic_set(&tconn->current_epoch->epoch_size, 0);
37307+ atomic_set_unchecked(&tconn->current_epoch->epoch_size, 0);
37308 tconn->send.seen_any_write_yet = false;
37309
37310 conn_info(tconn, "Connection closed\n");
37311@@ -5220,7 +5220,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
37312 struct asender_cmd {
37313 size_t pkt_size;
37314 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
37315-};
37316+} __do_const;
37317
37318 static struct asender_cmd asender_tbl[] = {
37319 [P_PING] = { 0, got_Ping },
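
`__do_const` on `struct data_cmd` and `struct asender_cmd` tells the grsecurity constify gcc plugin to treat every instance of these function-pointer structs as const, so dispatch tables like `drbd_cmd_handler[]` and `asender_tbl[]` end up in read-only memory where their handlers cannot be retargeted at runtime. With plain C the effect is roughly:

    struct cmd { size_t pkt_size; int (*fn)(void *); };   /* __do_const in the patch */
    static int got_ping(void *p) { return 0; }

    static const struct cmd table[] = {    /* placed in .rodata */
        { 0, got_ping },
    };
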
37320diff --git a/drivers/block/loop.c b/drivers/block/loop.c
37321index c8dac73..1800093 100644
37322--- a/drivers/block/loop.c
37323+++ b/drivers/block/loop.c
37324@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
37325
37326 file_start_write(file);
37327 set_fs(get_ds());
37328- bw = file->f_op->write(file, buf, len, &pos);
37329+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
37330 set_fs(old_fs);
37331 file_end_write(file);
37332 if (likely(bw == len))
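
The loop driver writes a kernel buffer through `file->f_op->write()` while running under `set_fs(get_ds())`, so the user-pointer check is deliberately bypassed for this call. `__force_user` is grsecurity's spelling of `__force __user`: it keeps sparse's address-space analysis strict everywhere else while documenting this one sanctioned crossing. The idiom, as used in the hunk above:

    mm_segment_t old_fs = get_fs();

    set_fs(get_ds());    /* user accessors temporarily accept kernel addresses */
    bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
    set_fs(old_fs);
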
37333diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
37334index 83a598e..2de5ce3 100644
37335--- a/drivers/block/null_blk.c
37336+++ b/drivers/block/null_blk.c
37337@@ -407,14 +407,24 @@ static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
37338 return 0;
37339 }
37340
37341-static struct blk_mq_ops null_mq_ops = {
37342- .queue_rq = null_queue_rq,
37343- .map_queue = blk_mq_map_queue,
37344+static struct blk_mq_ops null_mq_single_ops = {
37345+ .queue_rq = null_queue_rq,
37346+ .map_queue = blk_mq_map_queue,
37347 .init_hctx = null_init_hctx,
37348+ .alloc_hctx = blk_mq_alloc_single_hw_queue,
37349+ .free_hctx = blk_mq_free_single_hw_queue,
37350+};
37351+
37352+static struct blk_mq_ops null_mq_per_node_ops = {
37353+ .queue_rq = null_queue_rq,
37354+ .map_queue = blk_mq_map_queue,
37355+ .init_hctx = null_init_hctx,
37356+ .alloc_hctx = null_alloc_hctx,
37357+ .free_hctx = null_free_hctx,
37358 };
37359
37360 static struct blk_mq_reg null_mq_reg = {
37361- .ops = &null_mq_ops,
37362+ .ops = &null_mq_single_ops,
37363 .queue_depth = 64,
37364 .cmd_size = sizeof(struct nullb_cmd),
37365 .flags = BLK_MQ_F_SHOULD_MERGE,
37366@@ -545,13 +555,8 @@ static int null_add_dev(void)
37367 null_mq_reg.queue_depth = hw_queue_depth;
37368 null_mq_reg.nr_hw_queues = submit_queues;
37369
37370- if (use_per_node_hctx) {
37371- null_mq_reg.ops->alloc_hctx = null_alloc_hctx;
37372- null_mq_reg.ops->free_hctx = null_free_hctx;
37373- } else {
37374- null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue;
37375- null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue;
37376- }
37377+ if (use_per_node_hctx)
37378+ null_mq_reg.ops = &null_mq_per_node_ops;
37379
37380 nullb->q = blk_mq_init_queue(&null_mq_reg, nullb);
37381 } else if (queue_mode == NULL_Q_BIO) {
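
Rather than patching `alloc_hctx`/`free_hctx` into one shared `blk_mq_ops` at init time, which forces the table to stay writable forever, null_blk now defines both fully-populated variants up front and merely selects a pointer. The shape of the refactoring, with hypothetical names:

    static const struct ops single_ops   = { .alloc = alloc_single, .free = free_single };
    static const struct ops per_node_ops = { .alloc = alloc_node,   .free = free_node };

    reg.ops = use_per_node ? &per_node_ops : &single_ops;   /* select a const table, don't mutate one */
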
37382diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
37383index ff8668c..f62167a 100644
37384--- a/drivers/block/pktcdvd.c
37385+++ b/drivers/block/pktcdvd.c
37386@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
37387
37388 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
37389 {
37390- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
37391+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
37392 }
37393
37394 /*
37395@@ -1883,7 +1883,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
37396 return -EROFS;
37397 }
37398 pd->settings.fp = ti.fp;
37399- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
37400+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
37401
37402 if (ti.nwa_v) {
37403 pd->nwa = be32_to_cpu(ti.next_writable);
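
The pktcdvd mask is built from `pd->settings.size - 1`; with a plain `1` the subtraction happens in 32-bit arithmetic and only afterwards is the result widened to `sector_t`. Writing `1UL` performs the subtraction in `unsigned long`, so the full 64-bit mask is formed without a narrow intermediate; this widened form is also what the size_overflow plugin likely expects for arithmetic feeding a `sector_t`. For sane (nonzero, power-of-two) sizes the value is identical:

    sector_t narrow = ~(sector_t)(size - 1);    /* 32-bit subtract, then widen */
    sector_t wide   = ~(sector_t)(size - 1UL);  /* subtract already in 64-bit  */
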
37404diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
37405index e5565fb..71be10b4 100644
37406--- a/drivers/block/smart1,2.h
37407+++ b/drivers/block/smart1,2.h
37408@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
37409 }
37410
37411 static struct access_method smart4_access = {
37412- smart4_submit_command,
37413- smart4_intr_mask,
37414- smart4_fifo_full,
37415- smart4_intr_pending,
37416- smart4_completed,
37417+ .submit_command = smart4_submit_command,
37418+ .set_intr_mask = smart4_intr_mask,
37419+ .fifo_full = smart4_fifo_full,
37420+ .intr_pending = smart4_intr_pending,
37421+ .command_completed = smart4_completed,
37422 };
37423
37424 /*
37425@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
37426 }
37427
37428 static struct access_method smart2_access = {
37429- smart2_submit_command,
37430- smart2_intr_mask,
37431- smart2_fifo_full,
37432- smart2_intr_pending,
37433- smart2_completed,
37434+ .submit_command = smart2_submit_command,
37435+ .set_intr_mask = smart2_intr_mask,
37436+ .fifo_full = smart2_fifo_full,
37437+ .intr_pending = smart2_intr_pending,
37438+ .command_completed = smart2_completed,
37439 };
37440
37441 /*
37442@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
37443 }
37444
37445 static struct access_method smart2e_access = {
37446- smart2e_submit_command,
37447- smart2e_intr_mask,
37448- smart2e_fifo_full,
37449- smart2e_intr_pending,
37450- smart2e_completed,
37451+ .submit_command = smart2e_submit_command,
37452+ .set_intr_mask = smart2e_intr_mask,
37453+ .fifo_full = smart2e_fifo_full,
37454+ .intr_pending = smart2e_intr_pending,
37455+ .command_completed = smart2e_completed,
37456 };
37457
37458 /*
37459@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
37460 }
37461
37462 static struct access_method smart1_access = {
37463- smart1_submit_command,
37464- smart1_intr_mask,
37465- smart1_fifo_full,
37466- smart1_intr_pending,
37467- smart1_completed,
37468+ .submit_command = smart1_submit_command,
37469+ .set_intr_mask = smart1_intr_mask,
37470+ .fifo_full = smart1_fifo_full,
37471+ .intr_pending = smart1_intr_pending,
37472+ .command_completed = smart1_completed,
37473 };
37474diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
37475index f038dba..bb74c08 100644
37476--- a/drivers/bluetooth/btwilink.c
37477+++ b/drivers/bluetooth/btwilink.c
37478@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
37479
37480 static int bt_ti_probe(struct platform_device *pdev)
37481 {
37482- static struct ti_st *hst;
37483+ struct ti_st *hst;
37484 struct hci_dev *hdev;
37485 int err;
37486
37487diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
37488index b6739cb..962fd35 100644
37489--- a/drivers/bus/arm-cci.c
37490+++ b/drivers/bus/arm-cci.c
37491@@ -979,7 +979,7 @@ static int cci_probe(void)
37492
37493 nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite;
37494
37495- ports = kcalloc(sizeof(*ports), nb_cci_ports, GFP_KERNEL);
37496+ ports = kcalloc(nb_cci_ports, sizeof(*ports), GFP_KERNEL);
37497 if (!ports)
37498 return -ENOMEM;
37499
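
`kcalloc()` is declared as `kcalloc(size_t n, size_t size, gfp_t flags)`; the arm-cci call had the count and element size transposed. The product, and thus the allocation, is the same either way, but the corrected order matches the documented contract and keeps the count in the slot that overflow-aware tooling inspects:

    /* kcalloc(n, size, flags): element count first, element size second */
    ports = kcalloc(nb_cci_ports, sizeof(*ports), GFP_KERNEL);
    if (!ports)
        return -ENOMEM;
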
37500diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
37501index 8a3aff7..d7538c2 100644
37502--- a/drivers/cdrom/cdrom.c
37503+++ b/drivers/cdrom/cdrom.c
37504@@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
37505 ENSURE(reset, CDC_RESET);
37506 ENSURE(generic_packet, CDC_GENERIC_PACKET);
37507 cdi->mc_flags = 0;
37508- cdo->n_minors = 0;
37509 cdi->options = CDO_USE_FFLAGS;
37510
37511 if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
37512@@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
37513 else
37514 cdi->cdda_method = CDDA_OLD;
37515
37516- if (!cdo->generic_packet)
37517- cdo->generic_packet = cdrom_dummy_generic_packet;
37518+ if (!cdo->generic_packet) {
37519+ pax_open_kernel();
37520+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
37521+ pax_close_kernel();
37522+ }
37523
37524 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
37525 mutex_lock(&cdrom_mutex);
37526@@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
37527 if (cdi->exit)
37528 cdi->exit(cdi);
37529
37530- cdi->ops->n_minors--;
37531 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
37532 }
37533
37534@@ -2107,7 +2108,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
37535 */
37536 nr = nframes;
37537 do {
37538- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
37539+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
37540 if (cgc.buffer)
37541 break;
37542
37543@@ -3429,7 +3430,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
37544 struct cdrom_device_info *cdi;
37545 int ret;
37546
37547- ret = scnprintf(info + *pos, max_size - *pos, header);
37548+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
37549 if (!ret)
37550 return 1;
37551
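
Two recurring hardening patterns appear in the cdrom hunks. First, strings that are data must not be used as format strings: `scnprintf(info + *pos, max_size - *pos, header)` becomes `scnprintf(..., "%s", header)`, and intel-rng's `printk(warning)` below gets the identical treatment. Here the strings are kernel constants, so this is defence in depth rather than a live hole, but the rule is unconditional:

    printk(msg);          /* unsafe in general: any '%' in msg is interpreted */
    printk("%s", msg);    /* safe: msg is printed verbatim */

Second, the CDDA read buffer switches from `kmalloc()` to `kzalloc()`, so a partially-filled frame buffer can never leak stale heap contents back to userspace.
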
37552diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
37553index 5980cb9..6d7bd7e 100644
37554--- a/drivers/cdrom/gdrom.c
37555+++ b/drivers/cdrom/gdrom.c
37556@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
37557 .audio_ioctl = gdrom_audio_ioctl,
37558 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
37559 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
37560- .n_minors = 1,
37561 };
37562
37563 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
37564diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
37565index fa3243d..8c98297 100644
37566--- a/drivers/char/Kconfig
37567+++ b/drivers/char/Kconfig
37568@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
37569
37570 config DEVKMEM
37571 bool "/dev/kmem virtual device support"
37572- default y
37573+ default n
37574+ depends on !GRKERNSEC_KMEM
37575 help
37576 Say Y here if you want to support the /dev/kmem device. The
37577 /dev/kmem device is rarely used, but can be used for certain
37578@@ -576,6 +577,7 @@ config DEVPORT
37579 bool
37580 depends on !M68K
37581 depends on ISA || PCI
37582+ depends on !GRKERNSEC_KMEM
37583 default y
37584
37585 source "drivers/s390/char/Kconfig"
37586diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
37587index a48e05b..6bac831 100644
37588--- a/drivers/char/agp/compat_ioctl.c
37589+++ b/drivers/char/agp/compat_ioctl.c
37590@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
37591 return -ENOMEM;
37592 }
37593
37594- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
37595+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
37596 sizeof(*usegment) * ureserve.seg_count)) {
37597 kfree(usegment);
37598 kfree(ksegment);
37599diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
37600index 1b19239..b87b143 100644
37601--- a/drivers/char/agp/frontend.c
37602+++ b/drivers/char/agp/frontend.c
37603@@ -819,7 +819,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
37604 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
37605 return -EFAULT;
37606
37607- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
37608+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
37609 return -EFAULT;
37610
37611 client = agp_find_client_by_pid(reserve.pid);
37612@@ -849,7 +849,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
37613 if (segment == NULL)
37614 return -ENOMEM;
37615
37616- if (copy_from_user(segment, (void __user *) reserve.seg_list,
37617+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
37618 sizeof(struct agp_segment) * reserve.seg_count)) {
37619 kfree(segment);
37620 return -EFAULT;
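
The agp ioctl validates `reserve.seg_count` before multiplying it by an element size; the fix divides by `sizeof(struct agp_segment_priv)` because that appears to be the larger element type the kernel later allocates `seg_count` of, so the guard now bounds the worst multiplication rather than the smaller one. The general idiom for guarding `n * sizeof(elem)` (names hypothetical):

    if (n >= ~0U / sizeof(struct elem))   /* product would overflow: reject */
        return -EFAULT;
    buf = kmalloc(n * sizeof(struct elem), GFP_KERNEL);
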
37621diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
37622index 4f94375..413694e 100644
37623--- a/drivers/char/genrtc.c
37624+++ b/drivers/char/genrtc.c
37625@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
37626 switch (cmd) {
37627
37628 case RTC_PLL_GET:
37629+ memset(&pll, 0, sizeof(pll));
37630 if (get_rtc_pll(&pll))
37631 return -EINVAL;
37632 else
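
`RTC_PLL_GET` copies a stack `struct rtc_pll_info` back to userspace; if `get_rtc_pll()` leaves any field or padding byte untouched, uninitialized kernel stack would leak. Zeroing the struct first closes the infoleak. A sketch of the fixed flow:

    struct rtc_pll_info pll;

    memset(&pll, 0, sizeof(pll));   /* clear padding and any fields the backend skips */
    if (get_rtc_pll(&pll))
        return -EINVAL;
    /* ...then copy_to_user(argp, &pll, sizeof(pll)) as the driver does */
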
37633diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
37634index 5d9c31d..c94ccb5 100644
37635--- a/drivers/char/hpet.c
37636+++ b/drivers/char/hpet.c
37637@@ -578,7 +578,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
37638 }
37639
37640 static int
37641-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
37642+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
37643 struct hpet_info *info)
37644 {
37645 struct hpet_timer __iomem *timer;
37646diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
37647index 86fe45c..c0ea948 100644
37648--- a/drivers/char/hw_random/intel-rng.c
37649+++ b/drivers/char/hw_random/intel-rng.c
37650@@ -314,7 +314,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
37651
37652 if (no_fwh_detect)
37653 return -ENODEV;
37654- printk(warning);
37655+ printk("%s", warning);
37656 return -EBUSY;
37657 }
37658
37659diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
37660index ec4e10f..f2a763b 100644
37661--- a/drivers/char/ipmi/ipmi_msghandler.c
37662+++ b/drivers/char/ipmi/ipmi_msghandler.c
37663@@ -420,7 +420,7 @@ struct ipmi_smi {
37664 struct proc_dir_entry *proc_dir;
37665 char proc_dir_name[10];
37666
37667- atomic_t stats[IPMI_NUM_STATS];
37668+ atomic_unchecked_t stats[IPMI_NUM_STATS];
37669
37670 /*
37671 * run_to_completion duplicate of smb_info, smi_info
37672@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
37673
37674
37675 #define ipmi_inc_stat(intf, stat) \
37676- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
37677+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
37678 #define ipmi_get_stat(intf, stat) \
37679- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
37680+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
37681
37682 static int is_lan_addr(struct ipmi_addr *addr)
37683 {
37684@@ -2883,7 +2883,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
37685 INIT_LIST_HEAD(&intf->cmd_rcvrs);
37686 init_waitqueue_head(&intf->waitq);
37687 for (i = 0; i < IPMI_NUM_STATS; i++)
37688- atomic_set(&intf->stats[i], 0);
37689+ atomic_set_unchecked(&intf->stats[i], 0);
37690
37691 intf->proc_dir = NULL;
37692
37693diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
37694index 15e4a60..b046093 100644
37695--- a/drivers/char/ipmi/ipmi_si_intf.c
37696+++ b/drivers/char/ipmi/ipmi_si_intf.c
37697@@ -280,7 +280,7 @@ struct smi_info {
37698 unsigned char slave_addr;
37699
37700 /* Counters and things for the proc filesystem. */
37701- atomic_t stats[SI_NUM_STATS];
37702+ atomic_unchecked_t stats[SI_NUM_STATS];
37703
37704 struct task_struct *thread;
37705
37706@@ -289,9 +289,9 @@ struct smi_info {
37707 };
37708
37709 #define smi_inc_stat(smi, stat) \
37710- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
37711+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
37712 #define smi_get_stat(smi, stat) \
37713- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
37714+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
37715
37716 #define SI_MAX_PARMS 4
37717
37718@@ -3324,7 +3324,7 @@ static int try_smi_init(struct smi_info *new_smi)
37719 atomic_set(&new_smi->req_events, 0);
37720 new_smi->run_to_completion = 0;
37721 for (i = 0; i < SI_NUM_STATS; i++)
37722- atomic_set(&new_smi->stats[i], 0);
37723+ atomic_set_unchecked(&new_smi->stats[i], 0);
37724
37725 new_smi->interrupt_disabled = 1;
37726 atomic_set(&new_smi->stop_operation, 0);
37727diff --git a/drivers/char/mem.c b/drivers/char/mem.c
37728index f895a8c..2bc9147 100644
37729--- a/drivers/char/mem.c
37730+++ b/drivers/char/mem.c
37731@@ -18,6 +18,7 @@
37732 #include <linux/raw.h>
37733 #include <linux/tty.h>
37734 #include <linux/capability.h>
37735+#include <linux/security.h>
37736 #include <linux/ptrace.h>
37737 #include <linux/device.h>
37738 #include <linux/highmem.h>
37739@@ -37,6 +38,10 @@
37740
37741 #define DEVPORT_MINOR 4
37742
37743+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
37744+extern const struct file_operations grsec_fops;
37745+#endif
37746+
37747 static inline unsigned long size_inside_page(unsigned long start,
37748 unsigned long size)
37749 {
37750@@ -68,9 +73,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
37751
37752 while (cursor < to) {
37753 if (!devmem_is_allowed(pfn)) {
37754+#ifdef CONFIG_GRKERNSEC_KMEM
37755+ gr_handle_mem_readwrite(from, to);
37756+#else
37757 printk(KERN_INFO
37758 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
37759 current->comm, from, to);
37760+#endif
37761 return 0;
37762 }
37763 cursor += PAGE_SIZE;
37764@@ -78,6 +87,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
37765 }
37766 return 1;
37767 }
37768+#elif defined(CONFIG_GRKERNSEC_KMEM)
37769+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
37770+{
37771+ return 0;
37772+}
37773 #else
37774 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
37775 {
37776@@ -120,6 +134,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
37777
37778 while (count > 0) {
37779 unsigned long remaining;
37780+ char *temp;
37781
37782 sz = size_inside_page(p, count);
37783
37784@@ -135,7 +150,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
37785 if (!ptr)
37786 return -EFAULT;
37787
37788- remaining = copy_to_user(buf, ptr, sz);
37789+#ifdef CONFIG_PAX_USERCOPY
37790+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
37791+ if (!temp) {
37792+ unxlate_dev_mem_ptr(p, ptr);
37793+ return -ENOMEM;
37794+ }
37795+ memcpy(temp, ptr, sz);
37796+#else
37797+ temp = ptr;
37798+#endif
37799+
37800+ remaining = copy_to_user(buf, temp, sz);
37801+
37802+#ifdef CONFIG_PAX_USERCOPY
37803+ kfree(temp);
37804+#endif
37805+
37806 unxlate_dev_mem_ptr(p, ptr);
37807 if (remaining)
37808 return -EFAULT;
37809@@ -364,9 +395,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
37810 size_t count, loff_t *ppos)
37811 {
37812 unsigned long p = *ppos;
37813- ssize_t low_count, read, sz;
37814+ ssize_t low_count, read, sz, err = 0;
37815 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
37816- int err = 0;
37817
37818 read = 0;
37819 if (p < (unsigned long) high_memory) {
37820@@ -388,6 +418,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
37821 }
37822 #endif
37823 while (low_count > 0) {
37824+ char *temp;
37825+
37826 sz = size_inside_page(p, low_count);
37827
37828 /*
37829@@ -397,7 +429,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
37830 */
37831 kbuf = xlate_dev_kmem_ptr((char *)p);
37832
37833- if (copy_to_user(buf, kbuf, sz))
37834+#ifdef CONFIG_PAX_USERCOPY
37835+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
37836+ if (!temp)
37837+ return -ENOMEM;
37838+ memcpy(temp, kbuf, sz);
37839+#else
37840+ temp = kbuf;
37841+#endif
37842+
37843+ err = copy_to_user(buf, temp, sz);
37844+
37845+#ifdef CONFIG_PAX_USERCOPY
37846+ kfree(temp);
37847+#endif
37848+
37849+ if (err)
37850 return -EFAULT;
37851 buf += sz;
37852 p += sz;
37853@@ -822,6 +869,9 @@ static const struct memdev {
37854 #ifdef CONFIG_PRINTK
37855 [11] = { "kmsg", 0644, &kmsg_fops, NULL },
37856 #endif
37857+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
37858+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
37859+#endif
37860 };
37861
37862 static int memory_open(struct inode *inode, struct file *filp)
37863@@ -893,7 +943,7 @@ static int __init chr_dev_init(void)
37864 continue;
37865
37866 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
37867- NULL, devlist[minor].name);
37868+ NULL, "%s", devlist[minor].name);
37869 }
37870
37871 return tty_init();
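
Under PAX_USERCOPY, `copy_to_user()` insists that a kernel source lie within a single object it can size-check, such as a whitelisted slab allocation or the current stack frame. An `xlate_dev_mem_ptr()` mapping is neither, so the `/dev/mem` and `/dev/kmem` read paths bounce each chunk through a temporary allocation made with grsecurity's `GFP_USERCOPY` flag, which places it in a slab region the checker treats as copyable:

    char *temp = kmalloc(sz, GFP_KERNEL | GFP_USERCOPY);  /* checkable object   */
    if (!temp)
        return -ENOMEM;
    memcpy(temp, ptr, sz);        /* kernel-to-kernel copy is not instrumented  */
    remaining = copy_to_user(buf, temp, sz);
    kfree(temp);
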
37872diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
37873index 9df78e2..01ba9ae 100644
37874--- a/drivers/char/nvram.c
37875+++ b/drivers/char/nvram.c
37876@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
37877
37878 spin_unlock_irq(&rtc_lock);
37879
37880- if (copy_to_user(buf, contents, tmp - contents))
37881+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
37882 return -EFAULT;
37883
37884 *ppos = i;
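
nvram's read (like `extract_entropy_user()` in random.c below) gains a belt-and-braces clamp: the computed length is checked against the source buffer's real size in the same expression as the copy, so no earlier miscalculation can turn into a stack over-read:

    if (len > sizeof(contents) || copy_to_user(buf, contents, len))
        return -EFAULT;
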
37885diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
37886index d39cca6..8c1e269 100644
37887--- a/drivers/char/pcmcia/synclink_cs.c
37888+++ b/drivers/char/pcmcia/synclink_cs.c
37889@@ -2345,9 +2345,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
37890
37891 if (debug_level >= DEBUG_LEVEL_INFO)
37892 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
37893- __FILE__, __LINE__, info->device_name, port->count);
37894+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
37895
37896- WARN_ON(!port->count);
37897+ WARN_ON(!atomic_read(&port->count));
37898
37899 if (tty_port_close_start(port, tty, filp) == 0)
37900 goto cleanup;
37901@@ -2365,7 +2365,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
37902 cleanup:
37903 if (debug_level >= DEBUG_LEVEL_INFO)
37904 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
37905- tty->driver->name, port->count);
37906+ tty->driver->name, atomic_read(&port->count));
37907 }
37908
37909 /* Wait until the transmitter is empty.
37910@@ -2507,7 +2507,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
37911
37912 if (debug_level >= DEBUG_LEVEL_INFO)
37913 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
37914- __FILE__, __LINE__, tty->driver->name, port->count);
37915+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
37916
37917 /* If port is closing, signal caller to try again */
37918 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
37919@@ -2527,11 +2527,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
37920 goto cleanup;
37921 }
37922 spin_lock(&port->lock);
37923- port->count++;
37924+ atomic_inc(&port->count);
37925 spin_unlock(&port->lock);
37926 spin_unlock_irqrestore(&info->netlock, flags);
37927
37928- if (port->count == 1) {
37929+ if (atomic_read(&port->count) == 1) {
37930 /* 1st open on this device, init hardware */
37931 retval = startup(info, tty);
37932 if (retval < 0)
37933@@ -3920,7 +3920,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
37934 unsigned short new_crctype;
37935
37936 /* return error if TTY interface open */
37937- if (info->port.count)
37938+ if (atomic_read(&info->port.count))
37939 return -EBUSY;
37940
37941 switch (encoding)
37942@@ -4024,7 +4024,7 @@ static int hdlcdev_open(struct net_device *dev)
37943
37944 /* arbitrate between network and tty opens */
37945 spin_lock_irqsave(&info->netlock, flags);
37946- if (info->port.count != 0 || info->netcount != 0) {
37947+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
37948 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
37949 spin_unlock_irqrestore(&info->netlock, flags);
37950 return -EBUSY;
37951@@ -4114,7 +4114,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
37952 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
37953
37954 /* return error if TTY interface open */
37955- if (info->port.count)
37956+ if (atomic_read(&info->port.count))
37957 return -EBUSY;
37958
37959 if (cmd != SIOCWANDEV)
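
The synclink_cs open count is examined by the hdlcdev paths without taking `port->lock`, so the patch (part of a tree-wide `tty_port.count` conversion in grsecurity) makes it an `atomic_t`: writers keep their locking, while lock-free readers get a coherent value instead of a racy plain `int`. Roughly:

    spin_lock(&port->lock);
    atomic_inc(&port->count);
    spin_unlock(&port->lock);

    /* elsewhere, lockless: */
    if (atomic_read(&info->port.count))
        return -EBUSY;
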
37960diff --git a/drivers/char/random.c b/drivers/char/random.c
37961index 429b75b..03d60db 100644
37962--- a/drivers/char/random.c
37963+++ b/drivers/char/random.c
37964@@ -270,10 +270,17 @@
37965 /*
37966 * Configuration information
37967 */
37968+#ifdef CONFIG_GRKERNSEC_RANDNET
37969+#define INPUT_POOL_SHIFT 14
37970+#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
37971+#define OUTPUT_POOL_SHIFT 12
37972+#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
37973+#else
37974 #define INPUT_POOL_SHIFT 12
37975 #define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
37976 #define OUTPUT_POOL_SHIFT 10
37977 #define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
37978+#endif
37979 #define SEC_XFER_SIZE 512
37980 #define EXTRACT_SIZE 10
37981
37982@@ -361,12 +368,19 @@ static struct poolinfo {
37983 #define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5)
37984 int tap1, tap2, tap3, tap4, tap5;
37985 } poolinfo_table[] = {
37986+#ifdef CONFIG_GRKERNSEC_RANDNET
37987+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
37988+ { S(512), 411, 308, 208, 104, 1 },
37989+ /* x^128 + x^104 + x^76 + x^51 + x^25 + x + 1 -- 105 */
37990+ { S(128), 104, 76, 51, 25, 1 },
37991+#else
37992 /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
37993 /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
37994 { S(128), 104, 76, 51, 25, 1 },
37995 /* was: x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 */
37996 /* x^32 + x^26 + x^19 + x^14 + x^7 + x + 1 */
37997 { S(32), 26, 19, 14, 7, 1 },
37998+#endif
37999 #if 0
38000 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
38001 { S(2048), 1638, 1231, 819, 411, 1 },
38002@@ -524,8 +538,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
38003 input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
38004 }
38005
38006- ACCESS_ONCE(r->input_rotate) = input_rotate;
38007- ACCESS_ONCE(r->add_ptr) = i;
38008+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
38009+ ACCESS_ONCE_RW(r->add_ptr) = i;
38010 smp_wmb();
38011
38012 if (out)
38013@@ -1151,7 +1165,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
38014
38015 extract_buf(r, tmp);
38016 i = min_t(int, nbytes, EXTRACT_SIZE);
38017- if (copy_to_user(buf, tmp, i)) {
38018+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
38019 ret = -EFAULT;
38020 break;
38021 }
38022@@ -1507,7 +1521,7 @@ EXPORT_SYMBOL(generate_random_uuid);
38023 #include <linux/sysctl.h>
38024
38025 static int min_read_thresh = 8, min_write_thresh;
38026-static int max_read_thresh = INPUT_POOL_WORDS * 32;
38027+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
38028 static int max_write_thresh = INPUT_POOL_WORDS * 32;
38029 static char sysctl_bootid[16];
38030
38031@@ -1523,7 +1537,7 @@ static char sysctl_bootid[16];
38032 static int proc_do_uuid(struct ctl_table *table, int write,
38033 void __user *buffer, size_t *lenp, loff_t *ppos)
38034 {
38035- struct ctl_table fake_table;
38036+ ctl_table_no_const fake_table;
38037 unsigned char buf[64], tmp_uuid[16], *uuid;
38038
38039 uuid = table->data;
38040@@ -1553,7 +1567,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
38041 static int proc_do_entropy(ctl_table *table, int write,
38042 void __user *buffer, size_t *lenp, loff_t *ppos)
38043 {
38044- ctl_table fake_table;
38045+ ctl_table_no_const fake_table;
38046 int entropy_count;
38047
38048 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
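
GRKERNSEC_RANDNET enlarges both entropy pools by a factor of four. Pool sizes are expressed in words via `1 << (SHIFT - 5)`, i.e. `2^SHIFT` bits stored as 32-bit words, and a matching 512-word feedback polynomial is added to `poolinfo_table`:

    /* words = 2^SHIFT bits / 32 bits-per-word = 1 << (SHIFT - 5)             */
    /* RANDNET input:  SHIFT 14 -> 512 words (16384 bits); vanilla 12 -> 128  */
    /* RANDNET output: SHIFT 12 -> 128 words ( 4096 bits); vanilla 10 ->  32  */

The `ctl_table_no_const fake_table` changes in the same file are the flip side of constification: those tables are built on the stack and written at runtime, so they need the explicitly non-const typedef.
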
38049diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
38050index 7cc1fe22..b602d6b 100644
38051--- a/drivers/char/sonypi.c
38052+++ b/drivers/char/sonypi.c
38053@@ -54,6 +54,7 @@
38054
38055 #include <asm/uaccess.h>
38056 #include <asm/io.h>
38057+#include <asm/local.h>
38058
38059 #include <linux/sonypi.h>
38060
38061@@ -490,7 +491,7 @@ static struct sonypi_device {
38062 spinlock_t fifo_lock;
38063 wait_queue_head_t fifo_proc_list;
38064 struct fasync_struct *fifo_async;
38065- int open_count;
38066+ local_t open_count;
38067 int model;
38068 struct input_dev *input_jog_dev;
38069 struct input_dev *input_key_dev;
38070@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
38071 static int sonypi_misc_release(struct inode *inode, struct file *file)
38072 {
38073 mutex_lock(&sonypi_device.lock);
38074- sonypi_device.open_count--;
38075+ local_dec(&sonypi_device.open_count);
38076 mutex_unlock(&sonypi_device.lock);
38077 return 0;
38078 }
38079@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
38080 {
38081 mutex_lock(&sonypi_device.lock);
38082 /* Flush input queue on first open */
38083- if (!sonypi_device.open_count)
38084+ if (!local_read(&sonypi_device.open_count))
38085 kfifo_reset(&sonypi_device.fifo);
38086- sonypi_device.open_count++;
38087+ local_inc(&sonypi_device.open_count);
38088 mutex_unlock(&sonypi_device.lock);
38089
38090 return 0;
38091diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
38092index 64420b3..5c40b56 100644
38093--- a/drivers/char/tpm/tpm_acpi.c
38094+++ b/drivers/char/tpm/tpm_acpi.c
38095@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
38096 virt = acpi_os_map_memory(start, len);
38097 if (!virt) {
38098 kfree(log->bios_event_log);
38099+ log->bios_event_log = NULL;
38100 printk("%s: ERROR - Unable to map memory\n", __func__);
38101 return -EIO;
38102 }
38103
38104- memcpy_fromio(log->bios_event_log, virt, len);
38105+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
38106
38107 acpi_os_unmap_memory(virt, len);
38108 return 0;
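
On the error path, `read_log()` frees `log->bios_event_log`, but the caller may later free the same field during teardown; nulling the pointer after `kfree()` makes that second pass harmless:

    kfree(log->bios_event_log);
    log->bios_event_log = NULL;   /* a later kfree(NULL) is a no-op, so no double free */
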
38109diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
38110index 59f7cb2..bac8b6d 100644
38111--- a/drivers/char/tpm/tpm_eventlog.c
38112+++ b/drivers/char/tpm/tpm_eventlog.c
38113@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
38114 event = addr;
38115
38116 if ((event->event_type == 0 && event->event_size == 0) ||
38117- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
38118+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
38119 return NULL;
38120
38121 return addr;
38122@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
38123 return NULL;
38124
38125 if ((event->event_type == 0 && event->event_size == 0) ||
38126- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
38127+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
38128 return NULL;
38129
38130 (*pos)++;
38131@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
38132 int i;
38133
38134 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
38135- seq_putc(m, data[i]);
38136+ if (!seq_putc(m, data[i]))
38137+ return -EFAULT;
38138
38139 return 0;
38140 }
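
The original tpm_eventlog bounds check added the untrusted `event->event_size` to `addr` and compared against `limit`; a large attacker-supplied size wraps the sum around and sails past the test. Rewriting it so the untrusted term stands alone removes the overflow, assuming `addr` plus the fixed header is already known to lie below `limit`, which the surrounding iteration maintains. Condensed:

    if (addr + HDR + n >= limit)    /* unsafe: the sum can wrap to a small value        */
    if (n >= limit - addr - HDR)    /* safe: untrusted n compared against a distance    */
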
38141diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
38142index feea87c..18aefff 100644
38143--- a/drivers/char/virtio_console.c
38144+++ b/drivers/char/virtio_console.c
38145@@ -684,7 +684,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
38146 if (to_user) {
38147 ssize_t ret;
38148
38149- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
38150+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
38151 if (ret)
38152 return -EFAULT;
38153 } else {
38154@@ -787,7 +787,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
38155 if (!port_has_data(port) && !port->host_connected)
38156 return 0;
38157
38158- return fill_readbuf(port, ubuf, count, true);
38159+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
38160 }
38161
38162 static int wait_port_writable(struct port *port, bool nonblock)
38163diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
38164index a33f46f..a720eed 100644
38165--- a/drivers/clk/clk-composite.c
38166+++ b/drivers/clk/clk-composite.c
38167@@ -122,7 +122,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
38168 struct clk *clk;
38169 struct clk_init_data init;
38170 struct clk_composite *composite;
38171- struct clk_ops *clk_composite_ops;
38172+ clk_ops_no_const *clk_composite_ops;
38173
38174 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
38175 if (!composite) {
38176diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
38177index 81dd31a..ef5c542 100644
38178--- a/drivers/clk/socfpga/clk.c
38179+++ b/drivers/clk/socfpga/clk.c
38180@@ -22,6 +22,7 @@
38181 #include <linux/clk-provider.h>
38182 #include <linux/io.h>
38183 #include <linux/of.h>
38184+#include <asm/pgtable.h>
38185
38186 /* Clock Manager offsets */
38187 #define CLKMGR_CTRL 0x0
38188@@ -152,8 +153,10 @@ static __init struct clk *socfpga_clk_init(struct device_node *node,
38189 streq(clk_name, "periph_pll") ||
38190 streq(clk_name, "sdram_pll")) {
38191 socfpga_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
38192- clk_pll_ops.enable = clk_gate_ops.enable;
38193- clk_pll_ops.disable = clk_gate_ops.disable;
38194+ pax_open_kernel();
38195+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
38196+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
38197+ pax_close_kernel();
38198 }
38199
38200 clk = clk_register(NULL, &socfpga_clk->hw.hw);
38201@@ -244,7 +247,7 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
38202 return parent_rate / div;
38203 }
38204
38205-static struct clk_ops gateclk_ops = {
38206+static clk_ops_no_const gateclk_ops __read_only = {
38207 .recalc_rate = socfpga_clk_recalc_rate,
38208 .get_parent = socfpga_clk_get_parent,
38209 .set_parent = socfpga_clk_set_parent,
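
`pax_open_kernel()` / `pax_close_kernel()` bracket the handful of sites that must legitimately write to data the constify/KERNEXEC machinery keeps read-only (on x86 this briefly permits kernel writes to read-only pages, e.g. by toggling CR0.WP on the current CPU). The write itself goes through a cast that strips the implied constness, exactly as the socfpga hunk above does:

    pax_open_kernel();
    *(void **)&clk_pll_ops.enable  = clk_gate_ops.enable;   /* patch one pointer */
    *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
    pax_close_kernel();
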
38210diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
38211index caf41eb..223d27a 100644
38212--- a/drivers/cpufreq/acpi-cpufreq.c
38213+++ b/drivers/cpufreq/acpi-cpufreq.c
38214@@ -172,7 +172,7 @@ static ssize_t show_global_boost(struct kobject *kobj,
38215 return sprintf(buf, "%u\n", boost_enabled);
38216 }
38217
38218-static struct global_attr global_boost = __ATTR(boost, 0644,
38219+static global_attr_no_const global_boost = __ATTR(boost, 0644,
38220 show_global_boost,
38221 store_global_boost);
38222
38223@@ -693,8 +693,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
38224 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
38225 per_cpu(acfreq_data, cpu) = data;
38226
38227- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
38228- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
38229+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
38230+ pax_open_kernel();
38231+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
38232+ pax_close_kernel();
38233+ }
38234
38235 result = acpi_processor_register_performance(data->acpi_data, cpu);
38236 if (result)
38237@@ -827,7 +830,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
38238 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
38239 break;
38240 case ACPI_ADR_SPACE_FIXED_HARDWARE:
38241- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
38242+ pax_open_kernel();
38243+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
38244+ pax_close_kernel();
38245 break;
38246 default:
38247 break;
38248diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
38249index 8d19f7c..6bc2daa 100644
38250--- a/drivers/cpufreq/cpufreq.c
38251+++ b/drivers/cpufreq/cpufreq.c
38252@@ -1885,7 +1885,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
38253 #endif
38254
38255 mutex_lock(&cpufreq_governor_mutex);
38256- list_del(&governor->governor_list);
38257+ pax_list_del(&governor->governor_list);
38258 mutex_unlock(&cpufreq_governor_mutex);
38259 return;
38260 }
38261@@ -2115,7 +2115,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
38262 return NOTIFY_OK;
38263 }
38264
38265-static struct notifier_block __refdata cpufreq_cpu_notifier = {
38266+static struct notifier_block cpufreq_cpu_notifier = {
38267 .notifier_call = cpufreq_cpu_callback,
38268 };
38269
38270@@ -2148,8 +2148,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
38271
38272 pr_debug("trying to register driver %s\n", driver_data->name);
38273
38274- if (driver_data->setpolicy)
38275- driver_data->flags |= CPUFREQ_CONST_LOOPS;
38276+ if (driver_data->setpolicy) {
38277+ pax_open_kernel();
38278+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
38279+ pax_close_kernel();
38280+ }
38281
38282 write_lock_irqsave(&cpufreq_driver_lock, flags);
38283 if (cpufreq_driver) {
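
`pax_list_del()` here, and `pax_list_add()`/`pax_list_add_tail()` in the devfreq and cpuidle hunks below, are the list primitives for nodes embedded in constified objects such as governors: the same pointer surgery as `list_del()`, but performed inside an open-kernel window. Roughly equivalent to:

    pax_open_kernel();
    list_del(&governor->governor_list);
    pax_close_kernel();
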
38284diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
38285index e6be635..f8a90dc 100644
38286--- a/drivers/cpufreq/cpufreq_governor.c
38287+++ b/drivers/cpufreq/cpufreq_governor.c
38288@@ -187,7 +187,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
38289 struct dbs_data *dbs_data;
38290 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
38291 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
38292- struct od_ops *od_ops = NULL;
38293+ const struct od_ops *od_ops = NULL;
38294 struct od_dbs_tuners *od_tuners = NULL;
38295 struct cs_dbs_tuners *cs_tuners = NULL;
38296 struct cpu_dbs_common_info *cpu_cdbs;
38297@@ -253,7 +253,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
38298
38299 if ((cdata->governor == GOV_CONSERVATIVE) &&
38300 (!policy->governor->initialized)) {
38301- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
38302+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
38303
38304 cpufreq_register_notifier(cs_ops->notifier_block,
38305 CPUFREQ_TRANSITION_NOTIFIER);
38306@@ -273,7 +273,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
38307
38308 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
38309 (policy->governor->initialized == 1)) {
38310- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
38311+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
38312
38313 cpufreq_unregister_notifier(cs_ops->notifier_block,
38314 CPUFREQ_TRANSITION_NOTIFIER);
38315diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
38316index b5f2b86..daa801b 100644
38317--- a/drivers/cpufreq/cpufreq_governor.h
38318+++ b/drivers/cpufreq/cpufreq_governor.h
38319@@ -205,7 +205,7 @@ struct common_dbs_data {
38320 void (*exit)(struct dbs_data *dbs_data);
38321
38322 /* Governor specific ops, see below */
38323- void *gov_ops;
38324+ const void *gov_ops;
38325 };
38326
38327 /* Governor Per policy data */
38328@@ -225,7 +225,7 @@ struct od_ops {
38329 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
38330 unsigned int freq_next, unsigned int relation);
38331 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
38332-};
38333+} __no_const;
38334
38335 struct cs_ops {
38336 struct notifier_block *notifier_block;
38337diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
38338index 18d4091..434be15 100644
38339--- a/drivers/cpufreq/cpufreq_ondemand.c
38340+++ b/drivers/cpufreq/cpufreq_ondemand.c
38341@@ -521,7 +521,7 @@ static void od_exit(struct dbs_data *dbs_data)
38342
38343 define_get_cpu_dbs_routines(od_cpu_dbs_info);
38344
38345-static struct od_ops od_ops = {
38346+static struct od_ops od_ops __read_only = {
38347 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
38348 .powersave_bias_target = generic_powersave_bias_target,
38349 .freq_increase = dbs_freq_increase,
38350@@ -576,14 +576,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
38351 (struct cpufreq_policy *, unsigned int, unsigned int),
38352 unsigned int powersave_bias)
38353 {
38354- od_ops.powersave_bias_target = f;
38355+ pax_open_kernel();
38356+ *(void **)&od_ops.powersave_bias_target = f;
38357+ pax_close_kernel();
38358 od_set_powersave_bias(powersave_bias);
38359 }
38360 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
38361
38362 void od_unregister_powersave_bias_handler(void)
38363 {
38364- od_ops.powersave_bias_target = generic_powersave_bias_target;
38365+ pax_open_kernel();
38366+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
38367+ pax_close_kernel();
38368 od_set_powersave_bias(0);
38369 }
38370 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
38371diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
38372index 4cf0d28..5830372 100644
38373--- a/drivers/cpufreq/cpufreq_stats.c
38374+++ b/drivers/cpufreq/cpufreq_stats.c
38375@@ -352,7 +352,7 @@ static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
38376 }
38377
38378 /* priority=1 so this will get called before cpufreq_remove_dev */
38379-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
38380+static struct notifier_block cpufreq_stat_cpu_notifier = {
38381 .notifier_call = cpufreq_stat_cpu_callback,
38382 .priority = 1,
38383 };
38384diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
38385index d51f17ed..9f43b15 100644
38386--- a/drivers/cpufreq/intel_pstate.c
38387+++ b/drivers/cpufreq/intel_pstate.c
38388@@ -112,10 +112,10 @@ struct pstate_funcs {
38389 struct cpu_defaults {
38390 struct pstate_adjust_policy pid_policy;
38391 struct pstate_funcs funcs;
38392-};
38393+} __do_const;
38394
38395 static struct pstate_adjust_policy pid_params;
38396-static struct pstate_funcs pstate_funcs;
38397+static struct pstate_funcs *pstate_funcs;
38398
38399 struct perf_limits {
38400 int no_turbo;
38401@@ -462,7 +462,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
38402
38403 cpu->pstate.current_pstate = pstate;
38404
38405- pstate_funcs.set(pstate);
38406+ pstate_funcs->set(pstate);
38407 }
38408
38409 static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
38410@@ -484,9 +484,9 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
38411 {
38412 sprintf(cpu->name, "Intel 2nd generation core");
38413
38414- cpu->pstate.min_pstate = pstate_funcs.get_min();
38415- cpu->pstate.max_pstate = pstate_funcs.get_max();
38416- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
38417+ cpu->pstate.min_pstate = pstate_funcs->get_min();
38418+ cpu->pstate.max_pstate = pstate_funcs->get_max();
38419+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
38420
38421 /*
38422 * goto max pstate so we don't slow up boot if we are built-in if we are
38423@@ -750,9 +750,9 @@ static int intel_pstate_msrs_not_valid(void)
38424 rdmsrl(MSR_IA32_APERF, aperf);
38425 rdmsrl(MSR_IA32_MPERF, mperf);
38426
38427- if (!pstate_funcs.get_max() ||
38428- !pstate_funcs.get_min() ||
38429- !pstate_funcs.get_turbo())
38430+ if (!pstate_funcs->get_max() ||
38431+ !pstate_funcs->get_min() ||
38432+ !pstate_funcs->get_turbo())
38433 return -ENODEV;
38434
38435 rdmsrl(MSR_IA32_APERF, tmp);
38436@@ -766,7 +766,7 @@ static int intel_pstate_msrs_not_valid(void)
38437 return 0;
38438 }
38439
38440-static void copy_pid_params(struct pstate_adjust_policy *policy)
38441+static void copy_pid_params(const struct pstate_adjust_policy *policy)
38442 {
38443 pid_params.sample_rate_ms = policy->sample_rate_ms;
38444 pid_params.p_gain_pct = policy->p_gain_pct;
38445@@ -778,10 +778,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
38446
38447 static void copy_cpu_funcs(struct pstate_funcs *funcs)
38448 {
38449- pstate_funcs.get_max = funcs->get_max;
38450- pstate_funcs.get_min = funcs->get_min;
38451- pstate_funcs.get_turbo = funcs->get_turbo;
38452- pstate_funcs.set = funcs->set;
38453+ pstate_funcs = funcs;
38454 }
38455
38456 #if IS_ENABLED(CONFIG_ACPI)
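
intel_pstate previously copied the CPU-model-specific function table field by field into a writable static `struct pstate_funcs`. With `struct cpu_defaults` marked `__do_const`, the patch keeps only a pointer to the per-model table, so `copy_cpu_funcs()` collapses to one assignment and no writable copy of the callbacks remains. In outline:

    static struct pstate_funcs *pstate_funcs;   /* points into a __do_const table */

    static void copy_cpu_funcs(struct pstate_funcs *funcs)
    {
        pstate_funcs = funcs;                   /* point, don't duplicate */
    }

    /* call sites become pstate_funcs->get_max(), pstate_funcs->set(pstate), ... */
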
38457diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
38458index 3d1cba9..0ab21d2 100644
38459--- a/drivers/cpufreq/p4-clockmod.c
38460+++ b/drivers/cpufreq/p4-clockmod.c
38461@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
38462 case 0x0F: /* Core Duo */
38463 case 0x16: /* Celeron Core */
38464 case 0x1C: /* Atom */
38465- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
38466+ pax_open_kernel();
38467+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
38468+ pax_close_kernel();
38469 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
38470 case 0x0D: /* Pentium M (Dothan) */
38471- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
38472+ pax_open_kernel();
38473+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
38474+ pax_close_kernel();
38475 /* fall through */
38476 case 0x09: /* Pentium M (Banias) */
38477 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
38478@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
38479
38480 /* on P-4s, the TSC runs with constant frequency independent whether
38481 * throttling is active or not. */
38482- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
38483+ pax_open_kernel();
38484+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
38485+ pax_close_kernel();
38486
38487 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
38488 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
38489diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
38490index 724ffbd..ad83692 100644
38491--- a/drivers/cpufreq/sparc-us3-cpufreq.c
38492+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
38493@@ -18,14 +18,12 @@
38494 #include <asm/head.h>
38495 #include <asm/timer.h>
38496
38497-static struct cpufreq_driver *cpufreq_us3_driver;
38498-
38499 struct us3_freq_percpu_info {
38500 struct cpufreq_frequency_table table[4];
38501 };
38502
38503 /* Indexed by cpu number. */
38504-static struct us3_freq_percpu_info *us3_freq_table;
38505+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
38506
38507 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
38508 * in the Safari config register.
38509@@ -156,14 +154,26 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
38510
38511 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
38512 {
38513- if (cpufreq_us3_driver) {
38514- cpufreq_frequency_table_put_attr(policy->cpu);
38515- us3_freq_target(policy, 0);
38516- }
38517+ cpufreq_frequency_table_put_attr(policy->cpu);
38518+ us3_freq_target(policy, 0);
38519
38520 return 0;
38521 }
38522
38523+static int __init us3_freq_init(void);
38524+static void __exit us3_freq_exit(void);
38525+
38526+static struct cpufreq_driver cpufreq_us3_driver = {
38527+ .init = us3_freq_cpu_init,
38528+ .verify = cpufreq_generic_frequency_table_verify,
38529+ .target_index = us3_freq_target,
38530+ .get = us3_freq_get,
38531+ .exit = us3_freq_cpu_exit,
38532+ .owner = THIS_MODULE,
38533+ .name = "UltraSPARC-III",
38534+
38535+};
38536+
38537 static int __init us3_freq_init(void)
38538 {
38539 unsigned long manuf, impl, ver;
38540@@ -180,55 +190,15 @@ static int __init us3_freq_init(void)
38541 (impl == CHEETAH_IMPL ||
38542 impl == CHEETAH_PLUS_IMPL ||
38543 impl == JAGUAR_IMPL ||
38544- impl == PANTHER_IMPL)) {
38545- struct cpufreq_driver *driver;
38546-
38547- ret = -ENOMEM;
38548- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
38549- if (!driver)
38550- goto err_out;
38551-
38552- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
38553- GFP_KERNEL);
38554- if (!us3_freq_table)
38555- goto err_out;
38556-
38557- driver->init = us3_freq_cpu_init;
38558- driver->verify = cpufreq_generic_frequency_table_verify;
38559- driver->target_index = us3_freq_target;
38560- driver->get = us3_freq_get;
38561- driver->exit = us3_freq_cpu_exit;
38562- strcpy(driver->name, "UltraSPARC-III");
38563-
38564- cpufreq_us3_driver = driver;
38565- ret = cpufreq_register_driver(driver);
38566- if (ret)
38567- goto err_out;
38568-
38569- return 0;
38570-
38571-err_out:
38572- if (driver) {
38573- kfree(driver);
38574- cpufreq_us3_driver = NULL;
38575- }
38576- kfree(us3_freq_table);
38577- us3_freq_table = NULL;
38578- return ret;
38579- }
38580+ impl == PANTHER_IMPL))
38581+ return cpufreq_register_driver(&cpufreq_us3_driver);
38582
38583 return -ENODEV;
38584 }
38585
38586 static void __exit us3_freq_exit(void)
38587 {
38588- if (cpufreq_us3_driver) {
38589- cpufreq_unregister_driver(cpufreq_us3_driver);
38590- kfree(cpufreq_us3_driver);
38591- cpufreq_us3_driver = NULL;
38592- kfree(us3_freq_table);
38593- us3_freq_table = NULL;
38594- }
38595+ cpufreq_unregister_driver(&cpufreq_us3_driver);
38596 }
38597
38598 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
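
The UltraSPARC-III driver is reshaped from "allocate a `cpufreq_driver` and fill it in at runtime" to a statically initialized driver plus a fixed `us3_freq_table[NR_CPUS]`. That drops two failure paths and all of the NULL bookkeeping, and only a statically defined driver is something the constify plugin can place read-only. Before and after, condensed from the hunk above:

    /* before: runtime construction, cleanup needed on every failure path */
    driver = kzalloc(sizeof(*driver), GFP_KERNEL);
    driver->init = us3_freq_cpu_init;
    ret = cpufreq_register_driver(driver);

    /* after: one static definition, registration is a single call */
    static struct cpufreq_driver cpufreq_us3_driver = {
        .init = us3_freq_cpu_init,
        .name = "UltraSPARC-III",
    };
    return cpufreq_register_driver(&cpufreq_us3_driver);
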
38599diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
38600index 4e1daca..e707b61 100644
38601--- a/drivers/cpufreq/speedstep-centrino.c
38602+++ b/drivers/cpufreq/speedstep-centrino.c
38603@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
38604 !cpu_has(cpu, X86_FEATURE_EST))
38605 return -ENODEV;
38606
38607- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
38608- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
38609+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
38610+ pax_open_kernel();
38611+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
38612+ pax_close_kernel();
38613+ }
38614
38615 if (policy->cpu != 0)
38616 return -ENODEV;
38617diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
38618index 06dbe7c..c2c8671 100644
38619--- a/drivers/cpuidle/driver.c
38620+++ b/drivers/cpuidle/driver.c
38621@@ -202,7 +202,7 @@ static int poll_idle(struct cpuidle_device *dev,
38622
38623 static void poll_idle_init(struct cpuidle_driver *drv)
38624 {
38625- struct cpuidle_state *state = &drv->states[0];
38626+ cpuidle_state_no_const *state = &drv->states[0];
38627
38628 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
38629 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
38630diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
38631index ca89412..a7b9c49 100644
38632--- a/drivers/cpuidle/governor.c
38633+++ b/drivers/cpuidle/governor.c
38634@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
38635 mutex_lock(&cpuidle_lock);
38636 if (__cpuidle_find_governor(gov->name) == NULL) {
38637 ret = 0;
38638- list_add_tail(&gov->governor_list, &cpuidle_governors);
38639+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
38640 if (!cpuidle_curr_governor ||
38641 cpuidle_curr_governor->rating < gov->rating)
38642 cpuidle_switch_governor(gov);
38643diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
38644index e918b6d..f87ea80 100644
38645--- a/drivers/cpuidle/sysfs.c
38646+++ b/drivers/cpuidle/sysfs.c
38647@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
38648 NULL
38649 };
38650
38651-static struct attribute_group cpuidle_attr_group = {
38652+static attribute_group_no_const cpuidle_attr_group = {
38653 .attrs = cpuidle_default_attrs,
38654 .name = "cpuidle",
38655 };
38656diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
38657index 12fea3e..1e28f47 100644
38658--- a/drivers/crypto/hifn_795x.c
38659+++ b/drivers/crypto/hifn_795x.c
38660@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
38661 MODULE_PARM_DESC(hifn_pll_ref,
38662 "PLL reference clock (pci[freq] or ext[freq], default ext)");
38663
38664-static atomic_t hifn_dev_number;
38665+static atomic_unchecked_t hifn_dev_number;
38666
38667 #define ACRYPTO_OP_DECRYPT 0
38668 #define ACRYPTO_OP_ENCRYPT 1
38669@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
38670 goto err_out_disable_pci_device;
38671
38672 snprintf(name, sizeof(name), "hifn%d",
38673- atomic_inc_return(&hifn_dev_number)-1);
38674+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
38675
38676 err = pci_request_regions(pdev, name);
38677 if (err)
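hifn_dev_number is a pure device-numbering counter, so it becomes atomic_unchecked_t: under PaX's REFCOUNT feature the ordinary atomic_inc() family detects signed overflow to stop reference-count exploits, and counters that are allowed to wrap must opt out explicitly. A rough userspace model of the two behaviours, with hypothetical helper names built on C11 atomics:

#include <stdatomic.h>
#include <stdio.h>
#include <limits.h>

/* Hypothetical model of the checked flavour: detect the wrap, saturate,
 * and report -- roughly what PaX REFCOUNT does on atomic_inc(). */
static int checked_inc_return(atomic_int *v)
{
	int old = atomic_fetch_add(v, 1);	/* atomics wrap, detectably */
	if (old == INT_MAX) {
		atomic_store(v, INT_MAX);	/* saturate instead of wrapping */
		fprintf(stderr, "refcount overflow caught\n");
		return INT_MAX;
	}
	return old + 1;
}

/* The unchecked flavour: plain increment, wrapping is acceptable. */
static int unchecked_inc_return(atomic_int *v)
{
	return atomic_fetch_add(v, 1) + 1;
}

int main(void)
{
	atomic_int dev_number = 0;		/* like hifn_dev_number */
	printf("hifn%d\n", unchecked_inc_return(&dev_number) - 1);

	atomic_int refs = INT_MAX;		/* a refcount about to wrap */
	checked_inc_return(&refs);		/* caught and saturated */
	return 0;
}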
38678diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
38679index a0b2f7e..1b6f028 100644
38680--- a/drivers/devfreq/devfreq.c
38681+++ b/drivers/devfreq/devfreq.c
38682@@ -607,7 +607,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
38683 goto err_out;
38684 }
38685
38686- list_add(&governor->node, &devfreq_governor_list);
38687+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
38688
38689 list_for_each_entry(devfreq, &devfreq_list, node) {
38690 int ret = 0;
38691@@ -695,7 +695,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
38692 }
38693 }
38694
38695- list_del(&governor->node);
38696+ pax_list_del((struct list_head *)&governor->node);
38697 err_out:
38698 mutex_unlock(&devfreq_list_lock);
38699
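The devfreq hunks use pax_list_add()/pax_list_del() because the governor object is constified: its embedded list_head is const as far as the compiler is concerned, so the call site casts it and the helper performs the actual linking inside an open-kernel window. A small stand-alone model of the embedded-node idea (the list code and governor struct here are hypothetical, not the kernel's list.h):

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

struct governor {			/* hypothetical trimmed-down governor */
	const char *name;
	struct list_head node;		/* embedded linkage, const in the kernel */
};

int main(void)
{
	struct list_head governors = { &governors, &governors };
	static struct governor menu = { "menu", { 0, 0 } };

	/* In the patched kernel the object is read-only, so the call site
	 * casts the const away and pax_list_add_tail() opens the write
	 * window before doing exactly this insertion. */
	list_add_tail(&menu.node, &governors);

	for (struct list_head *p = governors.next; p != &governors; p = p->next) {
		struct governor *g = (struct governor *)
			((char *)p - offsetof(struct governor, node));
		printf("registered: %s\n", g->name);
	}
	return 0;
}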
38700diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
38701index 0d765c0..60b7480 100644
38702--- a/drivers/dma/sh/shdmac.c
38703+++ b/drivers/dma/sh/shdmac.c
38704@@ -511,7 +511,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
38705 return ret;
38706 }
38707
38708-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
38709+static struct notifier_block sh_dmae_nmi_notifier = {
38710 .notifier_call = sh_dmae_nmi_handler,
38711
38712 /* Run before NMI debug handler and KGDB */
38713diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
38714index 1026743..80b081c 100644
38715--- a/drivers/edac/edac_device.c
38716+++ b/drivers/edac/edac_device.c
38717@@ -474,9 +474,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
38718 */
38719 int edac_device_alloc_index(void)
38720 {
38721- static atomic_t device_indexes = ATOMIC_INIT(0);
38722+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
38723
38724- return atomic_inc_return(&device_indexes) - 1;
38725+ return atomic_inc_return_unchecked(&device_indexes) - 1;
38726 }
38727 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
38728
38729diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
38730index 9f7e0e60..348c875 100644
38731--- a/drivers/edac/edac_mc_sysfs.c
38732+++ b/drivers/edac/edac_mc_sysfs.c
38733@@ -150,7 +150,7 @@ static const char * const edac_caps[] = {
38734 struct dev_ch_attribute {
38735 struct device_attribute attr;
38736 int channel;
38737-};
38738+} __do_const;
38739
38740 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
38741 struct dev_ch_attribute dev_attr_legacy_##_name = \
38742@@ -1007,14 +1007,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
38743 }
38744
38745 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
38746+ pax_open_kernel();
38747 if (mci->get_sdram_scrub_rate) {
38748- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
38749- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
38750+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
38751+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
38752 }
38753 if (mci->set_sdram_scrub_rate) {
38754- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
38755- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
38756+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
38757+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
38758 }
38759+ pax_close_kernel();
38760 err = device_create_file(&mci->dev,
38761 &dev_attr_sdram_scrub_rate);
38762 if (err) {
38763diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
38764index 2cf44b4d..6dd2dc7 100644
38765--- a/drivers/edac/edac_pci.c
38766+++ b/drivers/edac/edac_pci.c
38767@@ -29,7 +29,7 @@
38768
38769 static DEFINE_MUTEX(edac_pci_ctls_mutex);
38770 static LIST_HEAD(edac_pci_list);
38771-static atomic_t pci_indexes = ATOMIC_INIT(0);
38772+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
38773
38774 /*
38775 * edac_pci_alloc_ctl_info
38776@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
38777 */
38778 int edac_pci_alloc_index(void)
38779 {
38780- return atomic_inc_return(&pci_indexes) - 1;
38781+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
38782 }
38783 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
38784
38785diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
38786index e8658e4..22746d6 100644
38787--- a/drivers/edac/edac_pci_sysfs.c
38788+++ b/drivers/edac/edac_pci_sysfs.c
38789@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
38790 static int edac_pci_log_npe = 1;	/* log PCI non-parity errors */
38791 static int edac_pci_poll_msec = 1000; /* one second workq period */
38792
38793-static atomic_t pci_parity_count = ATOMIC_INIT(0);
38794-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
38795+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
38796+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
38797
38798 static struct kobject *edac_pci_top_main_kobj;
38799 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
38800@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
38801 void *value;
38802 ssize_t(*show) (void *, char *);
38803 ssize_t(*store) (void *, const char *, size_t);
38804-};
38805+} __do_const;
38806
38807 /* Set of show/store abstract level functions for PCI Parity object */
38808 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
38809@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
38810 edac_printk(KERN_CRIT, EDAC_PCI,
38811 "Signaled System Error on %s\n",
38812 pci_name(dev));
38813- atomic_inc(&pci_nonparity_count);
38814+ atomic_inc_unchecked(&pci_nonparity_count);
38815 }
38816
38817 if (status & (PCI_STATUS_PARITY)) {
38818@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
38819 "Master Data Parity Error on %s\n",
38820 pci_name(dev));
38821
38822- atomic_inc(&pci_parity_count);
38823+ atomic_inc_unchecked(&pci_parity_count);
38824 }
38825
38826 if (status & (PCI_STATUS_DETECTED_PARITY)) {
38827@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
38828 "Detected Parity Error on %s\n",
38829 pci_name(dev));
38830
38831- atomic_inc(&pci_parity_count);
38832+ atomic_inc_unchecked(&pci_parity_count);
38833 }
38834 }
38835
38836@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
38837 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
38838 "Signaled System Error on %s\n",
38839 pci_name(dev));
38840- atomic_inc(&pci_nonparity_count);
38841+ atomic_inc_unchecked(&pci_nonparity_count);
38842 }
38843
38844 if (status & (PCI_STATUS_PARITY)) {
38845@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
38846 "Master Data Parity Error on "
38847 "%s\n", pci_name(dev));
38848
38849- atomic_inc(&pci_parity_count);
38850+ atomic_inc_unchecked(&pci_parity_count);
38851 }
38852
38853 if (status & (PCI_STATUS_DETECTED_PARITY)) {
38854@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
38855 "Detected Parity Error on %s\n",
38856 pci_name(dev));
38857
38858- atomic_inc(&pci_parity_count);
38859+ atomic_inc_unchecked(&pci_parity_count);
38860 }
38861 }
38862 }
38863@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
38864 if (!check_pci_errors)
38865 return;
38866
38867- before_count = atomic_read(&pci_parity_count);
38868+ before_count = atomic_read_unchecked(&pci_parity_count);
38869
38870 /* scan all PCI devices looking for a Parity Error on devices and
38871 * bridges.
38872@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
38873 /* Only if operator has selected panic on PCI Error */
38874 if (edac_pci_get_panic_on_pe()) {
38875 /* If the count is different 'after' from 'before' */
38876- if (before_count != atomic_read(&pci_parity_count))
38877+ if (before_count != atomic_read_unchecked(&pci_parity_count))
38878 panic("EDAC: PCI Parity Error");
38879 }
38880 }
38881diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
38882index 51b7e3a..aa8a3e8 100644
38883--- a/drivers/edac/mce_amd.h
38884+++ b/drivers/edac/mce_amd.h
38885@@ -77,7 +77,7 @@ struct amd_decoder_ops {
38886 bool (*mc0_mce)(u16, u8);
38887 bool (*mc1_mce)(u16, u8);
38888 bool (*mc2_mce)(u16, u8);
38889-};
38890+} __no_const;
38891
38892 void amd_report_gart_errors(bool);
38893 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
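amd_decoder_ops gets __no_const for the opposite reason most ops structures in this patch get __do_const: the constify gcc plugin force-qualifies any struct made up purely of function pointers, and this one is assigned at runtime depending on CPU family, so it must opt out. A hand-constified sketch of what the plugin automates in the common case (names trimmed and hypothetical):

#include <stdio.h>

struct decoder_ops {			/* hypothetical trimmed amd_decoder_ops */
	int (*mc0_mce)(unsigned short ec, unsigned char xec);
};

static int fam15h_mc0_mce(unsigned short ec, unsigned char xec)
{
	return ec == 0 && xec == 0;	/* stand-in decode logic */
}

/* What __do_const / the plugin produces: the ops table is const, lands
 * in .rodata, and cannot be retargeted at runtime. */
static const struct decoder_ops fam15h_ops = {
	.mc0_mce = fam15h_mc0_mce,
};

int main(void)
{
	printf("decoded: %d\n", fam15h_ops.mc0_mce(0, 0));
	return 0;
}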
38894diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
38895index 57ea7f4..af06b76 100644
38896--- a/drivers/firewire/core-card.c
38897+++ b/drivers/firewire/core-card.c
38898@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
38899 const struct fw_card_driver *driver,
38900 struct device *device)
38901 {
38902- static atomic_t index = ATOMIC_INIT(-1);
38903+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
38904
38905- card->index = atomic_inc_return(&index);
38906+ card->index = atomic_inc_return_unchecked(&index);
38907 card->driver = driver;
38908 card->device = device;
38909 card->current_tlabel = 0;
38910@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
38911
38912 void fw_core_remove_card(struct fw_card *card)
38913 {
38914- struct fw_card_driver dummy_driver = dummy_driver_template;
38915+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
38916
38917 card->driver->update_phy_reg(card, 4,
38918 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
38919diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
38920index de4aa40..49ab1f2 100644
38921--- a/drivers/firewire/core-device.c
38922+++ b/drivers/firewire/core-device.c
38923@@ -253,7 +253,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
38924 struct config_rom_attribute {
38925 struct device_attribute attr;
38926 u32 key;
38927-};
38928+} __do_const;
38929
38930 static ssize_t show_immediate(struct device *dev,
38931 struct device_attribute *dattr, char *buf)
38932diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
38933index 0e79951..b180217 100644
38934--- a/drivers/firewire/core-transaction.c
38935+++ b/drivers/firewire/core-transaction.c
38936@@ -38,6 +38,7 @@
38937 #include <linux/timer.h>
38938 #include <linux/types.h>
38939 #include <linux/workqueue.h>
38940+#include <linux/sched.h>
38941
38942 #include <asm/byteorder.h>
38943
38944diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
38945index 515a42c..5ecf3ba 100644
38946--- a/drivers/firewire/core.h
38947+++ b/drivers/firewire/core.h
38948@@ -111,6 +111,7 @@ struct fw_card_driver {
38949
38950 int (*stop_iso)(struct fw_iso_context *ctx);
38951 };
38952+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
38953
38954 void fw_card_initialize(struct fw_card *card,
38955 const struct fw_card_driver *driver, struct device *device);
38956diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
38957index 94a58a0..f5eba42 100644
38958--- a/drivers/firmware/dmi-id.c
38959+++ b/drivers/firmware/dmi-id.c
38960@@ -16,7 +16,7 @@
38961 struct dmi_device_attribute{
38962 struct device_attribute dev_attr;
38963 int field;
38964-};
38965+} __do_const;
38966 #define to_dmi_dev_attr(_dev_attr) \
38967 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
38968
38969diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
38970index c7e81ff..94a7401 100644
38971--- a/drivers/firmware/dmi_scan.c
38972+++ b/drivers/firmware/dmi_scan.c
38973@@ -835,7 +835,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
38974 if (buf == NULL)
38975 return -1;
38976
38977- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
38978+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
38979
38980 iounmap(buf);
38981 return 0;
38982diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
38983index 1491dd4..aa910db 100644
38984--- a/drivers/firmware/efi/cper.c
38985+++ b/drivers/firmware/efi/cper.c
38986@@ -41,12 +41,12 @@
38987 */
38988 u64 cper_next_record_id(void)
38989 {
38990- static atomic64_t seq;
38991+ static atomic64_unchecked_t seq;
38992
38993- if (!atomic64_read(&seq))
38994- atomic64_set(&seq, ((u64)get_seconds()) << 32);
38995+ if (!atomic64_read_unchecked(&seq))
38996+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
38997
38998- return atomic64_inc_return(&seq);
38999+ return atomic64_inc_return_unchecked(&seq);
39000 }
39001 EXPORT_SYMBOL_GPL(cper_next_record_id);
39002
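cper_next_record_id() seeds the high 32 bits of the record ID with the boot-time seconds once and then simply counts; the hunk above only swaps in the unchecked atomics, since wrapping is harmless for an ID stream. A userspace model of the ID scheme, with time() standing in for get_seconds() and the seeding written as a compare-and-swap for good measure:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t next_record_id(void)
{
	static _Atomic uint64_t seq;
	uint64_t cur = atomic_load(&seq);

	if (cur == 0) {
		/* Seed the high half with the epoch seconds, exactly once. */
		uint64_t seeded = (uint64_t)time(NULL) << 32;
		atomic_compare_exchange_strong(&seq, &cur, seeded);
	}
	return atomic_fetch_add(&seq, 1) + 1;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)next_record_id());
	printf("%#llx\n", (unsigned long long)next_record_id());
	return 0;
}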
39003diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
39004index 2e2fbde..7676c8b 100644
39005--- a/drivers/firmware/efi/efi.c
39006+++ b/drivers/firmware/efi/efi.c
39007@@ -81,14 +81,16 @@ static struct attribute_group efi_subsys_attr_group = {
39008 };
39009
39010 static struct efivars generic_efivars;
39011-static struct efivar_operations generic_ops;
39012+static efivar_operations_no_const generic_ops __read_only;
39013
39014 static int generic_ops_register(void)
39015 {
39016- generic_ops.get_variable = efi.get_variable;
39017- generic_ops.set_variable = efi.set_variable;
39018- generic_ops.get_next_variable = efi.get_next_variable;
39019- generic_ops.query_variable_store = efi_query_variable_store;
39020+ pax_open_kernel();
39021+ *(void **)&generic_ops.get_variable = efi.get_variable;
39022+ *(void **)&generic_ops.set_variable = efi.set_variable;
39023+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
39024+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
39025+ pax_close_kernel();
39026
39027 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
39028 }
39029diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
39030index 3dc2482..7bd2f61 100644
39031--- a/drivers/firmware/efi/efivars.c
39032+++ b/drivers/firmware/efi/efivars.c
39033@@ -456,7 +456,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
39034 static int
39035 create_efivars_bin_attributes(void)
39036 {
39037- struct bin_attribute *attr;
39038+ bin_attribute_no_const *attr;
39039 int error;
39040
39041 /* new_var */
39042diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
39043index 2a90ba6..07f3733 100644
39044--- a/drivers/firmware/google/memconsole.c
39045+++ b/drivers/firmware/google/memconsole.c
39046@@ -147,7 +147,9 @@ static int __init memconsole_init(void)
39047 if (!found_memconsole())
39048 return -ENODEV;
39049
39050- memconsole_bin_attr.size = memconsole_length;
39051+ pax_open_kernel();
39052+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
39053+ pax_close_kernel();
39054
39055 ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
39056
39057diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
39058index 814addb..0937d7f 100644
39059--- a/drivers/gpio/gpio-ich.c
39060+++ b/drivers/gpio/gpio-ich.c
39061@@ -71,7 +71,7 @@ struct ichx_desc {
39062 /* Some chipsets have quirks, let these use their own request/get */
39063 int (*request)(struct gpio_chip *chip, unsigned offset);
39064 int (*get)(struct gpio_chip *chip, unsigned offset);
39065-};
39066+} __do_const;
39067
39068 static struct {
39069 spinlock_t lock;
39070diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
39071index 9902732..64b62dd 100644
39072--- a/drivers/gpio/gpio-vr41xx.c
39073+++ b/drivers/gpio/gpio-vr41xx.c
39074@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
39075 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
39076 maskl, pendl, maskh, pendh);
39077
39078- atomic_inc(&irq_err_count);
39079+ atomic_inc_unchecked(&irq_err_count);
39080
39081 return -EINVAL;
39082 }
39083diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
39084index 62d0ff3..073dbf3 100644
39085--- a/drivers/gpu/drm/armada/armada_drv.c
39086+++ b/drivers/gpu/drm/armada/armada_drv.c
39087@@ -68,15 +68,7 @@ void __armada_drm_queue_unref_work(struct drm_device *dev,
39088 {
39089 struct armada_private *priv = dev->dev_private;
39090
39091- /*
39092- * Yes, we really must jump through these hoops just to store a
39093- * _pointer_ to something into the kfifo. This is utterly insane
39094- * and idiotic, because the kfifo requires the _data_ pointed to by
39095- * the pointer const, not the pointer itself. Not only that, but
39096- * you have to pass a pointer _to_ the pointer you want stored.
39097- */
39098- const struct drm_framebuffer *silly_api_alert = fb;
39099- WARN_ON(!kfifo_put(&priv->fb_unref, &silly_api_alert));
39100+ WARN_ON(!kfifo_put(&priv->fb_unref, fb));
39101 schedule_work(&priv->fb_unref_work);
39102 }
39103
39104diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
39105index d6cf77c..2842146 100644
39106--- a/drivers/gpu/drm/drm_crtc.c
39107+++ b/drivers/gpu/drm/drm_crtc.c
39108@@ -3102,7 +3102,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
39109 goto done;
39110 }
39111
39112- if (copy_to_user(&enum_ptr[copied].name,
39113+ if (copy_to_user(enum_ptr[copied].name,
39114 &prop_enum->name, DRM_PROP_NAME_LEN)) {
39115 ret = -EFAULT;
39116 goto done;
39117diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
39118index 01361ab..891e821 100644
39119--- a/drivers/gpu/drm/drm_crtc_helper.c
39120+++ b/drivers/gpu/drm/drm_crtc_helper.c
39121@@ -338,7 +338,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
39122 struct drm_crtc *tmp;
39123 int crtc_mask = 1;
39124
39125- WARN(!crtc, "checking null crtc?\n");
39126+ BUG_ON(!crtc);
39127
39128 dev = crtc->dev;
39129
39130diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
39131index d9137e4..69b73a0 100644
39132--- a/drivers/gpu/drm/drm_drv.c
39133+++ b/drivers/gpu/drm/drm_drv.c
39134@@ -233,7 +233,7 @@ module_exit(drm_core_exit);
39135 /**
39136 * Copy an IOCTL return string to user space
39137 */
39138-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
39139+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
39140 {
39141 int len;
39142
39143@@ -303,7 +303,7 @@ long drm_ioctl(struct file *filp,
39144 struct drm_file *file_priv = filp->private_data;
39145 struct drm_device *dev;
39146 const struct drm_ioctl_desc *ioctl = NULL;
39147- drm_ioctl_t *func;
39148+ drm_ioctl_no_const_t func;
39149 unsigned int nr = DRM_IOCTL_NR(cmd);
39150 int retcode = -EINVAL;
39151 char stack_kdata[128];
39152diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
39153index c5b929c..8a3b8be 100644
39154--- a/drivers/gpu/drm/drm_fops.c
39155+++ b/drivers/gpu/drm/drm_fops.c
39156@@ -97,7 +97,7 @@ int drm_open(struct inode *inode, struct file *filp)
39157 if (drm_device_is_unplugged(dev))
39158 return -ENODEV;
39159
39160- if (!dev->open_count++)
39161+ if (local_inc_return(&dev->open_count) == 1)
39162 need_setup = 1;
39163 mutex_lock(&dev->struct_mutex);
39164 old_imapping = inode->i_mapping;
39165@@ -127,7 +127,7 @@ err_undo:
39166 iput(container_of(dev->dev_mapping, struct inode, i_data));
39167 dev->dev_mapping = old_mapping;
39168 mutex_unlock(&dev->struct_mutex);
39169- dev->open_count--;
39170+ local_dec(&dev->open_count);
39171 return retcode;
39172 }
39173 EXPORT_SYMBOL(drm_open);
39174@@ -467,7 +467,7 @@ int drm_release(struct inode *inode, struct file *filp)
39175
39176 mutex_lock(&drm_global_mutex);
39177
39178- DRM_DEBUG("open_count = %d\n", dev->open_count);
39179+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
39180
39181 if (dev->driver->preclose)
39182 dev->driver->preclose(dev, file_priv);
39183@@ -476,10 +476,10 @@ int drm_release(struct inode *inode, struct file *filp)
39184 * Begin inline drm_release
39185 */
39186
39187- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
39188+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
39189 task_pid_nr(current),
39190 (long)old_encode_dev(file_priv->minor->device),
39191- dev->open_count);
39192+ local_read(&dev->open_count));
39193
39194 /* Release any auth tokens that might point to this file_priv,
39195 (do that under the drm_global_mutex) */
39196@@ -577,7 +577,7 @@ int drm_release(struct inode *inode, struct file *filp)
39197 * End inline drm_release
39198 */
39199
39200- if (!--dev->open_count) {
39201+ if (local_dec_and_test(&dev->open_count)) {
39202 if (atomic_read(&dev->ioctl_count)) {
39203 DRM_ERROR("Device busy: %d\n",
39204 atomic_read(&dev->ioctl_count));
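The drm_fops.c conversion replaces the plain int open_count with a local_t while keeping the old edge detection exactly: "!dev->open_count++" (first opener) becomes local_inc_return() == 1, and "!--dev->open_count" (last closer) becomes local_dec_and_test(). A userspace model of the two transformations, using C11 atomics as a stand-in for local_t:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long open_count;		/* local_t in the patched kernel */

static void model_drm_open(void)
{
	/* was: if (!dev->open_count++)  --  now: local_inc_return() == 1 */
	if (atomic_fetch_add(&open_count, 1) + 1 == 1)
		puts("first opener: need_setup");
}

static void model_drm_release(void)
{
	/* was: if (!--dev->open_count)  --  now: local_dec_and_test() */
	if (atomic_fetch_sub(&open_count, 1) - 1 == 0)
		puts("last closer: tear down");
}

int main(void)
{
	model_drm_open();
	model_drm_open();
	model_drm_release();
	model_drm_release();
	return 0;
}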
39205diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
39206index 3d2e91c..d31c4c9 100644
39207--- a/drivers/gpu/drm/drm_global.c
39208+++ b/drivers/gpu/drm/drm_global.c
39209@@ -36,7 +36,7 @@
39210 struct drm_global_item {
39211 struct mutex mutex;
39212 void *object;
39213- int refcount;
39214+ atomic_t refcount;
39215 };
39216
39217 static struct drm_global_item glob[DRM_GLOBAL_NUM];
39218@@ -49,7 +49,7 @@ void drm_global_init(void)
39219 struct drm_global_item *item = &glob[i];
39220 mutex_init(&item->mutex);
39221 item->object = NULL;
39222- item->refcount = 0;
39223+ atomic_set(&item->refcount, 0);
39224 }
39225 }
39226
39227@@ -59,7 +59,7 @@ void drm_global_release(void)
39228 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
39229 struct drm_global_item *item = &glob[i];
39230 BUG_ON(item->object != NULL);
39231- BUG_ON(item->refcount != 0);
39232+ BUG_ON(atomic_read(&item->refcount) != 0);
39233 }
39234 }
39235
39236@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
39237 struct drm_global_item *item = &glob[ref->global_type];
39238
39239 mutex_lock(&item->mutex);
39240- if (item->refcount == 0) {
39241+ if (atomic_read(&item->refcount) == 0) {
39242 item->object = kzalloc(ref->size, GFP_KERNEL);
39243 if (unlikely(item->object == NULL)) {
39244 ret = -ENOMEM;
39245@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
39246 goto out_err;
39247
39248 }
39249- ++item->refcount;
39250+ atomic_inc(&item->refcount);
39251 ref->object = item->object;
39252 mutex_unlock(&item->mutex);
39253 return 0;
39254@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
39255 struct drm_global_item *item = &glob[ref->global_type];
39256
39257 mutex_lock(&item->mutex);
39258- BUG_ON(item->refcount == 0);
39259+ BUG_ON(atomic_read(&item->refcount) == 0);
39260 BUG_ON(ref->object != item->object);
39261- if (--item->refcount == 0) {
39262+ if (atomic_dec_and_test(&item->refcount)) {
39263 ref->release(ref);
39264 item->object = NULL;
39265 }
39266diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
39267index 7d5a152..d7186da 100644
39268--- a/drivers/gpu/drm/drm_info.c
39269+++ b/drivers/gpu/drm/drm_info.c
39270@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
39271 struct drm_local_map *map;
39272 struct drm_map_list *r_list;
39273
39274- /* Hardcoded from _DRM_FRAME_BUFFER,
39275- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
39276- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
39277- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
39278+ static const char * const types[] = {
39279+ [_DRM_FRAME_BUFFER] = "FB",
39280+ [_DRM_REGISTERS] = "REG",
39281+ [_DRM_SHM] = "SHM",
39282+ [_DRM_AGP] = "AGP",
39283+ [_DRM_SCATTER_GATHER] = "SG",
39284+ [_DRM_CONSISTENT] = "PCI",
39285+ [_DRM_GEM] = "GEM" };
39286 const char *type;
39287 int i;
39288
39289@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
39290 map = r_list->map;
39291 if (!map)
39292 continue;
39293- if (map->type < 0 || map->type > 5)
39294+ if (map->type >= ARRAY_SIZE(types))
39295 type = "??";
39296 else
39297 type = types[map->type];
39298@@ -257,7 +261,11 @@ int drm_vma_info(struct seq_file *m, void *data)
39299 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
39300 vma->vm_flags & VM_LOCKED ? 'l' : '-',
39301 vma->vm_flags & VM_IO ? 'i' : '-',
39302+#ifdef CONFIG_GRKERNSEC_HIDESYM
39303+ 0);
39304+#else
39305 vma->vm_pgoff);
39306+#endif
39307
39308 #if defined(__i386__)
39309 pgprot = pgprot_val(vma->vm_page_prot);
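The drm_vm_info() rewrite replaces a positional string table plus a magic "type > 5" bound with designated initializers keyed by the map-type enum and an ARRAY_SIZE() check, so adding _DRM_GEM cannot leave the name table and the bound silently out of sync. The same idiom in miniature (enum names shortened, hypothetical):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum map_type { MAP_FB, MAP_REG, MAP_SHM, MAP_AGP, MAP_SG, MAP_PCI, MAP_GEM };

static const char * const types[] = {
	[MAP_FB]  = "FB",
	[MAP_REG] = "REG",
	[MAP_SHM] = "SHM",
	[MAP_AGP] = "AGP",
	[MAP_SG]  = "SG",
	[MAP_PCI] = "PCI",
	[MAP_GEM] = "GEM",
};

static const char *type_name(unsigned int t)
{
	/* Bound follows the table automatically; holes would be NULL. */
	return t < ARRAY_SIZE(types) && types[t] ? types[t] : "??";
}

int main(void)
{
	printf("%s %s %s\n", type_name(MAP_GEM), type_name(MAP_SHM),
	       type_name(42));
	return 0;
}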
39310diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
39311index 2f4c434..dd12cd2 100644
39312--- a/drivers/gpu/drm/drm_ioc32.c
39313+++ b/drivers/gpu/drm/drm_ioc32.c
39314@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
39315 request = compat_alloc_user_space(nbytes);
39316 if (!access_ok(VERIFY_WRITE, request, nbytes))
39317 return -EFAULT;
39318- list = (struct drm_buf_desc *) (request + 1);
39319+ list = (struct drm_buf_desc __user *) (request + 1);
39320
39321 if (__put_user(count, &request->count)
39322 || __put_user(list, &request->list))
39323@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
39324 request = compat_alloc_user_space(nbytes);
39325 if (!access_ok(VERIFY_WRITE, request, nbytes))
39326 return -EFAULT;
39327- list = (struct drm_buf_pub *) (request + 1);
39328+ list = (struct drm_buf_pub __user *) (request + 1);
39329
39330 if (__put_user(count, &request->count)
39331 || __put_user(list, &request->list))
39332@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
39333 return 0;
39334 }
39335
39336-drm_ioctl_compat_t *drm_compat_ioctls[] = {
39337+drm_ioctl_compat_t drm_compat_ioctls[] = {
39338 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
39339 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
39340 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
39341@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
39342 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
39343 {
39344 unsigned int nr = DRM_IOCTL_NR(cmd);
39345- drm_ioctl_compat_t *fn;
39346 int ret;
39347
39348 /* Assume that ioctls without an explicit compat routine will just
39349@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
39350 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
39351 return drm_ioctl(filp, cmd, arg);
39352
39353- fn = drm_compat_ioctls[nr];
39354-
39355- if (fn != NULL)
39356- ret = (*fn) (filp, cmd, arg);
39357+ if (drm_compat_ioctls[nr] != NULL)
39358+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
39359 else
39360 ret = drm_ioctl(filp, cmd, arg);
39361
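drm_compat_ioctls goes from an array of pointers-to-function-pointer-type to an array whose element type is the pointer typedef itself, which lets the whole dispatch table be constified; this assumes drm_ioctl_compat_t is redefined as a pointer type elsewhere in the patch. A trimmed model of the table-plus-bounds-check dispatch (the typedef and handler below are hypothetical):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

typedef long (*ioctl_compat_t)(unsigned int cmd, unsigned long arg);

static long compat_version(unsigned int cmd, unsigned long arg)
{
	(void)arg;
	return (long)cmd;		/* stand-in handler */
}

/* Element type is the pointer typedef itself, so the table can be const. */
static const ioctl_compat_t compat_ioctls[] = {
	[0] = compat_version,		/* unhandled slots stay NULL */
};

static long do_compat_ioctl(unsigned int nr, unsigned long arg)
{
	if (nr < ARRAY_SIZE(compat_ioctls) && compat_ioctls[nr] != NULL)
		return (*compat_ioctls[nr])(nr, arg);
	return -1;			/* fall through to the native path */
}

int main(void)
{
	printf("%ld %ld\n", do_compat_ioctl(0, 0), do_compat_ioctl(7, 0));
	return 0;
}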
39362diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
39363index 66dd3a0..3bed6c4 100644
39364--- a/drivers/gpu/drm/drm_stub.c
39365+++ b/drivers/gpu/drm/drm_stub.c
39366@@ -403,7 +403,7 @@ void drm_unplug_dev(struct drm_device *dev)
39367
39368 drm_device_set_unplugged(dev);
39369
39370- if (dev->open_count == 0) {
39371+ if (local_read(&dev->open_count) == 0) {
39372 drm_put_dev(dev);
39373 }
39374 mutex_unlock(&drm_global_mutex);
39375diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
39376index c22c309..ae758c3 100644
39377--- a/drivers/gpu/drm/drm_sysfs.c
39378+++ b/drivers/gpu/drm/drm_sysfs.c
39379@@ -505,7 +505,7 @@ static void drm_sysfs_release(struct device *dev)
39380 */
39381 int drm_sysfs_device_add(struct drm_minor *minor)
39382 {
39383- char *minor_str;
39384+ const char *minor_str;
39385 int r;
39386
39387 if (minor->type == DRM_MINOR_CONTROL)
39388diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
39389index d4d16ed..8fb0b51 100644
39390--- a/drivers/gpu/drm/i810/i810_drv.h
39391+++ b/drivers/gpu/drm/i810/i810_drv.h
39392@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
39393 int page_flipping;
39394
39395 wait_queue_head_t irq_queue;
39396- atomic_t irq_received;
39397- atomic_t irq_emitted;
39398+ atomic_unchecked_t irq_received;
39399+ atomic_unchecked_t irq_emitted;
39400
39401 int front_offset;
39402 } drm_i810_private_t;
39403diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
39404index 6ed45a9..eb6dc41 100644
39405--- a/drivers/gpu/drm/i915/i915_debugfs.c
39406+++ b/drivers/gpu/drm/i915/i915_debugfs.c
39407@@ -702,7 +702,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
39408 I915_READ(GTIMR));
39409 }
39410 seq_printf(m, "Interrupts received: %d\n",
39411- atomic_read(&dev_priv->irq_received));
39412+ atomic_read_unchecked(&dev_priv->irq_received));
39413 for_each_ring(ring, dev_priv, i) {
39414 if (INTEL_INFO(dev)->gen >= 6) {
39415 seq_printf(m,
39416diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
39417index 5c64842..f14bdf8 100644
39418--- a/drivers/gpu/drm/i915/i915_dma.c
39419+++ b/drivers/gpu/drm/i915/i915_dma.c
39420@@ -1271,7 +1271,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
39421 bool can_switch;
39422
39423 spin_lock(&dev->count_lock);
39424- can_switch = (dev->open_count == 0);
39425+ can_switch = (local_read(&dev->open_count) == 0);
39426 spin_unlock(&dev->count_lock);
39427 return can_switch;
39428 }
39429diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
39430index 90fcccb..b8aabc9 100644
39431--- a/drivers/gpu/drm/i915/i915_drv.h
39432+++ b/drivers/gpu/drm/i915/i915_drv.h
39433@@ -1325,7 +1325,7 @@ typedef struct drm_i915_private {
39434 drm_dma_handle_t *status_page_dmah;
39435 struct resource mch_res;
39436
39437- atomic_t irq_received;
39438+ atomic_unchecked_t irq_received;
39439
39440 /* protects the irq masks */
39441 spinlock_t irq_lock;
39442diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
39443index a3ba9a8..ee52ddd 100644
39444--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
39445+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
39446@@ -861,9 +861,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
39447
39448 static int
39449 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
39450- int count)
39451+ unsigned int count)
39452 {
39453- int i;
39454+ unsigned int i;
39455 unsigned relocs_total = 0;
39456 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
39457
39458diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
39459index 3c59584..500f2e9 100644
39460--- a/drivers/gpu/drm/i915/i915_ioc32.c
39461+++ b/drivers/gpu/drm/i915/i915_ioc32.c
39462@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
39463 (unsigned long)request);
39464 }
39465
39466-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
39467+static drm_ioctl_compat_t i915_compat_ioctls[] = {
39468 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
39469 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
39470 [DRM_I915_GETPARAM] = compat_i915_getparam,
39471@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
39472 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
39473 {
39474 unsigned int nr = DRM_IOCTL_NR(cmd);
39475- drm_ioctl_compat_t *fn = NULL;
39476 int ret;
39477
39478 if (nr < DRM_COMMAND_BASE)
39479 return drm_compat_ioctl(filp, cmd, arg);
39480
39481- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
39482- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
39483-
39484- if (fn != NULL)
39485+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) {
39486+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
39487 ret = (*fn) (filp, cmd, arg);
39488- else
39489+ } else
39490 ret = drm_ioctl(filp, cmd, arg);
39491
39492 return ret;
39493diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
39494index f13d5ed..8e6f36d 100644
39495--- a/drivers/gpu/drm/i915/i915_irq.c
39496+++ b/drivers/gpu/drm/i915/i915_irq.c
39497@@ -1420,7 +1420,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
39498 int pipe;
39499 u32 pipe_stats[I915_MAX_PIPES];
39500
39501- atomic_inc(&dev_priv->irq_received);
39502+ atomic_inc_unchecked(&dev_priv->irq_received);
39503
39504 while (true) {
39505 iir = I915_READ(VLV_IIR);
39506@@ -1730,7 +1730,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
39507 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
39508 irqreturn_t ret = IRQ_NONE;
39509
39510- atomic_inc(&dev_priv->irq_received);
39511+ atomic_inc_unchecked(&dev_priv->irq_received);
39512
39513 /* We get interrupts on unclaimed registers, so check for this before we
39514 * do any I915_{READ,WRITE}. */
39515@@ -1800,7 +1800,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
39516 uint32_t tmp = 0;
39517 enum pipe pipe;
39518
39519- atomic_inc(&dev_priv->irq_received);
39520+ atomic_inc_unchecked(&dev_priv->irq_received);
39521
39522 master_ctl = I915_READ(GEN8_MASTER_IRQ);
39523 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
39524@@ -2624,7 +2624,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
39525 {
39526 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
39527
39528- atomic_set(&dev_priv->irq_received, 0);
39529+ atomic_set_unchecked(&dev_priv->irq_received, 0);
39530
39531 I915_WRITE(HWSTAM, 0xeffe);
39532
39533@@ -2642,7 +2642,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
39534 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
39535 int pipe;
39536
39537- atomic_set(&dev_priv->irq_received, 0);
39538+ atomic_set_unchecked(&dev_priv->irq_received, 0);
39539
39540 /* VLV magic */
39541 I915_WRITE(VLV_IMR, 0);
39542@@ -2673,7 +2673,7 @@ static void gen8_irq_preinstall(struct drm_device *dev)
39543 struct drm_i915_private *dev_priv = dev->dev_private;
39544 int pipe;
39545
39546- atomic_set(&dev_priv->irq_received, 0);
39547+ atomic_set_unchecked(&dev_priv->irq_received, 0);
39548
39549 I915_WRITE(GEN8_MASTER_IRQ, 0);
39550 POSTING_READ(GEN8_MASTER_IRQ);
39551@@ -2999,7 +2999,7 @@ static void gen8_irq_uninstall(struct drm_device *dev)
39552 if (!dev_priv)
39553 return;
39554
39555- atomic_set(&dev_priv->irq_received, 0);
39556+ atomic_set_unchecked(&dev_priv->irq_received, 0);
39557
39558 I915_WRITE(GEN8_MASTER_IRQ, 0);
39559
39560@@ -3093,7 +3093,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
39561 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
39562 int pipe;
39563
39564- atomic_set(&dev_priv->irq_received, 0);
39565+ atomic_set_unchecked(&dev_priv->irq_received, 0);
39566
39567 for_each_pipe(pipe)
39568 I915_WRITE(PIPESTAT(pipe), 0);
39569@@ -3179,7 +3179,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
39570 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
39571 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
39572
39573- atomic_inc(&dev_priv->irq_received);
39574+ atomic_inc_unchecked(&dev_priv->irq_received);
39575
39576 iir = I915_READ16(IIR);
39577 if (iir == 0)
39578@@ -3254,7 +3254,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
39579 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
39580 int pipe;
39581
39582- atomic_set(&dev_priv->irq_received, 0);
39583+ atomic_set_unchecked(&dev_priv->irq_received, 0);
39584
39585 if (I915_HAS_HOTPLUG(dev)) {
39586 I915_WRITE(PORT_HOTPLUG_EN, 0);
39587@@ -3361,7 +3361,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
39588 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
39589 int pipe, ret = IRQ_NONE;
39590
39591- atomic_inc(&dev_priv->irq_received);
39592+ atomic_inc_unchecked(&dev_priv->irq_received);
39593
39594 iir = I915_READ(IIR);
39595 do {
39596@@ -3488,7 +3488,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
39597 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
39598 int pipe;
39599
39600- atomic_set(&dev_priv->irq_received, 0);
39601+ atomic_set_unchecked(&dev_priv->irq_received, 0);
39602
39603 I915_WRITE(PORT_HOTPLUG_EN, 0);
39604 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
39605@@ -3604,7 +3604,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
39606 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
39607 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
39608
39609- atomic_inc(&dev_priv->irq_received);
39610+ atomic_inc_unchecked(&dev_priv->irq_received);
39611
39612 iir = I915_READ(IIR);
39613
39614diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
39615index 2bde35d..529646c 100644
39616--- a/drivers/gpu/drm/i915/intel_display.c
39617+++ b/drivers/gpu/drm/i915/intel_display.c
39618@@ -10492,13 +10492,13 @@ struct intel_quirk {
39619 int subsystem_vendor;
39620 int subsystem_device;
39621 void (*hook)(struct drm_device *dev);
39622-};
39623+} __do_const;
39624
39625 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
39626 struct intel_dmi_quirk {
39627 void (*hook)(struct drm_device *dev);
39628 const struct dmi_system_id (*dmi_id_list)[];
39629-};
39630+} __do_const;
39631
39632 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
39633 {
39634@@ -10506,18 +10506,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
39635 return 1;
39636 }
39637
39638-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
39639+static const struct dmi_system_id intel_dmi_quirks_table[] = {
39640 {
39641- .dmi_id_list = &(const struct dmi_system_id[]) {
39642- {
39643- .callback = intel_dmi_reverse_brightness,
39644- .ident = "NCR Corporation",
39645- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
39646- DMI_MATCH(DMI_PRODUCT_NAME, ""),
39647- },
39648- },
39649- { } /* terminating entry */
39650+ .callback = intel_dmi_reverse_brightness,
39651+ .ident = "NCR Corporation",
39652+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
39653+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
39654 },
39655+ },
39656+ { } /* terminating entry */
39657+};
39658+
39659+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
39660+ {
39661+ .dmi_id_list = &intel_dmi_quirks_table,
39662 .hook = quirk_invert_brightness,
39663 },
39664 };
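The intel_dmi_quirks rework hoists the DMI match table out of an anonymous compound literal into a named file-scope array that the quirk entry points at, which is what allows both objects to be const (and hence safe under __do_const). A compilable miniature of the same shape, with drastically simplified hypothetical types:

#include <stdio.h>
#include <string.h>

struct dmi_id { const char *vendor; };		/* hypothetical */
struct quirk {
	const struct dmi_id (*ids)[];		/* points at a named table */
	void (*hook)(void);
};

static void invert_brightness(void) { puts("quirk: invert brightness"); }

static const struct dmi_id ncr_table[] = {
	{ "NCR Corporation" },
	{ 0 }				/* terminating entry */
};

static const struct quirk quirks[] = {
	{ .ids = &ncr_table, .hook = invert_brightness },
};

int main(void)
{
	const char *vendor = "NCR Corporation";	/* pretend DMI data */

	for (unsigned int i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
		for (const struct dmi_id *id = *quirks[i].ids; id->vendor; id++)
			if (strcmp(id->vendor, vendor) == 0)
				quirks[i].hook();
	return 0;
}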
39665diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
39666index ca4bc54..ee598a2 100644
39667--- a/drivers/gpu/drm/mga/mga_drv.h
39668+++ b/drivers/gpu/drm/mga/mga_drv.h
39669@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
39670 u32 clear_cmd;
39671 u32 maccess;
39672
39673- atomic_t vbl_received; /**< Number of vblanks received. */
39674+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
39675 wait_queue_head_t fence_queue;
39676- atomic_t last_fence_retired;
39677+ atomic_unchecked_t last_fence_retired;
39678 u32 next_fence_to_post;
39679
39680 unsigned int fb_cpp;
39681diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
39682index 709e90d..89a1c0d 100644
39683--- a/drivers/gpu/drm/mga/mga_ioc32.c
39684+++ b/drivers/gpu/drm/mga/mga_ioc32.c
39685@@ -189,7 +189,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
39686 return 0;
39687 }
39688
39689-drm_ioctl_compat_t *mga_compat_ioctls[] = {
39690+drm_ioctl_compat_t mga_compat_ioctls[] = {
39691 [DRM_MGA_INIT] = compat_mga_init,
39692 [DRM_MGA_GETPARAM] = compat_mga_getparam,
39693 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
39694@@ -207,18 +207,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
39695 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
39696 {
39697 unsigned int nr = DRM_IOCTL_NR(cmd);
39698- drm_ioctl_compat_t *fn = NULL;
39699 int ret;
39700
39701 if (nr < DRM_COMMAND_BASE)
39702 return drm_compat_ioctl(filp, cmd, arg);
39703
39704- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
39705- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
39706-
39707- if (fn != NULL)
39708+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) {
39709+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
39710 ret = (*fn) (filp, cmd, arg);
39711- else
39712+ } else
39713 ret = drm_ioctl(filp, cmd, arg);
39714
39715 return ret;
39716diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
39717index 2b0ceb8..517e99e 100644
39718--- a/drivers/gpu/drm/mga/mga_irq.c
39719+++ b/drivers/gpu/drm/mga/mga_irq.c
39720@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
39721 if (crtc != 0)
39722 return 0;
39723
39724- return atomic_read(&dev_priv->vbl_received);
39725+ return atomic_read_unchecked(&dev_priv->vbl_received);
39726 }
39727
39728
39729@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
39730 /* VBLANK interrupt */
39731 if (status & MGA_VLINEPEN) {
39732 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
39733- atomic_inc(&dev_priv->vbl_received);
39734+ atomic_inc_unchecked(&dev_priv->vbl_received);
39735 drm_handle_vblank(dev, 0);
39736 handled = 1;
39737 }
39738@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
39739 if ((prim_start & ~0x03) != (prim_end & ~0x03))
39740 MGA_WRITE(MGA_PRIMEND, prim_end);
39741
39742- atomic_inc(&dev_priv->last_fence_retired);
39743+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
39744 DRM_WAKEUP(&dev_priv->fence_queue);
39745 handled = 1;
39746 }
39747@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
39748 * using fences.
39749 */
39750 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
39751- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
39752+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
39753 - *sequence) <= (1 << 23)));
39754
39755 *sequence = cur_fence;
39756diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
39757index 4c3feaa..26391ce 100644
39758--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
39759+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
39760@@ -965,7 +965,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
39761 struct bit_table {
39762 const char id;
39763 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
39764-};
39765+} __no_const;
39766
39767 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
39768
39769diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
39770index 4b0fb6c..67667a9 100644
39771--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
39772+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
39773@@ -96,7 +96,6 @@ struct nouveau_drm {
39774 struct drm_global_reference mem_global_ref;
39775 struct ttm_bo_global_ref bo_global_ref;
39776 struct ttm_bo_device bdev;
39777- atomic_t validate_sequence;
39778 int (*move)(struct nouveau_channel *,
39779 struct ttm_buffer_object *,
39780 struct ttm_mem_reg *, struct ttm_mem_reg *);
39781diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
39782index c1a7e5a..38b8539 100644
39783--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
39784+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
39785@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
39786 unsigned long arg)
39787 {
39788 unsigned int nr = DRM_IOCTL_NR(cmd);
39789- drm_ioctl_compat_t *fn = NULL;
39790+ drm_ioctl_compat_t fn = NULL;
39791 int ret;
39792
39793 if (nr < DRM_COMMAND_BASE)
39794diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
39795index 19e3757..ad16478 100644
39796--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
39797+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
39798@@ -130,11 +130,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
39799 }
39800
39801 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
39802- nouveau_vram_manager_init,
39803- nouveau_vram_manager_fini,
39804- nouveau_vram_manager_new,
39805- nouveau_vram_manager_del,
39806- nouveau_vram_manager_debug
39807+ .init = nouveau_vram_manager_init,
39808+ .takedown = nouveau_vram_manager_fini,
39809+ .get_node = nouveau_vram_manager_new,
39810+ .put_node = nouveau_vram_manager_del,
39811+ .debug = nouveau_vram_manager_debug
39812 };
39813
39814 static int
39815@@ -198,11 +198,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
39816 }
39817
39818 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
39819- nouveau_gart_manager_init,
39820- nouveau_gart_manager_fini,
39821- nouveau_gart_manager_new,
39822- nouveau_gart_manager_del,
39823- nouveau_gart_manager_debug
39824+ .init = nouveau_gart_manager_init,
39825+ .takedown = nouveau_gart_manager_fini,
39826+ .get_node = nouveau_gart_manager_new,
39827+ .put_node = nouveau_gart_manager_del,
39828+ .debug = nouveau_gart_manager_debug
39829 };
39830
39831 #include <core/subdev/vm/nv04.h>
39832@@ -270,11 +270,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
39833 }
39834
39835 const struct ttm_mem_type_manager_func nv04_gart_manager = {
39836- nv04_gart_manager_init,
39837- nv04_gart_manager_fini,
39838- nv04_gart_manager_new,
39839- nv04_gart_manager_del,
39840- nv04_gart_manager_debug
39841+ .init = nv04_gart_manager_init,
39842+ .takedown = nv04_gart_manager_fini,
39843+ .get_node = nv04_gart_manager_new,
39844+ .put_node = nv04_gart_manager_del,
39845+ .debug = nv04_gart_manager_debug
39846 };
39847
39848 int
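The three nouveau manager tables are rewritten from positional to designated initializers; behaviour is identical, but a later reordering or insertion of a ttm_mem_type_manager_func member can no longer bind a callback to the wrong slot, and the constify machinery can match fields by name. The idiom on a trimmed-down hypothetical struct:

#include <stdio.h>

struct mem_manager_func {		/* hypothetical trimmed-down struct */
	int  (*init)(void);
	int  (*takedown)(void);
	void (*debug)(const char *prefix);
};

static int  vram_init(void)           { puts("vram: init");     return 0; }
static int  vram_fini(void)           { puts("vram: takedown"); return 0; }
static void vram_debug(const char *p) { printf("%s: debug\n", p); }

/* Designated form: immune to member reordering or insertion. */
static const struct mem_manager_func vram_manager = {
	.init     = vram_init,
	.takedown = vram_fini,
	.debug    = vram_debug,
};

int main(void)
{
	vram_manager.init();
	vram_manager.debug("vram");
	vram_manager.takedown();
	return 0;
}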
39849diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
39850index 81638d7..2e45854 100644
39851--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
39852+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
39853@@ -65,7 +65,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
39854 bool can_switch;
39855
39856 spin_lock(&dev->count_lock);
39857- can_switch = (dev->open_count == 0);
39858+ can_switch = (local_read(&dev->open_count) == 0);
39859 spin_unlock(&dev->count_lock);
39860 return can_switch;
39861 }
39862diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
39863index eb89653..613cf71 100644
39864--- a/drivers/gpu/drm/qxl/qxl_cmd.c
39865+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
39866@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
39867 int ret;
39868
39869 mutex_lock(&qdev->async_io_mutex);
39870- irq_num = atomic_read(&qdev->irq_received_io_cmd);
39871+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
39872 if (qdev->last_sent_io_cmd > irq_num) {
39873 if (intr)
39874 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
39875- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39876+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39877 else
39878 ret = wait_event_timeout(qdev->io_cmd_event,
39879- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39880+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39881 /* 0 is timeout, just bail, the "hw" has gone away */
39882 if (ret <= 0)
39883 goto out;
39884- irq_num = atomic_read(&qdev->irq_received_io_cmd);
39885+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
39886 }
39887 outb(val, addr);
39888 qdev->last_sent_io_cmd = irq_num + 1;
39889 if (intr)
39890 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
39891- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39892+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39893 else
39894 ret = wait_event_timeout(qdev->io_cmd_event,
39895- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39896+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39897 out:
39898 if (ret > 0)
39899 ret = 0;
39900diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
39901index c3c2bbd..bc3c0fb 100644
39902--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
39903+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
39904@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
39905 struct drm_info_node *node = (struct drm_info_node *) m->private;
39906 struct qxl_device *qdev = node->minor->dev->dev_private;
39907
39908- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
39909- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
39910- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
39911- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
39912+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
39913+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
39914+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
39915+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
39916 seq_printf(m, "%d\n", qdev->irq_received_error);
39917 return 0;
39918 }
39919diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
39920index 7bda32f..dd98fc5 100644
39921--- a/drivers/gpu/drm/qxl/qxl_drv.h
39922+++ b/drivers/gpu/drm/qxl/qxl_drv.h
39923@@ -290,10 +290,10 @@ struct qxl_device {
39924 unsigned int last_sent_io_cmd;
39925
39926 /* interrupt handling */
39927- atomic_t irq_received;
39928- atomic_t irq_received_display;
39929- atomic_t irq_received_cursor;
39930- atomic_t irq_received_io_cmd;
39931+ atomic_unchecked_t irq_received;
39932+ atomic_unchecked_t irq_received_display;
39933+ atomic_unchecked_t irq_received_cursor;
39934+ atomic_unchecked_t irq_received_io_cmd;
39935 unsigned irq_received_error;
39936 wait_queue_head_t display_event;
39937 wait_queue_head_t cursor_event;
39938diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
39939index 7b95c75..9cffb4f 100644
39940--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
39941+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
39942@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
39943
39944 /* TODO copy slow path code from i915 */
39945 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
39946- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
39947+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
39948
39949 {
39950 struct qxl_drawable *draw = fb_cmd;
39951@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
39952 struct drm_qxl_reloc reloc;
39953
39954 if (DRM_COPY_FROM_USER(&reloc,
39955- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
39956+ &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
39957 sizeof(reloc))) {
39958 ret = -EFAULT;
39959 goto out_free_bos;
39960@@ -297,7 +297,7 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
39961 struct drm_qxl_command *commands =
39962 (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
39963
39964- if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
39965+ if (DRM_COPY_FROM_USER(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
39966 sizeof(user_cmd)))
39967 return -EFAULT;
39968
39969diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
39970index 21393dc..329f3a9 100644
39971--- a/drivers/gpu/drm/qxl/qxl_irq.c
39972+++ b/drivers/gpu/drm/qxl/qxl_irq.c
39973@@ -33,19 +33,19 @@ irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS)
39974
39975 pending = xchg(&qdev->ram_header->int_pending, 0);
39976
39977- atomic_inc(&qdev->irq_received);
39978+ atomic_inc_unchecked(&qdev->irq_received);
39979
39980 if (pending & QXL_INTERRUPT_DISPLAY) {
39981- atomic_inc(&qdev->irq_received_display);
39982+ atomic_inc_unchecked(&qdev->irq_received_display);
39983 wake_up_all(&qdev->display_event);
39984 qxl_queue_garbage_collect(qdev, false);
39985 }
39986 if (pending & QXL_INTERRUPT_CURSOR) {
39987- atomic_inc(&qdev->irq_received_cursor);
39988+ atomic_inc_unchecked(&qdev->irq_received_cursor);
39989 wake_up_all(&qdev->cursor_event);
39990 }
39991 if (pending & QXL_INTERRUPT_IO_CMD) {
39992- atomic_inc(&qdev->irq_received_io_cmd);
39993+ atomic_inc_unchecked(&qdev->irq_received_io_cmd);
39994 wake_up_all(&qdev->io_cmd_event);
39995 }
39996 if (pending & QXL_INTERRUPT_ERROR) {
39997@@ -82,10 +82,10 @@ int qxl_irq_init(struct qxl_device *qdev)
39998 init_waitqueue_head(&qdev->io_cmd_event);
39999 INIT_WORK(&qdev->client_monitors_config_work,
40000 qxl_client_monitors_config_work_func);
40001- atomic_set(&qdev->irq_received, 0);
40002- atomic_set(&qdev->irq_received_display, 0);
40003- atomic_set(&qdev->irq_received_cursor, 0);
40004- atomic_set(&qdev->irq_received_io_cmd, 0);
40005+ atomic_set_unchecked(&qdev->irq_received, 0);
40006+ atomic_set_unchecked(&qdev->irq_received_display, 0);
40007+ atomic_set_unchecked(&qdev->irq_received_cursor, 0);
40008+ atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
40009 qdev->irq_received_error = 0;
40010 ret = drm_irq_install(qdev->ddev);
40011 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
40012diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
40013index c7e7e65..7dddd4d 100644
40014--- a/drivers/gpu/drm/qxl/qxl_ttm.c
40015+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
40016@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
40017 }
40018 }
40019
40020-static struct vm_operations_struct qxl_ttm_vm_ops;
40021+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
40022 static const struct vm_operations_struct *ttm_vm_ops;
40023
40024 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
40025@@ -147,8 +147,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
40026 return r;
40027 if (unlikely(ttm_vm_ops == NULL)) {
40028 ttm_vm_ops = vma->vm_ops;
40029+ pax_open_kernel();
40030 qxl_ttm_vm_ops = *ttm_vm_ops;
40031 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
40032+ pax_close_kernel();
40033 }
40034 vma->vm_ops = &qxl_ttm_vm_ops;
40035 return 0;
40036@@ -560,25 +562,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
40037 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
40038 {
40039 #if defined(CONFIG_DEBUG_FS)
40040- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
40041- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
40042- unsigned i;
40043+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
40044+ {
40045+ .name = "qxl_mem_mm",
40046+ .show = &qxl_mm_dump_table,
40047+ },
40048+ {
40049+ .name = "qxl_surf_mm",
40050+ .show = &qxl_mm_dump_table,
40051+ }
40052+ };
40053
40054- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
40055- if (i == 0)
40056- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
40057- else
40058- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
40059- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
40060- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
40061- qxl_mem_types_list[i].driver_features = 0;
40062- if (i == 0)
40063- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
40064- else
40065- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
40066+ pax_open_kernel();
40067+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
40068+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
40069+ pax_close_kernel();
40070
40071- }
40072- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
40073+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
40074 #else
40075 return 0;
40076 #endif
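qxl_mmap() clones TTM's vm_operations table once, overrides only .fault, and installs the clone; since the clone is now __read_only, the one-time copy-and-patch is bracketed with pax_open_kernel()/pax_close_kernel(). The clone-and-override pattern in userspace miniature (hypothetical trimmed ops struct, without the write protection):

#include <stdio.h>

struct vm_ops {				/* hypothetical trimmed vm_operations */
	int  (*fault)(int addr);
	void (*close)(void);
};

static int  ttm_fault(int addr) { printf("ttm fault at %d\n", addr); return 0; }
static void ttm_close(void)     { puts("ttm close"); }

static const struct vm_ops ttm_vm_ops = { ttm_fault, ttm_close };

static int qxl_fault(int addr)
{
	printf("qxl pre-fault at %d\n", addr);
	return ttm_fault(addr);		/* delegate to the original handler */
}

static struct vm_ops qxl_vm_ops;	/* __read_only in the patched kernel */

int main(void)
{
	/* One-time clone-and-override; in the kernel this copy sits between
	 * pax_open_kernel() and pax_close_kernel(). */
	qxl_vm_ops = ttm_vm_ops;
	qxl_vm_ops.fault = qxl_fault;

	qxl_vm_ops.fault(7);
	qxl_vm_ops.close();
	return 0;
}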
40077diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
40078index c451257..0ad2134 100644
40079--- a/drivers/gpu/drm/r128/r128_cce.c
40080+++ b/drivers/gpu/drm/r128/r128_cce.c
40081@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
40082
40083 /* GH: Simple idle check.
40084 */
40085- atomic_set(&dev_priv->idle_count, 0);
40086+ atomic_set_unchecked(&dev_priv->idle_count, 0);
40087
40088 /* We don't support anything other than bus-mastering ring mode,
40089 * but the ring can be in either AGP or PCI space for the ring
40090diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
40091index 56eb5e3..c4ec43d 100644
40092--- a/drivers/gpu/drm/r128/r128_drv.h
40093+++ b/drivers/gpu/drm/r128/r128_drv.h
40094@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
40095 int is_pci;
40096 unsigned long cce_buffers_offset;
40097
40098- atomic_t idle_count;
40099+ atomic_unchecked_t idle_count;
40100
40101 int page_flipping;
40102 int current_page;
40103 u32 crtc_offset;
40104 u32 crtc_offset_cntl;
40105
40106- atomic_t vbl_received;
40107+ atomic_unchecked_t vbl_received;
40108
40109 u32 color_fmt;
40110 unsigned int front_offset;
40111diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
40112index a954c54..9cc595c 100644
40113--- a/drivers/gpu/drm/r128/r128_ioc32.c
40114+++ b/drivers/gpu/drm/r128/r128_ioc32.c
40115@@ -177,7 +177,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
40116 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
40117 }
40118
40119-drm_ioctl_compat_t *r128_compat_ioctls[] = {
40120+drm_ioctl_compat_t r128_compat_ioctls[] = {
40121 [DRM_R128_INIT] = compat_r128_init,
40122 [DRM_R128_DEPTH] = compat_r128_depth,
40123 [DRM_R128_STIPPLE] = compat_r128_stipple,
40124@@ -196,18 +196,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
40125 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40126 {
40127 unsigned int nr = DRM_IOCTL_NR(cmd);
40128- drm_ioctl_compat_t *fn = NULL;
40129 int ret;
40130
40131 if (nr < DRM_COMMAND_BASE)
40132 return drm_compat_ioctl(filp, cmd, arg);
40133
40134- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
40135- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
40136-
40137- if (fn != NULL)
40138+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) {
40139+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
40140 ret = (*fn) (filp, cmd, arg);
40141- else
40142+ } else
40143 ret = drm_ioctl(filp, cmd, arg);
40144
40145 return ret;
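
[r128_compat_ioctls changes from an array of drm_ioctl_compat_t * to an array of drm_ioctl_compat_t; this reads naturally only because the same patch set (in the DRM headers, outside this excerpt) appears to redefine drm_ioctl_compat_t itself as a pointer type, which lets the table be placed in read-only memory. The NULL check on fn is dropped, apparently on the assumption that every indexed slot is populated, so the bounds check alone gates the call. A compact userspace model of that dispatch pattern, with a dense table:]

    #include <stdio.h>

    typedef int (*handler_t)(int arg);   /* pointer typedef, as the patch assumes */

    static int h0(int a) { return a + 1; }
    static int h1(int a) { return a * 2; }

    static handler_t handlers[] = { h0, h1 };   /* every slot populated */

    #define NHANDLERS (sizeof(handlers) / sizeof(handlers[0]))

    static int dispatch(unsigned int nr, int arg)
    {
        if (nr < NHANDLERS) {
            handler_t fn = handlers[nr];   /* no NULL check: table is dense */
            return fn(arg);
        }
        return -1;   /* fallback path, like drm_ioctl() in the original */
    }

    int main(void)
    {
        printf("%d %d %d\n", dispatch(0, 4), dispatch(1, 4), dispatch(9, 4));
        return 0;
    }
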
40146diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
40147index 2ea4f09..d391371 100644
40148--- a/drivers/gpu/drm/r128/r128_irq.c
40149+++ b/drivers/gpu/drm/r128/r128_irq.c
40150@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
40151 if (crtc != 0)
40152 return 0;
40153
40154- return atomic_read(&dev_priv->vbl_received);
40155+ return atomic_read_unchecked(&dev_priv->vbl_received);
40156 }
40157
40158 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
40159@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
40160 /* VBLANK interrupt */
40161 if (status & R128_CRTC_VBLANK_INT) {
40162 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
40163- atomic_inc(&dev_priv->vbl_received);
40164+ atomic_inc_unchecked(&dev_priv->vbl_received);
40165 drm_handle_vblank(dev, 0);
40166 return IRQ_HANDLED;
40167 }
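
[The r128 changes above are the first of many instances of the same PaX REFCOUNT idiom in this patch: plain atomic_t arithmetic is instrumented to trap on overflow so reference counts cannot wrap, while counters that are pure statistics and may legitimately wrap (vblank and idle counts here) are converted to atomic_unchecked_t with _unchecked accessors, preserving the old wrapping behaviour. The real accessors are arch-specific assembly; the userspace sketch below only models the policy difference.]

    #include <stdio.h>
    #include <stdlib.h>
    #include <limits.h>

    /* Checked increment: models REFCOUNT, which traps on overflow. */
    static void atomic_inc_checked(int *v)
    {
        if (*v == INT_MAX) {
            fprintf(stderr, "refcount overflow detected, aborting\n");
            abort();
        }
        (*v)++;
    }

    /* Unchecked increment: plain wrapping counter, fine for statistics. */
    static void atomic_inc_unchecked(unsigned int *v)
    {
        (*v)++;   /* may wrap; callers must tolerate that */
    }

    int main(void)
    {
        int refs = INT_MAX - 1;
        unsigned int vbl = UINT_MAX - 1;

        atomic_inc_checked(&refs);     /* ok: reaches INT_MAX */
        atomic_inc_unchecked(&vbl);    /* ok */
        atomic_inc_unchecked(&vbl);    /* wraps to 0, silently and legally */
        /* atomic_inc_checked(&refs);     would abort here */
        printf("refs=%d vbl=%u\n", refs, vbl);
        return 0;
    }
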
40168diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
40169index 01dd9ae..6352f04 100644
40170--- a/drivers/gpu/drm/r128/r128_state.c
40171+++ b/drivers/gpu/drm/r128/r128_state.c
40172@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
40173
40174 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
40175 {
40176- if (atomic_read(&dev_priv->idle_count) == 0)
40177+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
40178 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
40179 else
40180- atomic_set(&dev_priv->idle_count, 0);
40181+ atomic_set_unchecked(&dev_priv->idle_count, 0);
40182 }
40183
40184 #endif
40185diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
40186index af85299..ed9ac8d 100644
40187--- a/drivers/gpu/drm/radeon/mkregtable.c
40188+++ b/drivers/gpu/drm/radeon/mkregtable.c
40189@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
40190 regex_t mask_rex;
40191 regmatch_t match[4];
40192 char buf[1024];
40193- size_t end;
40194+ long end;
40195 int len;
40196 int done = 0;
40197 int r;
40198 unsigned o;
40199 struct offset *offset;
40200 char last_reg_s[10];
40201- int last_reg;
40202+ unsigned long last_reg;
40203
40204 if (regcomp
40205 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
40206diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
40207index 39b033b..6efc056 100644
40208--- a/drivers/gpu/drm/radeon/radeon_device.c
40209+++ b/drivers/gpu/drm/radeon/radeon_device.c
40210@@ -1120,7 +1120,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
40211 bool can_switch;
40212
40213 spin_lock(&dev->count_lock);
40214- can_switch = (dev->open_count == 0);
40215+ can_switch = (local_read(&dev->open_count) == 0);
40216 spin_unlock(&dev->count_lock);
40217 return can_switch;
40218 }
40219diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
40220index 00e0d44..08381a4 100644
40221--- a/drivers/gpu/drm/radeon/radeon_drv.h
40222+++ b/drivers/gpu/drm/radeon/radeon_drv.h
40223@@ -262,7 +262,7 @@ typedef struct drm_radeon_private {
40224
40225 /* SW interrupt */
40226 wait_queue_head_t swi_queue;
40227- atomic_t swi_emitted;
40228+ atomic_unchecked_t swi_emitted;
40229 int vblank_crtc;
40230 uint32_t irq_enable_reg;
40231 uint32_t r500_disp_irq_reg;
40232diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
40233index bdb0f93..5ff558f 100644
40234--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
40235+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
40236@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
40237 request = compat_alloc_user_space(sizeof(*request));
40238 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
40239 || __put_user(req32.param, &request->param)
40240- || __put_user((void __user *)(unsigned long)req32.value,
40241+ || __put_user((unsigned long)req32.value,
40242 &request->value))
40243 return -EFAULT;
40244
40245@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
40246 #define compat_radeon_cp_setparam NULL
40247 #endif /* X86_64 || IA64 */
40248
40249-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
40250+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
40251 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
40252 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
40253 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
40254@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
40255 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40256 {
40257 unsigned int nr = DRM_IOCTL_NR(cmd);
40258- drm_ioctl_compat_t *fn = NULL;
40259 int ret;
40260
40261 if (nr < DRM_COMMAND_BASE)
40262 return drm_compat_ioctl(filp, cmd, arg);
40263
40264- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
40265- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
40266-
40267- if (fn != NULL)
40268+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) {
40269+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
40270 ret = (*fn) (filp, cmd, arg);
40271- else
40272+ } else
40273 ret = drm_ioctl(filp, cmd, arg);
40274
40275 return ret;
40276diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
40277index 8d68e97..9dcfed8 100644
40278--- a/drivers/gpu/drm/radeon/radeon_irq.c
40279+++ b/drivers/gpu/drm/radeon/radeon_irq.c
40280@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
40281 unsigned int ret;
40282 RING_LOCALS;
40283
40284- atomic_inc(&dev_priv->swi_emitted);
40285- ret = atomic_read(&dev_priv->swi_emitted);
40286+ atomic_inc_unchecked(&dev_priv->swi_emitted);
40287+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
40288
40289 BEGIN_RING(4);
40290 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
40291@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
40292 drm_radeon_private_t *dev_priv =
40293 (drm_radeon_private_t *) dev->dev_private;
40294
40295- atomic_set(&dev_priv->swi_emitted, 0);
40296+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
40297 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
40298
40299 dev->max_vblank_count = 0x001fffff;
40300diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
40301index 4d20910..6726b6d 100644
40302--- a/drivers/gpu/drm/radeon/radeon_state.c
40303+++ b/drivers/gpu/drm/radeon/radeon_state.c
40304@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
40305 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
40306 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
40307
40308- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
40309+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
40310 sarea_priv->nbox * sizeof(depth_boxes[0])))
40311 return -EFAULT;
40312
40313@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
40314 {
40315 drm_radeon_private_t *dev_priv = dev->dev_private;
40316 drm_radeon_getparam_t *param = data;
40317- int value;
40318+ int value = 0;
40319
40320 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
40321
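
[Two defensive changes in radeon_state.c: the DRM_COPY_FROM_USER() call re-validates sarea_priv->nbox against RADEON_NR_SAREA_CLIPRECTS at the point of use, and radeon_cp_getparam() initializes value to 0 so that any path reaching the copy-out without assigning it returns a deterministic zero instead of disclosing stale kernel stack bytes. The sketch below demonstrates that uninitialized-stack-leak class of bug in plain userspace C; the handler names are made up.]

    #include <stdio.h>
    #include <string.h>

    /* Simulates copying a value to userspace that the handler forgot to set. */
    static void leaky_handler(int param, int *out)
    {
        int value;               /* BUG: not initialized */
        if (param == 0)
            value = 42;          /* only one path assigns it */
        memcpy(out, &value, sizeof(value));  /* param != 0 leaks stack bytes */
    }

    static void fixed_handler(int param, int *out)
    {
        int value = 0;           /* the patch's fix: deterministic default */
        if (param == 0)
            value = 42;
        memcpy(out, &value, sizeof(value));
    }

    int main(void)
    {
        int out;
        leaky_handler(1, &out);  /* out now holds whatever was on the stack */
        fixed_handler(1, &out);
        printf("fixed: %d\n", out);   /* always 0 */
        return 0;
    }
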
40322diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
40323index 71245d6..94c556d 100644
40324--- a/drivers/gpu/drm/radeon/radeon_ttm.c
40325+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
40326@@ -784,7 +784,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
40327 man->size = size >> PAGE_SHIFT;
40328 }
40329
40330-static struct vm_operations_struct radeon_ttm_vm_ops;
40331+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
40332 static const struct vm_operations_struct *ttm_vm_ops = NULL;
40333
40334 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
40335@@ -825,8 +825,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
40336 }
40337 if (unlikely(ttm_vm_ops == NULL)) {
40338 ttm_vm_ops = vma->vm_ops;
40339+ pax_open_kernel();
40340 radeon_ttm_vm_ops = *ttm_vm_ops;
40341 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
40342+ pax_close_kernel();
40343 }
40344 vma->vm_ops = &radeon_ttm_vm_ops;
40345 return 0;
40346@@ -855,38 +857,33 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
40347 static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
40348 {
40349 #if defined(CONFIG_DEBUG_FS)
40350- static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
40351- static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
40352- unsigned i;
40353+ static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2] = {
40354+ {
40355+ .name = "radeon_vram_mm",
40356+ .show = &radeon_mm_dump_table,
40357+ },
40358+ {
40359+ .name = "radeon_gtt_mm",
40360+ .show = &radeon_mm_dump_table,
40361+ },
40362+ {
40363+ .name = "ttm_page_pool",
40364+ .show = &ttm_page_alloc_debugfs,
40365+ },
40366+ {
40367+ .name = "ttm_dma_page_pool",
40368+ .show = &ttm_dma_page_alloc_debugfs,
40369+ },
40370+ };
40371+ unsigned i = RADEON_DEBUGFS_MEM_TYPES + 1;
40372
40373- for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
40374- if (i == 0)
40375- sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
40376- else
40377- sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
40378- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
40379- radeon_mem_types_list[i].show = &radeon_mm_dump_table;
40380- radeon_mem_types_list[i].driver_features = 0;
40381- if (i == 0)
40382- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
40383- else
40384- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
40385-
40386- }
40387- /* Add ttm page pool to debugfs */
40388- sprintf(radeon_mem_types_names[i], "ttm_page_pool");
40389- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
40390- radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
40391- radeon_mem_types_list[i].driver_features = 0;
40392- radeon_mem_types_list[i++].data = NULL;
40393+ pax_open_kernel();
40394+ *(void **)&radeon_mem_types_list[0].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
40395+ *(void **)&radeon_mem_types_list[1].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
40396+ pax_close_kernel();
40397 #ifdef CONFIG_SWIOTLB
40398- if (swiotlb_nr_tbl()) {
40399- sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
40400- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
40401- radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
40402- radeon_mem_types_list[i].driver_features = 0;
40403- radeon_mem_types_list[i++].data = NULL;
40404- }
40405+ if (swiotlb_nr_tbl())
40406+ i++;
40407 #endif
40408 return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
40409
40410diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
40411index ae1cb31..5b5b6b7c 100644
40412--- a/drivers/gpu/drm/tegra/dc.c
40413+++ b/drivers/gpu/drm/tegra/dc.c
40414@@ -1064,7 +1064,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
40415 }
40416
40417 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
40418- dc->debugfs_files[i].data = dc;
40419+ *(void **)&dc->debugfs_files[i].data = dc;
40420
40421 err = drm_debugfs_create_files(dc->debugfs_files,
40422 ARRAY_SIZE(debugfs_files),
40423diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
40424index 0cd9bc2..9759be4 100644
40425--- a/drivers/gpu/drm/tegra/hdmi.c
40426+++ b/drivers/gpu/drm/tegra/hdmi.c
40427@@ -57,7 +57,7 @@ struct tegra_hdmi {
40428 bool stereo;
40429 bool dvi;
40430
40431- struct drm_info_list *debugfs_files;
40432+ drm_info_list_no_const *debugfs_files;
40433 struct drm_minor *minor;
40434 struct dentry *debugfs;
40435 };
40436diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
40437index c58eba33..83c2728 100644
40438--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
40439+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
40440@@ -141,10 +141,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
40441 }
40442
40443 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
40444- ttm_bo_man_init,
40445- ttm_bo_man_takedown,
40446- ttm_bo_man_get_node,
40447- ttm_bo_man_put_node,
40448- ttm_bo_man_debug
40449+ .init = ttm_bo_man_init,
40450+ .takedown = ttm_bo_man_takedown,
40451+ .get_node = ttm_bo_man_get_node,
40452+ .put_node = ttm_bo_man_put_node,
40453+ .debug = ttm_bo_man_debug
40454 };
40455 EXPORT_SYMBOL(ttm_bo_manager_func);
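
[ttm_bo_manager_func is rewritten from positional to designated initializers. Functionally identical here, but designated initializers bind by field name, which keeps the table correct if the structure's layout ever changes, something this patch set depends on for its structure-layout randomization; positional initializers would silently fill the wrong slots. A self-contained example of the style:]

    #include <stdio.h>

    struct ops {
        int  (*init)(void);
        void (*takedown)(void);
        void (*debug)(const char *msg);
    };

    static int  my_init(void)           { puts("init");  return 0; }
    static void my_takedown(void)       { puts("takedown"); }
    static void my_debug(const char *m) { printf("debug: %s\n", m); }

    /* Binding by name stays correct even if the struct's field order
     * is later changed or randomized. */
    static const struct ops my_ops = {
        .init     = my_init,
        .takedown = my_takedown,
        .debug    = my_debug,
    };

    int main(void)
    {
        my_ops.init();
        my_ops.debug("hello");
        my_ops.takedown();
        return 0;
    }
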
40456diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
40457index dbc2def..0a9f710 100644
40458--- a/drivers/gpu/drm/ttm/ttm_memory.c
40459+++ b/drivers/gpu/drm/ttm/ttm_memory.c
40460@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
40461 zone->glob = glob;
40462 glob->zone_kernel = zone;
40463 ret = kobject_init_and_add(
40464- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
40465+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
40466 if (unlikely(ret != 0)) {
40467 kobject_put(&zone->kobj);
40468 return ret;
40469@@ -347,7 +347,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
40470 zone->glob = glob;
40471 glob->zone_dma32 = zone;
40472 ret = kobject_init_and_add(
40473- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
40474+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
40475 if (unlikely(ret != 0)) {
40476 kobject_put(&zone->kobj);
40477 return ret;
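
[Both kobject_init_and_add() hunks fix a format-string bug: the function's trailing arguments form a printf-style format, so passing zone->name directly would let any '%' in the name be parsed as a conversion specifier; routing it through a literal "%s" makes the name plain data. A userspace model:]

    #include <stdio.h>
    #include <stdarg.h>

    /* Stand-in for kobject_init_and_add(): last args are a printf format. */
    static void add_named(const char *fmt, ...)
    {
        va_list ap;
        va_start(ap, fmt);
        vprintf(fmt, ap);
        va_end(ap);
        putchar('\n');
    }

    int main(void)
    {
        const char *name = "zone_%x_kernel";   /* hostile or accidental '%' */

        /* add_named(name);        BUG: name parsed as a format string */
        add_named("%s", name);  /* fix: name is now plain data */
        return 0;
    }
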
40478diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
40479index 863bef9..cba15cf 100644
40480--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
40481+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
40482@@ -391,9 +391,9 @@ out:
40483 static unsigned long
40484 ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
40485 {
40486- static atomic_t start_pool = ATOMIC_INIT(0);
40487+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
40488 unsigned i;
40489- unsigned pool_offset = atomic_add_return(1, &start_pool);
40490+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
40491 struct ttm_page_pool *pool;
40492 int shrink_pages = sc->nr_to_scan;
40493 unsigned long freed = 0;
40494diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
40495index 97e9d61..bf23c461 100644
40496--- a/drivers/gpu/drm/udl/udl_fb.c
40497+++ b/drivers/gpu/drm/udl/udl_fb.c
40498@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
40499 fb_deferred_io_cleanup(info);
40500 kfree(info->fbdefio);
40501 info->fbdefio = NULL;
40502- info->fbops->fb_mmap = udl_fb_mmap;
40503 }
40504
40505 pr_warn("released /dev/fb%d user=%d count=%d\n",
40506diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
40507index a811ef2..ff99b05 100644
40508--- a/drivers/gpu/drm/via/via_drv.h
40509+++ b/drivers/gpu/drm/via/via_drv.h
40510@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
40511 typedef uint32_t maskarray_t[5];
40512
40513 typedef struct drm_via_irq {
40514- atomic_t irq_received;
40515+ atomic_unchecked_t irq_received;
40516 uint32_t pending_mask;
40517 uint32_t enable_mask;
40518 wait_queue_head_t irq_queue;
40519@@ -75,7 +75,7 @@ typedef struct drm_via_private {
40520 struct timeval last_vblank;
40521 int last_vblank_valid;
40522 unsigned usec_per_vblank;
40523- atomic_t vbl_received;
40524+ atomic_unchecked_t vbl_received;
40525 drm_via_state_t hc_state;
40526 char pci_buf[VIA_PCI_BUF_SIZE];
40527 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
40528diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
40529index ac98964..5dbf512 100644
40530--- a/drivers/gpu/drm/via/via_irq.c
40531+++ b/drivers/gpu/drm/via/via_irq.c
40532@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
40533 if (crtc != 0)
40534 return 0;
40535
40536- return atomic_read(&dev_priv->vbl_received);
40537+ return atomic_read_unchecked(&dev_priv->vbl_received);
40538 }
40539
40540 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
40541@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
40542
40543 status = VIA_READ(VIA_REG_INTERRUPT);
40544 if (status & VIA_IRQ_VBLANK_PENDING) {
40545- atomic_inc(&dev_priv->vbl_received);
40546- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
40547+ atomic_inc_unchecked(&dev_priv->vbl_received);
40548+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
40549 do_gettimeofday(&cur_vblank);
40550 if (dev_priv->last_vblank_valid) {
40551 dev_priv->usec_per_vblank =
40552@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
40553 dev_priv->last_vblank = cur_vblank;
40554 dev_priv->last_vblank_valid = 1;
40555 }
40556- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
40557+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
40558 DRM_DEBUG("US per vblank is: %u\n",
40559 dev_priv->usec_per_vblank);
40560 }
40561@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
40562
40563 for (i = 0; i < dev_priv->num_irqs; ++i) {
40564 if (status & cur_irq->pending_mask) {
40565- atomic_inc(&cur_irq->irq_received);
40566+ atomic_inc_unchecked(&cur_irq->irq_received);
40567 DRM_WAKEUP(&cur_irq->irq_queue);
40568 handled = 1;
40569 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
40570@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
40571 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
40572 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
40573 masks[irq][4]));
40574- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
40575+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
40576 } else {
40577 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
40578 (((cur_irq_sequence =
40579- atomic_read(&cur_irq->irq_received)) -
40580+ atomic_read_unchecked(&cur_irq->irq_received)) -
40581 *sequence) <= (1 << 23)));
40582 }
40583 *sequence = cur_irq_sequence;
40584@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
40585 }
40586
40587 for (i = 0; i < dev_priv->num_irqs; ++i) {
40588- atomic_set(&cur_irq->irq_received, 0);
40589+ atomic_set_unchecked(&cur_irq->irq_received, 0);
40590 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
40591 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
40592 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
40593@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
40594 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
40595 case VIA_IRQ_RELATIVE:
40596 irqwait->request.sequence +=
40597- atomic_read(&cur_irq->irq_received);
40598+ atomic_read_unchecked(&cur_irq->irq_received);
40599 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
40600 case VIA_IRQ_ABSOLUTE:
40601 break;
40602diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
40603index 20890ad..699e4f2 100644
40604--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
40605+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
40606@@ -342,7 +342,7 @@ struct vmw_private {
40607 * Fencing and IRQs.
40608 */
40609
40610- atomic_t marker_seq;
40611+ atomic_unchecked_t marker_seq;
40612 wait_queue_head_t fence_queue;
40613 wait_queue_head_t fifo_queue;
40614 int fence_queue_waiters; /* Protected by hw_mutex */
40615diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
40616index 3eb1486..0a47ee9 100644
40617--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
40618+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
40619@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
40620 (unsigned int) min,
40621 (unsigned int) fifo->capabilities);
40622
40623- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
40624+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
40625 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
40626 vmw_marker_queue_init(&fifo->marker_queue);
40627 return vmw_fifo_send_fence(dev_priv, &dummy);
40628@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
40629 if (reserveable)
40630 iowrite32(bytes, fifo_mem +
40631 SVGA_FIFO_RESERVED);
40632- return fifo_mem + (next_cmd >> 2);
40633+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
40634 } else {
40635 need_bounce = true;
40636 }
40637@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
40638
40639 fm = vmw_fifo_reserve(dev_priv, bytes);
40640 if (unlikely(fm == NULL)) {
40641- *seqno = atomic_read(&dev_priv->marker_seq);
40642+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
40643 ret = -ENOMEM;
40644 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
40645 false, 3*HZ);
40646@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
40647 }
40648
40649 do {
40650- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
40651+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
40652 } while (*seqno == 0);
40653
40654 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
40655diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
40656index c5c054a..46f0548 100644
40657--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
40658+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
40659@@ -153,9 +153,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
40660 }
40661
40662 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
40663- vmw_gmrid_man_init,
40664- vmw_gmrid_man_takedown,
40665- vmw_gmrid_man_get_node,
40666- vmw_gmrid_man_put_node,
40667- vmw_gmrid_man_debug
40668+ .init = vmw_gmrid_man_init,
40669+ .takedown = vmw_gmrid_man_takedown,
40670+ .get_node = vmw_gmrid_man_get_node,
40671+ .put_node = vmw_gmrid_man_put_node,
40672+ .debug = vmw_gmrid_man_debug
40673 };
40674diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
40675index 45d5b5a..f3f5e4e 100644
40676--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
40677+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
40678@@ -141,7 +141,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
40679 int ret;
40680
40681 num_clips = arg->num_clips;
40682- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
40683+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
40684
40685 if (unlikely(num_clips == 0))
40686 return 0;
40687@@ -225,7 +225,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
40688 int ret;
40689
40690 num_clips = arg->num_clips;
40691- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
40692+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
40693
40694 if (unlikely(num_clips == 0))
40695 return 0;
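
[The casts above gain a __user qualifier, documenting that clips_ptr comes from an untrusted ioctl argument. __user is a sparse address-space annotation (compiled away otherwise), and the stricter checking this patch set enables turns any direct dereference or unannotated mixing of user and kernel pointers into a build-time diagnostic. A sketch with the annotation stubbed out for normal compilers; copy_from_user_sim is a stand-in, not the kernel API:]

    #include <stdio.h>
    #include <string.h>

    /* sparse's address-space attribute; a no-op under a normal compiler. */
    #ifdef __CHECKER__
    # define __user __attribute__((noderef, address_space(1)))
    #else
    # define __user
    #endif

    struct rect { int x, y, w, h; };

    /* Models copy_from_user(): the sanctioned way to read a __user pointer. */
    static int copy_from_user_sim(void *dst, const void __user *src, size_t n)
    {
        memcpy(dst, (const void *)src, n);   /* the real kernel validates src */
        return 0;
    }

    int main(void)
    {
        struct rect ur = { 1, 2, 3, 4 };
        const struct rect __user *clips_ptr =
            (const struct rect __user *)&ur;  /* annotated cast, as in the patch */
        struct rect kr;

        copy_from_user_sim(&kr, clips_ptr, sizeof(kr));
        printf("%d %d %d %d\n", kr.x, kr.y, kr.w, kr.h);
        return 0;
    }
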
40696diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
40697index 4640adb..e1384ed 100644
40698--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
40699+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
40700@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
40701 * emitted. Then the fence is stale and signaled.
40702 */
40703
40704- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
40705+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
40706 > VMW_FENCE_WRAP);
40707
40708 return ret;
40709@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
40710
40711 if (fifo_idle)
40712 down_read(&fifo_state->rwsem);
40713- signal_seq = atomic_read(&dev_priv->marker_seq);
40714+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
40715 ret = 0;
40716
40717 for (;;) {
40718diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
40719index 8a8725c2..afed796 100644
40720--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
40721+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
40722@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
40723 while (!vmw_lag_lt(queue, us)) {
40724 spin_lock(&queue->lock);
40725 if (list_empty(&queue->head))
40726- seqno = atomic_read(&dev_priv->marker_seq);
40727+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
40728 else {
40729 marker = list_first_entry(&queue->head,
40730 struct vmw_marker, head);
40731diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
40732index ec0ae2d..dc0780b 100644
40733--- a/drivers/gpu/vga/vga_switcheroo.c
40734+++ b/drivers/gpu/vga/vga_switcheroo.c
40735@@ -643,7 +643,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
40736
40737 /* this version is for the case where the power switch is separate
40738 to the device being powered down. */
40739-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
40740+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
40741 {
40742 /* copy over all the bus versions */
40743 if (dev->bus && dev->bus->pm) {
40744@@ -688,7 +688,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
40745 return ret;
40746 }
40747
40748-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
40749+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
40750 {
40751 /* copy over all the bus versions */
40752 if (dev->bus && dev->bus->pm) {
40753diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
40754index 253fe23..0dfec5f 100644
40755--- a/drivers/hid/hid-core.c
40756+++ b/drivers/hid/hid-core.c
40757@@ -2416,7 +2416,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
40758
40759 int hid_add_device(struct hid_device *hdev)
40760 {
40761- static atomic_t id = ATOMIC_INIT(0);
40762+ static atomic_unchecked_t id = ATOMIC_INIT(0);
40763 int ret;
40764
40765 if (WARN_ON(hdev->status & HID_STAT_ADDED))
40766@@ -2450,7 +2450,7 @@ int hid_add_device(struct hid_device *hdev)
40767 /* XXX hack, any other cleaner solution after the driver core
40768 * is converted to allow more than 20 bytes as the device name? */
40769 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
40770- hdev->vendor, hdev->product, atomic_inc_return(&id));
40771+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
40772
40773 hid_debug_register(hdev, dev_name(&hdev->dev));
40774 ret = device_add(&hdev->dev);
40775diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
40776index c13fb5b..55a3802 100644
40777--- a/drivers/hid/hid-wiimote-debug.c
40778+++ b/drivers/hid/hid-wiimote-debug.c
40779@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
40780 else if (size == 0)
40781 return -EIO;
40782
40783- if (copy_to_user(u, buf, size))
40784+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
40785 return -EFAULT;
40786
40787 *off += size;
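
[wiidebug_eeprom_read() gains a size > sizeof(buf) guard ahead of copy_to_user(); without it, an oversized size would make copy_to_user() read past the on-stack buffer and leak adjacent stack memory to userspace. A userspace model of the fixed shape (copy_to_user_sim stands in for copy_to_user and always succeeds):]

    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>

    #define EFAULT 14

    static int copy_to_user_sim(char *dst, const char *src, size_t n)
    {
        memcpy(dst, src, n);
        return 0;                /* 0 = success, like copy_to_user() */
    }

    /* Models the read() handler: never copy more than buf holds. */
    static ssize_t read_eeprom(char *user_buf, size_t size)
    {
        char buf[16];
        size_t have = 8;                     /* bytes actually produced */

        memset(buf, 0xAB, have);
        if (size > have)
            size = have;
        /* The patch's added guard, ahead of the copy-out: */
        if (size > sizeof(buf) || copy_to_user_sim(user_buf, buf, size))
            return -EFAULT;
        return (ssize_t)size;
    }

    int main(void)
    {
        char out[64];
        printf("copied %zd bytes\n", read_eeprom(out, sizeof(out)));
        return 0;
    }
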
40788diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
40789index cedc6da..2c3da2a 100644
40790--- a/drivers/hid/uhid.c
40791+++ b/drivers/hid/uhid.c
40792@@ -47,7 +47,7 @@ struct uhid_device {
40793 struct mutex report_lock;
40794 wait_queue_head_t report_wait;
40795 atomic_t report_done;
40796- atomic_t report_id;
40797+ atomic_unchecked_t report_id;
40798 struct uhid_event report_buf;
40799 };
40800
40801@@ -163,7 +163,7 @@ static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum,
40802
40803 spin_lock_irqsave(&uhid->qlock, flags);
40804 ev->type = UHID_FEATURE;
40805- ev->u.feature.id = atomic_inc_return(&uhid->report_id);
40806+ ev->u.feature.id = atomic_inc_return_unchecked(&uhid->report_id);
40807 ev->u.feature.rnum = rnum;
40808 ev->u.feature.rtype = report_type;
40809
40810@@ -446,7 +446,7 @@ static int uhid_dev_feature_answer(struct uhid_device *uhid,
40811 spin_lock_irqsave(&uhid->qlock, flags);
40812
40813 /* id for old report; drop it silently */
40814- if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id)
40815+ if (atomic_read_unchecked(&uhid->report_id) != ev->u.feature_answer.id)
40816 goto unlock;
40817 if (atomic_read(&uhid->report_done))
40818 goto unlock;
40819diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
40820index cea623c..73011b0 100644
40821--- a/drivers/hv/channel.c
40822+++ b/drivers/hv/channel.c
40823@@ -362,8 +362,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
40824 int ret = 0;
40825 int t;
40826
40827- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
40828- atomic_inc(&vmbus_connection.next_gpadl_handle);
40829+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
40830+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
40831
40832 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
40833 if (ret)
40834diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
40835index f0c5e07..399256e 100644
40836--- a/drivers/hv/hv.c
40837+++ b/drivers/hv/hv.c
40838@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
40839 u64 output_address = (output) ? virt_to_phys(output) : 0;
40840 u32 output_address_hi = output_address >> 32;
40841 u32 output_address_lo = output_address & 0xFFFFFFFF;
40842- void *hypercall_page = hv_context.hypercall_page;
40843+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
40844
40845 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
40846 "=a"(hv_status_lo) : "d" (control_hi),
40847diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
40848index 7e17a54..a50a33d 100644
40849--- a/drivers/hv/hv_balloon.c
40850+++ b/drivers/hv/hv_balloon.c
40851@@ -464,7 +464,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
40852
40853 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
40854 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
40855-static atomic_t trans_id = ATOMIC_INIT(0);
40856+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
40857
40858 static int dm_ring_size = (5 * PAGE_SIZE);
40859
40860@@ -886,7 +886,7 @@ static void hot_add_req(struct work_struct *dummy)
40861 pr_info("Memory hot add failed\n");
40862
40863 dm->state = DM_INITIALIZED;
40864- resp.hdr.trans_id = atomic_inc_return(&trans_id);
40865+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
40866 vmbus_sendpacket(dm->dev->channel, &resp,
40867 sizeof(struct dm_hot_add_response),
40868 (unsigned long)NULL,
40869@@ -960,7 +960,7 @@ static void post_status(struct hv_dynmem_device *dm)
40870 memset(&status, 0, sizeof(struct dm_status));
40871 status.hdr.type = DM_STATUS_REPORT;
40872 status.hdr.size = sizeof(struct dm_status);
40873- status.hdr.trans_id = atomic_inc_return(&trans_id);
40874+ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
40875
40876 /*
40877 * The host expects the guest to report free memory.
40878@@ -980,7 +980,7 @@ static void post_status(struct hv_dynmem_device *dm)
40879 * send the status. This can happen if we were interrupted
40880 * after we picked our transaction ID.
40881 */
40882- if (status.hdr.trans_id != atomic_read(&trans_id))
40883+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
40884 return;
40885
40886 vmbus_sendpacket(dm->dev->channel, &status,
40887@@ -1108,7 +1108,7 @@ static void balloon_up(struct work_struct *dummy)
40888 */
40889
40890 do {
40891- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
40892+ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
40893 ret = vmbus_sendpacket(dm_device.dev->channel,
40894 bl_resp,
40895 bl_resp->hdr.size,
40896@@ -1152,7 +1152,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
40897
40898 memset(&resp, 0, sizeof(struct dm_unballoon_response));
40899 resp.hdr.type = DM_UNBALLOON_RESPONSE;
40900- resp.hdr.trans_id = atomic_inc_return(&trans_id);
40901+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
40902 resp.hdr.size = sizeof(struct dm_unballoon_response);
40903
40904 vmbus_sendpacket(dm_device.dev->channel, &resp,
40905@@ -1215,7 +1215,7 @@ static void version_resp(struct hv_dynmem_device *dm,
40906 memset(&version_req, 0, sizeof(struct dm_version_request));
40907 version_req.hdr.type = DM_VERSION_REQUEST;
40908 version_req.hdr.size = sizeof(struct dm_version_request);
40909- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
40910+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
40911 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
40912 version_req.is_last_attempt = 1;
40913
40914@@ -1385,7 +1385,7 @@ static int balloon_probe(struct hv_device *dev,
40915 memset(&version_req, 0, sizeof(struct dm_version_request));
40916 version_req.hdr.type = DM_VERSION_REQUEST;
40917 version_req.hdr.size = sizeof(struct dm_version_request);
40918- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
40919+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
40920 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
40921 version_req.is_last_attempt = 0;
40922
40923@@ -1416,7 +1416,7 @@ static int balloon_probe(struct hv_device *dev,
40924 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
40925 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
40926 cap_msg.hdr.size = sizeof(struct dm_capabilities);
40927- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
40928+ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
40929
40930 cap_msg.caps.cap_bits.balloon = 1;
40931 cap_msg.caps.cap_bits.hot_add = 1;
40932diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
40933index e055176..c22ff1f 100644
40934--- a/drivers/hv/hyperv_vmbus.h
40935+++ b/drivers/hv/hyperv_vmbus.h
40936@@ -602,7 +602,7 @@ enum vmbus_connect_state {
40937 struct vmbus_connection {
40938 enum vmbus_connect_state conn_state;
40939
40940- atomic_t next_gpadl_handle;
40941+ atomic_unchecked_t next_gpadl_handle;
40942
40943 /*
40944 * Represents channel interrupts. Each bit position represents a
40945diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
40946index 48aad4f..c768fb2 100644
40947--- a/drivers/hv/vmbus_drv.c
40948+++ b/drivers/hv/vmbus_drv.c
40949@@ -846,10 +846,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
40950 {
40951 int ret = 0;
40952
40953- static atomic_t device_num = ATOMIC_INIT(0);
40954+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
40955
40956 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
40957- atomic_inc_return(&device_num));
40958+ atomic_inc_return_unchecked(&device_num));
40959
40960 child_device_obj->device.bus = &hv_bus;
40961 child_device_obj->device.parent = &hv_acpi_dev->dev;
40962diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
40963index 6a34f7f..aa4c3a6 100644
40964--- a/drivers/hwmon/acpi_power_meter.c
40965+++ b/drivers/hwmon/acpi_power_meter.c
40966@@ -117,7 +117,7 @@ struct sensor_template {
40967 struct device_attribute *devattr,
40968 const char *buf, size_t count);
40969 int index;
40970-};
40971+} __do_const;
40972
40973 /* Averaging interval */
40974 static int update_avg_interval(struct acpi_power_meter_resource *resource)
40975@@ -632,7 +632,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
40976 struct sensor_template *attrs)
40977 {
40978 struct device *dev = &resource->acpi_dev->dev;
40979- struct sensor_device_attribute *sensors =
40980+ sensor_device_attribute_no_const *sensors =
40981 &resource->sensors[resource->num_sensors];
40982 int res = 0;
40983
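
[__do_const is a marker for the PaX constify gcc plugin: structures consisting largely of function pointers are force-qualified const so every instance lands in read-only memory, and the companion *_no_const typedefs used throughout the hunks below opt individual objects back out when they genuinely must be written at runtime. The plugin itself is compiler machinery; plain const reproduces the effect for illustration:]

    #include <stdio.h>
    #include <stddef.h>

    /* What the constify plugin effectively does to ops-only structs: */
    struct meter_ops {
        int (*read)(void);
        int (*write)(int);
    };   /* imagine __do_const here: all instances become const */

    typedef struct meter_ops meter_ops_no_const;  /* opt-out for mutable copies */

    static int rd(void)  { return 7; }
    static int wr(int v) { return v; }

    static const struct meter_ops fixed_ops = { .read = rd, .write = wr };

    int main(void)
    {
        meter_ops_no_const runtime_ops = fixed_ops;  /* writable instance */
        runtime_ops.write = NULL;                    /* legal: not const */
        /* fixed_ops.write = NULL;    would not compile: read-only object */
        printf("%d %d\n", fixed_ops.read(), runtime_ops.read());
        return 0;
    }
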
40984diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
40985index 3288f13..71cfb4e 100644
40986--- a/drivers/hwmon/applesmc.c
40987+++ b/drivers/hwmon/applesmc.c
40988@@ -1106,7 +1106,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
40989 {
40990 struct applesmc_node_group *grp;
40991 struct applesmc_dev_attr *node;
40992- struct attribute *attr;
40993+ attribute_no_const *attr;
40994 int ret, i;
40995
40996 for (grp = groups; grp->format; grp++) {
40997diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
40998index dafc63c..4abb96c 100644
40999--- a/drivers/hwmon/asus_atk0110.c
41000+++ b/drivers/hwmon/asus_atk0110.c
41001@@ -151,10 +151,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
41002 struct atk_sensor_data {
41003 struct list_head list;
41004 struct atk_data *data;
41005- struct device_attribute label_attr;
41006- struct device_attribute input_attr;
41007- struct device_attribute limit1_attr;
41008- struct device_attribute limit2_attr;
41009+ device_attribute_no_const label_attr;
41010+ device_attribute_no_const input_attr;
41011+ device_attribute_no_const limit1_attr;
41012+ device_attribute_no_const limit2_attr;
41013 char label_attr_name[ATTR_NAME_SIZE];
41014 char input_attr_name[ATTR_NAME_SIZE];
41015 char limit1_attr_name[ATTR_NAME_SIZE];
41016@@ -274,7 +274,7 @@ static ssize_t atk_name_show(struct device *dev,
41017 static struct device_attribute atk_name_attr =
41018 __ATTR(name, 0444, atk_name_show, NULL);
41019
41020-static void atk_init_attribute(struct device_attribute *attr, char *name,
41021+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
41022 sysfs_show_func show)
41023 {
41024 sysfs_attr_init(&attr->attr);
41025diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
41026index 9425098..7646cc5 100644
41027--- a/drivers/hwmon/coretemp.c
41028+++ b/drivers/hwmon/coretemp.c
41029@@ -797,7 +797,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
41030 return NOTIFY_OK;
41031 }
41032
41033-static struct notifier_block coretemp_cpu_notifier __refdata = {
41034+static struct notifier_block coretemp_cpu_notifier = {
41035 .notifier_call = coretemp_cpu_callback,
41036 };
41037
41038diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
41039index 632f1dc..57e6a58 100644
41040--- a/drivers/hwmon/ibmaem.c
41041+++ b/drivers/hwmon/ibmaem.c
41042@@ -926,7 +926,7 @@ static int aem_register_sensors(struct aem_data *data,
41043 struct aem_rw_sensor_template *rw)
41044 {
41045 struct device *dev = &data->pdev->dev;
41046- struct sensor_device_attribute *sensors = data->sensors;
41047+ sensor_device_attribute_no_const *sensors = data->sensors;
41048 int err;
41049
41050 /* Set up read-only sensors */
41051diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
41052index 708081b..fe2d4ab 100644
41053--- a/drivers/hwmon/iio_hwmon.c
41054+++ b/drivers/hwmon/iio_hwmon.c
41055@@ -73,7 +73,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
41056 {
41057 struct device *dev = &pdev->dev;
41058 struct iio_hwmon_state *st;
41059- struct sensor_device_attribute *a;
41060+ sensor_device_attribute_no_const *a;
41061 int ret, i;
41062 int in_i = 1, temp_i = 1, curr_i = 1;
41063 enum iio_chan_type type;
41064diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
41065index cf811c1..4c17110 100644
41066--- a/drivers/hwmon/nct6775.c
41067+++ b/drivers/hwmon/nct6775.c
41068@@ -944,10 +944,10 @@ static struct attribute_group *
41069 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
41070 int repeat)
41071 {
41072- struct attribute_group *group;
41073+ attribute_group_no_const *group;
41074 struct sensor_device_attr_u *su;
41075- struct sensor_device_attribute *a;
41076- struct sensor_device_attribute_2 *a2;
41077+ sensor_device_attribute_no_const *a;
41078+ sensor_device_attribute_2_no_const *a2;
41079 struct attribute **attrs;
41080 struct sensor_device_template **t;
41081 int i, count;
41082diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
41083index 3cbf66e..8c5cc2a 100644
41084--- a/drivers/hwmon/pmbus/pmbus_core.c
41085+++ b/drivers/hwmon/pmbus/pmbus_core.c
41086@@ -782,7 +782,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
41087 return 0;
41088 }
41089
41090-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
41091+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
41092 const char *name,
41093 umode_t mode,
41094 ssize_t (*show)(struct device *dev,
41095@@ -799,7 +799,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
41096 dev_attr->store = store;
41097 }
41098
41099-static void pmbus_attr_init(struct sensor_device_attribute *a,
41100+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
41101 const char *name,
41102 umode_t mode,
41103 ssize_t (*show)(struct device *dev,
41104@@ -821,7 +821,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
41105 u16 reg, u8 mask)
41106 {
41107 struct pmbus_boolean *boolean;
41108- struct sensor_device_attribute *a;
41109+ sensor_device_attribute_no_const *a;
41110
41111 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
41112 if (!boolean)
41113@@ -846,7 +846,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
41114 bool update, bool readonly)
41115 {
41116 struct pmbus_sensor *sensor;
41117- struct device_attribute *a;
41118+ device_attribute_no_const *a;
41119
41120 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
41121 if (!sensor)
41122@@ -877,7 +877,7 @@ static int pmbus_add_label(struct pmbus_data *data,
41123 const char *lstring, int index)
41124 {
41125 struct pmbus_label *label;
41126- struct device_attribute *a;
41127+ device_attribute_no_const *a;
41128
41129 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
41130 if (!label)
41131diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
41132index 97cd45a..ac54d8b 100644
41133--- a/drivers/hwmon/sht15.c
41134+++ b/drivers/hwmon/sht15.c
41135@@ -169,7 +169,7 @@ struct sht15_data {
41136 int supply_uv;
41137 bool supply_uv_valid;
41138 struct work_struct update_supply_work;
41139- atomic_t interrupt_handled;
41140+ atomic_unchecked_t interrupt_handled;
41141 };
41142
41143 /**
41144@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
41145 ret = gpio_direction_input(data->pdata->gpio_data);
41146 if (ret)
41147 return ret;
41148- atomic_set(&data->interrupt_handled, 0);
41149+ atomic_set_unchecked(&data->interrupt_handled, 0);
41150
41151 enable_irq(gpio_to_irq(data->pdata->gpio_data));
41152 if (gpio_get_value(data->pdata->gpio_data) == 0) {
41153 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
41154 /* Only relevant if the interrupt hasn't occurred. */
41155- if (!atomic_read(&data->interrupt_handled))
41156+ if (!atomic_read_unchecked(&data->interrupt_handled))
41157 schedule_work(&data->read_work);
41158 }
41159 ret = wait_event_timeout(data->wait_queue,
41160@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
41161
41162 /* First disable the interrupt */
41163 disable_irq_nosync(irq);
41164- atomic_inc(&data->interrupt_handled);
41165+ atomic_inc_unchecked(&data->interrupt_handled);
41166 /* Then schedule a reading work struct */
41167 if (data->state != SHT15_READING_NOTHING)
41168 schedule_work(&data->read_work);
41169@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
41170 * If not, then start the interrupt again - care here as could
41171 * have gone low in meantime so verify it hasn't!
41172 */
41173- atomic_set(&data->interrupt_handled, 0);
41174+ atomic_set_unchecked(&data->interrupt_handled, 0);
41175 enable_irq(gpio_to_irq(data->pdata->gpio_data));
41176 /* If still not occurred or another handler was scheduled */
41177 if (gpio_get_value(data->pdata->gpio_data)
41178- || atomic_read(&data->interrupt_handled))
41179+ || atomic_read_unchecked(&data->interrupt_handled))
41180 return;
41181 }
41182
41183diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
41184index 38944e9..ae9e5ed 100644
41185--- a/drivers/hwmon/via-cputemp.c
41186+++ b/drivers/hwmon/via-cputemp.c
41187@@ -296,7 +296,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
41188 return NOTIFY_OK;
41189 }
41190
41191-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
41192+static struct notifier_block via_cputemp_cpu_notifier = {
41193 .notifier_call = via_cputemp_cpu_callback,
41194 };
41195
41196diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
41197index 07f01ac..d79ad3d 100644
41198--- a/drivers/i2c/busses/i2c-amd756-s4882.c
41199+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
41200@@ -43,7 +43,7 @@
41201 extern struct i2c_adapter amd756_smbus;
41202
41203 static struct i2c_adapter *s4882_adapter;
41204-static struct i2c_algorithm *s4882_algo;
41205+static i2c_algorithm_no_const *s4882_algo;
41206
41207 /* Wrapper access functions for multiplexed SMBus */
41208 static DEFINE_MUTEX(amd756_lock);
41209diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
41210index 721f7eb..0fd2a09 100644
41211--- a/drivers/i2c/busses/i2c-diolan-u2c.c
41212+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
41213@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
41214 /* usb layer */
41215
41216 /* Send command to device, and get response. */
41217-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
41218+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
41219 {
41220 int ret = 0;
41221 int actual;
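
[__intentional_overflow(-1) is an annotation for the PaX size_overflow gcc plugin, which instruments integer arithmetic feeding size parameters and terminates the offending task when a computation overflows unexpectedly; the marker tells the plugin that overflow inside this function is anticipated and must not be reported. There is no userspace equivalent of the plugin, but its injected checks behave roughly like the overflow-checked helper below (__builtin_mul_overflow requires gcc 5+ or clang):]

    #include <stdio.h>
    #include <stdlib.h>

    /* Rough model of what the size_overflow plugin injects around size math. */
    static size_t checked_mul(size_t a, size_t b)
    {
        size_t r;
        if (__builtin_mul_overflow(a, b, &r)) {
            fprintf(stderr, "size overflow detected\n");
            exit(1);                          /* the kernel would kill the task */
        }
        return r;
    }

    int main(void)
    {
        printf("%zu\n", checked_mul(1024, 8));        /* fine */
        printf("%zu\n", checked_mul((size_t)-1, 2));  /* trips the check */
        return 0;
    }
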
41222diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
41223index 2ca268d..c6acbdf 100644
41224--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
41225+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
41226@@ -41,7 +41,7 @@
41227 extern struct i2c_adapter *nforce2_smbus;
41228
41229 static struct i2c_adapter *s4985_adapter;
41230-static struct i2c_algorithm *s4985_algo;
41231+static i2c_algorithm_no_const *s4985_algo;
41232
41233 /* Wrapper access functions for multiplexed SMBus */
41234 static DEFINE_MUTEX(nforce2_lock);
41235diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
41236index 80b47e8..1a6040d9 100644
41237--- a/drivers/i2c/i2c-dev.c
41238+++ b/drivers/i2c/i2c-dev.c
41239@@ -277,7 +277,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
41240 break;
41241 }
41242
41243- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
41244+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
41245 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
41246 if (IS_ERR(rdwr_pa[i].buf)) {
41247 res = PTR_ERR(rdwr_pa[i].buf);
41248diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
41249index 0b510ba..4fbb5085 100644
41250--- a/drivers/ide/ide-cd.c
41251+++ b/drivers/ide/ide-cd.c
41252@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
41253 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
41254 if ((unsigned long)buf & alignment
41255 || blk_rq_bytes(rq) & q->dma_pad_mask
41256- || object_is_on_stack(buf))
41257+ || object_starts_on_stack(buf))
41258 drive->dma = 0;
41259 }
41260 }
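
[object_is_on_stack() becomes object_starts_on_stack(), which, as this patch set defines it, tests whether the object's starting address lies within the current task's stack; the intent of the call site is unchanged, stack-resident buffers are rejected as DMA targets. A toy range check under that assumption (the "stack" is just a static array here):]

    #include <stdio.h>
    #include <stdint.h>

    /* Toy model: a task "stack" is just a known address range here. */
    static char fake_stack[4096];

    static int starts_on_stack(const void *obj)
    {
        uintptr_t p  = (uintptr_t)obj;
        uintptr_t lo = (uintptr_t)fake_stack;
        return p >= lo && p < lo + sizeof(fake_stack);
    }

    static char global_buf[64];   /* lives outside the "stack" range */

    int main(void)
    {
        printf("stack buf:  %d\n", starts_on_stack(&fake_stack[100])); /* 1 */
        printf("global buf: %d\n", starts_on_stack(global_buf));       /* 0 */
        return 0;
    }
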
41261diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
41262index 18f72e3..3722327 100644
41263--- a/drivers/iio/industrialio-core.c
41264+++ b/drivers/iio/industrialio-core.c
41265@@ -521,7 +521,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
41266 }
41267
41268 static
41269-int __iio_device_attr_init(struct device_attribute *dev_attr,
41270+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
41271 const char *postfix,
41272 struct iio_chan_spec const *chan,
41273 ssize_t (*readfunc)(struct device *dev,
41274diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
41275index f2ef7ef..743d02f 100644
41276--- a/drivers/infiniband/core/cm.c
41277+++ b/drivers/infiniband/core/cm.c
41278@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
41279
41280 struct cm_counter_group {
41281 struct kobject obj;
41282- atomic_long_t counter[CM_ATTR_COUNT];
41283+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
41284 };
41285
41286 struct cm_counter_attribute {
41287@@ -1392,7 +1392,7 @@ static void cm_dup_req_handler(struct cm_work *work,
41288 struct ib_mad_send_buf *msg = NULL;
41289 int ret;
41290
41291- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41292+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41293 counter[CM_REQ_COUNTER]);
41294
41295 /* Quick state check to discard duplicate REQs. */
41296@@ -1776,7 +1776,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
41297 if (!cm_id_priv)
41298 return;
41299
41300- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41301+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41302 counter[CM_REP_COUNTER]);
41303 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
41304 if (ret)
41305@@ -1943,7 +1943,7 @@ static int cm_rtu_handler(struct cm_work *work)
41306 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
41307 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
41308 spin_unlock_irq(&cm_id_priv->lock);
41309- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41310+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41311 counter[CM_RTU_COUNTER]);
41312 goto out;
41313 }
41314@@ -2126,7 +2126,7 @@ static int cm_dreq_handler(struct cm_work *work)
41315 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
41316 dreq_msg->local_comm_id);
41317 if (!cm_id_priv) {
41318- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41319+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41320 counter[CM_DREQ_COUNTER]);
41321 cm_issue_drep(work->port, work->mad_recv_wc);
41322 return -EINVAL;
41323@@ -2151,7 +2151,7 @@ static int cm_dreq_handler(struct cm_work *work)
41324 case IB_CM_MRA_REP_RCVD:
41325 break;
41326 case IB_CM_TIMEWAIT:
41327- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41328+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41329 counter[CM_DREQ_COUNTER]);
41330 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
41331 goto unlock;
41332@@ -2165,7 +2165,7 @@ static int cm_dreq_handler(struct cm_work *work)
41333 cm_free_msg(msg);
41334 goto deref;
41335 case IB_CM_DREQ_RCVD:
41336- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41337+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41338 counter[CM_DREQ_COUNTER]);
41339 goto unlock;
41340 default:
41341@@ -2532,7 +2532,7 @@ static int cm_mra_handler(struct cm_work *work)
41342 ib_modify_mad(cm_id_priv->av.port->mad_agent,
41343 cm_id_priv->msg, timeout)) {
41344 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
41345- atomic_long_inc(&work->port->
41346+ atomic_long_inc_unchecked(&work->port->
41347 counter_group[CM_RECV_DUPLICATES].
41348 counter[CM_MRA_COUNTER]);
41349 goto out;
41350@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
41351 break;
41352 case IB_CM_MRA_REQ_RCVD:
41353 case IB_CM_MRA_REP_RCVD:
41354- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41355+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41356 counter[CM_MRA_COUNTER]);
41357 /* fall through */
41358 default:
41359@@ -2703,7 +2703,7 @@ static int cm_lap_handler(struct cm_work *work)
41360 case IB_CM_LAP_IDLE:
41361 break;
41362 case IB_CM_MRA_LAP_SENT:
41363- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41364+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41365 counter[CM_LAP_COUNTER]);
41366 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
41367 goto unlock;
41368@@ -2719,7 +2719,7 @@ static int cm_lap_handler(struct cm_work *work)
41369 cm_free_msg(msg);
41370 goto deref;
41371 case IB_CM_LAP_RCVD:
41372- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41373+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41374 counter[CM_LAP_COUNTER]);
41375 goto unlock;
41376 default:
41377@@ -3003,7 +3003,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
41378 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
41379 if (cur_cm_id_priv) {
41380 spin_unlock_irq(&cm.lock);
41381- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41382+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41383 counter[CM_SIDR_REQ_COUNTER]);
41384 goto out; /* Duplicate message. */
41385 }
41386@@ -3215,10 +3215,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
41387 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
41388 msg->retries = 1;
41389
41390- atomic_long_add(1 + msg->retries,
41391+ atomic_long_add_unchecked(1 + msg->retries,
41392 &port->counter_group[CM_XMIT].counter[attr_index]);
41393 if (msg->retries)
41394- atomic_long_add(msg->retries,
41395+ atomic_long_add_unchecked(msg->retries,
41396 &port->counter_group[CM_XMIT_RETRIES].
41397 counter[attr_index]);
41398
41399@@ -3428,7 +3428,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
41400 }
41401
41402 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
41403- atomic_long_inc(&port->counter_group[CM_RECV].
41404+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
41405 counter[attr_id - CM_ATTR_ID_OFFSET]);
41406
41407 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
41408@@ -3633,7 +3633,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
41409 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
41410
41411 return sprintf(buf, "%ld\n",
41412- atomic_long_read(&group->counter[cm_attr->index]));
41413+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
41414 }
41415
41416 static const struct sysfs_ops cm_counter_ops = {
41417diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
41418index 9f5ad7c..588cd84 100644
41419--- a/drivers/infiniband/core/fmr_pool.c
41420+++ b/drivers/infiniband/core/fmr_pool.c
41421@@ -98,8 +98,8 @@ struct ib_fmr_pool {
41422
41423 struct task_struct *thread;
41424
41425- atomic_t req_ser;
41426- atomic_t flush_ser;
41427+ atomic_unchecked_t req_ser;
41428+ atomic_unchecked_t flush_ser;
41429
41430 wait_queue_head_t force_wait;
41431 };
41432@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
41433 struct ib_fmr_pool *pool = pool_ptr;
41434
41435 do {
41436- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
41437+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
41438 ib_fmr_batch_release(pool);
41439
41440- atomic_inc(&pool->flush_ser);
41441+ atomic_inc_unchecked(&pool->flush_ser);
41442 wake_up_interruptible(&pool->force_wait);
41443
41444 if (pool->flush_function)
41445@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
41446 }
41447
41448 set_current_state(TASK_INTERRUPTIBLE);
41449- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
41450+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
41451 !kthread_should_stop())
41452 schedule();
41453 __set_current_state(TASK_RUNNING);
41454@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
41455 pool->dirty_watermark = params->dirty_watermark;
41456 pool->dirty_len = 0;
41457 spin_lock_init(&pool->pool_lock);
41458- atomic_set(&pool->req_ser, 0);
41459- atomic_set(&pool->flush_ser, 0);
41460+ atomic_set_unchecked(&pool->req_ser, 0);
41461+ atomic_set_unchecked(&pool->flush_ser, 0);
41462 init_waitqueue_head(&pool->force_wait);
41463
41464 pool->thread = kthread_run(ib_fmr_cleanup_thread,
41465@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
41466 }
41467 spin_unlock_irq(&pool->pool_lock);
41468
41469- serial = atomic_inc_return(&pool->req_ser);
41470+ serial = atomic_inc_return_unchecked(&pool->req_ser);
41471 wake_up_process(pool->thread);
41472
41473 if (wait_event_interruptible(pool->force_wait,
41474- atomic_read(&pool->flush_ser) - serial >= 0))
41475+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
41476 return -EINTR;
41477
41478 return 0;
41479@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
41480 } else {
41481 list_add_tail(&fmr->list, &pool->dirty_list);
41482 if (++pool->dirty_len >= pool->dirty_watermark) {
41483- atomic_inc(&pool->req_ser);
41484+ atomic_inc_unchecked(&pool->req_ser);
41485 wake_up_process(pool->thread);
41486 }
41487 }
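
The fmr_pool conversion is safe for a second reason worth spelling out: req_ser and flush_ser are sequence numbers that are only ever ordered by signed subtraction (flush_ser - req_ser < 0), the standard wraparound-tolerant comparison, so dropping the overflow check changes no observable behaviour. A worked example of that idiom (illustrative, not kernel code):

    /* Wraparound-safe sequence ordering, as used by the pool thread:
     * "a is before b" iff (int)(a - b) < 0, correct across the wrap. */
    #include <assert.h>

    static int seq_before(unsigned int a, unsigned int b)
    {
            return (int)(a - b) < 0;
    }

    int main(void)
    {
            assert(seq_before(5, 10));
            assert(seq_before(0xfffffffeu, 2));     /* spans the 2^32 wrap */
            assert(!seq_before(10, 5));
            return 0;
    }
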
41488diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
41489index 84e4500..2c9beeb 100644
41490--- a/drivers/infiniband/hw/cxgb4/mem.c
41491+++ b/drivers/infiniband/hw/cxgb4/mem.c
41492@@ -249,7 +249,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
41493 int err;
41494 struct fw_ri_tpte tpt;
41495 u32 stag_idx;
41496- static atomic_t key;
41497+ static atomic_unchecked_t key;
41498
41499 if (c4iw_fatal_error(rdev))
41500 return -EIO;
41501@@ -266,7 +266,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
41502 if (rdev->stats.stag.cur > rdev->stats.stag.max)
41503 rdev->stats.stag.max = rdev->stats.stag.cur;
41504 mutex_unlock(&rdev->stats.lock);
41505- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
41506+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
41507 }
41508 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
41509 __func__, stag_state, type, pdid, stag_idx);
41510diff --git a/drivers/infiniband/hw/ipath/ipath_dma.c b/drivers/infiniband/hw/ipath/ipath_dma.c
41511index 644c2c7..ecf0879 100644
41512--- a/drivers/infiniband/hw/ipath/ipath_dma.c
41513+++ b/drivers/infiniband/hw/ipath/ipath_dma.c
41514@@ -176,17 +176,17 @@ static void ipath_dma_free_coherent(struct ib_device *dev, size_t size,
41515 }
41516
41517 struct ib_dma_mapping_ops ipath_dma_mapping_ops = {
41518- ipath_mapping_error,
41519- ipath_dma_map_single,
41520- ipath_dma_unmap_single,
41521- ipath_dma_map_page,
41522- ipath_dma_unmap_page,
41523- ipath_map_sg,
41524- ipath_unmap_sg,
41525- ipath_sg_dma_address,
41526- ipath_sg_dma_len,
41527- ipath_sync_single_for_cpu,
41528- ipath_sync_single_for_device,
41529- ipath_dma_alloc_coherent,
41530- ipath_dma_free_coherent
41531+ .mapping_error = ipath_mapping_error,
41532+ .map_single = ipath_dma_map_single,
41533+ .unmap_single = ipath_dma_unmap_single,
41534+ .map_page = ipath_dma_map_page,
41535+ .unmap_page = ipath_dma_unmap_page,
41536+ .map_sg = ipath_map_sg,
41537+ .unmap_sg = ipath_unmap_sg,
41538+ .dma_address = ipath_sg_dma_address,
41539+ .dma_len = ipath_sg_dma_len,
41540+ .sync_single_for_cpu = ipath_sync_single_for_cpu,
41541+ .sync_single_for_device = ipath_sync_single_for_device,
41542+ .alloc_coherent = ipath_dma_alloc_coherent,
41543+ .free_coherent = ipath_dma_free_coherent
41544 };
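
The ipath_dma_mapping_ops rewrite above, repeated later for the nes cm_ops and the three gigaset ops tables, replaces positional initialization with C99 designated initializers. Fields then bind by name rather than by order, which is required once this patch's structure-layout randomization can shuffle member order, and it reads better besides. A freestanding illustration (struct and functions invented):

    /* Positional vs designated initialization of an ops table. */
    struct demo_ops {
            int  (*open)(void);
            void (*close)(void);
    };

    static int  demo_open(void)  { return 0; }
    static void demo_close(void) { (void)0; }

    /* Positional: silently misbinds if a field is added or reordered. */
    static const struct demo_ops ops_positional = { demo_open, demo_close };

    /* Designated: each pointer binds to its field by name. */
    static const struct demo_ops ops_designated = {
            .open  = demo_open,
            .close = demo_close,
    };

    int main(void)
    {
            ops_designated.close();
            return ops_positional.open() + ops_designated.open();
    }
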
41545diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
41546index 79b3dbc..96e5fcc 100644
41547--- a/drivers/infiniband/hw/ipath/ipath_rc.c
41548+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
41549@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
41550 struct ib_atomic_eth *ateth;
41551 struct ipath_ack_entry *e;
41552 u64 vaddr;
41553- atomic64_t *maddr;
41554+ atomic64_unchecked_t *maddr;
41555 u64 sdata;
41556 u32 rkey;
41557 u8 next;
41558@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
41559 IB_ACCESS_REMOTE_ATOMIC)))
41560 goto nack_acc_unlck;
41561 /* Perform atomic OP and save result. */
41562- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
41563+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
41564 sdata = be64_to_cpu(ateth->swap_data);
41565 e = &qp->s_ack_queue[qp->r_head_ack_queue];
41566 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
41567- (u64) atomic64_add_return(sdata, maddr) - sdata :
41568+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
41569 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
41570 be64_to_cpu(ateth->compare_data),
41571 sdata);
41572diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
41573index 1f95bba..9530f87 100644
41574--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
41575+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
41576@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
41577 unsigned long flags;
41578 struct ib_wc wc;
41579 u64 sdata;
41580- atomic64_t *maddr;
41581+ atomic64_unchecked_t *maddr;
41582 enum ib_wc_status send_status;
41583
41584 /*
41585@@ -382,11 +382,11 @@ again:
41586 IB_ACCESS_REMOTE_ATOMIC)))
41587 goto acc_err;
41588 /* Perform atomic OP and save result. */
41589- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
41590+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
41591 sdata = wqe->wr.wr.atomic.compare_add;
41592 *(u64 *) sqp->s_sge.sge.vaddr =
41593 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
41594- (u64) atomic64_add_return(sdata, maddr) - sdata :
41595+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
41596 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
41597 sdata, wqe->wr.wr.atomic.swap);
41598 goto send_comp;
41599diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
41600index f2a3f48..673ec79 100644
41601--- a/drivers/infiniband/hw/mlx4/mad.c
41602+++ b/drivers/infiniband/hw/mlx4/mad.c
41603@@ -98,7 +98,7 @@ __be64 mlx4_ib_gen_node_guid(void)
41604
41605 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
41606 {
41607- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
41608+ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
41609 cpu_to_be64(0xff00000000000000LL);
41610 }
41611
41612diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
41613index 25b2cdf..099ff97 100644
41614--- a/drivers/infiniband/hw/mlx4/mcg.c
41615+++ b/drivers/infiniband/hw/mlx4/mcg.c
41616@@ -1040,7 +1040,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
41617 {
41618 char name[20];
41619
41620- atomic_set(&ctx->tid, 0);
41621+ atomic_set_unchecked(&ctx->tid, 0);
41622 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
41623 ctx->mcg_wq = create_singlethread_workqueue(name);
41624 if (!ctx->mcg_wq)
41625diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
41626index 036b663..c9a8c73 100644
41627--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
41628+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
41629@@ -404,7 +404,7 @@ struct mlx4_ib_demux_ctx {
41630 struct list_head mcg_mgid0_list;
41631 struct workqueue_struct *mcg_wq;
41632 struct mlx4_ib_demux_pv_ctx **tun;
41633- atomic_t tid;
41634+ atomic_unchecked_t tid;
41635 int flushing; /* flushing the work queue */
41636 };
41637
41638diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
41639index 9d3e5c1..6f166df 100644
41640--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
41641+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
41642@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
41643 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
41644 }
41645
41646-int mthca_QUERY_FW(struct mthca_dev *dev)
41647+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
41648 {
41649 struct mthca_mailbox *mailbox;
41650 u32 *outbox;
41651@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
41652 CMD_TIME_CLASS_B);
41653 }
41654
41655-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
41656+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
41657 int num_mtt)
41658 {
41659 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
41660@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
41661 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
41662 }
41663
41664-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
41665+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
41666 int eq_num)
41667 {
41668 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
41669@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
41670 CMD_TIME_CLASS_B);
41671 }
41672
41673-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
41674+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
41675 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
41676 void *in_mad, void *response_mad)
41677 {
41678diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
41679index 87897b9..7e79542 100644
41680--- a/drivers/infiniband/hw/mthca/mthca_main.c
41681+++ b/drivers/infiniband/hw/mthca/mthca_main.c
41682@@ -692,7 +692,7 @@ err_close:
41683 return err;
41684 }
41685
41686-static int mthca_setup_hca(struct mthca_dev *dev)
41687+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
41688 {
41689 int err;
41690
41691diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
41692index ed9a989..6aa5dc2 100644
41693--- a/drivers/infiniband/hw/mthca/mthca_mr.c
41694+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
41695@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
41696 * through the bitmaps)
41697 */
41698
41699-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
41700+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
41701 {
41702 int o;
41703 int m;
41704@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
41705 return key;
41706 }
41707
41708-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
41709+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
41710 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
41711 {
41712 struct mthca_mailbox *mailbox;
41713@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
41714 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
41715 }
41716
41717-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
41718+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
41719 u64 *buffer_list, int buffer_size_shift,
41720 int list_len, u64 iova, u64 total_size,
41721 u32 access, struct mthca_mr *mr)
41722diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
41723index 5b71d43..35a9e14 100644
41724--- a/drivers/infiniband/hw/mthca/mthca_provider.c
41725+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
41726@@ -763,7 +763,7 @@ unlock:
41727 return 0;
41728 }
41729
41730-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
41731+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
41732 {
41733 struct mthca_dev *dev = to_mdev(ibcq->device);
41734 struct mthca_cq *cq = to_mcq(ibcq);
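
The __intentional_overflow(-1) annotations on the mthca command helpers are markers for the size_overflow GCC plugin: they declare that arithmetic flowing through these functions may wrap by design, so the plugin should not instrument or report it. When the plugin is not in use, the marker has to compile away to nothing. A hedged sketch of how such a macro typically degrades (the cpp guard is an illustrative assumption; the real definition lives in the patch's compiler headers):

    /* Sketch only: a plugin-specific attribute that vanishes when the
     * plugin is absent. */
    #include <stdio.h>

    #ifdef SIZE_OVERFLOW_PLUGIN
    # define __intentional_overflow(...) \
            __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    # define __intentional_overflow(...)
    #endif

    /* Wrapping is intended here, so the checker is told to stand down. */
    static unsigned int __intentional_overflow(-1) mix(unsigned int h,
                                                       unsigned int c)
    {
            return h * 31u + c;
    }

    int main(void)
    {
            printf("%u\n", mix(0x80000000u, 7));    /* wrapped, by design */
            return 0;
    }
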
41735diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
41736index 4291410..d2ab1fb 100644
41737--- a/drivers/infiniband/hw/nes/nes.c
41738+++ b/drivers/infiniband/hw/nes/nes.c
41739@@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
41740 LIST_HEAD(nes_adapter_list);
41741 static LIST_HEAD(nes_dev_list);
41742
41743-atomic_t qps_destroyed;
41744+atomic_unchecked_t qps_destroyed;
41745
41746 static unsigned int ee_flsh_adapter;
41747 static unsigned int sysfs_nonidx_addr;
41748@@ -269,7 +269,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
41749 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
41750 struct nes_adapter *nesadapter = nesdev->nesadapter;
41751
41752- atomic_inc(&qps_destroyed);
41753+ atomic_inc_unchecked(&qps_destroyed);
41754
41755 /* Free the control structures */
41756
41757diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
41758index 33cc589..3bd6538 100644
41759--- a/drivers/infiniband/hw/nes/nes.h
41760+++ b/drivers/infiniband/hw/nes/nes.h
41761@@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
41762 extern unsigned int wqm_quanta;
41763 extern struct list_head nes_adapter_list;
41764
41765-extern atomic_t cm_connects;
41766-extern atomic_t cm_accepts;
41767-extern atomic_t cm_disconnects;
41768-extern atomic_t cm_closes;
41769-extern atomic_t cm_connecteds;
41770-extern atomic_t cm_connect_reqs;
41771-extern atomic_t cm_rejects;
41772-extern atomic_t mod_qp_timouts;
41773-extern atomic_t qps_created;
41774-extern atomic_t qps_destroyed;
41775-extern atomic_t sw_qps_destroyed;
41776+extern atomic_unchecked_t cm_connects;
41777+extern atomic_unchecked_t cm_accepts;
41778+extern atomic_unchecked_t cm_disconnects;
41779+extern atomic_unchecked_t cm_closes;
41780+extern atomic_unchecked_t cm_connecteds;
41781+extern atomic_unchecked_t cm_connect_reqs;
41782+extern atomic_unchecked_t cm_rejects;
41783+extern atomic_unchecked_t mod_qp_timouts;
41784+extern atomic_unchecked_t qps_created;
41785+extern atomic_unchecked_t qps_destroyed;
41786+extern atomic_unchecked_t sw_qps_destroyed;
41787 extern u32 mh_detected;
41788 extern u32 mh_pauses_sent;
41789 extern u32 cm_packets_sent;
41790@@ -196,16 +196,16 @@ extern u32 cm_packets_created;
41791 extern u32 cm_packets_received;
41792 extern u32 cm_packets_dropped;
41793 extern u32 cm_packets_retrans;
41794-extern atomic_t cm_listens_created;
41795-extern atomic_t cm_listens_destroyed;
41796+extern atomic_unchecked_t cm_listens_created;
41797+extern atomic_unchecked_t cm_listens_destroyed;
41798 extern u32 cm_backlog_drops;
41799-extern atomic_t cm_loopbacks;
41800-extern atomic_t cm_nodes_created;
41801-extern atomic_t cm_nodes_destroyed;
41802-extern atomic_t cm_accel_dropped_pkts;
41803-extern atomic_t cm_resets_recvd;
41804-extern atomic_t pau_qps_created;
41805-extern atomic_t pau_qps_destroyed;
41806+extern atomic_unchecked_t cm_loopbacks;
41807+extern atomic_unchecked_t cm_nodes_created;
41808+extern atomic_unchecked_t cm_nodes_destroyed;
41809+extern atomic_unchecked_t cm_accel_dropped_pkts;
41810+extern atomic_unchecked_t cm_resets_recvd;
41811+extern atomic_unchecked_t pau_qps_created;
41812+extern atomic_unchecked_t pau_qps_destroyed;
41813
41814 extern u32 int_mod_timer_init;
41815 extern u32 int_mod_cq_depth_256;
41816diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
41817index 6b29249..461d143 100644
41818--- a/drivers/infiniband/hw/nes/nes_cm.c
41819+++ b/drivers/infiniband/hw/nes/nes_cm.c
41820@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
41821 u32 cm_packets_retrans;
41822 u32 cm_packets_created;
41823 u32 cm_packets_received;
41824-atomic_t cm_listens_created;
41825-atomic_t cm_listens_destroyed;
41826+atomic_unchecked_t cm_listens_created;
41827+atomic_unchecked_t cm_listens_destroyed;
41828 u32 cm_backlog_drops;
41829-atomic_t cm_loopbacks;
41830-atomic_t cm_nodes_created;
41831-atomic_t cm_nodes_destroyed;
41832-atomic_t cm_accel_dropped_pkts;
41833-atomic_t cm_resets_recvd;
41834+atomic_unchecked_t cm_loopbacks;
41835+atomic_unchecked_t cm_nodes_created;
41836+atomic_unchecked_t cm_nodes_destroyed;
41837+atomic_unchecked_t cm_accel_dropped_pkts;
41838+atomic_unchecked_t cm_resets_recvd;
41839
41840 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
41841 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
41842@@ -133,28 +133,28 @@ static void print_core(struct nes_cm_core *core);
41843 /* instance of function pointers for client API */
41844 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
41845 static struct nes_cm_ops nes_cm_api = {
41846- mini_cm_accelerated,
41847- mini_cm_listen,
41848- mini_cm_del_listen,
41849- mini_cm_connect,
41850- mini_cm_close,
41851- mini_cm_accept,
41852- mini_cm_reject,
41853- mini_cm_recv_pkt,
41854- mini_cm_dealloc_core,
41855- mini_cm_get,
41856- mini_cm_set
41857+ .accelerated = mini_cm_accelerated,
41858+ .listen = mini_cm_listen,
41859+ .stop_listener = mini_cm_del_listen,
41860+ .connect = mini_cm_connect,
41861+ .close = mini_cm_close,
41862+ .accept = mini_cm_accept,
41863+ .reject = mini_cm_reject,
41864+ .recv_pkt = mini_cm_recv_pkt,
41865+ .destroy_cm_core = mini_cm_dealloc_core,
41866+ .get = mini_cm_get,
41867+ .set = mini_cm_set
41868 };
41869
41870 static struct nes_cm_core *g_cm_core;
41871
41872-atomic_t cm_connects;
41873-atomic_t cm_accepts;
41874-atomic_t cm_disconnects;
41875-atomic_t cm_closes;
41876-atomic_t cm_connecteds;
41877-atomic_t cm_connect_reqs;
41878-atomic_t cm_rejects;
41879+atomic_unchecked_t cm_connects;
41880+atomic_unchecked_t cm_accepts;
41881+atomic_unchecked_t cm_disconnects;
41882+atomic_unchecked_t cm_closes;
41883+atomic_unchecked_t cm_connecteds;
41884+atomic_unchecked_t cm_connect_reqs;
41885+atomic_unchecked_t cm_rejects;
41886
41887 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
41888 {
41889@@ -1272,7 +1272,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
41890 kfree(listener);
41891 listener = NULL;
41892 ret = 0;
41893- atomic_inc(&cm_listens_destroyed);
41894+ atomic_inc_unchecked(&cm_listens_destroyed);
41895 } else {
41896 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
41897 }
41898@@ -1466,7 +1466,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
41899 cm_node->rem_mac);
41900
41901 add_hte_node(cm_core, cm_node);
41902- atomic_inc(&cm_nodes_created);
41903+ atomic_inc_unchecked(&cm_nodes_created);
41904
41905 return cm_node;
41906 }
41907@@ -1524,7 +1524,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
41908 }
41909
41910 atomic_dec(&cm_core->node_cnt);
41911- atomic_inc(&cm_nodes_destroyed);
41912+ atomic_inc_unchecked(&cm_nodes_destroyed);
41913 nesqp = cm_node->nesqp;
41914 if (nesqp) {
41915 nesqp->cm_node = NULL;
41916@@ -1588,7 +1588,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
41917
41918 static void drop_packet(struct sk_buff *skb)
41919 {
41920- atomic_inc(&cm_accel_dropped_pkts);
41921+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
41922 dev_kfree_skb_any(skb);
41923 }
41924
41925@@ -1651,7 +1651,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
41926 {
41927
41928 int reset = 0; /* whether to send reset in case of err.. */
41929- atomic_inc(&cm_resets_recvd);
41930+ atomic_inc_unchecked(&cm_resets_recvd);
41931 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
41932 " refcnt=%d\n", cm_node, cm_node->state,
41933 atomic_read(&cm_node->ref_count));
41934@@ -2292,7 +2292,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
41935 rem_ref_cm_node(cm_node->cm_core, cm_node);
41936 return NULL;
41937 }
41938- atomic_inc(&cm_loopbacks);
41939+ atomic_inc_unchecked(&cm_loopbacks);
41940 loopbackremotenode->loopbackpartner = cm_node;
41941 loopbackremotenode->tcp_cntxt.rcv_wscale =
41942 NES_CM_DEFAULT_RCV_WND_SCALE;
41943@@ -2567,7 +2567,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
41944 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
41945 else {
41946 rem_ref_cm_node(cm_core, cm_node);
41947- atomic_inc(&cm_accel_dropped_pkts);
41948+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
41949 dev_kfree_skb_any(skb);
41950 }
41951 break;
41952@@ -2875,7 +2875,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
41953
41954 if ((cm_id) && (cm_id->event_handler)) {
41955 if (issue_disconn) {
41956- atomic_inc(&cm_disconnects);
41957+ atomic_inc_unchecked(&cm_disconnects);
41958 cm_event.event = IW_CM_EVENT_DISCONNECT;
41959 cm_event.status = disconn_status;
41960 cm_event.local_addr = cm_id->local_addr;
41961@@ -2897,7 +2897,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
41962 }
41963
41964 if (issue_close) {
41965- atomic_inc(&cm_closes);
41966+ atomic_inc_unchecked(&cm_closes);
41967 nes_disconnect(nesqp, 1);
41968
41969 cm_id->provider_data = nesqp;
41970@@ -3035,7 +3035,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
41971
41972 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
41973 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
41974- atomic_inc(&cm_accepts);
41975+ atomic_inc_unchecked(&cm_accepts);
41976
41977 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
41978 netdev_refcnt_read(nesvnic->netdev));
41979@@ -3224,7 +3224,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
41980 struct nes_cm_core *cm_core;
41981 u8 *start_buff;
41982
41983- atomic_inc(&cm_rejects);
41984+ atomic_inc_unchecked(&cm_rejects);
41985 cm_node = (struct nes_cm_node *)cm_id->provider_data;
41986 loopback = cm_node->loopbackpartner;
41987 cm_core = cm_node->cm_core;
41988@@ -3286,7 +3286,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
41989 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
41990 ntohs(laddr->sin_port));
41991
41992- atomic_inc(&cm_connects);
41993+ atomic_inc_unchecked(&cm_connects);
41994 nesqp->active_conn = 1;
41995
41996 /* cache the cm_id in the qp */
41997@@ -3398,7 +3398,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
41998 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
41999 return err;
42000 }
42001- atomic_inc(&cm_listens_created);
42002+ atomic_inc_unchecked(&cm_listens_created);
42003 }
42004
42005 cm_id->add_ref(cm_id);
42006@@ -3505,7 +3505,7 @@ static void cm_event_connected(struct nes_cm_event *event)
42007
42008 if (nesqp->destroyed)
42009 return;
42010- atomic_inc(&cm_connecteds);
42011+ atomic_inc_unchecked(&cm_connecteds);
42012 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
42013 " local port 0x%04X. jiffies = %lu.\n",
42014 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
42015@@ -3686,7 +3686,7 @@ static void cm_event_reset(struct nes_cm_event *event)
42016
42017 cm_id->add_ref(cm_id);
42018 ret = cm_id->event_handler(cm_id, &cm_event);
42019- atomic_inc(&cm_closes);
42020+ atomic_inc_unchecked(&cm_closes);
42021 cm_event.event = IW_CM_EVENT_CLOSE;
42022 cm_event.status = 0;
42023 cm_event.provider_data = cm_id->provider_data;
42024@@ -3726,7 +3726,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
42025 return;
42026 cm_id = cm_node->cm_id;
42027
42028- atomic_inc(&cm_connect_reqs);
42029+ atomic_inc_unchecked(&cm_connect_reqs);
42030 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
42031 cm_node, cm_id, jiffies);
42032
42033@@ -3770,7 +3770,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
42034 return;
42035 cm_id = cm_node->cm_id;
42036
42037- atomic_inc(&cm_connect_reqs);
42038+ atomic_inc_unchecked(&cm_connect_reqs);
42039 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
42040 cm_node, cm_id, jiffies);
42041
42042diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
42043index 4166452..fc952c3 100644
42044--- a/drivers/infiniband/hw/nes/nes_mgt.c
42045+++ b/drivers/infiniband/hw/nes/nes_mgt.c
42046@@ -40,8 +40,8 @@
42047 #include "nes.h"
42048 #include "nes_mgt.h"
42049
42050-atomic_t pau_qps_created;
42051-atomic_t pau_qps_destroyed;
42052+atomic_unchecked_t pau_qps_created;
42053+atomic_unchecked_t pau_qps_destroyed;
42054
42055 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
42056 {
42057@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
42058 {
42059 struct sk_buff *skb;
42060 unsigned long flags;
42061- atomic_inc(&pau_qps_destroyed);
42062+ atomic_inc_unchecked(&pau_qps_destroyed);
42063
42064 /* Free packets that have not yet been forwarded */
42065 /* Lock is acquired by skb_dequeue when removing the skb */
42066@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
42067 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
42068 skb_queue_head_init(&nesqp->pau_list);
42069 spin_lock_init(&nesqp->pau_lock);
42070- atomic_inc(&pau_qps_created);
42071+ atomic_inc_unchecked(&pau_qps_created);
42072 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
42073 }
42074
42075diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
42076index 49eb511..a774366 100644
42077--- a/drivers/infiniband/hw/nes/nes_nic.c
42078+++ b/drivers/infiniband/hw/nes/nes_nic.c
42079@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
42080 target_stat_values[++index] = mh_detected;
42081 target_stat_values[++index] = mh_pauses_sent;
42082 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
42083- target_stat_values[++index] = atomic_read(&cm_connects);
42084- target_stat_values[++index] = atomic_read(&cm_accepts);
42085- target_stat_values[++index] = atomic_read(&cm_disconnects);
42086- target_stat_values[++index] = atomic_read(&cm_connecteds);
42087- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
42088- target_stat_values[++index] = atomic_read(&cm_rejects);
42089- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
42090- target_stat_values[++index] = atomic_read(&qps_created);
42091- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
42092- target_stat_values[++index] = atomic_read(&qps_destroyed);
42093- target_stat_values[++index] = atomic_read(&cm_closes);
42094+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
42095+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
42096+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
42097+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
42098+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
42099+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
42100+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
42101+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
42102+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
42103+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
42104+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
42105 target_stat_values[++index] = cm_packets_sent;
42106 target_stat_values[++index] = cm_packets_bounced;
42107 target_stat_values[++index] = cm_packets_created;
42108 target_stat_values[++index] = cm_packets_received;
42109 target_stat_values[++index] = cm_packets_dropped;
42110 target_stat_values[++index] = cm_packets_retrans;
42111- target_stat_values[++index] = atomic_read(&cm_listens_created);
42112- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
42113+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
42114+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
42115 target_stat_values[++index] = cm_backlog_drops;
42116- target_stat_values[++index] = atomic_read(&cm_loopbacks);
42117- target_stat_values[++index] = atomic_read(&cm_nodes_created);
42118- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
42119- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
42120- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
42121+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
42122+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
42123+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
42124+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
42125+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
42126 target_stat_values[++index] = nesadapter->free_4kpbl;
42127 target_stat_values[++index] = nesadapter->free_256pbl;
42128 target_stat_values[++index] = int_mod_timer_init;
42129 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
42130 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
42131 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
42132- target_stat_values[++index] = atomic_read(&pau_qps_created);
42133- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
42134+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
42135+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
42136 }
42137
42138 /**
42139diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
42140index 8308e36..ae0d3b5 100644
42141--- a/drivers/infiniband/hw/nes/nes_verbs.c
42142+++ b/drivers/infiniband/hw/nes/nes_verbs.c
42143@@ -46,9 +46,9 @@
42144
42145 #include <rdma/ib_umem.h>
42146
42147-atomic_t mod_qp_timouts;
42148-atomic_t qps_created;
42149-atomic_t sw_qps_destroyed;
42150+atomic_unchecked_t mod_qp_timouts;
42151+atomic_unchecked_t qps_created;
42152+atomic_unchecked_t sw_qps_destroyed;
42153
42154 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
42155
42156@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
42157 if (init_attr->create_flags)
42158 return ERR_PTR(-EINVAL);
42159
42160- atomic_inc(&qps_created);
42161+ atomic_inc_unchecked(&qps_created);
42162 switch (init_attr->qp_type) {
42163 case IB_QPT_RC:
42164 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
42165@@ -1466,7 +1466,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
42166 struct iw_cm_event cm_event;
42167 int ret = 0;
42168
42169- atomic_inc(&sw_qps_destroyed);
42170+ atomic_inc_unchecked(&sw_qps_destroyed);
42171 nesqp->destroyed = 1;
42172
42173 /* Blow away the connection if it exists. */
42174diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
42175index 1946101..09766d2 100644
42176--- a/drivers/infiniband/hw/qib/qib.h
42177+++ b/drivers/infiniband/hw/qib/qib.h
42178@@ -52,6 +52,7 @@
42179 #include <linux/kref.h>
42180 #include <linux/sched.h>
42181 #include <linux/kthread.h>
42182+#include <linux/slab.h>
42183
42184 #include "qib_common.h"
42185 #include "qib_verbs.h"
42186diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
42187index 24c41ba..102d71f 100644
42188--- a/drivers/input/gameport/gameport.c
42189+++ b/drivers/input/gameport/gameport.c
42190@@ -490,14 +490,14 @@ EXPORT_SYMBOL(gameport_set_phys);
42191 */
42192 static void gameport_init_port(struct gameport *gameport)
42193 {
42194- static atomic_t gameport_no = ATOMIC_INIT(0);
42195+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
42196
42197 __module_get(THIS_MODULE);
42198
42199 mutex_init(&gameport->drv_mutex);
42200 device_initialize(&gameport->dev);
42201 dev_set_name(&gameport->dev, "gameport%lu",
42202- (unsigned long)atomic_inc_return(&gameport_no) - 1);
42203+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
42204 gameport->dev.bus = &gameport_bus;
42205 gameport->dev.release = gameport_release_port;
42206 if (gameport->parent)
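
This gameport hunk, and the input, xpad, ims-pcu, serio and serio_raw hunks that follow, all convert the same naming idiom: atomic_inc_return(&seq) - 1 hands out zero-based instance numbers, which is precisely a fetch-and-add. The value only names a device, so the wrapping, unchecked form is the right one. C11 equivalent for reference:

    /* The inc_return(&x) - 1 idiom is exactly fetch_add(&x, 1). */
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_ulong seq;                /* models the static counter */

    static unsigned long next_id(void)
    {
            return atomic_fetch_add(&seq, 1);       /* old value: 0, 1, ... */
    }

    int main(void)
    {
            printf("gameport%lu\n", next_id());     /* gameport0 */
            printf("gameport%lu\n", next_id());     /* gameport1 */
            return 0;
    }
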
42207diff --git a/drivers/input/input.c b/drivers/input/input.c
42208index d2965e4..f52b7d7 100644
42209--- a/drivers/input/input.c
42210+++ b/drivers/input/input.c
42211@@ -1734,7 +1734,7 @@ EXPORT_SYMBOL_GPL(input_class);
42212 */
42213 struct input_dev *input_allocate_device(void)
42214 {
42215- static atomic_t input_no = ATOMIC_INIT(0);
42216+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
42217 struct input_dev *dev;
42218
42219 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
42220@@ -1749,7 +1749,7 @@ struct input_dev *input_allocate_device(void)
42221 INIT_LIST_HEAD(&dev->node);
42222
42223 dev_set_name(&dev->dev, "input%ld",
42224- (unsigned long) atomic_inc_return(&input_no) - 1);
42225+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
42226
42227 __module_get(THIS_MODULE);
42228 }
42229diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
42230index 04c69af..5f92d00 100644
42231--- a/drivers/input/joystick/sidewinder.c
42232+++ b/drivers/input/joystick/sidewinder.c
42233@@ -30,6 +30,7 @@
42234 #include <linux/kernel.h>
42235 #include <linux/module.h>
42236 #include <linux/slab.h>
42237+#include <linux/sched.h>
42238 #include <linux/init.h>
42239 #include <linux/input.h>
42240 #include <linux/gameport.h>
42241diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
42242index 75e3b10..fb390fd 100644
42243--- a/drivers/input/joystick/xpad.c
42244+++ b/drivers/input/joystick/xpad.c
42245@@ -736,7 +736,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
42246
42247 static int xpad_led_probe(struct usb_xpad *xpad)
42248 {
42249- static atomic_t led_seq = ATOMIC_INIT(0);
42250+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
42251 long led_no;
42252 struct xpad_led *led;
42253 struct led_classdev *led_cdev;
42254@@ -749,7 +749,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
42255 if (!led)
42256 return -ENOMEM;
42257
42258- led_no = (long)atomic_inc_return(&led_seq) - 1;
42259+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
42260
42261 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
42262 led->xpad = xpad;
42263diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
42264index e204f26..8459f15 100644
42265--- a/drivers/input/misc/ims-pcu.c
42266+++ b/drivers/input/misc/ims-pcu.c
42267@@ -1621,7 +1621,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
42268
42269 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
42270 {
42271- static atomic_t device_no = ATOMIC_INIT(0);
42272+ static atomic_unchecked_t device_no = ATOMIC_INIT(0);
42273
42274 const struct ims_pcu_device_info *info;
42275 u8 device_id;
42276@@ -1653,7 +1653,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
42277 }
42278
42279 /* Device appears to be operable, complete initialization */
42280- pcu->device_no = atomic_inc_return(&device_no) - 1;
42281+ pcu->device_no = atomic_inc_return_unchecked(&device_no) - 1;
42282
42283 error = ims_pcu_setup_backlight(pcu);
42284 if (error)
42285diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
42286index 2f0b39d..7370f13 100644
42287--- a/drivers/input/mouse/psmouse.h
42288+++ b/drivers/input/mouse/psmouse.h
42289@@ -116,7 +116,7 @@ struct psmouse_attribute {
42290 ssize_t (*set)(struct psmouse *psmouse, void *data,
42291 const char *buf, size_t count);
42292 bool protect;
42293-};
42294+} __do_const;
42295 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
42296
42297 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
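
__do_const feeds grsecurity's constify plugin: structures made up of function pointers are forced const so they land in .rodata, out of reach of a kernel-write primitive. The irq_chip_no_const type in the GIC hunk further down is the matching escape hatch for tables that must still be written during early init. What the plugin enforces, written out by hand (types and names illustrative):

    /* A const ops table lives in .rodata, so its pointers cannot be
     * retargeted at runtime. */
    struct handler_ops {
            void (*handle)(int);
    };

    static void real_handler(int v) { (void)v; }

    static const struct handler_ops safe_ops = {
            .handle = real_handler,         /* fixed at build time */
    };

    int main(void)
    {
            safe_ops.handle(0);
            return 0;
    }
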
42298diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
42299index 4c842c3..590b0bf 100644
42300--- a/drivers/input/mousedev.c
42301+++ b/drivers/input/mousedev.c
42302@@ -738,7 +738,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
42303
42304 spin_unlock_irq(&client->packet_lock);
42305
42306- if (copy_to_user(buffer, data, count))
42307+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
42308 return -EFAULT;
42309
42310 return count;
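
The mousedev change is a user-copy hardening pattern that recurs below in the ISDN drivers (b1.c, icn.c): check the user-controlled length against the fixed kernel buffer before copy_to_user()/copy_from_user() runs, so an oversized count fails with -EFAULT instead of reading or writing past the buffer. A userspace sketch of the guard (names invented, memcpy standing in for the uaccess copy):

    #include <string.h>
    #include <errno.h>

    struct report { unsigned char data[8]; };

    static int copy_report(unsigned char *dst, size_t count,
                           const struct report *r)
    {
            if (count > sizeof(r->data))    /* the new check */
                    return -EFAULT;
            memcpy(dst, r->data, count);
            return (int)count;
    }

    int main(void)
    {
            struct report r = { { 1, 2, 3, 4, 5, 6, 7, 8 } };
            unsigned char out[8];

            return copy_report(out, sizeof(out) + 1, &r) == -EFAULT ? 0 : 1;
    }
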
42311diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
42312index 8f4c4ab..5fc8a45 100644
42313--- a/drivers/input/serio/serio.c
42314+++ b/drivers/input/serio/serio.c
42315@@ -505,7 +505,7 @@ static void serio_release_port(struct device *dev)
42316 */
42317 static void serio_init_port(struct serio *serio)
42318 {
42319- static atomic_t serio_no = ATOMIC_INIT(0);
42320+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
42321
42322 __module_get(THIS_MODULE);
42323
42324@@ -516,7 +516,7 @@ static void serio_init_port(struct serio *serio)
42325 mutex_init(&serio->drv_mutex);
42326 device_initialize(&serio->dev);
42327 dev_set_name(&serio->dev, "serio%ld",
42328- (long)atomic_inc_return(&serio_no) - 1);
42329+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
42330 serio->dev.bus = &serio_bus;
42331 serio->dev.release = serio_release_port;
42332 serio->dev.groups = serio_device_attr_groups;
42333diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
42334index 59df2e7..8f1cafb 100644
42335--- a/drivers/input/serio/serio_raw.c
42336+++ b/drivers/input/serio/serio_raw.c
42337@@ -293,7 +293,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
42338
42339 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
42340 {
42341- static atomic_t serio_raw_no = ATOMIC_INIT(0);
42342+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(0);
42343 struct serio_raw *serio_raw;
42344 int err;
42345
42346@@ -304,7 +304,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
42347 }
42348
42349 snprintf(serio_raw->name, sizeof(serio_raw->name),
42350- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no) - 1);
42351+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no) - 1);
42352 kref_init(&serio_raw->kref);
42353 INIT_LIST_HEAD(&serio_raw->client_list);
42354 init_waitqueue_head(&serio_raw->wait);
42355diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
42356index e5555fc..937986d 100644
42357--- a/drivers/iommu/iommu.c
42358+++ b/drivers/iommu/iommu.c
42359@@ -588,7 +588,7 @@ static struct notifier_block iommu_bus_nb = {
42360 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
42361 {
42362 bus_register_notifier(bus, &iommu_bus_nb);
42363- bus_for_each_dev(bus, NULL, ops, add_iommu_group);
42364+ bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
42365 }
42366
42367 /**
42368diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
42369index 39f81ae..2660096 100644
42370--- a/drivers/iommu/irq_remapping.c
42371+++ b/drivers/iommu/irq_remapping.c
42372@@ -356,7 +356,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
42373 void panic_if_irq_remap(const char *msg)
42374 {
42375 if (irq_remapping_enabled)
42376- panic(msg);
42377+ panic("%s", msg);
42378 }
42379
42380 static void ir_ack_apic_edge(struct irq_data *data)
42381@@ -377,10 +377,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
42382
42383 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
42384 {
42385- chip->irq_print_chip = ir_print_prefix;
42386- chip->irq_ack = ir_ack_apic_edge;
42387- chip->irq_eoi = ir_ack_apic_level;
42388- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
42389+ pax_open_kernel();
42390+ *(void **)&chip->irq_print_chip = ir_print_prefix;
42391+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
42392+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
42393+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
42394+ pax_close_kernel();
42395 }
42396
42397 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
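
Two separate fixes sit in the irq_remapping hunks. panic("%s", msg) closes a format-string hole: a message that may contain '%' must never be the format argument itself. And because constification has made struct irq_chip read-only, the legitimate boot-time rewrite of its callbacks is bracketed with pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection. A userspace analogue of that bracketing, with mprotect standing in for the real, architecture-specific mechanism:

    /* Userspace analogue only: patch an otherwise read-only table inside
     * an open/close window, then re-protect it. */
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static char table[4096] __attribute__((aligned(4096))) = "old";

    int main(void)
    {
            size_t pg = (size_t)sysconf(_SC_PAGESIZE);

            mprotect(table, pg, PROT_READ);                 /* normal state */
            mprotect(table, pg, PROT_READ | PROT_WRITE);    /* "open_kernel" */
            strcpy(table, "new");
            mprotect(table, pg, PROT_READ);                 /* "close_kernel" */
            puts(table);
            return 0;
    }
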
42398diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
42399index 341c601..e5f407e 100644
42400--- a/drivers/irqchip/irq-gic.c
42401+++ b/drivers/irqchip/irq-gic.c
42402@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
42403 * Supported arch specific GIC irq extension.
42404 * Default make them NULL.
42405 */
42406-struct irq_chip gic_arch_extn = {
42407+irq_chip_no_const gic_arch_extn = {
42408 .irq_eoi = NULL,
42409 .irq_mask = NULL,
42410 .irq_unmask = NULL,
42411@@ -332,7 +332,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
42412 chained_irq_exit(chip, desc);
42413 }
42414
42415-static struct irq_chip gic_chip = {
42416+static irq_chip_no_const gic_chip __read_only = {
42417 .name = "GIC",
42418 .irq_mask = gic_mask_irq,
42419 .irq_unmask = gic_unmask_irq,
42420diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
42421index ac6f72b..81150f2 100644
42422--- a/drivers/isdn/capi/capi.c
42423+++ b/drivers/isdn/capi/capi.c
42424@@ -81,8 +81,8 @@ struct capiminor {
42425
42426 struct capi20_appl *ap;
42427 u32 ncci;
42428- atomic_t datahandle;
42429- atomic_t msgid;
42430+ atomic_unchecked_t datahandle;
42431+ atomic_unchecked_t msgid;
42432
42433 struct tty_port port;
42434 int ttyinstop;
42435@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
42436 capimsg_setu16(s, 2, mp->ap->applid);
42437 capimsg_setu8 (s, 4, CAPI_DATA_B3);
42438 capimsg_setu8 (s, 5, CAPI_RESP);
42439- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
42440+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
42441 capimsg_setu32(s, 8, mp->ncci);
42442 capimsg_setu16(s, 12, datahandle);
42443 }
42444@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
42445 mp->outbytes -= len;
42446 spin_unlock_bh(&mp->outlock);
42447
42448- datahandle = atomic_inc_return(&mp->datahandle);
42449+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
42450 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
42451 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
42452 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
42453 capimsg_setu16(skb->data, 2, mp->ap->applid);
42454 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
42455 capimsg_setu8 (skb->data, 5, CAPI_REQ);
42456- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
42457+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
42458 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
42459 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
42460 capimsg_setu16(skb->data, 16, len); /* Data length */
42461diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
42462index c44950d..10ac276 100644
42463--- a/drivers/isdn/gigaset/bas-gigaset.c
42464+++ b/drivers/isdn/gigaset/bas-gigaset.c
42465@@ -2564,22 +2564,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
42466
42467
42468 static const struct gigaset_ops gigops = {
42469- gigaset_write_cmd,
42470- gigaset_write_room,
42471- gigaset_chars_in_buffer,
42472- gigaset_brkchars,
42473- gigaset_init_bchannel,
42474- gigaset_close_bchannel,
42475- gigaset_initbcshw,
42476- gigaset_freebcshw,
42477- gigaset_reinitbcshw,
42478- gigaset_initcshw,
42479- gigaset_freecshw,
42480- gigaset_set_modem_ctrl,
42481- gigaset_baud_rate,
42482- gigaset_set_line_ctrl,
42483- gigaset_isoc_send_skb,
42484- gigaset_isoc_input,
42485+ .write_cmd = gigaset_write_cmd,
42486+ .write_room = gigaset_write_room,
42487+ .chars_in_buffer = gigaset_chars_in_buffer,
42488+ .brkchars = gigaset_brkchars,
42489+ .init_bchannel = gigaset_init_bchannel,
42490+ .close_bchannel = gigaset_close_bchannel,
42491+ .initbcshw = gigaset_initbcshw,
42492+ .freebcshw = gigaset_freebcshw,
42493+ .reinitbcshw = gigaset_reinitbcshw,
42494+ .initcshw = gigaset_initcshw,
42495+ .freecshw = gigaset_freecshw,
42496+ .set_modem_ctrl = gigaset_set_modem_ctrl,
42497+ .baud_rate = gigaset_baud_rate,
42498+ .set_line_ctrl = gigaset_set_line_ctrl,
42499+ .send_skb = gigaset_isoc_send_skb,
42500+ .handle_input = gigaset_isoc_input,
42501 };
42502
42503 /* bas_gigaset_init
42504diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
42505index 600c79b..3752bab 100644
42506--- a/drivers/isdn/gigaset/interface.c
42507+++ b/drivers/isdn/gigaset/interface.c
42508@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
42509 }
42510 tty->driver_data = cs;
42511
42512- ++cs->port.count;
42513+ atomic_inc(&cs->port.count);
42514
42515- if (cs->port.count == 1) {
42516+ if (atomic_read(&cs->port.count) == 1) {
42517 tty_port_tty_set(&cs->port, tty);
42518 cs->port.low_latency = 1;
42519 }
42520@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
42521
42522 if (!cs->connected)
42523 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
42524- else if (!cs->port.count)
42525+ else if (!atomic_read(&cs->port.count))
42526 dev_warn(cs->dev, "%s: device not opened\n", __func__);
42527- else if (!--cs->port.count)
42528+ else if (!atomic_dec_return(&cs->port.count))
42529 tty_port_tty_set(&cs->port, NULL);
42530
42531 mutex_unlock(&cs->mutex);
42532diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
42533index 8c91fd5..14f13ce 100644
42534--- a/drivers/isdn/gigaset/ser-gigaset.c
42535+++ b/drivers/isdn/gigaset/ser-gigaset.c
42536@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
42537 }
42538
42539 static const struct gigaset_ops ops = {
42540- gigaset_write_cmd,
42541- gigaset_write_room,
42542- gigaset_chars_in_buffer,
42543- gigaset_brkchars,
42544- gigaset_init_bchannel,
42545- gigaset_close_bchannel,
42546- gigaset_initbcshw,
42547- gigaset_freebcshw,
42548- gigaset_reinitbcshw,
42549- gigaset_initcshw,
42550- gigaset_freecshw,
42551- gigaset_set_modem_ctrl,
42552- gigaset_baud_rate,
42553- gigaset_set_line_ctrl,
42554- gigaset_m10x_send_skb, /* asyncdata.c */
42555- gigaset_m10x_input, /* asyncdata.c */
42556+ .write_cmd = gigaset_write_cmd,
42557+ .write_room = gigaset_write_room,
42558+ .chars_in_buffer = gigaset_chars_in_buffer,
42559+ .brkchars = gigaset_brkchars,
42560+ .init_bchannel = gigaset_init_bchannel,
42561+ .close_bchannel = gigaset_close_bchannel,
42562+ .initbcshw = gigaset_initbcshw,
42563+ .freebcshw = gigaset_freebcshw,
42564+ .reinitbcshw = gigaset_reinitbcshw,
42565+ .initcshw = gigaset_initcshw,
42566+ .freecshw = gigaset_freecshw,
42567+ .set_modem_ctrl = gigaset_set_modem_ctrl,
42568+ .baud_rate = gigaset_baud_rate,
42569+ .set_line_ctrl = gigaset_set_line_ctrl,
42570+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
42571+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
42572 };
42573
42574
42575diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
42576index d0a41cb..b953e50 100644
42577--- a/drivers/isdn/gigaset/usb-gigaset.c
42578+++ b/drivers/isdn/gigaset/usb-gigaset.c
42579@@ -547,7 +547,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
42580 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
42581 memcpy(cs->hw.usb->bchars, buf, 6);
42582 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
42583- 0, 0, &buf, 6, 2000);
42584+ 0, 0, buf, 6, 2000);
42585 }
42586
42587 static void gigaset_freebcshw(struct bc_state *bcs)
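
The one-character gigaset_brkchars change above fixes an outright bug rather than adding hardening: buf is an array-typed parameter, so within the function it is already a pointer, and &buf is the address of that local pointer. usb_control_msg() was being handed bytes of the stack slot instead of the six caller bytes. Minimal reproduction:

    /* The fixed bug in miniature: for an array-typed parameter, buf is
     * a pointer, so &buf points at the pointer itself, not at the data. */
    #include <stdio.h>

    static void xfer(const void *p)
    {
            printf("first byte: %02x\n", *(const unsigned char *)p);
    }

    static void demo(const unsigned char buf[6])
    {
            xfer(buf);      /* correct: the caller's first data byte */
            xfer(&buf);     /* wrong: a byte of the pointer value */
    }

    int main(void)
    {
            const unsigned char six[6] = { 0xaa, 1, 2, 3, 4, 5 };

            demo(six);
            return 0;
    }
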
42588@@ -869,22 +869,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
42589 }
42590
42591 static const struct gigaset_ops ops = {
42592- gigaset_write_cmd,
42593- gigaset_write_room,
42594- gigaset_chars_in_buffer,
42595- gigaset_brkchars,
42596- gigaset_init_bchannel,
42597- gigaset_close_bchannel,
42598- gigaset_initbcshw,
42599- gigaset_freebcshw,
42600- gigaset_reinitbcshw,
42601- gigaset_initcshw,
42602- gigaset_freecshw,
42603- gigaset_set_modem_ctrl,
42604- gigaset_baud_rate,
42605- gigaset_set_line_ctrl,
42606- gigaset_m10x_send_skb,
42607- gigaset_m10x_input,
42608+ .write_cmd = gigaset_write_cmd,
42609+ .write_room = gigaset_write_room,
42610+ .chars_in_buffer = gigaset_chars_in_buffer,
42611+ .brkchars = gigaset_brkchars,
42612+ .init_bchannel = gigaset_init_bchannel,
42613+ .close_bchannel = gigaset_close_bchannel,
42614+ .initbcshw = gigaset_initbcshw,
42615+ .freebcshw = gigaset_freebcshw,
42616+ .reinitbcshw = gigaset_reinitbcshw,
42617+ .initcshw = gigaset_initcshw,
42618+ .freecshw = gigaset_freecshw,
42619+ .set_modem_ctrl = gigaset_set_modem_ctrl,
42620+ .baud_rate = gigaset_baud_rate,
42621+ .set_line_ctrl = gigaset_set_line_ctrl,
42622+ .send_skb = gigaset_m10x_send_skb,
42623+ .handle_input = gigaset_m10x_input,
42624 };
42625
42626 /*
42627diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
42628index 4d9b195..455075c 100644
42629--- a/drivers/isdn/hardware/avm/b1.c
42630+++ b/drivers/isdn/hardware/avm/b1.c
42631@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
42632 }
42633 if (left) {
42634 if (t4file->user) {
42635- if (copy_from_user(buf, dp, left))
42636+ if (left > sizeof buf || copy_from_user(buf, dp, left))
42637 return -EFAULT;
42638 } else {
42639 memcpy(buf, dp, left);
42640@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
42641 }
42642 if (left) {
42643 if (config->user) {
42644- if (copy_from_user(buf, dp, left))
42645+ if (left > sizeof buf || copy_from_user(buf, dp, left))
42646 return -EFAULT;
42647 } else {
42648 memcpy(buf, dp, left);
42649diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
42650index 9bb12ba..d4262f7 100644
42651--- a/drivers/isdn/i4l/isdn_common.c
42652+++ b/drivers/isdn/i4l/isdn_common.c
42653@@ -1651,6 +1651,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
42654 } else
42655 return -EINVAL;
42656 case IIOCDBGVAR:
42657+ if (!capable(CAP_SYS_RAWIO))
42658+ return -EPERM;
42659 if (arg) {
42660 if (copy_to_user(argp, &dev, sizeof(ulong)))
42661 return -EFAULT;
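
IIOCDBGVAR copies the address of the global dev structure out to userspace, a kernel-pointer disclosure that undermines address-space secrecy, so the hunk gates it behind CAP_SYS_RAWIO. The shape of that policy sketched in userspace (using geteuid() to stand in for the capability check is an assumption of the sketch):

    /* Deny an address-disclosing debug call to the unprivileged.
     * geteuid() == 0 models capable(CAP_SYS_RAWIO); names invented. */
    #include <stdio.h>
    #include <unistd.h>
    #include <errno.h>

    static long debug_get_addr(unsigned long *out, unsigned long kaddr)
    {
            if (geteuid() != 0)
                    return -EPERM;
            *out = kaddr;           /* revealed only to privileged callers */
            return 0;
    }

    int main(void)
    {
            unsigned long addr;

            if (debug_get_addr(&addr, 0xdeadbeefUL) == -EPERM)
                    puts("denied: caller lacks privilege");
            return 0;
    }
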
42662diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
42663index 91d5730..336523e 100644
42664--- a/drivers/isdn/i4l/isdn_concap.c
42665+++ b/drivers/isdn/i4l/isdn_concap.c
42666@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
42667 }
42668
42669 struct concap_device_ops isdn_concap_reliable_dl_dops = {
42670- &isdn_concap_dl_data_req,
42671- &isdn_concap_dl_connect_req,
42672- &isdn_concap_dl_disconn_req
42673+ .data_req = &isdn_concap_dl_data_req,
42674+ .connect_req = &isdn_concap_dl_connect_req,
42675+ .disconn_req = &isdn_concap_dl_disconn_req
42676 };
42677
42678 /* The following should better go into a dedicated source file such that
42679diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
42680index 3c5f249..5fac4d0 100644
42681--- a/drivers/isdn/i4l/isdn_tty.c
42682+++ b/drivers/isdn/i4l/isdn_tty.c
42683@@ -1508,9 +1508,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
42684
42685 #ifdef ISDN_DEBUG_MODEM_OPEN
42686 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
42687- port->count);
42688+ atomic_read(&port->count));
42689 #endif
42690- port->count++;
42691+ atomic_inc(&port->count);
42692 port->tty = tty;
42693 /*
42694 * Start up serial port
42695@@ -1554,7 +1554,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
42696 #endif
42697 return;
42698 }
42699- if ((tty->count == 1) && (port->count != 1)) {
42700+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
42701 /*
42702 * Uh, oh. tty->count is 1, which means that the tty
42703 * structure will be freed. Info->count should always
42704@@ -1563,15 +1563,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
42705 * serial port won't be shutdown.
42706 */
42707 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
42708- "info->count is %d\n", port->count);
42709- port->count = 1;
42710+ "info->count is %d\n", atomic_read(&port->count));
42711+ atomic_set(&port->count, 1);
42712 }
42713- if (--port->count < 0) {
42714+ if (atomic_dec_return(&port->count) < 0) {
42715 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
42716- info->line, port->count);
42717- port->count = 0;
42718+ info->line, atomic_read(&port->count));
42719+ atomic_set(&port->count, 0);
42720 }
42721- if (port->count) {
42722+ if (atomic_read(&port->count)) {
42723 #ifdef ISDN_DEBUG_MODEM_OPEN
42724 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
42725 #endif
42726@@ -1625,7 +1625,7 @@ isdn_tty_hangup(struct tty_struct *tty)
42727 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
42728 return;
42729 isdn_tty_shutdown(info);
42730- port->count = 0;
42731+ atomic_set(&port->count, 0);
42732 port->flags &= ~ASYNC_NORMAL_ACTIVE;
42733 port->tty = NULL;
42734 wake_up_interruptible(&port->open_wait);
42735@@ -1970,7 +1970,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
42736 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
42737 modem_info *info = &dev->mdm.info[i];
42738
42739- if (info->port.count == 0)
42740+ if (atomic_read(&info->port.count) == 0)
42741 continue;
42742 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
42743 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
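
These isdn_tty hunks, like the gigaset interface.c ones earlier, follow this tree's conversion of the tty_port open count from a plain int to atomic_t: once one path updates it atomically, every ++/-- and read must go through atomic_inc()/atomic_dec_return()/atomic_read(), or concurrent open and close can lose updates. The same refcount shape in C11:

    /* An open/close count that stays correct without a shared lock.
     * atomic_fetch_sub() returning 1 means this close was the last. */
    #include <stdatomic.h>

    struct port { atomic_int count; };

    static void port_open(struct port *p)
    {
            atomic_fetch_add(&p->count, 1);
    }

    static int port_close(struct port *p)   /* nonzero: last user gone */
    {
            return atomic_fetch_sub(&p->count, 1) == 1;
    }

    int main(void)
    {
            struct port p = { 0 };

            port_open(&p);
            return port_close(&p) ? 0 : 1;  /* single open, then last close */
    }
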
42744diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
42745index e2d4e58..40cd045 100644
42746--- a/drivers/isdn/i4l/isdn_x25iface.c
42747+++ b/drivers/isdn/i4l/isdn_x25iface.c
42748@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
42749
42750
42751 static struct concap_proto_ops ix25_pops = {
42752- &isdn_x25iface_proto_new,
42753- &isdn_x25iface_proto_del,
42754- &isdn_x25iface_proto_restart,
42755- &isdn_x25iface_proto_close,
42756- &isdn_x25iface_xmit,
42757- &isdn_x25iface_receive,
42758- &isdn_x25iface_connect_ind,
42759- &isdn_x25iface_disconn_ind
42760+ .proto_new = &isdn_x25iface_proto_new,
42761+ .proto_del = &isdn_x25iface_proto_del,
42762+ .restart = &isdn_x25iface_proto_restart,
42763+ .close = &isdn_x25iface_proto_close,
42764+ .encap_and_xmit = &isdn_x25iface_xmit,
42765+ .data_ind = &isdn_x25iface_receive,
42766+ .connect_ind = &isdn_x25iface_connect_ind,
42767+ .disconn_ind = &isdn_x25iface_disconn_ind
42768 };
42769
42770 /* error message helper function */
42771diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
42772index 53d487f..f020f41 100644
42773--- a/drivers/isdn/icn/icn.c
42774+++ b/drivers/isdn/icn/icn.c
42775@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
42776 if (count > len)
42777 count = len;
42778 if (user) {
42779- if (copy_from_user(msg, buf, count))
42780+ if (count > sizeof msg || copy_from_user(msg, buf, count))
42781 return -EFAULT;
42782 } else
42783 memcpy(msg, buf, count);
42784diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
42785index a4f05c5..1433bc5 100644
42786--- a/drivers/isdn/mISDN/dsp_cmx.c
42787+++ b/drivers/isdn/mISDN/dsp_cmx.c
42788@@ -1628,7 +1628,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
42789 static u16 dsp_count; /* last sample count */
42790 static int dsp_count_valid; /* if we have last sample count */
42791
42792-void
42793+void __intentional_overflow(-1)
42794 dsp_cmx_send(void *arg)
42795 {
42796 struct dsp_conf *conf;
42797diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
42798index d93e245..e7ece6b 100644
42799--- a/drivers/leds/leds-clevo-mail.c
42800+++ b/drivers/leds/leds-clevo-mail.c
42801@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
42802 * detected as working, but in reality it is not) as low as
42803 * possible.
42804 */
42805-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
42806+static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
42807 {
42808 .callback = clevo_mail_led_dmi_callback,
42809 .ident = "Clevo D410J",
42810diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
42811index 5b8f938..b73d657 100644
42812--- a/drivers/leds/leds-ss4200.c
42813+++ b/drivers/leds/leds-ss4200.c
42814@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
42815 * detected as working, but in reality it is not) as low as
42816 * possible.
42817 */
42818-static struct dmi_system_id nas_led_whitelist[] __initdata = {
42819+static struct dmi_system_id nas_led_whitelist[] __initconst = {
42820 {
42821 .callback = ss4200_led_dmi_callback,
42822 .ident = "Intel SS4200-E",
42823diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
42824index 0bf1e4e..b4bf44e 100644
42825--- a/drivers/lguest/core.c
42826+++ b/drivers/lguest/core.c
42827@@ -97,9 +97,17 @@ static __init int map_switcher(void)
42828 * The end address needs +1 because __get_vm_area allocates an
42829 * extra guard page, so we need space for that.
42830 */
42831+
42832+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
42833+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
42834+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
42835+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
42836+#else
42837 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
42838 VM_ALLOC, switcher_addr, switcher_addr
42839 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
42840+#endif
42841+
42842 if (!switcher_vma) {
42843 err = -ENOMEM;
42844 printk("lguest: could not map switcher pages high\n");
42845@@ -124,7 +132,7 @@ static __init int map_switcher(void)
42846 * Now the Switcher is mapped at the right address, we can't fail!
42847 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
42848 */
42849- memcpy(switcher_vma->addr, start_switcher_text,
42850+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
42851 end_switcher_text - start_switcher_text);
42852
42853 printk(KERN_INFO "lguest: mapped switcher at %p\n",
42854diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
42855index bfb39bb..08a603b 100644
42856--- a/drivers/lguest/page_tables.c
42857+++ b/drivers/lguest/page_tables.c
42858@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
42859 /*:*/
42860
42861 #ifdef CONFIG_X86_PAE
42862-static void release_pmd(pmd_t *spmd)
42863+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
42864 {
42865 /* If the entry's not present, there's nothing to release. */
42866 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
42867diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
42868index 922a1ac..9dd0c2a 100644
42869--- a/drivers/lguest/x86/core.c
42870+++ b/drivers/lguest/x86/core.c
42871@@ -59,7 +59,7 @@ static struct {
42872 /* Offset from where switcher.S was compiled to where we've copied it */
42873 static unsigned long switcher_offset(void)
42874 {
42875- return switcher_addr - (unsigned long)start_switcher_text;
42876+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
42877 }
42878
42879 /* This cpu's struct lguest_pages (after the Switcher text page) */
42880@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
42881 * These copies are pretty cheap, so we do them unconditionally: */
42882 /* Save the current Host top-level page directory.
42883 */
42884+
42885+#ifdef CONFIG_PAX_PER_CPU_PGD
42886+ pages->state.host_cr3 = read_cr3();
42887+#else
42888 pages->state.host_cr3 = __pa(current->mm->pgd);
42889+#endif
42890+
42891 /*
42892 * Set up the Guest's page tables to see this CPU's pages (and no
42893 * other CPU's pages).
42894@@ -477,7 +483,7 @@ void __init lguest_arch_host_init(void)
42895 * compiled-in switcher code and the high-mapped copy we just made.
42896 */
42897 for (i = 0; i < IDT_ENTRIES; i++)
42898- default_idt_entries[i] += switcher_offset();
42899+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
42900
42901 /*
42902 * Set up the Switcher's per-cpu areas.
42903@@ -560,7 +566,7 @@ void __init lguest_arch_host_init(void)
42904 * it will be undisturbed when we switch. To change %cs and jump we
42905 * need this structure to feed to Intel's "lcall" instruction.
42906 */
42907- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
42908+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
42909 lguest_entry.segment = LGUEST_CS;
42910
42911 /*
42912diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
42913index 40634b0..4f5855e 100644
42914--- a/drivers/lguest/x86/switcher_32.S
42915+++ b/drivers/lguest/x86/switcher_32.S
42916@@ -87,6 +87,7 @@
42917 #include <asm/page.h>
42918 #include <asm/segment.h>
42919 #include <asm/lguest.h>
42920+#include <asm/processor-flags.h>
42921
42922 // We mark the start of the code to copy
42923 // It's placed in .text tho it's never run here
42924@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
42925 // Changes type when we load it: damn Intel!
42926 // For after we switch over our page tables
42927 // That entry will be read-only: we'd crash.
42928+
42929+#ifdef CONFIG_PAX_KERNEXEC
42930+ mov %cr0, %edx
42931+ xor $X86_CR0_WP, %edx
42932+ mov %edx, %cr0
42933+#endif
42934+
42935 movl $(GDT_ENTRY_TSS*8), %edx
42936 ltr %dx
42937
42938@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
42939 // Let's clear it again for our return.
42940 // The GDT descriptor of the Host
42941 // Points to the table after two "size" bytes
42942- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
42943+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
42944 // Clear "used" from type field (byte 5, bit 2)
42945- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
42946+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
42947+
42948+#ifdef CONFIG_PAX_KERNEXEC
42949+ mov %cr0, %eax
42950+ xor $X86_CR0_WP, %eax
42951+ mov %eax, %cr0
42952+#endif
42953
42954 // Once our page table's switched, the Guest is live!
42955 // The Host fades as we run this final step.
42956@@ -295,13 +309,12 @@ deliver_to_host:
42957 // I consulted gcc, and it gave
42958 // These instructions, which I gladly credit:
42959 leal (%edx,%ebx,8), %eax
42960- movzwl (%eax),%edx
42961- movl 4(%eax), %eax
42962- xorw %ax, %ax
42963- orl %eax, %edx
42964+ movl 4(%eax), %edx
42965+ movw (%eax), %dx
42966 // Now the address of the handler's in %edx
42967 // We call it now: its "iret" drops us home.
42968- jmp *%edx
42969+ ljmp $__KERNEL_CS, $1f
42970+1: jmp *%edx
42971
42972 // Every interrupt can come to us here
42973 // But we must truly tell each apart.
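The lguest changes are all KERNEXEC fallout: kernel text becomes read-only and may sit at a different linear address, so ktla_ktva() translates text symbols to their true virtual addresses, and the switcher briefly clears CR0.WP around its one legitimate write into a read-only descriptor. The WP toggle that the assembly above open-codes, as a simplified C sketch (wp_open/wp_close are hypothetical names; the tree's real pax_open_kernel/pax_close_kernel also deal with preemption):

    #include <asm/processor-flags.h>        /* X86_CR0_WP */

    static inline unsigned long wp_open(void)
    {
            unsigned long cr0;

            asm volatile("mov %%cr0, %0" : "=r" (cr0));
            asm volatile("mov %0, %%cr0" : : "r" (cr0 ^ X86_CR0_WP));
            return cr0;     /* caller restores this via wp_close() */
    }

    static inline void wp_close(unsigned long cr0)
    {
            asm volatile("mov %0, %%cr0" : : "r" (cr0));
    }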
42974diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
42975index 9762f1b..3e79734 100644
42976--- a/drivers/md/bcache/closure.h
42977+++ b/drivers/md/bcache/closure.h
42978@@ -483,7 +483,7 @@ static inline void closure_queue(struct closure *cl)
42979 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
42980 struct workqueue_struct *wq)
42981 {
42982- BUG_ON(object_is_on_stack(cl));
42983+ BUG_ON(object_starts_on_stack(cl));
42984 closure_set_ip(cl);
42985 cl->fn = fn;
42986 cl->wq = wq;
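object_starts_on_stack() is the grsec rename of the old helper, making explicit that only the object's first byte is tested, so a closure that merely ends on the stack no longer trips the BUG_ON. Its assumed shape, matching the stock object_is_on_stack():

    #include <linux/sched.h>        /* task_stack_page(), THREAD_SIZE */

    static inline int object_starts_on_stack(const void *obj)
    {
            const void *stack = task_stack_page(current);

            return obj >= stack && obj < stack + THREAD_SIZE;
    }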
42987diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
42988index 12dc29b..1596277 100644
42989--- a/drivers/md/bitmap.c
42990+++ b/drivers/md/bitmap.c
42991@@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
42992 chunk_kb ? "KB" : "B");
42993 if (bitmap->storage.file) {
42994 seq_printf(seq, ", file: ");
42995- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
42996+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
42997 }
42998
42999 seq_printf(seq, "\n");
43000diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
43001index 5152142..623d141 100644
43002--- a/drivers/md/dm-ioctl.c
43003+++ b/drivers/md/dm-ioctl.c
43004@@ -1769,7 +1769,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
43005 cmd == DM_LIST_VERSIONS_CMD)
43006 return 0;
43007
43008- if ((cmd == DM_DEV_CREATE_CMD)) {
43009+ if (cmd == DM_DEV_CREATE_CMD) {
43010 if (!*param->name) {
43011 DMWARN("name not supplied when creating device");
43012 return -EINVAL;
43013diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
43014index 9584443..9fc9ac9 100644
43015--- a/drivers/md/dm-raid1.c
43016+++ b/drivers/md/dm-raid1.c
43017@@ -40,7 +40,7 @@ enum dm_raid1_error {
43018
43019 struct mirror {
43020 struct mirror_set *ms;
43021- atomic_t error_count;
43022+ atomic_unchecked_t error_count;
43023 unsigned long error_type;
43024 struct dm_dev *dev;
43025 sector_t offset;
43026@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
43027 struct mirror *m;
43028
43029 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
43030- if (!atomic_read(&m->error_count))
43031+ if (!atomic_read_unchecked(&m->error_count))
43032 return m;
43033
43034 return NULL;
43035@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
43036 * simple way to tell if a device has encountered
43037 * errors.
43038 */
43039- atomic_inc(&m->error_count);
43040+ atomic_inc_unchecked(&m->error_count);
43041
43042 if (test_and_set_bit(error_type, &m->error_type))
43043 return;
43044@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
43045 struct mirror *m = get_default_mirror(ms);
43046
43047 do {
43048- if (likely(!atomic_read(&m->error_count)))
43049+ if (likely(!atomic_read_unchecked(&m->error_count)))
43050 return m;
43051
43052 if (m-- == ms->mirror)
43053@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
43054 {
43055 struct mirror *default_mirror = get_default_mirror(m->ms);
43056
43057- return !atomic_read(&default_mirror->error_count);
43058+ return !atomic_read_unchecked(&default_mirror->error_count);
43059 }
43060
43061 static int mirror_available(struct mirror_set *ms, struct bio *bio)
43062@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
43063 */
43064 if (likely(region_in_sync(ms, region, 1)))
43065 m = choose_mirror(ms, bio->bi_sector);
43066- else if (m && atomic_read(&m->error_count))
43067+ else if (m && atomic_read_unchecked(&m->error_count))
43068 m = NULL;
43069
43070 if (likely(m))
43071@@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
43072 }
43073
43074 ms->mirror[mirror].ms = ms;
43075- atomic_set(&(ms->mirror[mirror].error_count), 0);
43076+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
43077 ms->mirror[mirror].error_type = 0;
43078 ms->mirror[mirror].offset = offset;
43079
43080@@ -1339,7 +1339,7 @@ static void mirror_resume(struct dm_target *ti)
43081 */
43082 static char device_status_char(struct mirror *m)
43083 {
43084- if (!atomic_read(&(m->error_count)))
43085+ if (!atomic_read_unchecked(&(m->error_count)))
43086 return 'A';
43087
43088 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
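The atomic_t to atomic_unchecked_t conversions that dominate the rest of this patch serve PaX REFCOUNT: the ordinary atomic arithmetic ops trap on signed overflow to stop refcount-overflow exploits, so counters that may legitimately wrap (pure statistics such as error_count here) move to the unchecked variant. Its assumed shape, mirroring asm/atomic.h in the grsec tree:

    typedef struct {
            int counter;
    } atomic_unchecked_t;

    /* same semantics as atomic_read()/atomic_set(), minus the overflow
     * trap that REFCOUNT compiles into the checked inc/add/sub ops */
    static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
    {
            return *(volatile const int *)&v->counter;
    }

    static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
    {
            v->counter = i;
    }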
43089diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
43090index 28a9012..9c0f6a5 100644
43091--- a/drivers/md/dm-stats.c
43092+++ b/drivers/md/dm-stats.c
43093@@ -382,7 +382,7 @@ do_sync_free:
43094 synchronize_rcu_expedited();
43095 dm_stat_free(&s->rcu_head);
43096 } else {
43097- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
43098+ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
43099 call_rcu(&s->rcu_head, dm_stat_free);
43100 }
43101 return 0;
43102@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
43103 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
43104 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
43105 ));
43106- ACCESS_ONCE(last->last_sector) = end_sector;
43107- ACCESS_ONCE(last->last_rw) = bi_rw;
43108+ ACCESS_ONCE_RW(last->last_sector) = end_sector;
43109+ ACCESS_ONCE_RW(last->last_rw) = bi_rw;
43110 }
43111
43112 rcu_read_lock();
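ACCESS_ONCE_RW exists because the constify plugin plus a const-qualified ACCESS_ONCE turn accidental stores into compile errors; reads keep the old macro, intentional stores use the _RW flavour. The two definitions as they plausibly appear in compiler.h:

    #define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

    /* usage: v = ACCESS_ONCE(shared);  ACCESS_ONCE_RW(shared) = v + 1; */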
43113diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
43114index 73c1712..7347292 100644
43115--- a/drivers/md/dm-stripe.c
43116+++ b/drivers/md/dm-stripe.c
43117@@ -21,7 +21,7 @@ struct stripe {
43118 struct dm_dev *dev;
43119 sector_t physical_start;
43120
43121- atomic_t error_count;
43122+ atomic_unchecked_t error_count;
43123 };
43124
43125 struct stripe_c {
43126@@ -186,7 +186,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
43127 kfree(sc);
43128 return r;
43129 }
43130- atomic_set(&(sc->stripe[i].error_count), 0);
43131+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
43132 }
43133
43134 ti->private = sc;
43135@@ -327,7 +327,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
43136 DMEMIT("%d ", sc->stripes);
43137 for (i = 0; i < sc->stripes; i++) {
43138 DMEMIT("%s ", sc->stripe[i].dev->name);
43139- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
43140+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
43141 'D' : 'A';
43142 }
43143 buffer[i] = '\0';
43144@@ -372,8 +372,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
43145 */
43146 for (i = 0; i < sc->stripes; i++)
43147 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
43148- atomic_inc(&(sc->stripe[i].error_count));
43149- if (atomic_read(&(sc->stripe[i].error_count)) <
43150+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
43151+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
43152 DM_IO_ERROR_THRESHOLD)
43153 schedule_work(&sc->trigger_event);
43154 }
43155diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
43156index 3ba6a38..b0fa9b0 100644
43157--- a/drivers/md/dm-table.c
43158+++ b/drivers/md/dm-table.c
43159@@ -291,7 +291,7 @@ static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
43160 static int open_dev(struct dm_dev_internal *d, dev_t dev,
43161 struct mapped_device *md)
43162 {
43163- static char *_claim_ptr = "I belong to device-mapper";
43164+ static char _claim_ptr[] = "I belong to device-mapper";
43165 struct block_device *bdev;
43166
43167 int r;
43168@@ -359,7 +359,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
43169 if (!dev_size)
43170 return 0;
43171
43172- if ((start >= dev_size) || (start + len > dev_size)) {
43173+ if ((start >= dev_size) || (len > dev_size - start)) {
43174 DMWARN("%s: %s too small for target: "
43175 "start=%llu, len=%llu, dev_size=%llu",
43176 dm_device_name(ti->table->md), bdevname(bdev, b),
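The device_area_is_invalid() hunk is an integer-overflow fix, not a cleanup: start + len can wrap in sector_t, whereas after the start >= dev_size test the subtraction dev_size - start cannot underflow. A self-contained demo of the difference:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t dev_size = 1000, start = 10;
            uint64_t len = UINT64_MAX - 5;  /* hostile length */

            int old_check = (start >= dev_size) || (start + len > dev_size);
            int new_check = (start >= dev_size) || (len > dev_size - start);

            printf("old rejects: %d\n", old_check); /* 0 - wraps, accepted! */
            printf("new rejects: %d\n", new_check); /* 1 - rejected */
            return 0;
    }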
43177diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
43178index 8a30ad5..72792d3 100644
43179--- a/drivers/md/dm-thin-metadata.c
43180+++ b/drivers/md/dm-thin-metadata.c
43181@@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
43182 {
43183 pmd->info.tm = pmd->tm;
43184 pmd->info.levels = 2;
43185- pmd->info.value_type.context = pmd->data_sm;
43186+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
43187 pmd->info.value_type.size = sizeof(__le64);
43188 pmd->info.value_type.inc = data_block_inc;
43189 pmd->info.value_type.dec = data_block_dec;
43190@@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
43191
43192 pmd->bl_info.tm = pmd->tm;
43193 pmd->bl_info.levels = 1;
43194- pmd->bl_info.value_type.context = pmd->data_sm;
43195+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
43196 pmd->bl_info.value_type.size = sizeof(__le64);
43197 pmd->bl_info.value_type.inc = data_block_inc;
43198 pmd->bl_info.value_type.dec = data_block_dec;
43199diff --git a/drivers/md/dm.c b/drivers/md/dm.c
43200index 0704c52..0a33d61 100644
43201--- a/drivers/md/dm.c
43202+++ b/drivers/md/dm.c
43203@@ -185,9 +185,9 @@ struct mapped_device {
43204 /*
43205 * Event handling.
43206 */
43207- atomic_t event_nr;
43208+ atomic_unchecked_t event_nr;
43209 wait_queue_head_t eventq;
43210- atomic_t uevent_seq;
43211+ atomic_unchecked_t uevent_seq;
43212 struct list_head uevent_list;
43213 spinlock_t uevent_lock; /* Protect access to uevent_list */
43214
43215@@ -2021,8 +2021,8 @@ static struct mapped_device *alloc_dev(int minor)
43216 spin_lock_init(&md->deferred_lock);
43217 atomic_set(&md->holders, 1);
43218 atomic_set(&md->open_count, 0);
43219- atomic_set(&md->event_nr, 0);
43220- atomic_set(&md->uevent_seq, 0);
43221+ atomic_set_unchecked(&md->event_nr, 0);
43222+ atomic_set_unchecked(&md->uevent_seq, 0);
43223 INIT_LIST_HEAD(&md->uevent_list);
43224 spin_lock_init(&md->uevent_lock);
43225
43226@@ -2175,7 +2175,7 @@ static void event_callback(void *context)
43227
43228 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
43229
43230- atomic_inc(&md->event_nr);
43231+ atomic_inc_unchecked(&md->event_nr);
43232 wake_up(&md->eventq);
43233 }
43234
43235@@ -2868,18 +2868,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
43236
43237 uint32_t dm_next_uevent_seq(struct mapped_device *md)
43238 {
43239- return atomic_add_return(1, &md->uevent_seq);
43240+ return atomic_add_return_unchecked(1, &md->uevent_seq);
43241 }
43242
43243 uint32_t dm_get_event_nr(struct mapped_device *md)
43244 {
43245- return atomic_read(&md->event_nr);
43246+ return atomic_read_unchecked(&md->event_nr);
43247 }
43248
43249 int dm_wait_event(struct mapped_device *md, int event_nr)
43250 {
43251 return wait_event_interruptible(md->eventq,
43252- (event_nr != atomic_read(&md->event_nr)));
43253+ (event_nr != atomic_read_unchecked(&md->event_nr)));
43254 }
43255
43256 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
43257diff --git a/drivers/md/md.c b/drivers/md/md.c
43258index 369d919..ba7049c 100644
43259--- a/drivers/md/md.c
43260+++ b/drivers/md/md.c
43261@@ -194,10 +194,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
43262 * start build, activate spare
43263 */
43264 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
43265-static atomic_t md_event_count;
43266+static atomic_unchecked_t md_event_count;
43267 void md_new_event(struct mddev *mddev)
43268 {
43269- atomic_inc(&md_event_count);
43270+ atomic_inc_unchecked(&md_event_count);
43271 wake_up(&md_event_waiters);
43272 }
43273 EXPORT_SYMBOL_GPL(md_new_event);
43274@@ -207,7 +207,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
43275 */
43276 static void md_new_event_inintr(struct mddev *mddev)
43277 {
43278- atomic_inc(&md_event_count);
43279+ atomic_inc_unchecked(&md_event_count);
43280 wake_up(&md_event_waiters);
43281 }
43282
43283@@ -1463,7 +1463,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
43284 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
43285 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
43286 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
43287- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
43288+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
43289
43290 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
43291 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
43292@@ -1710,7 +1710,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
43293 else
43294 sb->resync_offset = cpu_to_le64(0);
43295
43296- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
43297+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
43298
43299 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
43300 sb->size = cpu_to_le64(mddev->dev_sectors);
43301@@ -2715,7 +2715,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
43302 static ssize_t
43303 errors_show(struct md_rdev *rdev, char *page)
43304 {
43305- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
43306+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
43307 }
43308
43309 static ssize_t
43310@@ -2724,7 +2724,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
43311 char *e;
43312 unsigned long n = simple_strtoul(buf, &e, 10);
43313 if (*buf && (*e == 0 || *e == '\n')) {
43314- atomic_set(&rdev->corrected_errors, n);
43315+ atomic_set_unchecked(&rdev->corrected_errors, n);
43316 return len;
43317 }
43318 return -EINVAL;
43319@@ -3173,8 +3173,8 @@ int md_rdev_init(struct md_rdev *rdev)
43320 rdev->sb_loaded = 0;
43321 rdev->bb_page = NULL;
43322 atomic_set(&rdev->nr_pending, 0);
43323- atomic_set(&rdev->read_errors, 0);
43324- atomic_set(&rdev->corrected_errors, 0);
43325+ atomic_set_unchecked(&rdev->read_errors, 0);
43326+ atomic_set_unchecked(&rdev->corrected_errors, 0);
43327
43328 INIT_LIST_HEAD(&rdev->same_set);
43329 init_waitqueue_head(&rdev->blocked_wait);
43330@@ -7038,7 +7038,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
43331
43332 spin_unlock(&pers_lock);
43333 seq_printf(seq, "\n");
43334- seq->poll_event = atomic_read(&md_event_count);
43335+ seq->poll_event = atomic_read_unchecked(&md_event_count);
43336 return 0;
43337 }
43338 if (v == (void*)2) {
43339@@ -7141,7 +7141,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
43340 return error;
43341
43342 seq = file->private_data;
43343- seq->poll_event = atomic_read(&md_event_count);
43344+ seq->poll_event = atomic_read_unchecked(&md_event_count);
43345 return error;
43346 }
43347
43348@@ -7155,7 +7155,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
43349 /* always allow read */
43350 mask = POLLIN | POLLRDNORM;
43351
43352- if (seq->poll_event != atomic_read(&md_event_count))
43353+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
43354 mask |= POLLERR | POLLPRI;
43355 return mask;
43356 }
43357@@ -7199,7 +7199,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
43358 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
43359 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
43360 (int)part_stat_read(&disk->part0, sectors[1]) -
43361- atomic_read(&disk->sync_io);
43362+ atomic_read_unchecked(&disk->sync_io);
43363 /* sync IO will cause sync_io to increase before the disk_stats
43364 * as sync_io is counted when a request starts, and
43365 * disk_stats is counted when it completes.
43366diff --git a/drivers/md/md.h b/drivers/md/md.h
43367index 0095ec8..c89277a 100644
43368--- a/drivers/md/md.h
43369+++ b/drivers/md/md.h
43370@@ -94,13 +94,13 @@ struct md_rdev {
43371 * only maintained for arrays that
43372 * support hot removal
43373 */
43374- atomic_t read_errors; /* number of consecutive read errors that
43375+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
43376 * we have tried to ignore.
43377 */
43378 struct timespec last_read_error; /* monotonic time since our
43379 * last read error
43380 */
43381- atomic_t corrected_errors; /* number of corrected read errors,
43382+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
43383 * for reporting to userspace and storing
43384 * in superblock.
43385 */
43386@@ -449,7 +449,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
43387
43388 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
43389 {
43390- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
43391+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
43392 }
43393
43394 struct md_personality
43395diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
43396index 3e6d115..ffecdeb 100644
43397--- a/drivers/md/persistent-data/dm-space-map.h
43398+++ b/drivers/md/persistent-data/dm-space-map.h
43399@@ -71,6 +71,7 @@ struct dm_space_map {
43400 dm_sm_threshold_fn fn,
43401 void *context);
43402 };
43403+typedef struct dm_space_map __no_const dm_space_map_no_const;
43404
43405 /*----------------------------------------------------------------*/
43406
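The __no_const typedef added here is the escape hatch for the constify plugin: structures made up of function pointers get force-qualified const tree-wide, and a field that genuinely must be assigned at runtime (value_type.context above) is declared through a non-constified alias instead. Assumed wiring:

    #ifdef CONSTIFY_PLUGIN          /* guard name assumed */
    #define __no_const __attribute__((no_const))
    #else
    #define __no_const
    #endif

    typedef struct dm_space_map __no_const dm_space_map_no_const;

    /* callers then cast through the writable alias for the one mutable use:
     *      pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
     */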
43407diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
43408index a49cfcc..20b9a65 100644
43409--- a/drivers/md/raid1.c
43410+++ b/drivers/md/raid1.c
43411@@ -1921,7 +1921,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
43412 if (r1_sync_page_io(rdev, sect, s,
43413 bio->bi_io_vec[idx].bv_page,
43414 READ) != 0)
43415- atomic_add(s, &rdev->corrected_errors);
43416+ atomic_add_unchecked(s, &rdev->corrected_errors);
43417 }
43418 sectors -= s;
43419 sect += s;
43420@@ -2148,7 +2148,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
43421 test_bit(In_sync, &rdev->flags)) {
43422 if (r1_sync_page_io(rdev, sect, s,
43423 conf->tmppage, READ)) {
43424- atomic_add(s, &rdev->corrected_errors);
43425+ atomic_add_unchecked(s, &rdev->corrected_errors);
43426 printk(KERN_INFO
43427 "md/raid1:%s: read error corrected "
43428 "(%d sectors at %llu on %s)\n",
43429diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
43430index 06eeb99..770613e 100644
43431--- a/drivers/md/raid10.c
43432+++ b/drivers/md/raid10.c
43433@@ -1963,7 +1963,7 @@ static void end_sync_read(struct bio *bio, int error)
43434 /* The write handler will notice the lack of
43435 * R10BIO_Uptodate and record any errors etc
43436 */
43437- atomic_add(r10_bio->sectors,
43438+ atomic_add_unchecked(r10_bio->sectors,
43439 &conf->mirrors[d].rdev->corrected_errors);
43440
43441 /* for reconstruct, we always reschedule after a read.
43442@@ -2321,7 +2321,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
43443 {
43444 struct timespec cur_time_mon;
43445 unsigned long hours_since_last;
43446- unsigned int read_errors = atomic_read(&rdev->read_errors);
43447+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
43448
43449 ktime_get_ts(&cur_time_mon);
43450
43451@@ -2343,9 +2343,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
43452 * overflowing the shift of read_errors by hours_since_last.
43453 */
43454 if (hours_since_last >= 8 * sizeof(read_errors))
43455- atomic_set(&rdev->read_errors, 0);
43456+ atomic_set_unchecked(&rdev->read_errors, 0);
43457 else
43458- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
43459+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
43460 }
43461
43462 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
43463@@ -2399,8 +2399,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
43464 return;
43465
43466 check_decay_read_errors(mddev, rdev);
43467- atomic_inc(&rdev->read_errors);
43468- if (atomic_read(&rdev->read_errors) > max_read_errors) {
43469+ atomic_inc_unchecked(&rdev->read_errors);
43470+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
43471 char b[BDEVNAME_SIZE];
43472 bdevname(rdev->bdev, b);
43473
43474@@ -2408,7 +2408,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
43475 "md/raid10:%s: %s: Raid device exceeded "
43476 "read_error threshold [cur %d:max %d]\n",
43477 mdname(mddev), b,
43478- atomic_read(&rdev->read_errors), max_read_errors);
43479+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
43480 printk(KERN_NOTICE
43481 "md/raid10:%s: %s: Failing raid device\n",
43482 mdname(mddev), b);
43483@@ -2563,7 +2563,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
43484 sect +
43485 choose_data_offset(r10_bio, rdev)),
43486 bdevname(rdev->bdev, b));
43487- atomic_add(s, &rdev->corrected_errors);
43488+ atomic_add_unchecked(s, &rdev->corrected_errors);
43489 }
43490
43491 rdev_dec_pending(rdev, mddev);
43492diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
43493index 03f82ab..374bb38 100644
43494--- a/drivers/md/raid5.c
43495+++ b/drivers/md/raid5.c
43496@@ -1991,21 +1991,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
43497 mdname(conf->mddev), STRIPE_SECTORS,
43498 (unsigned long long)s,
43499 bdevname(rdev->bdev, b));
43500- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
43501+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
43502 clear_bit(R5_ReadError, &sh->dev[i].flags);
43503 clear_bit(R5_ReWrite, &sh->dev[i].flags);
43504 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
43505 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
43506
43507- if (atomic_read(&rdev->read_errors))
43508- atomic_set(&rdev->read_errors, 0);
43509+ if (atomic_read_unchecked(&rdev->read_errors))
43510+ atomic_set_unchecked(&rdev->read_errors, 0);
43511 } else {
43512 const char *bdn = bdevname(rdev->bdev, b);
43513 int retry = 0;
43514 int set_bad = 0;
43515
43516 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
43517- atomic_inc(&rdev->read_errors);
43518+ atomic_inc_unchecked(&rdev->read_errors);
43519 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
43520 printk_ratelimited(
43521 KERN_WARNING
43522@@ -2033,7 +2033,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
43523 mdname(conf->mddev),
43524 (unsigned long long)s,
43525 bdn);
43526- } else if (atomic_read(&rdev->read_errors)
43527+ } else if (atomic_read_unchecked(&rdev->read_errors)
43528 > conf->max_nr_stripes)
43529 printk(KERN_WARNING
43530 "md/raid:%s: Too many read errors, failing device %s.\n",
43531diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
43532index 983db75..ef9248c 100644
43533--- a/drivers/media/dvb-core/dvbdev.c
43534+++ b/drivers/media/dvb-core/dvbdev.c
43535@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
43536 const struct dvb_device *template, void *priv, int type)
43537 {
43538 struct dvb_device *dvbdev;
43539- struct file_operations *dvbdevfops;
43540+ file_operations_no_const *dvbdevfops;
43541 struct device *clsdev;
43542 int minor;
43543 int id;
43544diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
43545index 9b6c3bb..baeb5c7 100644
43546--- a/drivers/media/dvb-frontends/dib3000.h
43547+++ b/drivers/media/dvb-frontends/dib3000.h
43548@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
43549 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
43550 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
43551 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
43552-};
43553+} __no_const;
43554
43555 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
43556 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
43557diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
43558index ed8cb90..5ef7f79 100644
43559--- a/drivers/media/pci/cx88/cx88-video.c
43560+++ b/drivers/media/pci/cx88/cx88-video.c
43561@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
43562
43563 /* ------------------------------------------------------------------ */
43564
43565-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
43566-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
43567-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
43568+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
43569+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
43570+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
43571
43572 module_param_array(video_nr, int, NULL, 0444);
43573 module_param_array(vbi_nr, int, NULL, 0444);
43574diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
43575index 802642d..5534900 100644
43576--- a/drivers/media/pci/ivtv/ivtv-driver.c
43577+++ b/drivers/media/pci/ivtv/ivtv-driver.c
43578@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
43579 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
43580
43581 /* ivtv instance counter */
43582-static atomic_t ivtv_instance = ATOMIC_INIT(0);
43583+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
43584
43585 /* Parameter declarations */
43586 static int cardtype[IVTV_MAX_CARDS];
43587diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
43588index dfd0a21..6bbb465 100644
43589--- a/drivers/media/platform/omap/omap_vout.c
43590+++ b/drivers/media/platform/omap/omap_vout.c
43591@@ -63,7 +63,6 @@ enum omap_vout_channels {
43592 OMAP_VIDEO2,
43593 };
43594
43595-static struct videobuf_queue_ops video_vbq_ops;
43596 /* Variables configurable through module params*/
43597 static u32 video1_numbuffers = 3;
43598 static u32 video2_numbuffers = 3;
43599@@ -1014,6 +1013,12 @@ static int omap_vout_open(struct file *file)
43600 {
43601 struct videobuf_queue *q;
43602 struct omap_vout_device *vout = NULL;
43603+ static struct videobuf_queue_ops video_vbq_ops = {
43604+ .buf_setup = omap_vout_buffer_setup,
43605+ .buf_prepare = omap_vout_buffer_prepare,
43606+ .buf_release = omap_vout_buffer_release,
43607+ .buf_queue = omap_vout_buffer_queue,
43608+ };
43609
43610 vout = video_drvdata(file);
43611 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
43612@@ -1031,10 +1036,6 @@ static int omap_vout_open(struct file *file)
43613 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
43614
43615 q = &vout->vbq;
43616- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
43617- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
43618- video_vbq_ops.buf_release = omap_vout_buffer_release;
43619- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
43620 spin_lock_init(&vout->vbq_lock);
43621
43622 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
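Same constification idea as elsewhere in the patch: rather than a file-scope videobuf_queue_ops that open() patches at runtime, the table is fully initialized where it is defined, so it never needs a post-init store. The pattern reduced to essentials (names illustrative; const is written out here, while in-tree the plugin adds it):

    struct qops {
            int (*buf_setup)(void);
    };

    static int my_buf_setup(void) { return 0; }

    static int my_open(void)
    {
            /* built at compile time, never written afterwards */
            static const struct qops vbq_ops = {
                    .buf_setup = my_buf_setup,
            };

            return vbq_ops.buf_setup();
    }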
43623diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
43624index fb2acc5..a2fcbdc4 100644
43625--- a/drivers/media/platform/s5p-tv/mixer.h
43626+++ b/drivers/media/platform/s5p-tv/mixer.h
43627@@ -156,7 +156,7 @@ struct mxr_layer {
43628 /** layer index (unique identifier) */
43629 int idx;
43630 /** callbacks for layer methods */
43631- struct mxr_layer_ops ops;
43632+ struct mxr_layer_ops *ops;
43633 /** format array */
43634 const struct mxr_format **fmt_array;
43635 /** size of format array */
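Turning the embedded struct mxr_layer_ops ops into a pointer removes a writable per-object copy of a function-pointer table; every layer now points at one static table that constify can lock down, and the follow-up hunks mechanically switch ops. to ops->. In miniature (names shortened, illustrative):

    struct ops {
            void (*stream_set)(void *layer, int enable);
    };

    static void graph_stream_set(void *layer, int enable) { /* hw on/off */ }

    static const struct ops graph_ops = {
            .stream_set = graph_stream_set,
    };

    struct layer {
            const struct ops *ops;  /* was: struct ops ops; (writable copy) */
    };

    /* create: layer->ops = &graph_ops;  call: layer->ops->stream_set(l, 1); */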
43636diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
43637index 74344c7..a39e70e 100644
43638--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
43639+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
43640@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
43641 {
43642 struct mxr_layer *layer;
43643 int ret;
43644- struct mxr_layer_ops ops = {
43645+ static struct mxr_layer_ops ops = {
43646 .release = mxr_graph_layer_release,
43647 .buffer_set = mxr_graph_buffer_set,
43648 .stream_set = mxr_graph_stream_set,
43649diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
43650index b713403..53cb5ad 100644
43651--- a/drivers/media/platform/s5p-tv/mixer_reg.c
43652+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
43653@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
43654 layer->update_buf = next;
43655 }
43656
43657- layer->ops.buffer_set(layer, layer->update_buf);
43658+ layer->ops->buffer_set(layer, layer->update_buf);
43659
43660 if (done && done != layer->shadow_buf)
43661 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
43662diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
43663index 81b97db..b089ccd 100644
43664--- a/drivers/media/platform/s5p-tv/mixer_video.c
43665+++ b/drivers/media/platform/s5p-tv/mixer_video.c
43666@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
43667 layer->geo.src.height = layer->geo.src.full_height;
43668
43669 mxr_geometry_dump(mdev, &layer->geo);
43670- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
43671+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
43672 mxr_geometry_dump(mdev, &layer->geo);
43673 }
43674
43675@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
43676 layer->geo.dst.full_width = mbus_fmt.width;
43677 layer->geo.dst.full_height = mbus_fmt.height;
43678 layer->geo.dst.field = mbus_fmt.field;
43679- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
43680+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
43681
43682 mxr_geometry_dump(mdev, &layer->geo);
43683 }
43684@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
43685 /* set source size to highest accepted value */
43686 geo->src.full_width = max(geo->dst.full_width, pix->width);
43687 geo->src.full_height = max(geo->dst.full_height, pix->height);
43688- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
43689+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
43690 mxr_geometry_dump(mdev, &layer->geo);
43691 /* set cropping to total visible screen */
43692 geo->src.width = pix->width;
43693@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
43694 geo->src.x_offset = 0;
43695 geo->src.y_offset = 0;
43696 /* assure consistency of geometry */
43697- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
43698+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
43699 mxr_geometry_dump(mdev, &layer->geo);
43700 /* set full size to lowest possible value */
43701 geo->src.full_width = 0;
43702 geo->src.full_height = 0;
43703- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
43704+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
43705 mxr_geometry_dump(mdev, &layer->geo);
43706
43707 /* returning results */
43708@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
43709 target->width = s->r.width;
43710 target->height = s->r.height;
43711
43712- layer->ops.fix_geometry(layer, stage, s->flags);
43713+ layer->ops->fix_geometry(layer, stage, s->flags);
43714
43715 /* retrieve update selection rectangle */
43716 res.left = target->x_offset;
43717@@ -955,13 +955,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
43718 mxr_output_get(mdev);
43719
43720 mxr_layer_update_output(layer);
43721- layer->ops.format_set(layer);
43722+ layer->ops->format_set(layer);
43723 /* enabling layer in hardware */
43724 spin_lock_irqsave(&layer->enq_slock, flags);
43725 layer->state = MXR_LAYER_STREAMING;
43726 spin_unlock_irqrestore(&layer->enq_slock, flags);
43727
43728- layer->ops.stream_set(layer, MXR_ENABLE);
43729+ layer->ops->stream_set(layer, MXR_ENABLE);
43730 mxr_streamer_get(mdev);
43731
43732 return 0;
43733@@ -1031,7 +1031,7 @@ static int stop_streaming(struct vb2_queue *vq)
43734 spin_unlock_irqrestore(&layer->enq_slock, flags);
43735
43736 /* disabling layer in hardware */
43737- layer->ops.stream_set(layer, MXR_DISABLE);
43738+ layer->ops->stream_set(layer, MXR_DISABLE);
43739 /* remove one streamer */
43740 mxr_streamer_put(mdev);
43741 /* allow changes in output configuration */
43742@@ -1070,8 +1070,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
43743
43744 void mxr_layer_release(struct mxr_layer *layer)
43745 {
43746- if (layer->ops.release)
43747- layer->ops.release(layer);
43748+ if (layer->ops->release)
43749+ layer->ops->release(layer);
43750 }
43751
43752 void mxr_base_layer_release(struct mxr_layer *layer)
43753@@ -1097,7 +1097,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
43754
43755 layer->mdev = mdev;
43756 layer->idx = idx;
43757- layer->ops = *ops;
43758+ layer->ops = ops;
43759
43760 spin_lock_init(&layer->enq_slock);
43761 INIT_LIST_HEAD(&layer->enq_list);
43762diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
43763index c9388c4..ce71ece 100644
43764--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
43765+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
43766@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
43767 {
43768 struct mxr_layer *layer;
43769 int ret;
43770- struct mxr_layer_ops ops = {
43771+ static struct mxr_layer_ops ops = {
43772 .release = mxr_vp_layer_release,
43773 .buffer_set = mxr_vp_buffer_set,
43774 .stream_set = mxr_vp_stream_set,
43775diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c
43776index 2d4e73b..8b4d5b6 100644
43777--- a/drivers/media/platform/vivi.c
43778+++ b/drivers/media/platform/vivi.c
43779@@ -58,8 +58,8 @@ MODULE_AUTHOR("Mauro Carvalho Chehab, Ted Walther and John Sokol");
43780 MODULE_LICENSE("Dual BSD/GPL");
43781 MODULE_VERSION(VIVI_VERSION);
43782
43783-static unsigned video_nr = -1;
43784-module_param(video_nr, uint, 0644);
43785+static int video_nr = -1;
43786+module_param(video_nr, int, 0644);
43787 MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect");
43788
43789 static unsigned n_devs = 1;
43790diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
43791index 545c04c..a14bded 100644
43792--- a/drivers/media/radio/radio-cadet.c
43793+++ b/drivers/media/radio/radio-cadet.c
43794@@ -324,6 +324,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
43795 unsigned char readbuf[RDS_BUFFER];
43796 int i = 0;
43797
43798+ if (count > RDS_BUFFER)
43799+ return -EFAULT;
43800 mutex_lock(&dev->lock);
43801 if (dev->rdsstat == 0)
43802 cadet_start_rds(dev);
43803@@ -339,7 +341,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
43804 while (i < count && dev->rdsin != dev->rdsout)
43805 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
43806
43807- if (i && copy_to_user(data, readbuf, i))
43808+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
43809 i = -EFAULT;
43810 unlock:
43811 mutex_unlock(&dev->lock);
43812diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
43813index 5236035..c622c74 100644
43814--- a/drivers/media/radio/radio-maxiradio.c
43815+++ b/drivers/media/radio/radio-maxiradio.c
43816@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
43817 /* TEA5757 pin mappings */
43818 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
43819
43820-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
43821+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
43822
43823 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
43824 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
43825diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
43826index 050b3bb..79f62b9 100644
43827--- a/drivers/media/radio/radio-shark.c
43828+++ b/drivers/media/radio/radio-shark.c
43829@@ -79,7 +79,7 @@ struct shark_device {
43830 u32 last_val;
43831 };
43832
43833-static atomic_t shark_instance = ATOMIC_INIT(0);
43834+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
43835
43836 static void shark_write_val(struct snd_tea575x *tea, u32 val)
43837 {
43838diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
43839index 8654e0d..0608a64 100644
43840--- a/drivers/media/radio/radio-shark2.c
43841+++ b/drivers/media/radio/radio-shark2.c
43842@@ -74,7 +74,7 @@ struct shark_device {
43843 u8 *transfer_buffer;
43844 };
43845
43846-static atomic_t shark_instance = ATOMIC_INIT(0);
43847+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
43848
43849 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
43850 {
43851diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
43852index 2fd9009..278cc1e 100644
43853--- a/drivers/media/radio/radio-si476x.c
43854+++ b/drivers/media/radio/radio-si476x.c
43855@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
43856 struct si476x_radio *radio;
43857 struct v4l2_ctrl *ctrl;
43858
43859- static atomic_t instance = ATOMIC_INIT(0);
43860+ static atomic_unchecked_t instance = ATOMIC_INIT(0);
43861
43862 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
43863 if (!radio)
43864diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
43865index 46da365..3ba4206 100644
43866--- a/drivers/media/rc/rc-main.c
43867+++ b/drivers/media/rc/rc-main.c
43868@@ -1065,7 +1065,7 @@ EXPORT_SYMBOL_GPL(rc_free_device);
43869 int rc_register_device(struct rc_dev *dev)
43870 {
43871 static bool raw_init = false; /* raw decoders loaded? */
43872- static atomic_t devno = ATOMIC_INIT(0);
43873+ static atomic_unchecked_t devno = ATOMIC_INIT(0);
43874 struct rc_map *rc_map;
43875 const char *path;
43876 int rc;
43877@@ -1096,7 +1096,7 @@ int rc_register_device(struct rc_dev *dev)
43878 */
43879 mutex_lock(&dev->lock);
43880
43881- dev->devno = (unsigned long)(atomic_inc_return(&devno) - 1);
43882+ dev->devno = (unsigned long)(atomic_inc_return_unchecked(&devno) - 1);
43883 dev_set_name(&dev->dev, "rc%ld", dev->devno);
43884 dev_set_drvdata(&dev->dev, dev);
43885 rc = device_add(&dev->dev);
43886diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
43887index 20e345d..da56fe4 100644
43888--- a/drivers/media/usb/dvb-usb/cxusb.c
43889+++ b/drivers/media/usb/dvb-usb/cxusb.c
43890@@ -1101,7 +1101,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
43891
43892 struct dib0700_adapter_state {
43893 int (*set_param_save) (struct dvb_frontend *);
43894-};
43895+} __no_const;
43896
43897 static int dib7070_set_param_override(struct dvb_frontend *fe)
43898 {
43899diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
43900index c1a63b2..dbcbfb6 100644
43901--- a/drivers/media/usb/dvb-usb/dw2102.c
43902+++ b/drivers/media/usb/dvb-usb/dw2102.c
43903@@ -121,7 +121,7 @@ struct su3000_state {
43904
43905 struct s6x0_state {
43906 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
43907-};
43908+} __no_const;
43909
43910 /* debug */
43911 static int dvb_usb_dw2102_debug;
43912diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
43913index 8f7a6a4..eb0e1d4 100644
43914--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
43915+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
43916@@ -326,7 +326,7 @@ struct v4l2_buffer32 {
43917 __u32 reserved;
43918 };
43919
43920-static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
43921+static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
43922 enum v4l2_memory memory)
43923 {
43924 void __user *up_pln;
43925@@ -355,7 +355,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
43926 return 0;
43927 }
43928
43929-static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
43930+static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
43931 enum v4l2_memory memory)
43932 {
43933 if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
43934@@ -425,7 +425,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
43935 * by passing a very big num_planes value */
43936 uplane = compat_alloc_user_space(num_planes *
43937 sizeof(struct v4l2_plane));
43938- kp->m.planes = uplane;
43939+ kp->m.planes = (struct v4l2_plane __force_kernel *)uplane;
43940
43941 while (--num_planes >= 0) {
43942 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
43943@@ -496,7 +496,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
43944 if (num_planes == 0)
43945 return 0;
43946
43947- uplane = kp->m.planes;
43948+ uplane = (struct v4l2_plane __force_user *)kp->m.planes;
43949 if (get_user(p, &up->m.planes))
43950 return -EFAULT;
43951 uplane32 = compat_ptr(p);
43952@@ -550,7 +550,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
43953 get_user(kp->capability, &up->capability) ||
43954 get_user(kp->flags, &up->flags))
43955 return -EFAULT;
43956- kp->base = compat_ptr(tmp);
43957+ kp->base = (void __force_kernel *)compat_ptr(tmp);
43958 get_v4l2_pix_format(&kp->fmt, &up->fmt);
43959 return 0;
43960 }
43961@@ -656,7 +656,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
43962 n * sizeof(struct v4l2_ext_control32)))
43963 return -EFAULT;
43964 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
43965- kp->controls = kcontrols;
43966+ kp->controls = (struct v4l2_ext_control __force_kernel *)kcontrols;
43967 while (--n >= 0) {
43968 if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
43969 return -EFAULT;
43970@@ -678,7 +678,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
43971 static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
43972 {
43973 struct v4l2_ext_control32 __user *ucontrols;
43974- struct v4l2_ext_control __user *kcontrols = kp->controls;
43975+ struct v4l2_ext_control __user *kcontrols = (struct v4l2_ext_control __force_user *)kp->controls;
43976 int n = kp->count;
43977 compat_caddr_t p;
43978
43979@@ -772,7 +772,7 @@ static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subde
43980 put_user(kp->start_block, &up->start_block) ||
43981 put_user(kp->blocks, &up->blocks) ||
43982 put_user(tmp, &up->edid) ||
43983- copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
43984+ copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
43985 return -EFAULT;
43986 return 0;
43987 }
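The v4l2 compat hunks are sparse address-space fixes: parameters that receive user pointers gain __user, and the deliberate crossings (compat_alloc_user_space() buffers parked in kernel-side struct fields) are spelled __force_kernel/__force_user so the checker stays quiet without hiding real bugs. Assumed definitions behind those casts:

    #ifdef __CHECKER__
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force))
    #else
    # define __user
    # define __force
    #endif

    /* grsec shorthands (shape assumed): cross address spaces on purpose */
    #define __force_user   __force __user
    #define __force_kernel __force      /* kernel space is address_space(0) */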
43988diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
43989index fb46790..ae1f8fa 100644
43990--- a/drivers/media/v4l2-core/v4l2-ctrls.c
43991+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
43992@@ -1396,8 +1396,8 @@ static int validate_new(const struct v4l2_ctrl *ctrl,
43993 return 0;
43994
43995 case V4L2_CTRL_TYPE_STRING:
43996- len = strlen(c->string);
43997- if (len < ctrl->minimum)
43998+ len = strlen_user(c->string);
43999+ if (!len || len < ctrl->minimum)
44000 return -ERANGE;
44001 if ((len - ctrl->minimum) % ctrl->step)
44002 return -ERANGE;
44003diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
44004index 02d1b63..5fd6b16 100644
44005--- a/drivers/media/v4l2-core/v4l2-device.c
44006+++ b/drivers/media/v4l2-core/v4l2-device.c
44007@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
44008 EXPORT_SYMBOL_GPL(v4l2_device_put);
44009
44010 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
44011- atomic_t *instance)
44012+ atomic_unchecked_t *instance)
44013 {
44014- int num = atomic_inc_return(instance) - 1;
44015+ int num = atomic_inc_return_unchecked(instance) - 1;
44016 int len = strlen(basename);
44017
44018 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
44019diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
44020index 68e6b5e..8eb2aec 100644
44021--- a/drivers/media/v4l2-core/v4l2-ioctl.c
44022+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
44023@@ -1939,7 +1939,8 @@ struct v4l2_ioctl_info {
44024 struct file *file, void *fh, void *p);
44025 } u;
44026 void (*debug)(const void *arg, bool write_only);
44027-};
44028+} __do_const;
44029+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
44030
44031 /* This control needs a priority check */
44032 #define INFO_FL_PRIO (1 << 0)
44033@@ -2120,7 +2121,7 @@ static long __video_do_ioctl(struct file *file,
44034 struct video_device *vfd = video_devdata(file);
44035 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
44036 bool write_only = false;
44037- struct v4l2_ioctl_info default_info;
44038+ v4l2_ioctl_info_no_const default_info;
44039 const struct v4l2_ioctl_info *info;
44040 void *fh = file->private_data;
44041 struct v4l2_fh *vfh = NULL;
44042@@ -2194,7 +2195,7 @@ done:
44043 }
44044
44045 static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
44046- void * __user *user_ptr, void ***kernel_ptr)
44047+ void __user **user_ptr, void ***kernel_ptr)
44048 {
44049 int ret = 0;
44050
44051@@ -2210,7 +2211,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
44052 ret = -EINVAL;
44053 break;
44054 }
44055- *user_ptr = (void __user *)buf->m.planes;
44056+ *user_ptr = (void __force_user *)buf->m.planes;
44057 *kernel_ptr = (void *)&buf->m.planes;
44058 *array_size = sizeof(struct v4l2_plane) * buf->length;
44059 ret = 1;
44060@@ -2245,7 +2246,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
44061 ret = -EINVAL;
44062 break;
44063 }
44064- *user_ptr = (void __user *)ctrls->controls;
44065+ *user_ptr = (void __force_user *)ctrls->controls;
44066 *kernel_ptr = (void *)&ctrls->controls;
44067 *array_size = sizeof(struct v4l2_ext_control)
44068 * ctrls->count;
44069@@ -2340,7 +2341,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
44070 err = -ENOTTY;
44071
44072 if (has_array_args) {
44073- *kernel_ptr = user_ptr;
44074+ *kernel_ptr = (void __force_kernel *)user_ptr;
44075 if (copy_to_user(user_ptr, mbuf, array_size))
44076 err = -EFAULT;
44077 goto out_array_args;
44078diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
44079index 767ff4d..c69d259 100644
44080--- a/drivers/message/fusion/mptbase.c
44081+++ b/drivers/message/fusion/mptbase.c
44082@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
44083 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
44084 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
44085
44086+#ifdef CONFIG_GRKERNSEC_HIDESYM
44087+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
44088+#else
44089 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
44090 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
44091+#endif
44092+
44093 /*
44094 * Rounding UP to nearest 4-kB boundary here...
44095 */
44096@@ -6769,7 +6774,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
44097 ioc->facts.GlobalCredits);
44098
44099 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
44100+#ifdef CONFIG_GRKERNSEC_HIDESYM
44101+ NULL, NULL);
44102+#else
44103 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
44104+#endif
44105 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
44106 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
44107 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
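CONFIG_GRKERNSEC_HIDESYM governs both mptbase hunks: raw kernel pointers in a world-readable proc file leak heap and KASLR layout, so the %p arguments collapse to NULL when the option is set. The same idea as a tiny helper (proc_ptr is a hypothetical name; the patch open-codes the #ifdef instead):

    #ifdef CONFIG_GRKERNSEC_HIDESYM
    # define proc_ptr(p) ((const void *)NULL)
    #else
    # define proc_ptr(p) ((const void *)(p))
    #endif

    /* seq_printf(m, "RequestFrames @ 0x%p\n", proc_ptr(ioc->req_frames)); */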
44108diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
44109index dd239bd..689c4f7 100644
44110--- a/drivers/message/fusion/mptsas.c
44111+++ b/drivers/message/fusion/mptsas.c
44112@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
44113 return 0;
44114 }
44115
44116+static inline void
44117+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
44118+{
44119+ if (phy_info->port_details) {
44120+ phy_info->port_details->rphy = rphy;
44121+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
44122+ ioc->name, rphy));
44123+ }
44124+
44125+ if (rphy) {
44126+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
44127+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
44128+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
44129+ ioc->name, rphy, rphy->dev.release));
44130+ }
44131+}
44132+
44133 /* no mutex */
44134 static void
44135 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
44136@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
44137 return NULL;
44138 }
44139
44140-static inline void
44141-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
44142-{
44143- if (phy_info->port_details) {
44144- phy_info->port_details->rphy = rphy;
44145- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
44146- ioc->name, rphy));
44147- }
44148-
44149- if (rphy) {
44150- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
44151- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
44152- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
44153- ioc->name, rphy, rphy->dev.release));
44154- }
44155-}
44156-
44157 static inline struct sas_port *
44158 mptsas_get_port(struct mptsas_phyinfo *phy_info)
44159 {
44160diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
44161index 727819c..ad74694 100644
44162--- a/drivers/message/fusion/mptscsih.c
44163+++ b/drivers/message/fusion/mptscsih.c
44164@@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
44165
44166 h = shost_priv(SChost);
44167
44168- if (h) {
44169- if (h->info_kbuf == NULL)
44170- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
44171- return h->info_kbuf;
44172- h->info_kbuf[0] = '\0';
44173+ if (!h)
44174+ return NULL;
44175
44176- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
44177- h->info_kbuf[size-1] = '\0';
44178- }
44179+ if (h->info_kbuf == NULL)
44180+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
44181+ return h->info_kbuf;
44182+ h->info_kbuf[0] = '\0';
44183+
44184+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
44185+ h->info_kbuf[size-1] = '\0';
44186
44187 return h->info_kbuf;
44188 }
44189diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
44190index b7d87cd..3fb36da 100644
44191--- a/drivers/message/i2o/i2o_proc.c
44192+++ b/drivers/message/i2o/i2o_proc.c
44193@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
44194 "Array Controller Device"
44195 };
44196
44197-static char *chtostr(char *tmp, u8 *chars, int n)
44198-{
44199- tmp[0] = 0;
44200- return strncat(tmp, (char *)chars, n);
44201-}
44202-
44203 static int i2o_report_query_status(struct seq_file *seq, int block_status,
44204 char *group)
44205 {
44206@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
44207 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
44208 {
44209 struct i2o_controller *c = (struct i2o_controller *)seq->private;
44210- static u32 work32[5];
44211- static u8 *work8 = (u8 *) work32;
44212- static u16 *work16 = (u16 *) work32;
44213+ u32 work32[5];
44214+ u8 *work8 = (u8 *) work32;
44215+ u16 *work16 = (u16 *) work32;
44216 int token;
44217 u32 hwcap;
44218
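Dropping static from these scratch buffers fixes a plain race: every concurrent reader of the proc file used to share one buffer (and the work16/work8 aliases into it). Miniature of the before/after, runnable in userspace:

    #include <stdint.h>
    #include <string.h>

    static void query_hw(uint32_t *buf, size_t words)
    {
            memset(buf, 0, words * sizeof(*buf)); /* stand-in for the query */
    }

    static int show_hw(void)
    {
            uint32_t work32[5];     /* was: static u32 work32[5]; */

            query_hw(work32, 5);    /* each reader now fills a private copy */
            return 0;
    }

    int main(void) { return show_hw(); }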
44219@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
44220 } *result;
44221
44222 i2o_exec_execute_ddm_table ddm_table;
44223- char tmp[28 + 1];
44224
44225 result = kmalloc(sizeof(*result), GFP_KERNEL);
44226 if (!result)
44227@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
44228
44229 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
44230 seq_printf(seq, "%-#8x", ddm_table.module_id);
44231- seq_printf(seq, "%-29s",
44232- chtostr(tmp, ddm_table.module_name_version, 28));
44233+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
44234 seq_printf(seq, "%9d ", ddm_table.data_size);
44235 seq_printf(seq, "%8d", ddm_table.code_size);
44236
44237@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
44238
44239 i2o_driver_result_table *result;
44240 i2o_driver_store_table *dst;
44241- char tmp[28 + 1];
44242
44243 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
44244 if (result == NULL)
44245@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
44246
44247 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
44248 seq_printf(seq, "%-#8x", dst->module_id);
44249- seq_printf(seq, "%-29s",
44250- chtostr(tmp, dst->module_name_version, 28));
44251- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
44252+ seq_printf(seq, "%-.28s", dst->module_name_version);
44253+ seq_printf(seq, "%-.8s", dst->date);
44254 seq_printf(seq, "%8d ", dst->module_size);
44255 seq_printf(seq, "%8d ", dst->mpb_size);
44256 seq_printf(seq, "0x%04x", dst->module_flags);
44257@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
44258 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
44259 {
44260 struct i2o_device *d = (struct i2o_device *)seq->private;
44261- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
44262+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
44263 // == (allow) 512d bytes (max)
44264- static u16 *work16 = (u16 *) work32;
44265+ u16 *work16 = (u16 *) work32;
44266 int token;
44267- char tmp[16 + 1];
44268
44269 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
44270
44271@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
44272 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
44273 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
44274 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
44275- seq_printf(seq, "Vendor info : %s\n",
44276- chtostr(tmp, (u8 *) (work32 + 2), 16));
44277- seq_printf(seq, "Product info : %s\n",
44278- chtostr(tmp, (u8 *) (work32 + 6), 16));
44279- seq_printf(seq, "Description : %s\n",
44280- chtostr(tmp, (u8 *) (work32 + 10), 16));
44281- seq_printf(seq, "Product rev. : %s\n",
44282- chtostr(tmp, (u8 *) (work32 + 14), 8));
44283+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
44284+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
44285+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
44286+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
44287
44288 seq_printf(seq, "Serial number : ");
44289 print_serial_number(seq, (u8 *) (work32 + 16),
44290@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
44291 u8 pad[256]; // allow up to 256 byte (max) serial number
44292 } result;
44293
44294- char tmp[24 + 1];
44295-
44296 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
44297
44298 if (token < 0) {
44299@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
44300 }
44301
44302 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
44303- seq_printf(seq, "Module name : %s\n",
44304- chtostr(tmp, result.module_name, 24));
44305- seq_printf(seq, "Module revision : %s\n",
44306- chtostr(tmp, result.module_rev, 8));
44307+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
44308+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
44309
44310 seq_printf(seq, "Serial number : ");
44311 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
44312@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
44313 u8 instance_number[4];
44314 } result;
44315
44316- char tmp[64 + 1];
44317-
44318 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
44319
44320 if (token < 0) {
44321@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
44322 return 0;
44323 }
44324
44325- seq_printf(seq, "Device name : %s\n",
44326- chtostr(tmp, result.device_name, 64));
44327- seq_printf(seq, "Service name : %s\n",
44328- chtostr(tmp, result.service_name, 64));
44329- seq_printf(seq, "Physical name : %s\n",
44330- chtostr(tmp, result.physical_location, 64));
44331- seq_printf(seq, "Instance number : %s\n",
44332- chtostr(tmp, result.instance_number, 4));
44333+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
44334+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
44335+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
44336+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
44337
44338 return 0;
44339 }
44340@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
44341 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
44342 {
44343 struct i2o_device *d = (struct i2o_device *)seq->private;
44344- static u32 work32[12];
44345- static u16 *work16 = (u16 *) work32;
44346- static u8 *work8 = (u8 *) work32;
44347+ u32 work32[12];
44348+ u16 *work16 = (u16 *) work32;
44349+ u8 *work8 = (u8 *) work32;
44350 int token;
44351
44352 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
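
The i2o_proc.c hunks above retire the chtostr() helper, which existed only to copy a fixed-width, possibly unterminated byte field into a temporary so it could be NUL-terminated for printing. A printf precision such as "%.28s" already stops after at most that many bytes whether or not a terminator is present, so the stack temporaries (and the risk of their length constants drifting out of sync) disappear. A small demonstration of the idiom:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* A fixed-width field as it might arrive from hardware:
         * exactly 8 bytes, no NUL terminator anywhere. */
        char field[8];
        memcpy(field, "ABCDEFGH", 8);

        /* "%.8s" prints at most 8 bytes, so the missing NUL is fine. */
        printf("rev: %.8s\n", field);

        /* The limit can also be passed at run time with "%.*s". */
        printf("rev: %.*s\n", (int)sizeof(field), field);
        return 0;
    }
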
44353diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
44354index a8c08f3..155fe3d 100644
44355--- a/drivers/message/i2o/iop.c
44356+++ b/drivers/message/i2o/iop.c
44357@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
44358
44359 spin_lock_irqsave(&c->context_list_lock, flags);
44360
44361- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
44362- atomic_inc(&c->context_list_counter);
44363+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
44364+ atomic_inc_unchecked(&c->context_list_counter);
44365
44366- entry->context = atomic_read(&c->context_list_counter);
44367+ entry->context = atomic_read_unchecked(&c->context_list_counter);
44368
44369 list_add(&entry->list, &c->context_list);
44370
44371@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
44372
44373 #if BITS_PER_LONG == 64
44374 spin_lock_init(&c->context_list_lock);
44375- atomic_set(&c->context_list_counter, 0);
44376+ atomic_set_unchecked(&c->context_list_counter, 0);
44377 INIT_LIST_HEAD(&c->context_list);
44378 #endif
44379
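
atomic_inc_and_test_unchecked() and atomic_read_unchecked() are not mainline API; they are the PaX/grsecurity variants that exempt a counter from the REFCOUNT overflow protection. The context-list counter here is an ID generator that may legitimately wrap, so trapping on overflow would be a false positive. A rough userspace analogue of the unchecked side, with the atomics stubbed via GCC builtins:

    #include <stdio.h>

    /* Checked counters trap on overflow under PaX REFCOUNT; "unchecked"
     * ones are plain wrapping atomics.  This stub models only the
     * unchecked side, using GCC/Clang __atomic builtins. */
    typedef struct { volatile int v; } atomic_unchecked_t;

    static int atomic_inc_return_unchecked(atomic_unchecked_t *a)
    {
        return __atomic_add_fetch(&a->v, 1, __ATOMIC_SEQ_CST);
    }

    static int atomic_read_unchecked(const atomic_unchecked_t *a)
    {
        return __atomic_load_n(&a->v, __ATOMIC_SEQ_CST);
    }

    int main(void)
    {
        atomic_unchecked_t id = { 0 };
        int i;

        /* An ID generator may wrap past INT_MAX by design; the
         * unchecked type documents that wrapping is intentional. */
        for (i = 0; i < 3; i++)
            printf("ctx id %d\n", atomic_inc_return_unchecked(&id));
        printf("current %d\n", atomic_read_unchecked(&id));
        return 0;
    }
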
44380diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
44381index fcbb2e9..2635e11 100644
44382--- a/drivers/mfd/janz-cmodio.c
44383+++ b/drivers/mfd/janz-cmodio.c
44384@@ -13,6 +13,7 @@
44385
44386 #include <linux/kernel.h>
44387 #include <linux/module.h>
44388+#include <linux/slab.h>
44389 #include <linux/init.h>
44390 #include <linux/pci.h>
44391 #include <linux/interrupt.h>
44392diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
44393index 176aa26..27811b2 100644
44394--- a/drivers/mfd/max8925-i2c.c
44395+++ b/drivers/mfd/max8925-i2c.c
44396@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
44397 const struct i2c_device_id *id)
44398 {
44399 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
44400- static struct max8925_chip *chip;
44401+ struct max8925_chip *chip;
44402 struct device_node *node = client->dev.of_node;
44403
44404 if (node && !pdata) {
44405diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
44406index c0f608e..286f8ec 100644
44407--- a/drivers/mfd/tps65910.c
44408+++ b/drivers/mfd/tps65910.c
44409@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
44410 struct tps65910_platform_data *pdata)
44411 {
44412 int ret = 0;
44413- static struct regmap_irq_chip *tps6591x_irqs_chip;
44414+ struct regmap_irq_chip *tps6591x_irqs_chip;
44415
44416 if (!irq) {
44417 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
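
The max8925 and tps65910 hunks above (like the cxgb4, qlcnic and i2o seq_show changes elsewhere in this patch) delete a stray `static` from function-local variables. A static local is one shared object for all callers, so two concurrent probes or /proc readers race on the same storage; an automatic variable gives each invocation its own copy. The difference in miniature:

    #include <stdio.h>

    /* One copy for the whole program: the second call observes the
     * first caller's state, and threads would race on it. */
    static int *counter_with_static(void)
    {
        static int n;
        n++;
        return &n;
    }

    /* Automatic storage: every invocation gets a fresh, private copy. */
    static int counter_without_static(int seed)
    {
        int n = seed;
        return n + 1;
    }

    int main(void)
    {
        int a = *counter_with_static();
        int b = *counter_with_static();

        printf("static local: %d then %d (state persists)\n", a, b);
        printf("auto local:   %d then %d (independent)\n",
               counter_without_static(0), counter_without_static(0));
        return 0;
    }
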
44418diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
44419index 9aa6d1e..1631bfc 100644
44420--- a/drivers/mfd/twl4030-irq.c
44421+++ b/drivers/mfd/twl4030-irq.c
44422@@ -35,6 +35,7 @@
44423 #include <linux/of.h>
44424 #include <linux/irqdomain.h>
44425 #include <linux/i2c/twl.h>
44426+#include <asm/pgtable.h>
44427
44428 #include "twl-core.h"
44429
44430@@ -726,10 +727,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
44431 * Install an irq handler for each of the SIH modules;
44432 * clone dummy irq_chip since PIH can't *do* anything
44433 */
44434- twl4030_irq_chip = dummy_irq_chip;
44435- twl4030_irq_chip.name = "twl4030";
44436+ pax_open_kernel();
44437+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
44438+ *(const char **)&twl4030_irq_chip.name = "twl4030";
44439
44440- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
44441+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
44442+ pax_close_kernel();
44443
44444 for (i = irq_base; i < irq_end; i++) {
44445 irq_set_chip_and_handler(i, &twl4030_irq_chip,
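
twl4030_irq_chip is one of many ops/irq_chip objects that PaX's constify plugin places in read-only memory; pax_open_kernel()/pax_close_kernel() open a brief write window so one-time init assignments can still land, and the pointer casts sidestep the const qualifiers the plugin adds. This is PaX-specific machinery, not a mainline API. A compile-and-run userspace sketch of the shape, with the pax calls stubbed out:

    #include <stdio.h>
    #include <string.h>

    /* Stubs: in the patch these toggle write protection (e.g. CR0.WP)
     * so a constified object can be written during init. */
    static void pax_open_kernel(void)  { /* allow writes to .rodata */ }
    static void pax_close_kernel(void) { /* restore protection */ }

    struct irq_chip {
        const char *name;
        void (*irq_ack)(int irq);
    };

    static void dummy_ack(int irq) { (void)irq; }

    /* Pretend the plugin made these effectively read-only after init. */
    static struct irq_chip dummy_irq_chip = { "dummy", dummy_ack };
    static struct irq_chip twl_irq_chip;        /* cloned at init time */

    int main(void)
    {
        pax_open_kernel();
        memcpy(&twl_irq_chip, &dummy_irq_chip, sizeof(twl_irq_chip));
        /* The cast mirrors the patch, which must strip the const the
         * plugin added to the member. */
        *(const char **)&twl_irq_chip.name = "twl4030";
        pax_close_kernel();

        printf("chip %s ready\n", twl_irq_chip.name);
        return 0;
    }
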
44446diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
44447index 464419b..64bae8d 100644
44448--- a/drivers/misc/c2port/core.c
44449+++ b/drivers/misc/c2port/core.c
44450@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
44451 goto error_idr_alloc;
44452 c2dev->id = ret;
44453
44454- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
44455+ pax_open_kernel();
44456+ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
44457+ pax_close_kernel();
44458
44459 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
44460 "c2port%d", c2dev->id);
44461diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
44462index 9c34e57..b981cda 100644
44463--- a/drivers/misc/eeprom/sunxi_sid.c
44464+++ b/drivers/misc/eeprom/sunxi_sid.c
44465@@ -127,7 +127,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
44466
44467 platform_set_drvdata(pdev, sid_data);
44468
44469- sid_bin_attr.size = sid_data->keysize;
44470+ pax_open_kernel();
44471+ *(size_t *)&sid_bin_attr.size = sid_data->keysize;
44472+ pax_close_kernel();
44473 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
44474 return -ENODEV;
44475
44476diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
44477index 36f5d52..32311c3 100644
44478--- a/drivers/misc/kgdbts.c
44479+++ b/drivers/misc/kgdbts.c
44480@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
44481 char before[BREAK_INSTR_SIZE];
44482 char after[BREAK_INSTR_SIZE];
44483
44484- probe_kernel_read(before, (char *)kgdbts_break_test,
44485+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
44486 BREAK_INSTR_SIZE);
44487 init_simple_test();
44488 ts.tst = plant_and_detach_test;
44489@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
44490 /* Activate test with initial breakpoint */
44491 if (!is_early)
44492 kgdb_breakpoint();
44493- probe_kernel_read(after, (char *)kgdbts_break_test,
44494+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
44495 BREAK_INSTR_SIZE);
44496 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
44497 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
44498diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
44499index 036effe..b3a6336 100644
44500--- a/drivers/misc/lis3lv02d/lis3lv02d.c
44501+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
44502@@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
44503 * the lid is closed. This leads to interrupts as soon as a little move
44504 * is done.
44505 */
44506- atomic_inc(&lis3->count);
44507+ atomic_inc_unchecked(&lis3->count);
44508
44509 wake_up_interruptible(&lis3->misc_wait);
44510 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
44511@@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
44512 if (lis3->pm_dev)
44513 pm_runtime_get_sync(lis3->pm_dev);
44514
44515- atomic_set(&lis3->count, 0);
44516+ atomic_set_unchecked(&lis3->count, 0);
44517 return 0;
44518 }
44519
44520@@ -616,7 +616,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
44521 add_wait_queue(&lis3->misc_wait, &wait);
44522 while (true) {
44523 set_current_state(TASK_INTERRUPTIBLE);
44524- data = atomic_xchg(&lis3->count, 0);
44525+ data = atomic_xchg_unchecked(&lis3->count, 0);
44526 if (data)
44527 break;
44528
44529@@ -657,7 +657,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
44530 struct lis3lv02d, miscdev);
44531
44532 poll_wait(file, &lis3->misc_wait, wait);
44533- if (atomic_read(&lis3->count))
44534+ if (atomic_read_unchecked(&lis3->count))
44535 return POLLIN | POLLRDNORM;
44536 return 0;
44537 }
44538diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
44539index c439c82..1f20f57 100644
44540--- a/drivers/misc/lis3lv02d/lis3lv02d.h
44541+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
44542@@ -297,7 +297,7 @@ struct lis3lv02d {
44543 struct input_polled_dev *idev; /* input device */
44544 struct platform_device *pdev; /* platform device */
44545 struct regulator_bulk_data regulators[2];
44546- atomic_t count; /* interrupt count after last read */
44547+ atomic_unchecked_t count; /* interrupt count after last read */
44548 union axis_conversion ac; /* hw -> logical axis */
44549 int mapped_btns[3];
44550
44551diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
44552index 2f30bad..c4c13d0 100644
44553--- a/drivers/misc/sgi-gru/gruhandles.c
44554+++ b/drivers/misc/sgi-gru/gruhandles.c
44555@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
44556 unsigned long nsec;
44557
44558 nsec = CLKS2NSEC(clks);
44559- atomic_long_inc(&mcs_op_statistics[op].count);
44560- atomic_long_add(nsec, &mcs_op_statistics[op].total);
44561+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
44562+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
44563 if (mcs_op_statistics[op].max < nsec)
44564 mcs_op_statistics[op].max = nsec;
44565 }
44566diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
44567index 4f76359..cdfcb2e 100644
44568--- a/drivers/misc/sgi-gru/gruprocfs.c
44569+++ b/drivers/misc/sgi-gru/gruprocfs.c
44570@@ -32,9 +32,9 @@
44571
44572 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
44573
44574-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
44575+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
44576 {
44577- unsigned long val = atomic_long_read(v);
44578+ unsigned long val = atomic_long_read_unchecked(v);
44579
44580 seq_printf(s, "%16lu %s\n", val, id);
44581 }
44582@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
44583
44584 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
44585 for (op = 0; op < mcsop_last; op++) {
44586- count = atomic_long_read(&mcs_op_statistics[op].count);
44587- total = atomic_long_read(&mcs_op_statistics[op].total);
44588+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
44589+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
44590 max = mcs_op_statistics[op].max;
44591 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
44592 count ? total / count : 0, max);
44593diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
44594index 5c3ce24..4915ccb 100644
44595--- a/drivers/misc/sgi-gru/grutables.h
44596+++ b/drivers/misc/sgi-gru/grutables.h
44597@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
44598 * GRU statistics.
44599 */
44600 struct gru_stats_s {
44601- atomic_long_t vdata_alloc;
44602- atomic_long_t vdata_free;
44603- atomic_long_t gts_alloc;
44604- atomic_long_t gts_free;
44605- atomic_long_t gms_alloc;
44606- atomic_long_t gms_free;
44607- atomic_long_t gts_double_allocate;
44608- atomic_long_t assign_context;
44609- atomic_long_t assign_context_failed;
44610- atomic_long_t free_context;
44611- atomic_long_t load_user_context;
44612- atomic_long_t load_kernel_context;
44613- atomic_long_t lock_kernel_context;
44614- atomic_long_t unlock_kernel_context;
44615- atomic_long_t steal_user_context;
44616- atomic_long_t steal_kernel_context;
44617- atomic_long_t steal_context_failed;
44618- atomic_long_t nopfn;
44619- atomic_long_t asid_new;
44620- atomic_long_t asid_next;
44621- atomic_long_t asid_wrap;
44622- atomic_long_t asid_reuse;
44623- atomic_long_t intr;
44624- atomic_long_t intr_cbr;
44625- atomic_long_t intr_tfh;
44626- atomic_long_t intr_spurious;
44627- atomic_long_t intr_mm_lock_failed;
44628- atomic_long_t call_os;
44629- atomic_long_t call_os_wait_queue;
44630- atomic_long_t user_flush_tlb;
44631- atomic_long_t user_unload_context;
44632- atomic_long_t user_exception;
44633- atomic_long_t set_context_option;
44634- atomic_long_t check_context_retarget_intr;
44635- atomic_long_t check_context_unload;
44636- atomic_long_t tlb_dropin;
44637- atomic_long_t tlb_preload_page;
44638- atomic_long_t tlb_dropin_fail_no_asid;
44639- atomic_long_t tlb_dropin_fail_upm;
44640- atomic_long_t tlb_dropin_fail_invalid;
44641- atomic_long_t tlb_dropin_fail_range_active;
44642- atomic_long_t tlb_dropin_fail_idle;
44643- atomic_long_t tlb_dropin_fail_fmm;
44644- atomic_long_t tlb_dropin_fail_no_exception;
44645- atomic_long_t tfh_stale_on_fault;
44646- atomic_long_t mmu_invalidate_range;
44647- atomic_long_t mmu_invalidate_page;
44648- atomic_long_t flush_tlb;
44649- atomic_long_t flush_tlb_gru;
44650- atomic_long_t flush_tlb_gru_tgh;
44651- atomic_long_t flush_tlb_gru_zero_asid;
44652+ atomic_long_unchecked_t vdata_alloc;
44653+ atomic_long_unchecked_t vdata_free;
44654+ atomic_long_unchecked_t gts_alloc;
44655+ atomic_long_unchecked_t gts_free;
44656+ atomic_long_unchecked_t gms_alloc;
44657+ atomic_long_unchecked_t gms_free;
44658+ atomic_long_unchecked_t gts_double_allocate;
44659+ atomic_long_unchecked_t assign_context;
44660+ atomic_long_unchecked_t assign_context_failed;
44661+ atomic_long_unchecked_t free_context;
44662+ atomic_long_unchecked_t load_user_context;
44663+ atomic_long_unchecked_t load_kernel_context;
44664+ atomic_long_unchecked_t lock_kernel_context;
44665+ atomic_long_unchecked_t unlock_kernel_context;
44666+ atomic_long_unchecked_t steal_user_context;
44667+ atomic_long_unchecked_t steal_kernel_context;
44668+ atomic_long_unchecked_t steal_context_failed;
44669+ atomic_long_unchecked_t nopfn;
44670+ atomic_long_unchecked_t asid_new;
44671+ atomic_long_unchecked_t asid_next;
44672+ atomic_long_unchecked_t asid_wrap;
44673+ atomic_long_unchecked_t asid_reuse;
44674+ atomic_long_unchecked_t intr;
44675+ atomic_long_unchecked_t intr_cbr;
44676+ atomic_long_unchecked_t intr_tfh;
44677+ atomic_long_unchecked_t intr_spurious;
44678+ atomic_long_unchecked_t intr_mm_lock_failed;
44679+ atomic_long_unchecked_t call_os;
44680+ atomic_long_unchecked_t call_os_wait_queue;
44681+ atomic_long_unchecked_t user_flush_tlb;
44682+ atomic_long_unchecked_t user_unload_context;
44683+ atomic_long_unchecked_t user_exception;
44684+ atomic_long_unchecked_t set_context_option;
44685+ atomic_long_unchecked_t check_context_retarget_intr;
44686+ atomic_long_unchecked_t check_context_unload;
44687+ atomic_long_unchecked_t tlb_dropin;
44688+ atomic_long_unchecked_t tlb_preload_page;
44689+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
44690+ atomic_long_unchecked_t tlb_dropin_fail_upm;
44691+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
44692+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
44693+ atomic_long_unchecked_t tlb_dropin_fail_idle;
44694+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
44695+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
44696+ atomic_long_unchecked_t tfh_stale_on_fault;
44697+ atomic_long_unchecked_t mmu_invalidate_range;
44698+ atomic_long_unchecked_t mmu_invalidate_page;
44699+ atomic_long_unchecked_t flush_tlb;
44700+ atomic_long_unchecked_t flush_tlb_gru;
44701+ atomic_long_unchecked_t flush_tlb_gru_tgh;
44702+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
44703
44704- atomic_long_t copy_gpa;
44705- atomic_long_t read_gpa;
44706+ atomic_long_unchecked_t copy_gpa;
44707+ atomic_long_unchecked_t read_gpa;
44708
44709- atomic_long_t mesq_receive;
44710- atomic_long_t mesq_receive_none;
44711- atomic_long_t mesq_send;
44712- atomic_long_t mesq_send_failed;
44713- atomic_long_t mesq_noop;
44714- atomic_long_t mesq_send_unexpected_error;
44715- atomic_long_t mesq_send_lb_overflow;
44716- atomic_long_t mesq_send_qlimit_reached;
44717- atomic_long_t mesq_send_amo_nacked;
44718- atomic_long_t mesq_send_put_nacked;
44719- atomic_long_t mesq_page_overflow;
44720- atomic_long_t mesq_qf_locked;
44721- atomic_long_t mesq_qf_noop_not_full;
44722- atomic_long_t mesq_qf_switch_head_failed;
44723- atomic_long_t mesq_qf_unexpected_error;
44724- atomic_long_t mesq_noop_unexpected_error;
44725- atomic_long_t mesq_noop_lb_overflow;
44726- atomic_long_t mesq_noop_qlimit_reached;
44727- atomic_long_t mesq_noop_amo_nacked;
44728- atomic_long_t mesq_noop_put_nacked;
44729- atomic_long_t mesq_noop_page_overflow;
44730+ atomic_long_unchecked_t mesq_receive;
44731+ atomic_long_unchecked_t mesq_receive_none;
44732+ atomic_long_unchecked_t mesq_send;
44733+ atomic_long_unchecked_t mesq_send_failed;
44734+ atomic_long_unchecked_t mesq_noop;
44735+ atomic_long_unchecked_t mesq_send_unexpected_error;
44736+ atomic_long_unchecked_t mesq_send_lb_overflow;
44737+ atomic_long_unchecked_t mesq_send_qlimit_reached;
44738+ atomic_long_unchecked_t mesq_send_amo_nacked;
44739+ atomic_long_unchecked_t mesq_send_put_nacked;
44740+ atomic_long_unchecked_t mesq_page_overflow;
44741+ atomic_long_unchecked_t mesq_qf_locked;
44742+ atomic_long_unchecked_t mesq_qf_noop_not_full;
44743+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
44744+ atomic_long_unchecked_t mesq_qf_unexpected_error;
44745+ atomic_long_unchecked_t mesq_noop_unexpected_error;
44746+ atomic_long_unchecked_t mesq_noop_lb_overflow;
44747+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
44748+ atomic_long_unchecked_t mesq_noop_amo_nacked;
44749+ atomic_long_unchecked_t mesq_noop_put_nacked;
44750+ atomic_long_unchecked_t mesq_noop_page_overflow;
44751
44752 };
44753
44754@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
44755 tghop_invalidate, mcsop_last};
44756
44757 struct mcs_op_statistic {
44758- atomic_long_t count;
44759- atomic_long_t total;
44760+ atomic_long_unchecked_t count;
44761+ atomic_long_unchecked_t total;
44762 unsigned long max;
44763 };
44764
44765@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
44766
44767 #define STAT(id) do { \
44768 if (gru_options & OPT_STATS) \
44769- atomic_long_inc(&gru_stats.id); \
44770+ atomic_long_inc_unchecked(&gru_stats.id); \
44771 } while (0)
44772
44773 #ifdef CONFIG_SGI_GRU_DEBUG
44774diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
44775index c862cd4..0d176fe 100644
44776--- a/drivers/misc/sgi-xp/xp.h
44777+++ b/drivers/misc/sgi-xp/xp.h
44778@@ -288,7 +288,7 @@ struct xpc_interface {
44779 xpc_notify_func, void *);
44780 void (*received) (short, int, void *);
44781 enum xp_retval (*partid_to_nasids) (short, void *);
44782-};
44783+} __no_const;
44784
44785 extern struct xpc_interface xpc_interface;
44786
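
__no_const is the opt-out annotation: the constify plugin would normally turn a struct consisting only of function pointers into a read-only object, but xpc_interface is rewritten at runtime (xp_main.c below swaps the real handlers in over the xpc_notloaded stubs), so it must stay writable. Outside a plugin build the attribute reduces to nothing, roughly as sketched here:

    #include <stdio.h>

    #ifndef __no_const
    #define __no_const        /* no-op outside PaX plugin builds */
    #endif

    /* All members are function pointers, which is exactly what the
     * constify plugin would make read-only; __no_const keeps the
     * struct writable because its handlers really are swapped at
     * runtime. */
    struct xp_interface {
        void (*connect)(int);
        void (*disconnect)(int);
    } __no_const;

    static void not_loaded(int id)   { printf("xp %d: not loaded\n", id); }
    static void real_connect(int id) { printf("xp %d: connected\n", id); }

    static struct xp_interface iface = { not_loaded, not_loaded };

    int main(void)
    {
        iface.connect(1);                /* stub until registration */
        iface.connect = real_connect;    /* needs a writable struct */
        iface.connect(1);
        return 0;
    }
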
44787diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
44788index 01be66d..e3a0c7e 100644
44789--- a/drivers/misc/sgi-xp/xp_main.c
44790+++ b/drivers/misc/sgi-xp/xp_main.c
44791@@ -78,13 +78,13 @@ xpc_notloaded(void)
44792 }
44793
44794 struct xpc_interface xpc_interface = {
44795- (void (*)(int))xpc_notloaded,
44796- (void (*)(int))xpc_notloaded,
44797- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
44798- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
44799+ .connect = (void (*)(int))xpc_notloaded,
44800+ .disconnect = (void (*)(int))xpc_notloaded,
44801+ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
44802+ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
44803 void *))xpc_notloaded,
44804- (void (*)(short, int, void *))xpc_notloaded,
44805- (enum xp_retval(*)(short, void *))xpc_notloaded
44806+ .received = (void (*)(short, int, void *))xpc_notloaded,
44807+ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
44808 };
44809 EXPORT_SYMBOL_GPL(xpc_interface);
44810
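
The xpc_interface initializer above is also converted from positional to designated form. With several function pointers of similar type, positional initialization silently misbinds members if a field is ever added or reordered; `.member = value` keeps each binding explicit. The same conversion appears below for bna_ioceth_cbfn and the vxge mempool callbacks. Reduced to a toy:

    #include <stdio.h>

    struct ops {
        int  (*open)(int);
        int  (*close)(int);
        void (*reset)(int);
    };

    static int  do_open(int id)  { printf("open %d\n", id);  return 0; }
    static int  do_close(int id) { printf("close %d\n", id); return 0; }
    static void do_reset(int id) { printf("reset %d\n", id); }

    /* Positional: correct only while the member order never changes. */
    static struct ops fragile = { do_open, do_close, do_reset };

    /* Designated: each handler bound by name, order-independent, and
     * the compiler diagnoses a name/type mismatch. */
    static struct ops robust = {
        .reset = do_reset,
        .open  = do_open,
        .close = do_close,
    };

    int main(void)
    {
        fragile.open(1);
        robust.reset(2);
        return 0;
    }
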
44811diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
44812index b94d5f7..7f494c5 100644
44813--- a/drivers/misc/sgi-xp/xpc.h
44814+++ b/drivers/misc/sgi-xp/xpc.h
44815@@ -835,6 +835,7 @@ struct xpc_arch_operations {
44816 void (*received_payload) (struct xpc_channel *, void *);
44817 void (*notify_senders_of_disconnect) (struct xpc_channel *);
44818 };
44819+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
44820
44821 /* struct xpc_partition act_state values (for XPC HB) */
44822
44823@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
44824 /* found in xpc_main.c */
44825 extern struct device *xpc_part;
44826 extern struct device *xpc_chan;
44827-extern struct xpc_arch_operations xpc_arch_ops;
44828+extern xpc_arch_operations_no_const xpc_arch_ops;
44829 extern int xpc_disengage_timelimit;
44830 extern int xpc_disengage_timedout;
44831 extern int xpc_activate_IRQ_rcvd;
44832diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
44833index 82dc574..8539ab2 100644
44834--- a/drivers/misc/sgi-xp/xpc_main.c
44835+++ b/drivers/misc/sgi-xp/xpc_main.c
44836@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
44837 .notifier_call = xpc_system_die,
44838 };
44839
44840-struct xpc_arch_operations xpc_arch_ops;
44841+xpc_arch_operations_no_const xpc_arch_ops;
44842
44843 /*
44844 * Timer function to enforce the timelimit on the partition disengage.
44845@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
44846
44847 if (((die_args->trapnr == X86_TRAP_MF) ||
44848 (die_args->trapnr == X86_TRAP_XF)) &&
44849- !user_mode_vm(die_args->regs))
44850+ !user_mode(die_args->regs))
44851 xpc_die_deactivate();
44852
44853 break;
44854diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
44855index 29d5d98..fea356f 100644
44856--- a/drivers/mmc/card/block.c
44857+++ b/drivers/mmc/card/block.c
44858@@ -575,7 +575,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
44859 if (idata->ic.postsleep_min_us)
44860 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
44861
44862- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
44863+ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
44864 err = -EFAULT;
44865 goto cmd_rel_host;
44866 }
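
In mmc_blk_ioctl_cmd() above, `&(ic_ptr->response)` and `ic_ptr->response` denote the same address — response is an array, so both point at its first byte — but the former has type pointer-to-array while the latter decays to a plain element pointer, the more conventional type for a copy_to_user() destination. The change is about pointer types, not runtime behavior:

    #include <stdio.h>

    struct cmd_result {
        unsigned int response[4];
    };

    int main(void)
    {
        struct cmd_result r = { { 0 } };

        void *a = &r.response;        /* unsigned int (*)[4]      */
        void *b = r.response;         /* decays to unsigned int * */

        /* Same address, different pointer types: */
        printf("&resp=%p resp=%p same=%d\n", a, b, a == b);
        printf("pointee sizes: %zu (whole array) vs %zu (one element)\n",
               sizeof(*(&r.response)), sizeof(r.response[0]));
        return 0;
    }
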
44867diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
44868index e5b5eeb..7bf2212 100644
44869--- a/drivers/mmc/core/mmc_ops.c
44870+++ b/drivers/mmc/core/mmc_ops.c
44871@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
44872 void *data_buf;
44873 int is_on_stack;
44874
44875- is_on_stack = object_is_on_stack(buf);
44876+ is_on_stack = object_starts_on_stack(buf);
44877 if (is_on_stack) {
44878 /*
44879 * dma onto stack is unsafe/nonportable, but callers to this
44880diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
44881index 6bf24ab..13d0293b 100644
44882--- a/drivers/mmc/host/dw_mmc.h
44883+++ b/drivers/mmc/host/dw_mmc.h
44884@@ -258,5 +258,5 @@ struct dw_mci_drv_data {
44885 int (*parse_dt)(struct dw_mci *host);
44886 int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode,
44887 struct dw_mci_tuning_data *tuning_data);
44888-};
44889+} __do_const;
44890 #endif /* _DW_MMC_H_ */
44891diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
44892index f320579..7b7ebac 100644
44893--- a/drivers/mmc/host/mmci.c
44894+++ b/drivers/mmc/host/mmci.c
44895@@ -1504,7 +1504,9 @@ static int mmci_probe(struct amba_device *dev,
44896 }
44897
44898 if (variant->busy_detect) {
44899- mmci_ops.card_busy = mmci_card_busy;
44900+ pax_open_kernel();
44901+ *(void **)&mmci_ops.card_busy = mmci_card_busy;
44902+ pax_close_kernel();
44903 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
44904 }
44905
44906diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
44907index 1dcaf8a..025af25 100644
44908--- a/drivers/mmc/host/sdhci-esdhc-imx.c
44909+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
44910@@ -1009,9 +1009,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
44911 host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
44912 }
44913
44914- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
44915- sdhci_esdhc_ops.platform_execute_tuning =
44916+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
44917+ pax_open_kernel();
44918+ *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
44919 esdhc_executing_tuning;
44920+ pax_close_kernel();
44921+ }
44922 boarddata = &imx_data->boarddata;
44923 if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) {
44924 if (!host->mmc->parent->platform_data) {
44925diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
44926index 6debda9..2ba7427 100644
44927--- a/drivers/mmc/host/sdhci-s3c.c
44928+++ b/drivers/mmc/host/sdhci-s3c.c
44929@@ -668,9 +668,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
44930 * we can use overriding functions instead of default.
44931 */
44932 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
44933- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
44934- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
44935- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
44936+ pax_open_kernel();
44937+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
44938+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
44939+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
44940+ pax_close_kernel();
44941 }
44942
44943 /* It supports additional host capabilities if needed */
44944diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
44945index 096993f..f02c23b 100644
44946--- a/drivers/mtd/chips/cfi_cmdset_0020.c
44947+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
44948@@ -669,7 +669,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
44949 size_t totlen = 0, thislen;
44950 int ret = 0;
44951 size_t buflen = 0;
44952- static char *buffer;
44953+ char *buffer;
44954
44955 if (!ECCBUF_SIZE) {
44956 /* We should fall back to a general writev implementation.
44957diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
44958index 370b9dd..1a1176b 100644
44959--- a/drivers/mtd/nand/denali.c
44960+++ b/drivers/mtd/nand/denali.c
44961@@ -24,6 +24,7 @@
44962 #include <linux/slab.h>
44963 #include <linux/mtd/mtd.h>
44964 #include <linux/module.h>
44965+#include <linux/slab.h>
44966
44967 #include "denali.h"
44968
44969diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
44970index 51b9d6a..52af9a7 100644
44971--- a/drivers/mtd/nftlmount.c
44972+++ b/drivers/mtd/nftlmount.c
44973@@ -24,6 +24,7 @@
44974 #include <asm/errno.h>
44975 #include <linux/delay.h>
44976 #include <linux/slab.h>
44977+#include <linux/sched.h>
44978 #include <linux/mtd/mtd.h>
44979 #include <linux/mtd/nand.h>
44980 #include <linux/mtd/nftl.h>
44981diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
44982index 4b8e895..6b3c498 100644
44983--- a/drivers/mtd/sm_ftl.c
44984+++ b/drivers/mtd/sm_ftl.c
44985@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
44986 #define SM_CIS_VENDOR_OFFSET 0x59
44987 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
44988 {
44989- struct attribute_group *attr_group;
44990+ attribute_group_no_const *attr_group;
44991 struct attribute **attributes;
44992 struct sm_sysfs_attribute *vendor_attribute;
44993
44994diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
44995index 4b8c58b..a200546 100644
44996--- a/drivers/net/bonding/bond_main.c
44997+++ b/drivers/net/bonding/bond_main.c
44998@@ -4527,6 +4527,7 @@ static void __exit bonding_exit(void)
44999
45000 bond_netlink_fini();
45001 unregister_pernet_subsys(&bond_net_ops);
45002+ rtnl_link_unregister(&bond_link_ops);
45003
45004 #ifdef CONFIG_NET_POLL_CONTROLLER
45005 /*
45006diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
45007index 40e7b1c..6a70fff 100644
45008--- a/drivers/net/bonding/bond_netlink.c
45009+++ b/drivers/net/bonding/bond_netlink.c
45010@@ -102,7 +102,7 @@ nla_put_failure:
45011 return -EMSGSIZE;
45012 }
45013
45014-struct rtnl_link_ops bond_link_ops __read_mostly = {
45015+struct rtnl_link_ops bond_link_ops = {
45016 .kind = "bond",
45017 .priv_size = sizeof(struct bonding),
45018 .setup = bond_setup,
45019diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
45020index 36fa577..a158806 100644
45021--- a/drivers/net/ethernet/8390/ax88796.c
45022+++ b/drivers/net/ethernet/8390/ax88796.c
45023@@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
45024 if (ax->plat->reg_offsets)
45025 ei_local->reg_offset = ax->plat->reg_offsets;
45026 else {
45027+ resource_size_t _mem_size = mem_size;
45028+ do_div(_mem_size, 0x18);
45029 ei_local->reg_offset = ax->reg_offsets;
45030 for (ret = 0; ret < 0x18; ret++)
45031- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
45032+ ax->reg_offsets[ret] = _mem_size * ret;
45033 }
45034
45035 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
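
The divisor rewrite in ax_probe() above matters because mem_size is a resource_size_t, which can be 64-bit even on 32-bit kernels; a plain `/` on a 64-bit value there would emit a call to libgcc's __udivdi3, which the kernel does not provide. do_div() divides in place using the architecture's helper, returning the remainder and leaving the quotient in its argument. Emulated in userspace (the real do_div is an arch-specific macro):

    #include <stdio.h>
    #include <stdint.h>

    /* Userspace stand-in for the kernel's do_div(): divide n in place,
     * return the remainder.  In the kernel the real macro is per-arch
     * and avoids pulling in libgcc's __udivdi3 on 32-bit builds. */
    #define do_div(n, base) ({                          \
            uint32_t __rem = (uint32_t)((n) % (base));  \
            (n) /= (base);                              \
            __rem;                                      \
    })

    int main(void)
    {
        uint64_t mem_size = 0x1000;        /* e.g. a resource length */
        uint32_t rem = do_div(mem_size, 0x18);

        /* mem_size now holds the quotient, as in the ax88796 hunk. */
        printf("stride=%llu rem=%u\n",
               (unsigned long long)mem_size, rem);
        return 0;
    }
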
45036diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
45037index 41f3ca5a..1ee5364 100644
45038--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
45039+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
45040@@ -1139,7 +1139,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
45041 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
45042 {
45043 /* RX_MODE controlling object */
45044- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
45045+ bnx2x_init_rx_mode_obj(bp);
45046
45047 /* multicast configuration controlling object */
45048 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
45049diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
45050index 18438a5..c923b8e 100644
45051--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
45052+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
45053@@ -2591,15 +2591,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
45054 return rc;
45055 }
45056
45057-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
45058- struct bnx2x_rx_mode_obj *o)
45059+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
45060 {
45061 if (CHIP_IS_E1x(bp)) {
45062- o->wait_comp = bnx2x_empty_rx_mode_wait;
45063- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
45064+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
45065+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
45066 } else {
45067- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
45068- o->config_rx_mode = bnx2x_set_rx_mode_e2;
45069+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
45070+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
45071 }
45072 }
45073
45074diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
45075index 6a53c15..6e7d1e7 100644
45076--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
45077+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
45078@@ -1332,8 +1332,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
45079
45080 /********************* RX MODE ****************/
45081
45082-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
45083- struct bnx2x_rx_mode_obj *o);
45084+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
45085
45086 /**
45087 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
45088diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
45089index 5c3835a..d18b952 100644
45090--- a/drivers/net/ethernet/broadcom/tg3.h
45091+++ b/drivers/net/ethernet/broadcom/tg3.h
45092@@ -150,6 +150,7 @@
45093 #define CHIPREV_ID_5750_A0 0x4000
45094 #define CHIPREV_ID_5750_A1 0x4001
45095 #define CHIPREV_ID_5750_A3 0x4003
45096+#define CHIPREV_ID_5750_C1 0x4201
45097 #define CHIPREV_ID_5750_C2 0x4202
45098 #define CHIPREV_ID_5752_A0_HW 0x5000
45099 #define CHIPREV_ID_5752_A0 0x6000
45100diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
45101index 3ca77fa..fcc015f 100644
45102--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
45103+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
45104@@ -1690,10 +1690,10 @@ bna_cb_ioceth_reset(void *arg)
45105 }
45106
45107 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
45108- bna_cb_ioceth_enable,
45109- bna_cb_ioceth_disable,
45110- bna_cb_ioceth_hbfail,
45111- bna_cb_ioceth_reset
45112+ .enable_cbfn = bna_cb_ioceth_enable,
45113+ .disable_cbfn = bna_cb_ioceth_disable,
45114+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
45115+ .reset_cbfn = bna_cb_ioceth_reset
45116 };
45117
45118 static void bna_attr_init(struct bna_ioceth *ioceth)
45119diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
45120index 8cffcdf..aadf043 100644
45121--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
45122+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
45123@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
45124 */
45125 struct l2t_skb_cb {
45126 arp_failure_handler_func arp_failure_handler;
45127-};
45128+} __no_const;
45129
45130 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
45131
45132diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
45133index fff02ed..d421412 100644
45134--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
45135+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
45136@@ -2120,7 +2120,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
45137
45138 int i;
45139 struct adapter *ap = netdev2adap(dev);
45140- static const unsigned int *reg_ranges;
45141+ const unsigned int *reg_ranges;
45142 int arr_size = 0, buf_size = 0;
45143
45144 if (is_t4(ap->params.chip)) {
45145diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
45146index c05b66d..ed69872 100644
45147--- a/drivers/net/ethernet/dec/tulip/de4x5.c
45148+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
45149@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
45150 for (i=0; i<ETH_ALEN; i++) {
45151 tmp.addr[i] = dev->dev_addr[i];
45152 }
45153- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
45154+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
45155 break;
45156
45157 case DE4X5_SET_HWADDR: /* Set the hardware address */
45158@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
45159 spin_lock_irqsave(&lp->lock, flags);
45160 memcpy(&statbuf, &lp->pktStats, ioc->len);
45161 spin_unlock_irqrestore(&lp->lock, flags);
45162- if (copy_to_user(ioc->data, &statbuf, ioc->len))
45163+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
45164 return -EFAULT;
45165 break;
45166 }
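
Both de4x5 ioctl branches above copy `ioc->len` bytes out of a fixed-size kernel object with the length taken straight from userspace; without the added `ioc->len > sizeof ...` guard, a caller could read past tmp.addr or statbuf and leak adjacent stack memory. The rule: clamp or reject a user-supplied length before it reaches copy_to_user(). A userspace model of the check:

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for copy_to_user(); returns 0 on success. */
    static int copy_out(void *dst, const void *src, size_t len)
    {
        memcpy(dst, src, len);
        return 0;
    }

    static int get_hwaddr(void *user_buf, size_t user_len)
    {
        unsigned char addr[6] = { 0xde, 0x4f, 0x53, 0x00, 0x00, 0x01 };

        /* Reject lengths larger than the kernel object: anything more
         * would copy out whatever happens to follow it on the stack. */
        if (user_len > sizeof(addr) || copy_out(user_buf, addr, user_len))
            return -14;        /* -EFAULT */
        return 0;
    }

    int main(void)
    {
        unsigned char buf[64];

        printf("len 6:  %d\n", get_hwaddr(buf, 6));     /* ok       */
        printf("len 64: %d\n", get_hwaddr(buf, 64));    /* rejected */
        return 0;
    }
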
45167diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
45168index a37039d..a51d7e8 100644
45169--- a/drivers/net/ethernet/emulex/benet/be_main.c
45170+++ b/drivers/net/ethernet/emulex/benet/be_main.c
45171@@ -533,7 +533,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
45172
45173 if (wrapped)
45174 newacc += 65536;
45175- ACCESS_ONCE(*acc) = newacc;
45176+ ACCESS_ONCE_RW(*acc) = newacc;
45177 }
45178
45179 static void populate_erx_stats(struct be_adapter *adapter,
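
ACCESS_ONCE() forces a single untorn access by casting through a volatile lvalue. Under PaX the default macro carries a const qualifier so it can only be used for loads, and stores must say ACCESS_ONCE_RW() explicitly — which is why every write site in this patch is converted while the reads are left alone. A sketch of that split (mainline at the time had a single read/write macro):

    #include <stdio.h>

    /* Read side: the const in the cast turns an accidental store into
     * a compile error.  The write side must be spelled out. */
    #define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

    static unsigned int shared_stat;

    int main(void)
    {
        unsigned int v;

        ACCESS_ONCE_RW(shared_stat) = 42;     /* one untorn store */
        v = ACCESS_ONCE(shared_stat);         /* one untorn load  */

        /* ACCESS_ONCE(shared_stat) = 0;  would fail: read-only lvalue */
        printf("v=%u\n", v);
        return 0;
    }
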
45180diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
45181index 212f44b..fb69959 100644
45182--- a/drivers/net/ethernet/faraday/ftgmac100.c
45183+++ b/drivers/net/ethernet/faraday/ftgmac100.c
45184@@ -31,6 +31,8 @@
45185 #include <linux/netdevice.h>
45186 #include <linux/phy.h>
45187 #include <linux/platform_device.h>
45188+#include <linux/interrupt.h>
45189+#include <linux/irqreturn.h>
45190 #include <net/ip.h>
45191
45192 #include "ftgmac100.h"
45193diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
45194index 8be5b40..081bc1b 100644
45195--- a/drivers/net/ethernet/faraday/ftmac100.c
45196+++ b/drivers/net/ethernet/faraday/ftmac100.c
45197@@ -31,6 +31,8 @@
45198 #include <linux/module.h>
45199 #include <linux/netdevice.h>
45200 #include <linux/platform_device.h>
45201+#include <linux/interrupt.h>
45202+#include <linux/irqreturn.h>
45203
45204 #include "ftmac100.h"
45205
45206diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
45207index 5184e2a..acb28c3 100644
45208--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
45209+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
45210@@ -776,7 +776,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
45211 }
45212
45213 /* update the base incval used to calculate frequency adjustment */
45214- ACCESS_ONCE(adapter->base_incval) = incval;
45215+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
45216 smp_mb();
45217
45218 /* need lock to prevent incorrect read while modifying cyclecounter */
45219diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
45220index fbe5363..266b4e3 100644
45221--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
45222+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
45223@@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
45224 struct __vxge_hw_fifo *fifo;
45225 struct vxge_hw_fifo_config *config;
45226 u32 txdl_size, txdl_per_memblock;
45227- struct vxge_hw_mempool_cbs fifo_mp_callback;
45228+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
45229+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
45230+ };
45231+
45232 struct __vxge_hw_virtualpath *vpath;
45233
45234 if ((vp == NULL) || (attr == NULL)) {
45235@@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
45236 goto exit;
45237 }
45238
45239- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
45240-
45241 fifo->mempool =
45242 __vxge_hw_mempool_create(vpath->hldev,
45243 fifo->config->memblock_size,
45244diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
45245index 918e18d..4ca3650 100644
45246--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
45247+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
45248@@ -2086,7 +2086,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
45249 adapter->max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
45250 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
45251 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
45252- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
45253+ pax_open_kernel();
45254+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
45255+ pax_close_kernel();
45256 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
45257 adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
45258 adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS;
45259diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
45260index 734d286..b017bf5 100644
45261--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
45262+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
45263@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
45264 case QLCNIC_NON_PRIV_FUNC:
45265 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
45266 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
45267- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
45268+ pax_open_kernel();
45269+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
45270+ pax_close_kernel();
45271 break;
45272 case QLCNIC_PRIV_FUNC:
45273 ahw->op_mode = QLCNIC_PRIV_FUNC;
45274 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
45275- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
45276+ pax_open_kernel();
45277+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
45278+ pax_close_kernel();
45279 break;
45280 case QLCNIC_MGMT_FUNC:
45281 ahw->op_mode = QLCNIC_MGMT_FUNC;
45282 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
45283- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
45284+ pax_open_kernel();
45285+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
45286+ pax_close_kernel();
45287 break;
45288 default:
45289 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
45290diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
45291index 7763962..c3499a7 100644
45292--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
45293+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
45294@@ -1108,7 +1108,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
45295 struct qlcnic_dump_entry *entry;
45296 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
45297 struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
45298- static const struct qlcnic_dump_operations *fw_dump_ops;
45299+ const struct qlcnic_dump_operations *fw_dump_ops;
45300 struct device *dev = &adapter->pdev->dev;
45301 struct qlcnic_hardware_context *ahw;
45302 void *temp_buffer;
45303diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
45304index c737f0e..32b8682 100644
45305--- a/drivers/net/ethernet/realtek/r8169.c
45306+++ b/drivers/net/ethernet/realtek/r8169.c
45307@@ -759,22 +759,22 @@ struct rtl8169_private {
45308 struct mdio_ops {
45309 void (*write)(struct rtl8169_private *, int, int);
45310 int (*read)(struct rtl8169_private *, int);
45311- } mdio_ops;
45312+ } __no_const mdio_ops;
45313
45314 struct pll_power_ops {
45315 void (*down)(struct rtl8169_private *);
45316 void (*up)(struct rtl8169_private *);
45317- } pll_power_ops;
45318+ } __no_const pll_power_ops;
45319
45320 struct jumbo_ops {
45321 void (*enable)(struct rtl8169_private *);
45322 void (*disable)(struct rtl8169_private *);
45323- } jumbo_ops;
45324+ } __no_const jumbo_ops;
45325
45326 struct csi_ops {
45327 void (*write)(struct rtl8169_private *, int, int);
45328 u32 (*read)(struct rtl8169_private *, int);
45329- } csi_ops;
45330+ } __no_const csi_ops;
45331
45332 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
45333 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
45334diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
45335index 3dd39dc..85efa46 100644
45336--- a/drivers/net/ethernet/sfc/ptp.c
45337+++ b/drivers/net/ethernet/sfc/ptp.c
45338@@ -541,7 +541,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
45339 ptp->start.dma_addr);
45340
45341 /* Clear flag that signals MC ready */
45342- ACCESS_ONCE(*start) = 0;
45343+ ACCESS_ONCE_RW(*start) = 0;
45344 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
45345 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
45346 EFX_BUG_ON_PARANOID(rc);
45347diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
45348index 50617c5..b13724c 100644
45349--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
45350+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
45351@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
45352
45353 writel(value, ioaddr + MMC_CNTRL);
45354
45355- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
45356- MMC_CNTRL, value);
45357+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
45358+// MMC_CNTRL, value);
45359 }
45360
45361 /* To mask all all interrupts.*/
45362diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
45363index e6fe0d8..2b7d752 100644
45364--- a/drivers/net/hyperv/hyperv_net.h
45365+++ b/drivers/net/hyperv/hyperv_net.h
45366@@ -101,7 +101,7 @@ struct rndis_device {
45367
45368 enum rndis_device_state state;
45369 bool link_state;
45370- atomic_t new_req_id;
45371+ atomic_unchecked_t new_req_id;
45372
45373 spinlock_t request_lock;
45374 struct list_head req_list;
45375diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
45376index 0775f0a..d4fb316 100644
45377--- a/drivers/net/hyperv/rndis_filter.c
45378+++ b/drivers/net/hyperv/rndis_filter.c
45379@@ -104,7 +104,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
45380 * template
45381 */
45382 set = &rndis_msg->msg.set_req;
45383- set->req_id = atomic_inc_return(&dev->new_req_id);
45384+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
45385
45386 /* Add to the request list */
45387 spin_lock_irqsave(&dev->request_lock, flags);
45388@@ -752,7 +752,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
45389
45390 /* Setup the rndis set */
45391 halt = &request->request_msg.msg.halt_req;
45392- halt->req_id = atomic_inc_return(&dev->new_req_id);
45393+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
45394
45395 /* Ignore return since this msg is optional. */
45396 rndis_filter_send_request(dev, request);
45397diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
45398index bf0d55e..82bcfbd1 100644
45399--- a/drivers/net/ieee802154/fakehard.c
45400+++ b/drivers/net/ieee802154/fakehard.c
45401@@ -364,7 +364,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
45402 phy->transmit_power = 0xbf;
45403
45404 dev->netdev_ops = &fake_ops;
45405- dev->ml_priv = &fake_mlme;
45406+ dev->ml_priv = (void *)&fake_mlme;
45407
45408 priv = netdev_priv(dev);
45409 priv->phy = phy;
45410diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
45411index bc8faae..e51e25d 100644
45412--- a/drivers/net/macvlan.c
45413+++ b/drivers/net/macvlan.c
45414@@ -990,13 +990,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
45415 int macvlan_link_register(struct rtnl_link_ops *ops)
45416 {
45417 /* common fields */
45418- ops->priv_size = sizeof(struct macvlan_dev);
45419- ops->validate = macvlan_validate;
45420- ops->maxtype = IFLA_MACVLAN_MAX;
45421- ops->policy = macvlan_policy;
45422- ops->changelink = macvlan_changelink;
45423- ops->get_size = macvlan_get_size;
45424- ops->fill_info = macvlan_fill_info;
45425+ pax_open_kernel();
45426+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
45427+ *(void **)&ops->validate = macvlan_validate;
45428+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
45429+ *(const void **)&ops->policy = macvlan_policy;
45430+ *(void **)&ops->changelink = macvlan_changelink;
45431+ *(void **)&ops->get_size = macvlan_get_size;
45432+ *(void **)&ops->fill_info = macvlan_fill_info;
45433+ pax_close_kernel();
45434
45435 return rtnl_link_register(ops);
45436 };
45437@@ -1051,7 +1053,7 @@ static int macvlan_device_event(struct notifier_block *unused,
45438 return NOTIFY_DONE;
45439 }
45440
45441-static struct notifier_block macvlan_notifier_block __read_mostly = {
45442+static struct notifier_block macvlan_notifier_block = {
45443 .notifier_call = macvlan_device_event,
45444 };
45445
45446diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
45447index 2a89da0..c17fe1d 100644
45448--- a/drivers/net/macvtap.c
45449+++ b/drivers/net/macvtap.c
45450@@ -1012,7 +1012,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
45451 }
45452
45453 ret = 0;
45454- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
45455+ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
45456 put_user(q->flags, &ifr->ifr_flags))
45457 ret = -EFAULT;
45458 macvtap_put_vlan(vlan);
45459@@ -1182,7 +1182,7 @@ static int macvtap_device_event(struct notifier_block *unused,
45460 return NOTIFY_DONE;
45461 }
45462
45463-static struct notifier_block macvtap_notifier_block __read_mostly = {
45464+static struct notifier_block macvtap_notifier_block = {
45465 .notifier_call = macvtap_device_event,
45466 };
45467
45468diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
45469index daec9b0..6428fcb 100644
45470--- a/drivers/net/phy/mdio-bitbang.c
45471+++ b/drivers/net/phy/mdio-bitbang.c
45472@@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
45473 struct mdiobb_ctrl *ctrl = bus->priv;
45474
45475 module_put(ctrl->ops->owner);
45476+ mdiobus_unregister(bus);
45477 mdiobus_free(bus);
45478 }
45479 EXPORT_SYMBOL(free_mdio_bitbang);
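
The mdio-bitbang change above is a lifetime fix rather than a hardening one: callers of alloc_mdio_bitbang() register the bus with the MDIO core, but free_mdio_bitbang() previously only freed it, leaving the core holding a pointer to freed memory. Teardown must mirror setup in reverse. The invariant, with hypothetical names:

    #include <stdio.h>
    #include <stdlib.h>

    struct bus { int registered; };

    static struct bus *bus_alloc(void)        { return calloc(1, sizeof(struct bus)); }
    static void bus_register(struct bus *b)   { b->registered = 1; puts("registered"); }
    static void bus_unregister(struct bus *b) { b->registered = 0; puts("unregistered"); }

    static void bus_free(struct bus *b)
    {
        /* Mirror setup in reverse: unregister before freeing, or the
         * core keeps a pointer to freed memory (use-after-free). */
        if (b->registered)
            bus_unregister(b);
        free(b);
    }

    int main(void)
    {
        struct bus *b = bus_alloc();

        if (!b)
            return 1;
        bus_register(b);
        bus_free(b);        /* safe: unregisters first */
        return 0;
    }
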
45480diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
45481index 72ff14b..11d442d 100644
45482--- a/drivers/net/ppp/ppp_generic.c
45483+++ b/drivers/net/ppp/ppp_generic.c
45484@@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
45485 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
45486 struct ppp_stats stats;
45487 struct ppp_comp_stats cstats;
45488- char *vers;
45489
45490 switch (cmd) {
45491 case SIOCGPPPSTATS:
45492@@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
45493 break;
45494
45495 case SIOCGPPPVER:
45496- vers = PPP_VERSION;
45497- if (copy_to_user(addr, vers, strlen(vers) + 1))
45498+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
45499 break;
45500 err = 0;
45501 break;
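
For a string literal, sizeof includes the terminating NUL and is computed at compile time, so sizeof(PPP_VERSION) in the hunk above equals the old strlen(vers) + 1 without the runtime scan or the intermediate variable. Illustrated:

    #include <stdio.h>
    #include <string.h>

    #define PPP_VERSION "2.4.2"

    int main(void)
    {
        char out[sizeof(PPP_VERSION)];

        /* sizeof counts the terminating NUL; strlen does not. */
        printf("sizeof=%zu strlen+1=%zu\n",
               sizeof(PPP_VERSION), strlen(PPP_VERSION) + 1);

        memcpy(out, PPP_VERSION, sizeof(PPP_VERSION));  /* NUL included */
        printf("out=%s\n", out);
        return 0;
    }
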
45502diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
45503index 1252d9c..80e660b 100644
45504--- a/drivers/net/slip/slhc.c
45505+++ b/drivers/net/slip/slhc.c
45506@@ -488,7 +488,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
45507 register struct tcphdr *thp;
45508 register struct iphdr *ip;
45509 register struct cstate *cs;
45510- int len, hdrlen;
45511+ long len, hdrlen;
45512 unsigned char *cp = icp;
45513
45514 /* We've got a compressed packet; read the change byte */
45515diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
45516index b75ae5b..953c157 100644
45517--- a/drivers/net/team/team.c
45518+++ b/drivers/net/team/team.c
45519@@ -2865,7 +2865,7 @@ static int team_device_event(struct notifier_block *unused,
45520 return NOTIFY_DONE;
45521 }
45522
45523-static struct notifier_block team_notifier_block __read_mostly = {
45524+static struct notifier_block team_notifier_block = {
45525 .notifier_call = team_device_event,
45526 };
45527
45528diff --git a/drivers/net/tun.c b/drivers/net/tun.c
45529index ecec802..614f08f 100644
45530--- a/drivers/net/tun.c
45531+++ b/drivers/net/tun.c
45532@@ -1839,7 +1839,7 @@ unlock:
45533 }
45534
45535 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
45536- unsigned long arg, int ifreq_len)
45537+ unsigned long arg, size_t ifreq_len)
45538 {
45539 struct tun_file *tfile = file->private_data;
45540 struct tun_struct *tun;
45541@@ -1852,6 +1852,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
45542 unsigned int ifindex;
45543 int ret;
45544
45545+ if (ifreq_len > sizeof ifr)
45546+ return -EFAULT;
45547+
45548 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
45549 if (copy_from_user(&ifr, argp, ifreq_len))
45550 return -EFAULT;
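
__tun_chr_ioctl() copies ifreq_len bytes from userspace into a struct ifreq on the stack. Its current callers pass fixed sizeof values, but widening the parameter to the unsigned size_t and rejecting anything larger than the destination makes the function robust against any future caller: one compare closes off both a stack overflow and the negative-length games a signed int would permit. The guard in miniature:

    #include <stdio.h>
    #include <string.h>

    struct ifreq_like { char name[16]; int flags; };

    static long do_ioctl(const void *user_arg, size_t arg_len)
    {
        struct ifreq_like ifr;

        /* The destination is a fixed stack object: never accept more
         * than sizeof(ifr), whatever length the caller claims. */
        if (arg_len > sizeof(ifr))
            return -14;        /* -EFAULT */

        memcpy(&ifr, user_arg, arg_len);    /* models copy_from_user */
        printf("ioctl on %.16s\n", ifr.name);
        return 0;
    }

    int main(void)
    {
        struct ifreq_like req = { "tun0", 0 };

        printf("ok:  %ld\n", do_ioctl(&req, sizeof(req)));
        printf("big: %ld\n", do_ioctl(&req, 4096));     /* rejected */
        return 0;
    }
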
45551diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
45552index 1a48234..a555339 100644
45553--- a/drivers/net/usb/hso.c
45554+++ b/drivers/net/usb/hso.c
45555@@ -71,7 +71,7 @@
45556 #include <asm/byteorder.h>
45557 #include <linux/serial_core.h>
45558 #include <linux/serial.h>
45559-
45560+#include <asm/local.h>
45561
45562 #define MOD_AUTHOR "Option Wireless"
45563 #define MOD_DESCRIPTION "USB High Speed Option driver"
45564@@ -1179,7 +1179,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
45565 struct urb *urb;
45566
45567 urb = serial->rx_urb[0];
45568- if (serial->port.count > 0) {
45569+ if (atomic_read(&serial->port.count) > 0) {
45570 count = put_rxbuf_data(urb, serial);
45571 if (count == -1)
45572 return;
45573@@ -1215,7 +1215,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
45574 DUMP1(urb->transfer_buffer, urb->actual_length);
45575
45576 /* Anyone listening? */
45577- if (serial->port.count == 0)
45578+ if (atomic_read(&serial->port.count) == 0)
45579 return;
45580
45581 if (status == 0) {
45582@@ -1297,8 +1297,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
45583 tty_port_tty_set(&serial->port, tty);
45584
45585 /* check for port already opened, if not set the termios */
45586- serial->port.count++;
45587- if (serial->port.count == 1) {
45588+ if (atomic_inc_return(&serial->port.count) == 1) {
45589 serial->rx_state = RX_IDLE;
45590 /* Force default termio settings */
45591 _hso_serial_set_termios(tty, NULL);
45592@@ -1310,7 +1309,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
45593 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
45594 if (result) {
45595 hso_stop_serial_device(serial->parent);
45596- serial->port.count--;
45597+ atomic_dec(&serial->port.count);
45598 kref_put(&serial->parent->ref, hso_serial_ref_free);
45599 }
45600 } else {
45601@@ -1347,10 +1346,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
45602
45603 /* reset the rts and dtr */
45604 /* do the actual close */
45605- serial->port.count--;
45606+ atomic_dec(&serial->port.count);
45607
45608- if (serial->port.count <= 0) {
45609- serial->port.count = 0;
45610+ if (atomic_read(&serial->port.count) <= 0) {
45611+ atomic_set(&serial->port.count, 0);
45612 tty_port_tty_set(&serial->port, NULL);
45613 if (!usb_gone)
45614 hso_stop_serial_device(serial->parent);
45615@@ -1426,7 +1425,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
45616
45617 /* the actual setup */
45618 spin_lock_irqsave(&serial->serial_lock, flags);
45619- if (serial->port.count)
45620+ if (atomic_read(&serial->port.count))
45621 _hso_serial_set_termios(tty, old);
45622 else
45623 tty->termios = *old;
45624@@ -1895,7 +1894,7 @@ static void intr_callback(struct urb *urb)
45625 D1("Pending read interrupt on port %d\n", i);
45626 spin_lock(&serial->serial_lock);
45627 if (serial->rx_state == RX_IDLE &&
45628- serial->port.count > 0) {
45629+ atomic_read(&serial->port.count) > 0) {
45630 /* Setup and send a ctrl req read on
45631 * port i */
45632 if (!serial->rx_urb_filled[0]) {
45633@@ -3071,7 +3070,7 @@ static int hso_resume(struct usb_interface *iface)
45634 /* Start all serial ports */
45635 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
45636 if (serial_table[i] && (serial_table[i]->interface == iface)) {
45637- if (dev2ser(serial_table[i])->port.count) {
45638+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
45639 result =
45640 hso_start_serial_device(serial_table[i], GFP_NOIO);
45641 hso_kick_transmit(dev2ser(serial_table[i]));
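
These hso conversions turn the plain port.count into atomic operations so that concurrent open/close paths cannot lose an update; atomic_inc_return(...) == 1 identifies the first opener without a separate read. A C11 sketch of the same open/close counting idiom:

#include <stdatomic.h>

static atomic_int port_count;

static void port_open(void)
{
	if (atomic_fetch_add(&port_count, 1) + 1 == 1) {
		/* first opener: reset rx state, apply default termios, start device */
	}
}

static void port_close(void)
{
	if (atomic_fetch_sub(&port_count, 1) - 1 <= 0) {
		atomic_store(&port_count, 0);
		/* last closer: detach the tty and stop the device */
	}
}
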
45642diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
45643index a79e9d3..78cd4fa 100644
45644--- a/drivers/net/usb/sierra_net.c
45645+++ b/drivers/net/usb/sierra_net.c
45646@@ -52,7 +52,7 @@ static const char driver_name[] = "sierra_net";
45647 /* atomic counter partially included in MAC address to make sure 2 devices
45648 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
45649 */
45650-static atomic_t iface_counter = ATOMIC_INIT(0);
45651+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
45652
45653 /*
45654 * SYNC Timer Delay definition used to set the expiry time
45655@@ -698,7 +698,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
45656 dev->net->netdev_ops = &sierra_net_device_ops;
45657
45658 /* change MAC addr to include, ifacenum, and to be unique */
45659- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
45660+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
45661 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
45662
45663 /* we will have to manufacture ethernet headers, prepare template */
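
atomic_unchecked_t is a patch-specific type: grsecurity's REFCOUNT hardening traps on atomic_t overflow, so counters whose wraparound is deliberate are opted out of the check. Only the low byte of iface_counter reaches the MAC address, so wrapping past 255 is the limitation the comment above already documents, not a condition to trap on. A plain C11 stand-in (the _unchecked API exists only in the patch):

#include <stdatomic.h>
#include <stdint.h>

static atomic_uint iface_counter;

static uint8_t next_mac_byte(void)
{
	/* truncation to one byte mirrors dev_addr[ETH_ALEN-2] = ... */
	return (uint8_t)(atomic_fetch_add(&iface_counter, 1) + 1);
}
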
45664diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
45665index 0247973..088193a 100644
45666--- a/drivers/net/vxlan.c
45667+++ b/drivers/net/vxlan.c
45668@@ -2615,7 +2615,7 @@ nla_put_failure:
45669 return -EMSGSIZE;
45670 }
45671
45672-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
45673+static struct rtnl_link_ops vxlan_link_ops = {
45674 .kind = "vxlan",
45675 .maxtype = IFLA_VXLAN_MAX,
45676 .policy = vxlan_policy,
45677diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
45678index 5920c99..ff2e4a5 100644
45679--- a/drivers/net/wan/lmc/lmc_media.c
45680+++ b/drivers/net/wan/lmc/lmc_media.c
45681@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
45682 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
45683
45684 lmc_media_t lmc_ds3_media = {
45685- lmc_ds3_init, /* special media init stuff */
45686- lmc_ds3_default, /* reset to default state */
45687- lmc_ds3_set_status, /* reset status to state provided */
45688- lmc_dummy_set_1, /* set clock source */
45689- lmc_dummy_set2_1, /* set line speed */
45690- lmc_ds3_set_100ft, /* set cable length */
45691- lmc_ds3_set_scram, /* set scrambler */
45692- lmc_ds3_get_link_status, /* get link status */
45693- lmc_dummy_set_1, /* set link status */
45694- lmc_ds3_set_crc_length, /* set CRC length */
45695- lmc_dummy_set_1, /* set T1 or E1 circuit type */
45696- lmc_ds3_watchdog
45697+ .init = lmc_ds3_init, /* special media init stuff */
45698+ .defaults = lmc_ds3_default, /* reset to default state */
45699+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
45700+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
45701+ .set_speed = lmc_dummy_set2_1, /* set line speed */
45702+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
45703+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
45704+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
45705+ .set_link_status = lmc_dummy_set_1, /* set link status */
45706+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
45707+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
45708+ .watchdog = lmc_ds3_watchdog
45709 };
45710
45711 lmc_media_t lmc_hssi_media = {
45712- lmc_hssi_init, /* special media init stuff */
45713- lmc_hssi_default, /* reset to default state */
45714- lmc_hssi_set_status, /* reset status to state provided */
45715- lmc_hssi_set_clock, /* set clock source */
45716- lmc_dummy_set2_1, /* set line speed */
45717- lmc_dummy_set_1, /* set cable length */
45718- lmc_dummy_set_1, /* set scrambler */
45719- lmc_hssi_get_link_status, /* get link status */
45720- lmc_hssi_set_link_status, /* set link status */
45721- lmc_hssi_set_crc_length, /* set CRC length */
45722- lmc_dummy_set_1, /* set T1 or E1 circuit type */
45723- lmc_hssi_watchdog
45724+ .init = lmc_hssi_init, /* special media init stuff */
45725+ .defaults = lmc_hssi_default, /* reset to default state */
45726+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
45727+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
45728+ .set_speed = lmc_dummy_set2_1, /* set line speed */
45729+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
45730+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
45731+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
45732+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
45733+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
45734+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
45735+ .watchdog = lmc_hssi_watchdog
45736 };
45737
45738-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
45739- lmc_ssi_default, /* reset to default state */
45740- lmc_ssi_set_status, /* reset status to state provided */
45741- lmc_ssi_set_clock, /* set clock source */
45742- lmc_ssi_set_speed, /* set line speed */
45743- lmc_dummy_set_1, /* set cable length */
45744- lmc_dummy_set_1, /* set scrambler */
45745- lmc_ssi_get_link_status, /* get link status */
45746- lmc_ssi_set_link_status, /* set link status */
45747- lmc_ssi_set_crc_length, /* set CRC length */
45748- lmc_dummy_set_1, /* set T1 or E1 circuit type */
45749- lmc_ssi_watchdog
45750+lmc_media_t lmc_ssi_media = {
45751+ .init = lmc_ssi_init, /* special media init stuff */
45752+ .defaults = lmc_ssi_default, /* reset to default state */
45753+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
45754+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
45755+ .set_speed = lmc_ssi_set_speed, /* set line speed */
45756+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
45757+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
45758+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
45759+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
45760+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
45761+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
45762+ .watchdog = lmc_ssi_watchdog
45763 };
45764
45765 lmc_media_t lmc_t1_media = {
45766- lmc_t1_init, /* special media init stuff */
45767- lmc_t1_default, /* reset to default state */
45768- lmc_t1_set_status, /* reset status to state provided */
45769- lmc_t1_set_clock, /* set clock source */
45770- lmc_dummy_set2_1, /* set line speed */
45771- lmc_dummy_set_1, /* set cable length */
45772- lmc_dummy_set_1, /* set scrambler */
45773- lmc_t1_get_link_status, /* get link status */
45774- lmc_dummy_set_1, /* set link status */
45775- lmc_t1_set_crc_length, /* set CRC length */
45776- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
45777- lmc_t1_watchdog
45778+ .init = lmc_t1_init, /* special media init stuff */
45779+ .defaults = lmc_t1_default, /* reset to default state */
45780+ .set_status = lmc_t1_set_status, /* reset status to state provided */
45781+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
45782+ .set_speed = lmc_dummy_set2_1, /* set line speed */
45783+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
45784+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
45785+ .get_link_status = lmc_t1_get_link_status, /* get link status */
45786+ .set_link_status = lmc_dummy_set_1, /* set link status */
45787+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
45788+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
45789+ .watchdog = lmc_t1_watchdog
45790 };
45791
45792 static void
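
The four lmc_media_t hunks above convert positional initializers to C99 designated initializers: each handler is bound to a named field, so a reordered or extended struct definition cannot silently misassign function pointers, and the form matches what the patch's constification of ops-like structures expects. A compilable sketch of the idiom:

#include <stdio.h>

struct media_ops {
	void (*init)(void);
	void (*watchdog)(void);
};

static void my_init(void)     { puts("init"); }
static void my_watchdog(void) { puts("watchdog"); }

/* a positional initializer would misbind these if the field order
 * ever changed; named fields cannot */
static const struct media_ops ops = {
	.init     = my_init,
	.watchdog = my_watchdog,
};

int main(void) { ops.init(); ops.watchdog(); return 0; }
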
45793diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
45794index feacc3b..5bac0de 100644
45795--- a/drivers/net/wan/z85230.c
45796+++ b/drivers/net/wan/z85230.c
45797@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
45798
45799 struct z8530_irqhandler z8530_sync =
45800 {
45801- z8530_rx,
45802- z8530_tx,
45803- z8530_status
45804+ .rx = z8530_rx,
45805+ .tx = z8530_tx,
45806+ .status = z8530_status
45807 };
45808
45809 EXPORT_SYMBOL(z8530_sync);
45810@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
45811 }
45812
45813 static struct z8530_irqhandler z8530_dma_sync = {
45814- z8530_dma_rx,
45815- z8530_dma_tx,
45816- z8530_dma_status
45817+ .rx = z8530_dma_rx,
45818+ .tx = z8530_dma_tx,
45819+ .status = z8530_dma_status
45820 };
45821
45822 static struct z8530_irqhandler z8530_txdma_sync = {
45823- z8530_rx,
45824- z8530_dma_tx,
45825- z8530_dma_status
45826+ .rx = z8530_rx,
45827+ .tx = z8530_dma_tx,
45828+ .status = z8530_dma_status
45829 };
45830
45831 /**
45832@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
45833
45834 struct z8530_irqhandler z8530_nop=
45835 {
45836- z8530_rx_clear,
45837- z8530_tx_clear,
45838- z8530_status_clear
45839+ .rx = z8530_rx_clear,
45840+ .tx = z8530_tx_clear,
45841+ .status = z8530_status_clear
45842 };
45843
45844
45845diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
45846index 0b60295..b8bfa5b 100644
45847--- a/drivers/net/wimax/i2400m/rx.c
45848+++ b/drivers/net/wimax/i2400m/rx.c
45849@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
45850 if (i2400m->rx_roq == NULL)
45851 goto error_roq_alloc;
45852
45853- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
45854+ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
45855 GFP_KERNEL);
45856 if (rd == NULL) {
45857 result = -ENOMEM;
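
Swapping the kcalloc() arguments leaves the byte count unchanged, since the product is commutative; presumably the reordering is for the patch's size_overflow plugin, which instruments a particular parameter position. For reference, a userspace sketch of the multiplication-overflow check such array allocators perform internally:

#include <stdint.h>
#include <stdlib.h>

static void *alloc_array(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)	/* n * size would wrap */
		return NULL;
	return calloc(n, size);
}
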
45858diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
45859index edf4b57..68b51c0 100644
45860--- a/drivers/net/wireless/airo.c
45861+++ b/drivers/net/wireless/airo.c
45862@@ -7843,7 +7843,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
45863 struct airo_info *ai = dev->ml_priv;
45864 int ridcode;
45865 int enabled;
45866- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
45867+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
45868 unsigned char *iobuf;
45869
45870 /* Only super-user can write RIDs */
45871diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
45872index 34c8a33..3261fdc 100644
45873--- a/drivers/net/wireless/at76c50x-usb.c
45874+++ b/drivers/net/wireless/at76c50x-usb.c
45875@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
45876 }
45877
45878 /* Convert timeout from the DFU status to jiffies */
45879-static inline unsigned long at76_get_timeout(struct dfu_status *s)
45880+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
45881 {
45882 return msecs_to_jiffies((s->poll_timeout[2] << 16)
45883 | (s->poll_timeout[1] << 8)
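
__intentional_overflow(-1) is a patch-specific annotation telling the size_overflow plugin that arithmetic wrap in this function is by design and should not be instrumented or reported. The computation itself just packs three bytes into a 24-bit value, as in this plain C sketch:

#include <stdint.h>

static unsigned long timeout_raw(const uint8_t b[3])
{
	return ((unsigned long)b[2] << 16) |
	       ((unsigned long)b[1] << 8)  |
	        (unsigned long)b[0];
}
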
45884diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
45885index edae50b..b24278c 100644
45886--- a/drivers/net/wireless/ath/ath10k/htc.c
45887+++ b/drivers/net/wireless/ath/ath10k/htc.c
45888@@ -842,7 +842,10 @@ void ath10k_htc_stop(struct ath10k_htc *htc)
45889 /* registered target arrival callback from the HIF layer */
45890 int ath10k_htc_init(struct ath10k *ar)
45891 {
45892- struct ath10k_hif_cb htc_callbacks;
45893+ static struct ath10k_hif_cb htc_callbacks = {
45894+ .rx_completion = ath10k_htc_rx_completion_handler,
45895+ .tx_completion = ath10k_htc_tx_completion_handler,
45896+ };
45897 struct ath10k_htc_ep *ep = NULL;
45898 struct ath10k_htc *htc = &ar->htc;
45899
45900@@ -852,8 +855,6 @@ int ath10k_htc_init(struct ath10k *ar)
45901 ath10k_htc_reset_endpoint_states(htc);
45902
45903 /* setup HIF layer callbacks */
45904- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
45905- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
45906 htc->ar = ar;
45907
45908 /* Get HIF default pipe for HTC message exchange */
45909diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
45910index 4716d33..a688310 100644
45911--- a/drivers/net/wireless/ath/ath10k/htc.h
45912+++ b/drivers/net/wireless/ath/ath10k/htc.h
45913@@ -271,13 +271,13 @@ enum ath10k_htc_ep_id {
45914
45915 struct ath10k_htc_ops {
45916 void (*target_send_suspend_complete)(struct ath10k *ar);
45917-};
45918+} __no_const;
45919
45920 struct ath10k_htc_ep_ops {
45921 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
45922 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
45923 void (*ep_tx_credits)(struct ath10k *);
45924-};
45925+} __no_const;
45926
45927 /* service connection information */
45928 struct ath10k_htc_svc_conn_req {
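
__no_const is the escape hatch from the patch's constify plugin: ops structures are made read-only wholesale, and the ones whose function pointers must stay assignable at runtime are annotated to remain writable. The htc.c hunk above shows the preferred direction instead, turning a runtime-filled callback struct into a fully static one that never needs a runtime store, as in this sketch:

struct hif_cb {
	int (*rx_completion)(void *ctx);
	int (*tx_completion)(void *ctx);
};

static int rx_done(void *ctx) { (void)ctx; return 0; }
static int tx_done(void *ctx) { (void)ctx; return 0; }

/* fully initialized at compile time, so it can live in read-only memory */
static const struct hif_cb htc_callbacks = {
	.rx_completion = rx_done,
	.tx_completion = tx_done,
};
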
45929diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
45930index a366d6b..b6f28f8 100644
45931--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
45932+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
45933@@ -218,8 +218,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
45934 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
45935 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
45936
45937- ACCESS_ONCE(ads->ds_link) = i->link;
45938- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
45939+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
45940+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
45941
45942 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
45943 ctl6 = SM(i->keytype, AR_EncrType);
45944@@ -233,26 +233,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
45945
45946 if ((i->is_first || i->is_last) &&
45947 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
45948- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
45949+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
45950 | set11nTries(i->rates, 1)
45951 | set11nTries(i->rates, 2)
45952 | set11nTries(i->rates, 3)
45953 | (i->dur_update ? AR_DurUpdateEna : 0)
45954 | SM(0, AR_BurstDur);
45955
45956- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
45957+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
45958 | set11nRate(i->rates, 1)
45959 | set11nRate(i->rates, 2)
45960 | set11nRate(i->rates, 3);
45961 } else {
45962- ACCESS_ONCE(ads->ds_ctl2) = 0;
45963- ACCESS_ONCE(ads->ds_ctl3) = 0;
45964+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
45965+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
45966 }
45967
45968 if (!i->is_first) {
45969- ACCESS_ONCE(ads->ds_ctl0) = 0;
45970- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
45971- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
45972+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
45973+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
45974+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
45975 return;
45976 }
45977
45978@@ -277,7 +277,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
45979 break;
45980 }
45981
45982- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
45983+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
45984 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
45985 | SM(i->txpower, AR_XmitPower)
45986 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
45987@@ -287,19 +287,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
45988 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
45989 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
45990
45991- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
45992- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
45993+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
45994+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
45995
45996 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
45997 return;
45998
45999- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
46000+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
46001 | set11nPktDurRTSCTS(i->rates, 1);
46002
46003- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
46004+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
46005 | set11nPktDurRTSCTS(i->rates, 3);
46006
46007- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
46008+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
46009 | set11nRateFlags(i->rates, 1)
46010 | set11nRateFlags(i->rates, 2)
46011 | set11nRateFlags(i->rates, 3)
46012diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
46013index f6c5c1b..6058354 100644
46014--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
46015+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
46016@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
46017 (i->qcu << AR_TxQcuNum_S) | desc_len;
46018
46019 checksum += val;
46020- ACCESS_ONCE(ads->info) = val;
46021+ ACCESS_ONCE_RW(ads->info) = val;
46022
46023 checksum += i->link;
46024- ACCESS_ONCE(ads->link) = i->link;
46025+ ACCESS_ONCE_RW(ads->link) = i->link;
46026
46027 checksum += i->buf_addr[0];
46028- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
46029+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
46030 checksum += i->buf_addr[1];
46031- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
46032+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
46033 checksum += i->buf_addr[2];
46034- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
46035+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
46036 checksum += i->buf_addr[3];
46037- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
46038+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
46039
46040 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
46041- ACCESS_ONCE(ads->ctl3) = val;
46042+ ACCESS_ONCE_RW(ads->ctl3) = val;
46043 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
46044- ACCESS_ONCE(ads->ctl5) = val;
46045+ ACCESS_ONCE_RW(ads->ctl5) = val;
46046 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
46047- ACCESS_ONCE(ads->ctl7) = val;
46048+ ACCESS_ONCE_RW(ads->ctl7) = val;
46049 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
46050- ACCESS_ONCE(ads->ctl9) = val;
46051+ ACCESS_ONCE_RW(ads->ctl9) = val;
46052
46053 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
46054- ACCESS_ONCE(ads->ctl10) = checksum;
46055+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
46056
46057 if (i->is_first || i->is_last) {
46058- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
46059+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
46060 | set11nTries(i->rates, 1)
46061 | set11nTries(i->rates, 2)
46062 | set11nTries(i->rates, 3)
46063 | (i->dur_update ? AR_DurUpdateEna : 0)
46064 | SM(0, AR_BurstDur);
46065
46066- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
46067+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
46068 | set11nRate(i->rates, 1)
46069 | set11nRate(i->rates, 2)
46070 | set11nRate(i->rates, 3);
46071 } else {
46072- ACCESS_ONCE(ads->ctl13) = 0;
46073- ACCESS_ONCE(ads->ctl14) = 0;
46074+ ACCESS_ONCE_RW(ads->ctl13) = 0;
46075+ ACCESS_ONCE_RW(ads->ctl14) = 0;
46076 }
46077
46078 ads->ctl20 = 0;
46079@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
46080
46081 ctl17 = SM(i->keytype, AR_EncrType);
46082 if (!i->is_first) {
46083- ACCESS_ONCE(ads->ctl11) = 0;
46084- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
46085- ACCESS_ONCE(ads->ctl15) = 0;
46086- ACCESS_ONCE(ads->ctl16) = 0;
46087- ACCESS_ONCE(ads->ctl17) = ctl17;
46088- ACCESS_ONCE(ads->ctl18) = 0;
46089- ACCESS_ONCE(ads->ctl19) = 0;
46090+ ACCESS_ONCE_RW(ads->ctl11) = 0;
46091+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
46092+ ACCESS_ONCE_RW(ads->ctl15) = 0;
46093+ ACCESS_ONCE_RW(ads->ctl16) = 0;
46094+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
46095+ ACCESS_ONCE_RW(ads->ctl18) = 0;
46096+ ACCESS_ONCE_RW(ads->ctl19) = 0;
46097 return;
46098 }
46099
46100- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
46101+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
46102 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
46103 | SM(i->txpower, AR_XmitPower)
46104 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
46105@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
46106 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
46107 ctl12 |= SM(val, AR_PAPRDChainMask);
46108
46109- ACCESS_ONCE(ads->ctl12) = ctl12;
46110- ACCESS_ONCE(ads->ctl17) = ctl17;
46111+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
46112+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
46113
46114- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
46115+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
46116 | set11nPktDurRTSCTS(i->rates, 1);
46117
46118- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
46119+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
46120 | set11nPktDurRTSCTS(i->rates, 3);
46121
46122- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
46123+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
46124 | set11nRateFlags(i->rates, 1)
46125 | set11nRateFlags(i->rates, 2)
46126 | set11nRateFlags(i->rates, 3)
46127 | SM(i->rtscts_rate, AR_RTSCTSRate);
46128
46129- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
46130+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
46131 }
46132
46133 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
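
ACCESS_ONCE_RW is the patch's variant of ACCESS_ONCE for locations its hardening would otherwise treat as read-only; the underlying mechanism is the same volatile cast that forces a single, untorn access. A GCC-style sketch of what such a macro expands to (the exact patch definition is assumed):

#define ACCESS_ONCE_SKETCH(x) (*(volatile __typeof__(x) *)&(x))

static unsigned int desc_word;

static void write_desc(unsigned int v)
{
	ACCESS_ONCE_SKETCH(desc_word) = v;	/* exactly one store, never merged */
}
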
46134diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
46135index a2c9a5d..b52273e 100644
46136--- a/drivers/net/wireless/ath/ath9k/hw.h
46137+++ b/drivers/net/wireless/ath/ath9k/hw.h
46138@@ -635,7 +635,7 @@ struct ath_hw_private_ops {
46139
46140 /* ANI */
46141 void (*ani_cache_ini_regs)(struct ath_hw *ah);
46142-};
46143+} __no_const;
46144
46145 /**
46146 * struct ath_spec_scan - parameters for Atheros spectral scan
46147@@ -711,7 +711,7 @@ struct ath_hw_ops {
46148 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
46149 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
46150 #endif
46151-};
46152+} __no_const;
46153
46154 struct ath_nf_limits {
46155 s16 max;
46156diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
46157index 92190da..f3a4c4c 100644
46158--- a/drivers/net/wireless/b43/phy_lp.c
46159+++ b/drivers/net/wireless/b43/phy_lp.c
46160@@ -2514,7 +2514,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
46161 {
46162 struct ssb_bus *bus = dev->dev->sdev->bus;
46163
46164- static const struct b206x_channel *chandata = NULL;
46165+ const struct b206x_channel *chandata = NULL;
46166 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
46167 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
46168 u16 old_comm15, scale;
46169diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
46170index dea3b50..543db99 100644
46171--- a/drivers/net/wireless/iwlegacy/3945-mac.c
46172+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
46173@@ -3639,7 +3639,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
46174 */
46175 if (il3945_mod_params.disable_hw_scan) {
46176 D_INFO("Disabling hw_scan\n");
46177- il3945_mac_ops.hw_scan = NULL;
46178+ pax_open_kernel();
46179+ *(void **)&il3945_mac_ops.hw_scan = NULL;
46180+ pax_close_kernel();
46181 }
46182
46183 D_INFO("*** LOAD DRIVER ***\n");
46184diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
46185index d94f8ab..5b568c8 100644
46186--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
46187+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
46188@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
46189 {
46190 struct iwl_priv *priv = file->private_data;
46191 char buf[64];
46192- int buf_size;
46193+ size_t buf_size;
46194 u32 offset, len;
46195
46196 memset(buf, 0, sizeof(buf));
46197@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
46198 struct iwl_priv *priv = file->private_data;
46199
46200 char buf[8];
46201- int buf_size;
46202+ size_t buf_size;
46203 u32 reset_flag;
46204
46205 memset(buf, 0, sizeof(buf));
46206@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
46207 {
46208 struct iwl_priv *priv = file->private_data;
46209 char buf[8];
46210- int buf_size;
46211+ size_t buf_size;
46212 int ht40;
46213
46214 memset(buf, 0, sizeof(buf));
46215@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
46216 {
46217 struct iwl_priv *priv = file->private_data;
46218 char buf[8];
46219- int buf_size;
46220+ size_t buf_size;
46221 int value;
46222
46223 memset(buf, 0, sizeof(buf));
46224@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
46225 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
46226 DEBUGFS_READ_FILE_OPS(current_sleep_command);
46227
46228-static const char *fmt_value = " %-30s %10u\n";
46229-static const char *fmt_hex = " %-30s 0x%02X\n";
46230-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
46231-static const char *fmt_header =
46232+static const char fmt_value[] = " %-30s %10u\n";
46233+static const char fmt_hex[] = " %-30s 0x%02X\n";
46234+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
46235+static const char fmt_header[] =
46236 "%-32s current cumulative delta max\n";
46237
46238 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
46239@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
46240 {
46241 struct iwl_priv *priv = file->private_data;
46242 char buf[8];
46243- int buf_size;
46244+ size_t buf_size;
46245 int clear;
46246
46247 memset(buf, 0, sizeof(buf));
46248@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
46249 {
46250 struct iwl_priv *priv = file->private_data;
46251 char buf[8];
46252- int buf_size;
46253+ size_t buf_size;
46254 int trace;
46255
46256 memset(buf, 0, sizeof(buf));
46257@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
46258 {
46259 struct iwl_priv *priv = file->private_data;
46260 char buf[8];
46261- int buf_size;
46262+ size_t buf_size;
46263 int missed;
46264
46265 memset(buf, 0, sizeof(buf));
46266@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
46267
46268 struct iwl_priv *priv = file->private_data;
46269 char buf[8];
46270- int buf_size;
46271+ size_t buf_size;
46272 int plcp;
46273
46274 memset(buf, 0, sizeof(buf));
46275@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
46276
46277 struct iwl_priv *priv = file->private_data;
46278 char buf[8];
46279- int buf_size;
46280+ size_t buf_size;
46281 int flush;
46282
46283 memset(buf, 0, sizeof(buf));
46284@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
46285
46286 struct iwl_priv *priv = file->private_data;
46287 char buf[8];
46288- int buf_size;
46289+ size_t buf_size;
46290 int rts;
46291
46292 if (!priv->cfg->ht_params)
46293@@ -2205,7 +2205,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
46294 {
46295 struct iwl_priv *priv = file->private_data;
46296 char buf[8];
46297- int buf_size;
46298+ size_t buf_size;
46299
46300 memset(buf, 0, sizeof(buf));
46301 buf_size = min(count, sizeof(buf) - 1);
46302@@ -2239,7 +2239,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
46303 struct iwl_priv *priv = file->private_data;
46304 u32 event_log_flag;
46305 char buf[8];
46306- int buf_size;
46307+ size_t buf_size;
46308
46309 /* check that the interface is up */
46310 if (!iwl_is_ready(priv))
46311@@ -2293,7 +2293,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
46312 struct iwl_priv *priv = file->private_data;
46313 char buf[8];
46314 u32 calib_disabled;
46315- int buf_size;
46316+ size_t buf_size;
46317
46318 memset(buf, 0, sizeof(buf));
46319 buf_size = min(count, sizeof(buf) - 1);
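
Every one of these debugfs writers stores min(count, sizeof(buf) - 1) into buf_size, so the value is already clamped; switching from int to size_t is defensive type hygiene that keeps the length unsigned end to end. The bug class it guards against, sketched for a 64-bit build:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t count = ((size_t)1 << 32) | 5;	/* huge count from userspace */
	int as_int = (int)count;		/* truncates; here to 5 */

	/* a later bound check against as_int now passes although the
	 * real request was over 4 GiB */
	printf("count=%zu as_int=%d\n", count, as_int);
	return 0;
}
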
46320diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
46321index 7aad766..06addb4 100644
46322--- a/drivers/net/wireless/iwlwifi/dvm/main.c
46323+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
46324@@ -1123,7 +1123,7 @@ static void iwl_option_config(struct iwl_priv *priv)
46325 static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
46326 {
46327 struct iwl_nvm_data *data = priv->nvm_data;
46328- char *debug_msg;
46329+ static const char debug_msg[] = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n";
46330
46331 if (data->sku_cap_11n_enable &&
46332 !priv->cfg->ht_params) {
46333@@ -1137,7 +1137,6 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
46334 return -EINVAL;
46335 }
46336
46337- debug_msg = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n";
46338 IWL_DEBUG_INFO(priv, debug_msg,
46339 data->sku_cap_band_24GHz_enable ? "" : "NOT", "enabled",
46340 data->sku_cap_band_52GHz_enable ? "" : "NOT", "enabled",
46341diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
46342index f53ef83..5e34bcb 100644
46343--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
46344+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
46345@@ -1390,7 +1390,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
46346 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
46347
46348 char buf[8];
46349- int buf_size;
46350+ size_t buf_size;
46351 u32 reset_flag;
46352
46353 memset(buf, 0, sizeof(buf));
46354@@ -1411,7 +1411,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
46355 {
46356 struct iwl_trans *trans = file->private_data;
46357 char buf[8];
46358- int buf_size;
46359+ size_t buf_size;
46360 int csr;
46361
46362 memset(buf, 0, sizeof(buf));
46363diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
46364index a1b32ee..94b3c3d 100644
46365--- a/drivers/net/wireless/mac80211_hwsim.c
46366+++ b/drivers/net/wireless/mac80211_hwsim.c
46367@@ -2224,25 +2224,19 @@ static int __init init_mac80211_hwsim(void)
46368
46369 if (channels > 1) {
46370 hwsim_if_comb.num_different_channels = channels;
46371- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
46372- mac80211_hwsim_ops.cancel_hw_scan =
46373- mac80211_hwsim_cancel_hw_scan;
46374- mac80211_hwsim_ops.sw_scan_start = NULL;
46375- mac80211_hwsim_ops.sw_scan_complete = NULL;
46376- mac80211_hwsim_ops.remain_on_channel =
46377- mac80211_hwsim_roc;
46378- mac80211_hwsim_ops.cancel_remain_on_channel =
46379- mac80211_hwsim_croc;
46380- mac80211_hwsim_ops.add_chanctx =
46381- mac80211_hwsim_add_chanctx;
46382- mac80211_hwsim_ops.remove_chanctx =
46383- mac80211_hwsim_remove_chanctx;
46384- mac80211_hwsim_ops.change_chanctx =
46385- mac80211_hwsim_change_chanctx;
46386- mac80211_hwsim_ops.assign_vif_chanctx =
46387- mac80211_hwsim_assign_vif_chanctx;
46388- mac80211_hwsim_ops.unassign_vif_chanctx =
46389- mac80211_hwsim_unassign_vif_chanctx;
46390+ pax_open_kernel();
46391+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
46392+ *(void **)&mac80211_hwsim_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
46393+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
46394+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
46395+ *(void **)&mac80211_hwsim_ops.remain_on_channel = mac80211_hwsim_roc;
46396+ *(void **)&mac80211_hwsim_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
46397+ *(void **)&mac80211_hwsim_ops.add_chanctx = mac80211_hwsim_add_chanctx;
46398+ *(void **)&mac80211_hwsim_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
46399+ *(void **)&mac80211_hwsim_ops.change_chanctx = mac80211_hwsim_change_chanctx;
46400+ *(void **)&mac80211_hwsim_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
46401+ *(void **)&mac80211_hwsim_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
46402+ pax_close_kernel();
46403 }
46404
46405 spin_lock_init(&hwsim_radio_lock);
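
With KERNEXEC the mac80211_hwsim_ops structure is write-protected, so this one-time setup brackets its pointer stores with pax_open_kernel()/pax_close_kernel() and casts through *(void **)& to strip the const. A userspace analogue using page protection (the pax_* helpers exist only in the patch):

#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	void *p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	memset(p, 0, psz);
	mprotect(p, psz, PROT_READ);			/* struct now "rodata" */

	mprotect(p, psz, PROT_READ | PROT_WRITE);	/* ~ pax_open_kernel() */
	*(uint64_t *)p = 42;				/* patch one pointer slot */
	mprotect(p, psz, PROT_READ);			/* ~ pax_close_kernel() */
	return 0;
}
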
46406diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
46407index 8169a85..7fa3b47 100644
46408--- a/drivers/net/wireless/rndis_wlan.c
46409+++ b/drivers/net/wireless/rndis_wlan.c
46410@@ -1238,7 +1238,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
46411
46412 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
46413
46414- if (rts_threshold < 0 || rts_threshold > 2347)
46415+ if (rts_threshold > 2347)
46416 rts_threshold = 2347;
46417
46418 tmp = cpu_to_le32(rts_threshold);
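
rts_threshold is a u32, so the removed "rts_threshold < 0" arm could never be true; the hunk deletes dead code of the kind compilers flag under -Wtype-limits. A minimal illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rts_threshold = 5000;

	if (rts_threshold > 2347)	/* the only branch that can fire */
		rts_threshold = 2347;
	printf("%u\n", rts_threshold);
	return 0;
}
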
46419diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
46420index e4ba2ce..63d7417 100644
46421--- a/drivers/net/wireless/rt2x00/rt2x00.h
46422+++ b/drivers/net/wireless/rt2x00/rt2x00.h
46423@@ -377,7 +377,7 @@ struct rt2x00_intf {
46424 * for hardware which doesn't support hardware
46425 * sequence counting.
46426 */
46427- atomic_t seqno;
46428+ atomic_unchecked_t seqno;
46429 };
46430
46431 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
46432diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
46433index a5d38e8..d3c24ea 100644
46434--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
46435+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
46436@@ -252,9 +252,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
46437 * sequence counter given by mac80211.
46438 */
46439 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
46440- seqno = atomic_add_return(0x10, &intf->seqno);
46441+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
46442 else
46443- seqno = atomic_read(&intf->seqno);
46444+ seqno = atomic_read_unchecked(&intf->seqno);
46445
46446 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
46447 hdr->seq_ctrl |= cpu_to_le16(seqno);
46448diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
46449index e2b3d9c..67a5184 100644
46450--- a/drivers/net/wireless/ti/wl1251/sdio.c
46451+++ b/drivers/net/wireless/ti/wl1251/sdio.c
46452@@ -271,13 +271,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
46453
46454 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
46455
46456- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
46457- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
46458+ pax_open_kernel();
46459+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
46460+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
46461+ pax_close_kernel();
46462
46463 wl1251_info("using dedicated interrupt line");
46464 } else {
46465- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
46466- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
46467+ pax_open_kernel();
46468+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
46469+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
46470+ pax_close_kernel();
46471
46472 wl1251_info("using SDIO interrupt");
46473 }
46474diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
46475index be7129b..4161356 100644
46476--- a/drivers/net/wireless/ti/wl12xx/main.c
46477+++ b/drivers/net/wireless/ti/wl12xx/main.c
46478@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
46479 sizeof(wl->conf.mem));
46480
46481 /* read data preparation is only needed by wl127x */
46482- wl->ops->prepare_read = wl127x_prepare_read;
46483+ pax_open_kernel();
46484+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
46485+ pax_close_kernel();
46486
46487 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
46488 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
46489@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
46490 sizeof(wl->conf.mem));
46491
46492 /* read data preparation is only needed by wl127x */
46493- wl->ops->prepare_read = wl127x_prepare_read;
46494+ pax_open_kernel();
46495+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
46496+ pax_close_kernel();
46497
46498 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
46499 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
46500diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
46501index ec37b16..7e34d66 100644
46502--- a/drivers/net/wireless/ti/wl18xx/main.c
46503+++ b/drivers/net/wireless/ti/wl18xx/main.c
46504@@ -1823,8 +1823,10 @@ static int wl18xx_setup(struct wl1271 *wl)
46505 }
46506
46507 if (!checksum_param) {
46508- wl18xx_ops.set_rx_csum = NULL;
46509- wl18xx_ops.init_vif = NULL;
46510+ pax_open_kernel();
46511+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
46512+ *(void **)&wl18xx_ops.init_vif = NULL;
46513+ pax_close_kernel();
46514 }
46515
46516 /* Enable 11a Band only if we have 5G antennas */
46517diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
46518index 84d94f5..bd6c61c 100644
46519--- a/drivers/net/wireless/zd1211rw/zd_usb.c
46520+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
46521@@ -386,7 +386,7 @@ static inline void handle_regs_int(struct urb *urb)
46522 {
46523 struct zd_usb *usb = urb->context;
46524 struct zd_usb_interrupt *intr = &usb->intr;
46525- int len;
46526+ unsigned int len;
46527 u16 int_num;
46528
46529 ZD_ASSERT(in_interrupt());
46530diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
46531index 7130864..00e64de 100644
46532--- a/drivers/nfc/nfcwilink.c
46533+++ b/drivers/nfc/nfcwilink.c
46534@@ -498,7 +498,7 @@ static struct nci_ops nfcwilink_ops = {
46535
46536 static int nfcwilink_probe(struct platform_device *pdev)
46537 {
46538- static struct nfcwilink *drv;
46539+ struct nfcwilink *drv;
46540 int rc;
46541 __u32 protocols;
46542
46543diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
46544index d93b2b6..ae50401 100644
46545--- a/drivers/oprofile/buffer_sync.c
46546+++ b/drivers/oprofile/buffer_sync.c
46547@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
46548 if (cookie == NO_COOKIE)
46549 offset = pc;
46550 if (cookie == INVALID_COOKIE) {
46551- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
46552+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
46553 offset = pc;
46554 }
46555 if (cookie != last_cookie) {
46556@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
46557 /* add userspace sample */
46558
46559 if (!mm) {
46560- atomic_inc(&oprofile_stats.sample_lost_no_mm);
46561+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
46562 return 0;
46563 }
46564
46565 cookie = lookup_dcookie(mm, s->eip, &offset);
46566
46567 if (cookie == INVALID_COOKIE) {
46568- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
46569+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
46570 return 0;
46571 }
46572
46573@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
46574 /* ignore backtraces if failed to add a sample */
46575 if (state == sb_bt_start) {
46576 state = sb_bt_ignore;
46577- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
46578+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
46579 }
46580 }
46581 release_mm(mm);
46582diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
46583index c0cc4e7..44d4e54 100644
46584--- a/drivers/oprofile/event_buffer.c
46585+++ b/drivers/oprofile/event_buffer.c
46586@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
46587 }
46588
46589 if (buffer_pos == buffer_size) {
46590- atomic_inc(&oprofile_stats.event_lost_overflow);
46591+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
46592 return;
46593 }
46594
46595diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
46596index ed2c3ec..deda85a 100644
46597--- a/drivers/oprofile/oprof.c
46598+++ b/drivers/oprofile/oprof.c
46599@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
46600 if (oprofile_ops.switch_events())
46601 return;
46602
46603- atomic_inc(&oprofile_stats.multiplex_counter);
46604+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
46605 start_switch_worker();
46606 }
46607
46608diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
46609index ee2cfce..7f8f699 100644
46610--- a/drivers/oprofile/oprofile_files.c
46611+++ b/drivers/oprofile/oprofile_files.c
46612@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
46613
46614 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
46615
46616-static ssize_t timeout_read(struct file *file, char __user *buf,
46617+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
46618 size_t count, loff_t *offset)
46619 {
46620 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
46621diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
46622index 59659ce..6c860a0 100644
46623--- a/drivers/oprofile/oprofile_stats.c
46624+++ b/drivers/oprofile/oprofile_stats.c
46625@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
46626 cpu_buf->sample_invalid_eip = 0;
46627 }
46628
46629- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
46630- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
46631- atomic_set(&oprofile_stats.event_lost_overflow, 0);
46632- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
46633- atomic_set(&oprofile_stats.multiplex_counter, 0);
46634+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
46635+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
46636+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
46637+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
46638+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
46639 }
46640
46641
46642diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
46643index 1fc622b..8c48fc3 100644
46644--- a/drivers/oprofile/oprofile_stats.h
46645+++ b/drivers/oprofile/oprofile_stats.h
46646@@ -13,11 +13,11 @@
46647 #include <linux/atomic.h>
46648
46649 struct oprofile_stat_struct {
46650- atomic_t sample_lost_no_mm;
46651- atomic_t sample_lost_no_mapping;
46652- atomic_t bt_lost_no_mapping;
46653- atomic_t event_lost_overflow;
46654- atomic_t multiplex_counter;
46655+ atomic_unchecked_t sample_lost_no_mm;
46656+ atomic_unchecked_t sample_lost_no_mapping;
46657+ atomic_unchecked_t bt_lost_no_mapping;
46658+ atomic_unchecked_t event_lost_overflow;
46659+ atomic_unchecked_t multiplex_counter;
46660 };
46661
46662 extern struct oprofile_stat_struct oprofile_stats;
46663diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
46664index 3f49345..c750d0b 100644
46665--- a/drivers/oprofile/oprofilefs.c
46666+++ b/drivers/oprofile/oprofilefs.c
46667@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
46668
46669 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
46670 {
46671- atomic_t *val = file->private_data;
46672- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
46673+ atomic_unchecked_t *val = file->private_data;
46674+ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
46675 }
46676
46677
46678@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
46679
46680
46681 int oprofilefs_create_ro_atomic(struct dentry *root,
46682- char const *name, atomic_t *val)
46683+ char const *name, atomic_unchecked_t *val)
46684 {
46685 return __oprofilefs_create_file(root, name,
46686 &atomic_ro_fops, 0444, val);
46687diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
46688index 61be1d9..dec05d7 100644
46689--- a/drivers/oprofile/timer_int.c
46690+++ b/drivers/oprofile/timer_int.c
46691@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
46692 return NOTIFY_OK;
46693 }
46694
46695-static struct notifier_block __refdata oprofile_cpu_notifier = {
46696+static struct notifier_block oprofile_cpu_notifier = {
46697 .notifier_call = oprofile_cpu_notify,
46698 };
46699
46700diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
46701index 92ed045..62d39bd7 100644
46702--- a/drivers/parport/procfs.c
46703+++ b/drivers/parport/procfs.c
46704@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
46705
46706 *ppos += len;
46707
46708- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
46709+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
46710 }
46711
46712 #ifdef CONFIG_PARPORT_1284
46713@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
46714
46715 *ppos += len;
46716
46717- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
46718+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
46719 }
46720 #endif /* IEEE1284.3 support. */
46721
46722diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
46723index ecfac7e..41be7028 100644
46724--- a/drivers/pci/hotplug/acpiphp_ibm.c
46725+++ b/drivers/pci/hotplug/acpiphp_ibm.c
46726@@ -453,7 +453,9 @@ static int __init ibm_acpiphp_init(void)
46727 goto init_cleanup;
46728 }
46729
46730- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
46731+ pax_open_kernel();
46732+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
46733+ pax_close_kernel();
46734 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
46735
46736 return retval;
46737diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
46738index 7536eef..52dc8fa 100644
46739--- a/drivers/pci/hotplug/cpcihp_generic.c
46740+++ b/drivers/pci/hotplug/cpcihp_generic.c
46741@@ -73,7 +73,6 @@ static u16 port;
46742 static unsigned int enum_bit;
46743 static u8 enum_mask;
46744
46745-static struct cpci_hp_controller_ops generic_hpc_ops;
46746 static struct cpci_hp_controller generic_hpc;
46747
46748 static int __init validate_parameters(void)
46749@@ -139,6 +138,10 @@ static int query_enum(void)
46750 return ((value & enum_mask) == enum_mask);
46751 }
46752
46753+static struct cpci_hp_controller_ops generic_hpc_ops = {
46754+ .query_enum = query_enum,
46755+};
46756+
46757 static int __init cpcihp_generic_init(void)
46758 {
46759 int status;
46760@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
46761 pci_dev_put(dev);
46762
46763 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
46764- generic_hpc_ops.query_enum = query_enum;
46765 generic_hpc.ops = &generic_hpc_ops;
46766
46767 status = cpci_hp_register_controller(&generic_hpc);
46768diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
46769index e8c4a7c..7046f5c 100644
46770--- a/drivers/pci/hotplug/cpcihp_zt5550.c
46771+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
46772@@ -59,7 +59,6 @@
46773 /* local variables */
46774 static bool debug;
46775 static bool poll;
46776-static struct cpci_hp_controller_ops zt5550_hpc_ops;
46777 static struct cpci_hp_controller zt5550_hpc;
46778
46779 /* Primary cPCI bus bridge device */
46780@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
46781 return 0;
46782 }
46783
46784+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
46785+ .query_enum = zt5550_hc_query_enum,
46786+};
46787+
46788 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
46789 {
46790 int status;
46791@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
46792 dbg("returned from zt5550_hc_config");
46793
46794 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
46795- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
46796 zt5550_hpc.ops = &zt5550_hpc_ops;
46797 if(!poll) {
46798 zt5550_hpc.irq = hc_dev->irq;
46799 zt5550_hpc.irq_flags = IRQF_SHARED;
46800 zt5550_hpc.dev_id = hc_dev;
46801
46802- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
46803- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
46804- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
46805+ pax_open_kernel();
46806+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
46807+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
46808+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
46809+ pax_close_kernel();
46810 } else {
46811 info("using ENUM# polling mode");
46812 }
46813diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
46814index 76ba8a1..20ca857 100644
46815--- a/drivers/pci/hotplug/cpqphp_nvram.c
46816+++ b/drivers/pci/hotplug/cpqphp_nvram.c
46817@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
46818
46819 void compaq_nvram_init (void __iomem *rom_start)
46820 {
46821+
46822+#ifndef CONFIG_PAX_KERNEXEC
46823 if (rom_start) {
46824 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
46825 }
46826+#endif
46827+
46828 dbg("int15 entry = %p\n", compaq_int15_entry_point);
46829
46830 /* initialize our int15 lock */
46831diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
46832index cfa92a9..29539c5 100644
46833--- a/drivers/pci/hotplug/pci_hotplug_core.c
46834+++ b/drivers/pci/hotplug/pci_hotplug_core.c
46835@@ -441,8 +441,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
46836 return -EINVAL;
46837 }
46838
46839- slot->ops->owner = owner;
46840- slot->ops->mod_name = mod_name;
46841+ pax_open_kernel();
46842+ *(struct module **)&slot->ops->owner = owner;
46843+ *(const char **)&slot->ops->mod_name = mod_name;
46844+ pax_close_kernel();
46845
46846 mutex_lock(&pci_hp_mutex);
46847 /*
46848diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
46849index bbd48bb..6907ef4 100644
46850--- a/drivers/pci/hotplug/pciehp_core.c
46851+++ b/drivers/pci/hotplug/pciehp_core.c
46852@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
46853 struct slot *slot = ctrl->slot;
46854 struct hotplug_slot *hotplug = NULL;
46855 struct hotplug_slot_info *info = NULL;
46856- struct hotplug_slot_ops *ops = NULL;
46857+ hotplug_slot_ops_no_const *ops = NULL;
46858 char name[SLOT_NAME_SIZE];
46859 int retval = -ENOMEM;
46860
46861diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
46862index c91e6c1..5c723ef 100644
46863--- a/drivers/pci/pci-sysfs.c
46864+++ b/drivers/pci/pci-sysfs.c
46865@@ -1117,7 +1117,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
46866 {
46867 /* allocate attribute structure, piggyback attribute name */
46868 int name_len = write_combine ? 13 : 10;
46869- struct bin_attribute *res_attr;
46870+ bin_attribute_no_const *res_attr;
46871 int retval;
46872
46873 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
46874@@ -1302,7 +1302,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
46875 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
46876 {
46877 int retval;
46878- struct bin_attribute *attr;
46879+ bin_attribute_no_const *attr;
46880
46881 /* If the device has VPD, try to expose it in sysfs. */
46882 if (dev->vpd) {
46883@@ -1349,7 +1349,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
46884 {
46885 int retval;
46886 int rom_size = 0;
46887- struct bin_attribute *attr;
46888+ bin_attribute_no_const *attr;
46889
46890 if (!sysfs_initialized)
46891 return -EACCES;
46892diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
46893index 9c91ecc..bda4796 100644
46894--- a/drivers/pci/pci.h
46895+++ b/drivers/pci/pci.h
46896@@ -95,7 +95,7 @@ struct pci_vpd_ops {
46897 struct pci_vpd {
46898 unsigned int len;
46899 const struct pci_vpd_ops *ops;
46900- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
46901+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
46902 };
46903
46904 int pci_vpd_pci22_init(struct pci_dev *dev);
46905diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
46906index f1272dc..e92a1ac 100644
46907--- a/drivers/pci/pcie/aspm.c
46908+++ b/drivers/pci/pcie/aspm.c
46909@@ -27,9 +27,9 @@
46910 #define MODULE_PARAM_PREFIX "pcie_aspm."
46911
46912 /* Note: those are not register definitions */
46913-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
46914-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
46915-#define ASPM_STATE_L1 (4) /* L1 state */
46916+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
46917+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
46918+#define ASPM_STATE_L1 (4U) /* L1 state */
46919 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
46920 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
46921
46922diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
46923index 38e403d..a2ce55a 100644
46924--- a/drivers/pci/probe.c
46925+++ b/drivers/pci/probe.c
46926@@ -175,7 +175,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
46927 struct pci_bus_region region, inverted_region;
46928 bool bar_too_big = false, bar_disabled = false;
46929
46930- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
46931+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
46932
46933 /* No printks while decoding is disabled! */
46934 if (!dev->mmio_always_on) {
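
PCI_ROM_ADDRESS_MASK is an unsigned long constant in this tree, so assigning the ternary's result to the 32-bit mask truncates either way; the explicit (u32) cast keeps the value identical while making the narrowing intentional and visible to the patch's overflow checking. A sketch, with the mask definition assumed:

#include <stdio.h>
#include <stdint.h>

#define ROM_MASK (~0x7ffUL)	/* assumed stand-in for PCI_ROM_ADDRESS_MASK */

int main(void)
{
	int type = 1;
	uint32_t mask = type ? (uint32_t)ROM_MASK : ~0u;
	printf("0x%08x\n", mask);	/* 0xfffff800 */
	return 0;
}
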
46935diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
46936index 46d1378..30e452b 100644
46937--- a/drivers/pci/proc.c
46938+++ b/drivers/pci/proc.c
46939@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
46940 static int __init pci_proc_init(void)
46941 {
46942 struct pci_dev *dev = NULL;
46943+
46944+#ifdef CONFIG_GRKERNSEC_PROC_ADD
46945+#ifdef CONFIG_GRKERNSEC_PROC_USER
46946+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
46947+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46948+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
46949+#endif
46950+#else
46951 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
46952+#endif
46953 proc_create("devices", 0, proc_bus_pci_dir,
46954 &proc_bus_pci_dev_operations);
46955 proc_initialized = 1;
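
Under GRKERNSEC_PROC the /proc/bus/pci directory is created with a restricted mode instead of the default: S_IRUSR | S_IXUSR is 0500 (root only), and adding the group bits gives 0550. A quick check of the mode arithmetic:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	printf("user only:  %04o\n", S_IRUSR | S_IXUSR);
	printf("user+group: %04o\n",
	       S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP);
	return 0;
}
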
46956diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
46957index 3e5b4497..dcdfb70 100644
46958--- a/drivers/platform/chrome/chromeos_laptop.c
46959+++ b/drivers/platform/chrome/chromeos_laptop.c
46960@@ -301,7 +301,7 @@ static int __init setup_tsl2563_als(const struct dmi_system_id *id)
46961 return 0;
46962 }
46963
46964-static struct dmi_system_id __initdata chromeos_laptop_dmi_table[] = {
46965+static struct dmi_system_id __initconst chromeos_laptop_dmi_table[] = {
46966 {
46967 .ident = "Samsung Series 5 550 - Touchpad",
46968 .matches = {
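The chromeos_laptop hunk retags the DMI table from __initdata to __initconst. With the constify plugin treating such tables as read-only data, the object has to land in the read-only init section, otherwise the const qualifier and the section annotation conflict at build time. A compilable sketch, assuming hypothetical stand-ins for the two kernel section macros:

```c
#include <stdio.h>

#define __initdata  __attribute__((__section__(".init.data")))
#define __initconst __attribute__((__section__(".init.rodata")))

struct dmi_system_id { const char *ident; };

/* const object in the read-only init section: annotations agree */
static const struct dmi_system_id dmi_table[] __initconst = {
	{ .ident = "Samsung Series 5 550 - Touchpad" },
	{ NULL }   /* sentinel */
};

int main(void)
{
	printf("%s\n", dmi_table[0].ident);
	return 0;
}
```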
46969diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
46970index 19c313b..ed28b38 100644
46971--- a/drivers/platform/x86/asus-wmi.c
46972+++ b/drivers/platform/x86/asus-wmi.c
46973@@ -1618,6 +1618,10 @@ static int show_dsts(struct seq_file *m, void *data)
46974 int err;
46975 u32 retval = -1;
46976
46977+#ifdef CONFIG_GRKERNSEC_KMEM
46978+ return -EPERM;
46979+#endif
46980+
46981 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
46982
46983 if (err < 0)
46984@@ -1634,6 +1638,10 @@ static int show_devs(struct seq_file *m, void *data)
46985 int err;
46986 u32 retval = -1;
46987
46988+#ifdef CONFIG_GRKERNSEC_KMEM
46989+ return -EPERM;
46990+#endif
46991+
46992 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
46993 &retval);
46994
46995@@ -1658,6 +1666,10 @@ static int show_call(struct seq_file *m, void *data)
46996 union acpi_object *obj;
46997 acpi_status status;
46998
46999+#ifdef CONFIG_GRKERNSEC_KMEM
47000+ return -EPERM;
47001+#endif
47002+
47003 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
47004 1, asus->debug.method_id,
47005 &input, &output);
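All three asus-wmi debugfs handlers above gain the same guard: with CONFIG_GRKERNSEC_KMEM set they return -EPERM before evaluating any WMI method, closing a debug channel into the firmware. A stand-alone sketch of the pattern, with the seq_file plumbing dropped and a hypothetical query_hw_state() standing in for asus_wmi_get_devstate():

```c
#include <stdio.h>

#define EPERM 1
#define CONFIG_GRKERNSEC_KMEM   /* comment out to re-enable the interface */

/* hypothetical stand-in for the real WMI query */
static int query_hw_state(unsigned int *out) { *out = 0x42; return 0; }

static int show_dsts(void)
{
	unsigned int v;

#ifdef CONFIG_GRKERNSEC_KMEM
	return -EPERM;          /* refuse before touching the hardware */
#endif
	if (query_hw_state(&v) < 0)
		return -1;
	printf("DSTS: %#x\n", v);
	return 0;
}

int main(void)
{
	return show_dsts() == -EPERM ? 0 : 1;
}
```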
47006diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
47007index 62f8030..c7f2a45 100644
47008--- a/drivers/platform/x86/msi-laptop.c
47009+++ b/drivers/platform/x86/msi-laptop.c
47010@@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
47011
47012 if (!quirks->ec_read_only) {
47013 /* allow userland write sysfs file */
47014- dev_attr_bluetooth.store = store_bluetooth;
47015- dev_attr_wlan.store = store_wlan;
47016- dev_attr_threeg.store = store_threeg;
47017- dev_attr_bluetooth.attr.mode |= S_IWUSR;
47018- dev_attr_wlan.attr.mode |= S_IWUSR;
47019- dev_attr_threeg.attr.mode |= S_IWUSR;
47020+ pax_open_kernel();
47021+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
47022+ *(void **)&dev_attr_wlan.store = store_wlan;
47023+ *(void **)&dev_attr_threeg.store = store_threeg;
47024+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
47025+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
47026+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
47027+ pax_close_kernel();
47028 }
47029
47030 /* disable hardware control by fn key */
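This is the patch's standard idiom for writing to structures that the constify plugin has made read-only: pax_open_kernel() opens a short write window (on x86 by clearing CR0.WP), the const qualifier is cast away for the individual stores, and pax_close_kernel() re-arms the protection. The same window recurs below in the pnpbios, powercap, regulator, rtc and lpfc hunks. A stand-alone sketch, assuming no-op stubs for the two PaX helpers and a cut-down dev_attr in place of struct device_attribute; here the object itself is writable and only the view is const, so the casts stay well defined:

```c
#include <stdio.h>

#define pax_open_kernel()  do { } while (0)  /* stub: really opens a write window */
#define pax_close_kernel() do { } while (0)  /* stub: really closes it */

struct dev_attr {
	unsigned int mode;
	void (*store)(const char *buf);
};

static void store_wlan(const char *buf) { printf("wlan <- %s\n", buf); }

static struct dev_attr wlan_attr = { .mode = 0444 };

static void enable_store(const struct dev_attr *attr)
{
	struct dev_attr *w = (struct dev_attr *)attr;  /* cast const away */

	pax_open_kernel();
	w->store = store_wlan;
	w->mode |= 0200;                               /* S_IWUSR */
	pax_close_kernel();
}

int main(void)
{
	enable_store(&wlan_attr);
	wlan_attr.store("on");
	printf("mode: %04o\n", wlan_attr.mode);
	return 0;
}
```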
47031diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
47032index 70222f2..8c8ce66 100644
47033--- a/drivers/platform/x86/msi-wmi.c
47034+++ b/drivers/platform/x86/msi-wmi.c
47035@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
47036 static void msi_wmi_notify(u32 value, void *context)
47037 {
47038 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
47039- static struct key_entry *key;
47040+ struct key_entry *key;
47041 union acpi_object *obj;
47042 acpi_status status;
47043
47044diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
47045index fb233ae..23a325c 100644
47046--- a/drivers/platform/x86/sony-laptop.c
47047+++ b/drivers/platform/x86/sony-laptop.c
47048@@ -2453,7 +2453,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
47049 }
47050
47051 /* High speed charging function */
47052-static struct device_attribute *hsc_handle;
47053+static device_attribute_no_const *hsc_handle;
47054
47055 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
47056 struct device_attribute *attr,
47057diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
47058index 58b0274..6704626 100644
47059--- a/drivers/platform/x86/thinkpad_acpi.c
47060+++ b/drivers/platform/x86/thinkpad_acpi.c
47061@@ -2100,7 +2100,7 @@ static int hotkey_mask_get(void)
47062 return 0;
47063 }
47064
47065-void static hotkey_mask_warn_incomplete_mask(void)
47066+static void hotkey_mask_warn_incomplete_mask(void)
47067 {
47068 /* log only what the user can fix... */
47069 const u32 wantedmask = hotkey_driver_mask &
47070@@ -2327,11 +2327,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
47071 }
47072 }
47073
47074-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
47075- struct tp_nvram_state *newn,
47076- const u32 event_mask)
47077-{
47078-
47079 #define TPACPI_COMPARE_KEY(__scancode, __member) \
47080 do { \
47081 if ((event_mask & (1 << __scancode)) && \
47082@@ -2345,36 +2340,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
47083 tpacpi_hotkey_send_key(__scancode); \
47084 } while (0)
47085
47086- void issue_volchange(const unsigned int oldvol,
47087- const unsigned int newvol)
47088- {
47089- unsigned int i = oldvol;
47090+static void issue_volchange(const unsigned int oldvol,
47091+ const unsigned int newvol,
47092+ const u32 event_mask)
47093+{
47094+ unsigned int i = oldvol;
47095
47096- while (i > newvol) {
47097- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
47098- i--;
47099- }
47100- while (i < newvol) {
47101- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
47102- i++;
47103- }
47104+ while (i > newvol) {
47105+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
47106+ i--;
47107 }
47108+ while (i < newvol) {
47109+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
47110+ i++;
47111+ }
47112+}
47113
47114- void issue_brightnesschange(const unsigned int oldbrt,
47115- const unsigned int newbrt)
47116- {
47117- unsigned int i = oldbrt;
47118+static void issue_brightnesschange(const unsigned int oldbrt,
47119+ const unsigned int newbrt,
47120+ const u32 event_mask)
47121+{
47122+ unsigned int i = oldbrt;
47123
47124- while (i > newbrt) {
47125- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
47126- i--;
47127- }
47128- while (i < newbrt) {
47129- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
47130- i++;
47131- }
47132+ while (i > newbrt) {
47133+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
47134+ i--;
47135+ }
47136+ while (i < newbrt) {
47137+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
47138+ i++;
47139 }
47140+}
47141
47142+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
47143+ struct tp_nvram_state *newn,
47144+ const u32 event_mask)
47145+{
47146 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
47147 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
47148 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
47149@@ -2408,7 +2409,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
47150 oldn->volume_level != newn->volume_level) {
47151 /* recently muted, or repeated mute keypress, or
47152 * multiple presses ending in mute */
47153- issue_volchange(oldn->volume_level, newn->volume_level);
47154+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
47155 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
47156 }
47157 } else {
47158@@ -2418,7 +2419,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
47159 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
47160 }
47161 if (oldn->volume_level != newn->volume_level) {
47162- issue_volchange(oldn->volume_level, newn->volume_level);
47163+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
47164 } else if (oldn->volume_toggle != newn->volume_toggle) {
47165 /* repeated vol up/down keypress at end of scale ? */
47166 if (newn->volume_level == 0)
47167@@ -2431,7 +2432,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
47168 /* handle brightness */
47169 if (oldn->brightness_level != newn->brightness_level) {
47170 issue_brightnesschange(oldn->brightness_level,
47171- newn->brightness_level);
47172+ newn->brightness_level,
47173+ event_mask);
47174 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
47175 /* repeated key presses that didn't change state */
47176 if (newn->brightness_level == 0)
47177@@ -2440,10 +2442,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
47178 && !tp_features.bright_unkfw)
47179 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
47180 }
47181+}
47182
47183 #undef TPACPI_COMPARE_KEY
47184 #undef TPACPI_MAY_SEND_KEY
47185-}
47186
47187 /*
47188 * Polling driver
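The thinkpad_acpi hunk hoists two GCC nested functions out of hotkey_compare_and_issue_event() into file-scope statics, passing the previously captured event_mask explicitly. Nested functions can require executable-stack trampolines when their address escapes, and grsecurity avoids the construct altogether since it enforces non-executable stacks. The refactor in miniature, with printf() standing in for tpacpi_hotkey_send_key():

```c
#include <stdio.h>

/* was a nested function capturing event_mask; now it is passed in */
static void issue_volchange(unsigned int oldvol, unsigned int newvol,
			    unsigned int event_mask)
{
	unsigned int i = oldvol;

	while (i > newvol) {
		if (event_mask & 1)
			printf("key: volume-down\n");
		i--;
	}
	while (i < newvol) {
		if (event_mask & 1)
			printf("key: volume-up\n");
		i++;
	}
}

int main(void)
{
	issue_volchange(5, 2, 1);   /* three volume-down events */
	issue_volchange(2, 4, 1);   /* two volume-up events */
	return 0;
}
```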
47189diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
47190index 769d265..a3a05ca 100644
47191--- a/drivers/pnp/pnpbios/bioscalls.c
47192+++ b/drivers/pnp/pnpbios/bioscalls.c
47193@@ -58,7 +58,7 @@ do { \
47194 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
47195 } while(0)
47196
47197-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
47198+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
47199 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
47200
47201 /*
47202@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
47203
47204 cpu = get_cpu();
47205 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
47206+
47207+ pax_open_kernel();
47208 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
47209+ pax_close_kernel();
47210
47211 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
47212 spin_lock_irqsave(&pnp_bios_lock, flags);
47213@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
47214 :"memory");
47215 spin_unlock_irqrestore(&pnp_bios_lock, flags);
47216
47217+ pax_open_kernel();
47218 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
47219+ pax_close_kernel();
47220+
47221 put_cpu();
47222
47223 /* If we get here and this is set then the PnP BIOS faulted on us. */
47224@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
47225 return status;
47226 }
47227
47228-void pnpbios_calls_init(union pnp_bios_install_struct *header)
47229+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
47230 {
47231 int i;
47232
47233@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
47234 pnp_bios_callpoint.offset = header->fields.pm16offset;
47235 pnp_bios_callpoint.segment = PNP_CS16;
47236
47237+ pax_open_kernel();
47238+
47239 for_each_possible_cpu(i) {
47240 struct desc_struct *gdt = get_cpu_gdt_table(i);
47241 if (!gdt)
47242@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
47243 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
47244 (unsigned long)__va(header->fields.pm16dseg));
47245 }
47246+
47247+ pax_close_kernel();
47248 }
47249diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
47250index d95e101..67f0c3f 100644
47251--- a/drivers/pnp/resource.c
47252+++ b/drivers/pnp/resource.c
47253@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
47254 return 1;
47255
47256 /* check if the resource is valid */
47257- if (*irq < 0 || *irq > 15)
47258+ if (*irq > 15)
47259 return 0;
47260
47261 /* check if the resource is reserved */
47262@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
47263 return 1;
47264
47265 /* check if the resource is valid */
47266- if (*dma < 0 || *dma == 4 || *dma > 7)
47267+ if (*dma == 4 || *dma > 7)
47268 return 0;
47269
47270 /* check if the resource is reserved */
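Both hunks in pnp/resource.c drop the `< 0` half of a range check. The resource start values are unsigned, so `*irq < 0` can never be true; the remaining upper-bound test already rejects every invalid value, and the dead comparison only draws a compiler warning. A compilable demonstration of why the dropped test was a no-op:

```c
#include <stdio.h>

static int irq_is_valid(unsigned long irq)
{
	/* "irq < 0" would be constant-false for an unsigned type;
	 * the upper bound alone rejects the whole invalid range */
	return irq <= 15;
}

int main(void)
{
	printf("0 -> %d, 15 -> %d, 16 -> %d, (unsigned)-1 -> %d\n",
	       irq_is_valid(0), irq_is_valid(15), irq_is_valid(16),
	       irq_is_valid((unsigned long)-1));
	return 0;
}
```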
47271diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
47272index 0c52e2a..3421ab7 100644
47273--- a/drivers/power/pda_power.c
47274+++ b/drivers/power/pda_power.c
47275@@ -37,7 +37,11 @@ static int polling;
47276
47277 #if IS_ENABLED(CONFIG_USB_PHY)
47278 static struct usb_phy *transceiver;
47279-static struct notifier_block otg_nb;
47280+static int otg_handle_notification(struct notifier_block *nb,
47281+ unsigned long event, void *unused);
47282+static struct notifier_block otg_nb = {
47283+ .notifier_call = otg_handle_notification
47284+};
47285 #endif
47286
47287 static struct regulator *ac_draw;
47288@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
47289
47290 #if IS_ENABLED(CONFIG_USB_PHY)
47291 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
47292- otg_nb.notifier_call = otg_handle_notification;
47293 ret = usb_register_notifier(transceiver, &otg_nb);
47294 if (ret) {
47295 dev_err(dev, "failure to register otg notifier\n");
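pda_power moves the notifier callback assignment from probe time into a static initializer, using a forward declaration so the structure can be fully initialized, and eventually constified, at compile time. The shape of the change, reduced to a struct notifier_block stand-in carrying only the one field used here:

```c
#include <stdio.h>

struct notifier_block {
	int (*notifier_call)(struct notifier_block *nb,
			     unsigned long event, void *data);
};

/* forward declaration lets the static initializer reference it */
static int otg_handle_notification(struct notifier_block *nb,
				   unsigned long event, void *data);

static struct notifier_block otg_nb = {
	.notifier_call = otg_handle_notification,
};

static int otg_handle_notification(struct notifier_block *nb,
				   unsigned long event, void *data)
{
	printf("otg event %lu\n", event);
	return 0;
}

int main(void)
{
	return otg_nb.notifier_call(&otg_nb, 1, NULL);
}
```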
47296diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
47297index cc439fd..8fa30df 100644
47298--- a/drivers/power/power_supply.h
47299+++ b/drivers/power/power_supply.h
47300@@ -16,12 +16,12 @@ struct power_supply;
47301
47302 #ifdef CONFIG_SYSFS
47303
47304-extern void power_supply_init_attrs(struct device_type *dev_type);
47305+extern void power_supply_init_attrs(void);
47306 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
47307
47308 #else
47309
47310-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
47311+static inline void power_supply_init_attrs(void) {}
47312 #define power_supply_uevent NULL
47313
47314 #endif /* CONFIG_SYSFS */
47315diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
47316index 557af94..84dc1fe 100644
47317--- a/drivers/power/power_supply_core.c
47318+++ b/drivers/power/power_supply_core.c
47319@@ -24,7 +24,10 @@
47320 struct class *power_supply_class;
47321 EXPORT_SYMBOL_GPL(power_supply_class);
47322
47323-static struct device_type power_supply_dev_type;
47324+extern const struct attribute_group *power_supply_attr_groups[];
47325+static struct device_type power_supply_dev_type = {
47326+ .groups = power_supply_attr_groups,
47327+};
47328
47329 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
47330 struct power_supply *supply)
47331@@ -584,7 +587,7 @@ static int __init power_supply_class_init(void)
47332 return PTR_ERR(power_supply_class);
47333
47334 power_supply_class->dev_uevent = power_supply_uevent;
47335- power_supply_init_attrs(&power_supply_dev_type);
47336+ power_supply_init_attrs();
47337
47338 return 0;
47339 }
47340diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
47341index 44420d1..967126e 100644
47342--- a/drivers/power/power_supply_sysfs.c
47343+++ b/drivers/power/power_supply_sysfs.c
47344@@ -230,17 +230,15 @@ static struct attribute_group power_supply_attr_group = {
47345 .is_visible = power_supply_attr_is_visible,
47346 };
47347
47348-static const struct attribute_group *power_supply_attr_groups[] = {
47349+const struct attribute_group *power_supply_attr_groups[] = {
47350 &power_supply_attr_group,
47351 NULL,
47352 };
47353
47354-void power_supply_init_attrs(struct device_type *dev_type)
47355+void power_supply_init_attrs(void)
47356 {
47357 int i;
47358
47359- dev_type->groups = power_supply_attr_groups;
47360-
47361 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
47362 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
47363 }
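The three power_supply hunks apply one move: power_supply_attr_groups loses its static linkage so the core can name it in a compile-time initializer for power_supply_dev_type, and the runtime dev_type->groups assignment disappears. Reduced to two pretend translation units with hypothetical stand-in types:

```c
#include <stdio.h>

struct attribute_group { const char *name; };
struct device_type { const struct attribute_group **groups; };

/* "power_supply_sysfs.c": the array is no longer static */
static const struct attribute_group psy_attr_group = { .name = "power_supply" };
const struct attribute_group *power_supply_attr_groups[] = {
	&psy_attr_group,
	NULL,
};

/* "power_supply_core.c": groups wired up at build time */
static struct device_type power_supply_dev_type = {
	.groups = power_supply_attr_groups,
};

int main(void)
{
	printf("first group: %s\n", power_supply_dev_type.groups[0]->name);
	return 0;
}
```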
47364diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
47365index 84419af..268ede8 100644
47366--- a/drivers/powercap/powercap_sys.c
47367+++ b/drivers/powercap/powercap_sys.c
47368@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
47369 struct device_attribute name_attr;
47370 };
47371
47372+static ssize_t show_constraint_name(struct device *dev,
47373+ struct device_attribute *dev_attr,
47374+ char *buf);
47375+
47376 static struct powercap_constraint_attr
47377- constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
47378+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
47379+ [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
47380+ .power_limit_attr = {
47381+ .attr = {
47382+ .name = NULL,
47383+ .mode = S_IWUSR | S_IRUGO
47384+ },
47385+ .show = show_constraint_power_limit_uw,
47386+ .store = store_constraint_power_limit_uw
47387+ },
47388+
47389+ .time_window_attr = {
47390+ .attr = {
47391+ .name = NULL,
47392+ .mode = S_IWUSR | S_IRUGO
47393+ },
47394+ .show = show_constraint_time_window_us,
47395+ .store = store_constraint_time_window_us
47396+ },
47397+
47398+ .max_power_attr = {
47399+ .attr = {
47400+ .name = NULL,
47401+ .mode = S_IRUGO
47402+ },
47403+ .show = show_constraint_max_power_uw,
47404+ .store = NULL
47405+ },
47406+
47407+ .min_power_attr = {
47408+ .attr = {
47409+ .name = NULL,
47410+ .mode = S_IRUGO
47411+ },
47412+ .show = show_constraint_min_power_uw,
47413+ .store = NULL
47414+ },
47415+
47416+ .max_time_window_attr = {
47417+ .attr = {
47418+ .name = NULL,
47419+ .mode = S_IRUGO
47420+ },
47421+ .show = show_constraint_max_time_window_us,
47422+ .store = NULL
47423+ },
47424+
47425+ .min_time_window_attr = {
47426+ .attr = {
47427+ .name = NULL,
47428+ .mode = S_IRUGO
47429+ },
47430+ .show = show_constraint_min_time_window_us,
47431+ .store = NULL
47432+ },
47433+
47434+ .name_attr = {
47435+ .attr = {
47436+ .name = NULL,
47437+ .mode = S_IRUGO
47438+ },
47439+ .show = show_constraint_name,
47440+ .store = NULL
47441+ }
47442+ }
47443+};
47444
47445 /* A list of powercap control_types */
47446 static LIST_HEAD(powercap_cntrl_list);
47447@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
47448 }
47449
47450 static int create_constraint_attribute(int id, const char *name,
47451- int mode,
47452- struct device_attribute *dev_attr,
47453- ssize_t (*show)(struct device *,
47454- struct device_attribute *, char *),
47455- ssize_t (*store)(struct device *,
47456- struct device_attribute *,
47457- const char *, size_t)
47458- )
47459+ struct device_attribute *dev_attr)
47460 {
47461+ name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
47462
47463- dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
47464- id, name);
47465- if (!dev_attr->attr.name)
47466+ if (!name)
47467 return -ENOMEM;
47468- dev_attr->attr.mode = mode;
47469- dev_attr->show = show;
47470- dev_attr->store = store;
47471+
47472+ pax_open_kernel();
47473+ *(const char **)&dev_attr->attr.name = name;
47474+ pax_close_kernel();
47475
47476 return 0;
47477 }
47478@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
47479
47480 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
47481 ret = create_constraint_attribute(i, "power_limit_uw",
47482- S_IWUSR | S_IRUGO,
47483- &constraint_attrs[i].power_limit_attr,
47484- show_constraint_power_limit_uw,
47485- store_constraint_power_limit_uw);
47486+ &constraint_attrs[i].power_limit_attr);
47487 if (ret)
47488 goto err_alloc;
47489 ret = create_constraint_attribute(i, "time_window_us",
47490- S_IWUSR | S_IRUGO,
47491- &constraint_attrs[i].time_window_attr,
47492- show_constraint_time_window_us,
47493- store_constraint_time_window_us);
47494+ &constraint_attrs[i].time_window_attr);
47495 if (ret)
47496 goto err_alloc;
47497- ret = create_constraint_attribute(i, "name", S_IRUGO,
47498- &constraint_attrs[i].name_attr,
47499- show_constraint_name,
47500- NULL);
47501+ ret = create_constraint_attribute(i, "name",
47502+ &constraint_attrs[i].name_attr);
47503 if (ret)
47504 goto err_alloc;
47505- ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
47506- &constraint_attrs[i].max_power_attr,
47507- show_constraint_max_power_uw,
47508- NULL);
47509+ ret = create_constraint_attribute(i, "max_power_uw",
47510+ &constraint_attrs[i].max_power_attr);
47511 if (ret)
47512 goto err_alloc;
47513- ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
47514- &constraint_attrs[i].min_power_attr,
47515- show_constraint_min_power_uw,
47516- NULL);
47517+ ret = create_constraint_attribute(i, "min_power_uw",
47518+ &constraint_attrs[i].min_power_attr);
47519 if (ret)
47520 goto err_alloc;
47521 ret = create_constraint_attribute(i, "max_time_window_us",
47522- S_IRUGO,
47523- &constraint_attrs[i].max_time_window_attr,
47524- show_constraint_max_time_window_us,
47525- NULL);
47526+ &constraint_attrs[i].max_time_window_attr);
47527 if (ret)
47528 goto err_alloc;
47529 ret = create_constraint_attribute(i, "min_time_window_us",
47530- S_IRUGO,
47531- &constraint_attrs[i].min_time_window_attr,
47532- show_constraint_min_time_window_us,
47533- NULL);
47534+ &constraint_attrs[i].min_time_window_attr);
47535 if (ret)
47536 goto err_alloc;
47537
47538@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
47539 power_zone->zone_dev_attrs[count++] =
47540 &dev_attr_max_energy_range_uj.attr;
47541 if (power_zone->ops->get_energy_uj) {
47542+ pax_open_kernel();
47543 if (power_zone->ops->reset_energy_uj)
47544- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
47545+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
47546 else
47547- dev_attr_energy_uj.attr.mode = S_IRUGO;
47548+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
47549+ pax_close_kernel();
47550 power_zone->zone_dev_attrs[count++] =
47551 &dev_attr_energy_uj.attr;
47552 }
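The powercap rewrite seeds every constraint slot with its show/store handlers and modes at compile time, leaving only the kasprintf()'d name for runtime; the vehicle is GCC's ranged designated initializer, `[0 ... N-1] = {...}`, which stamps one template across the whole array. A minimal demonstration of the construct (a GNU C extension):

```c
#include <stdio.h>

#define MAX_CONSTRAINTS_PER_ZONE 8

struct attr {
	const char *name;   /* filled in later, like the kasprintf() name */
	unsigned int mode;
};

static struct attr constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
	[0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
		.name = NULL,
		.mode = 0644,
	},
};

int main(void)
{
	for (int i = 0; i < MAX_CONSTRAINTS_PER_ZONE; i++)
		printf("slot %d: mode %04o\n", i, constraint_attrs[i].mode);
	return 0;
}
```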
47553diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
47554index d85f313..ae857d0 100644
47555--- a/drivers/regulator/core.c
47556+++ b/drivers/regulator/core.c
47557@@ -3362,7 +3362,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
47558 {
47559 const struct regulation_constraints *constraints = NULL;
47560 const struct regulator_init_data *init_data;
47561- static atomic_t regulator_no = ATOMIC_INIT(0);
47562+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
47563 struct regulator_dev *rdev;
47564 struct device *dev;
47565 int ret, i;
47566@@ -3432,7 +3432,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
47567 rdev->dev.of_node = config->of_node;
47568 rdev->dev.parent = dev;
47569 dev_set_name(&rdev->dev, "regulator.%d",
47570- atomic_inc_return(&regulator_no) - 1);
47571+ atomic_inc_return_unchecked(&regulator_no) - 1);
47572 ret = device_register(&rdev->dev);
47573 if (ret != 0) {
47574 put_device(&rdev->dev);
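regulator_no is the first of many counters this patch flips to atomic_unchecked_t; the same change recurs below for the fcoe and scsi_host counters, the libfc exchange statistics and the pmcraid failure counts. Under PaX's REFCOUNT hardening, plain atomic_t overflow traps to catch reference-count bugs, so counters that are mere IDs or statistics and may legitimately wrap must opt out. A sketch of the opt-out semantics built on C11 atomics rather than the kernel's types:

```c
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int counter; } atomic_unchecked_t;
#define ATOMIC_INIT(v) { (v) }

static int atomic_inc_return_unchecked(atomic_unchecked_t *v)
{
	/* wraps silently on overflow: exactly the opt-out semantics */
	return atomic_fetch_add(&v->counter, 1) + 1;
}

int main(void)
{
	static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
	char name[32];

	for (int i = 0; i < 3; i++) {
		snprintf(name, sizeof(name), "regulator.%d",
			 atomic_inc_return_unchecked(&regulator_no) - 1);
		puts(name);
	}
	return 0;
}
```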
47575diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
47576index 8d94d3d..653b623 100644
47577--- a/drivers/regulator/max8660.c
47578+++ b/drivers/regulator/max8660.c
47579@@ -420,8 +420,10 @@ static int max8660_probe(struct i2c_client *client,
47580 max8660->shadow_regs[MAX8660_OVER1] = 5;
47581 } else {
47582 /* Otherwise devices can be toggled via software */
47583- max8660_dcdc_ops.enable = max8660_dcdc_enable;
47584- max8660_dcdc_ops.disable = max8660_dcdc_disable;
47585+ pax_open_kernel();
47586+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
47587+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
47588+ pax_close_kernel();
47589 }
47590
47591 /*
47592diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
47593index 892aa1e..ebd1b9c 100644
47594--- a/drivers/regulator/max8973-regulator.c
47595+++ b/drivers/regulator/max8973-regulator.c
47596@@ -406,9 +406,11 @@ static int max8973_probe(struct i2c_client *client,
47597 if (!pdata || !pdata->enable_ext_control) {
47598 max->desc.enable_reg = MAX8973_VOUT;
47599 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
47600- max->ops.enable = regulator_enable_regmap;
47601- max->ops.disable = regulator_disable_regmap;
47602- max->ops.is_enabled = regulator_is_enabled_regmap;
47603+ pax_open_kernel();
47604+ *(void **)&max->ops.enable = regulator_enable_regmap;
47605+ *(void **)&max->ops.disable = regulator_disable_regmap;
47606+ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
47607+ pax_close_kernel();
47608 }
47609
47610 if (pdata) {
47611diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
47612index 96c9f80..90974ca 100644
47613--- a/drivers/regulator/mc13892-regulator.c
47614+++ b/drivers/regulator/mc13892-regulator.c
47615@@ -582,10 +582,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
47616 }
47617 mc13xxx_unlock(mc13892);
47618
47619- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
47620+ pax_open_kernel();
47621+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
47622 = mc13892_vcam_set_mode;
47623- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
47624+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
47625 = mc13892_vcam_get_mode;
47626+ pax_close_kernel();
47627
47628 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
47629 ARRAY_SIZE(mc13892_regulators));
47630diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
47631index f148762..5a6d1e5 100644
47632--- a/drivers/rtc/rtc-cmos.c
47633+++ b/drivers/rtc/rtc-cmos.c
47634@@ -731,7 +731,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
47635 hpet_rtc_timer_init();
47636
47637 /* export at least the first block of NVRAM */
47638- nvram.size = address_space - NVRAM_OFFSET;
47639+ pax_open_kernel();
47640+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
47641+ pax_close_kernel();
47642 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
47643 if (retval < 0) {
47644 dev_dbg(dev, "can't create nvram file? %d\n", retval);
47645diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
47646index d049393..bb20be0 100644
47647--- a/drivers/rtc/rtc-dev.c
47648+++ b/drivers/rtc/rtc-dev.c
47649@@ -16,6 +16,7 @@
47650 #include <linux/module.h>
47651 #include <linux/rtc.h>
47652 #include <linux/sched.h>
47653+#include <linux/grsecurity.h>
47654 #include "rtc-core.h"
47655
47656 static dev_t rtc_devt;
47657@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
47658 if (copy_from_user(&tm, uarg, sizeof(tm)))
47659 return -EFAULT;
47660
47661+ gr_log_timechange();
47662+
47663 return rtc_set_time(rtc, &tm);
47664
47665 case RTC_PIE_ON:
47666diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
47667index 4e75345..09f8663 100644
47668--- a/drivers/rtc/rtc-ds1307.c
47669+++ b/drivers/rtc/rtc-ds1307.c
47670@@ -107,7 +107,7 @@ struct ds1307 {
47671 u8 offset; /* register's offset */
47672 u8 regs[11];
47673 u16 nvram_offset;
47674- struct bin_attribute *nvram;
47675+ bin_attribute_no_const *nvram;
47676 enum ds_type type;
47677 unsigned long flags;
47678 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
47679diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
47680index 11880c1..b823aa4 100644
47681--- a/drivers/rtc/rtc-m48t59.c
47682+++ b/drivers/rtc/rtc-m48t59.c
47683@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
47684 if (IS_ERR(m48t59->rtc))
47685 return PTR_ERR(m48t59->rtc);
47686
47687- m48t59_nvram_attr.size = pdata->offset;
47688+ pax_open_kernel();
47689+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
47690+ pax_close_kernel();
47691
47692 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
47693 if (ret)
47694diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
47695index 14b5f8d..cc9bd26 100644
47696--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
47697+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
47698@@ -827,7 +827,7 @@ ahd_pci_intr(struct ahd_softc *ahd)
47699 for (bit = 0; bit < 8; bit++) {
47700
47701 if ((pci_status[i] & (0x1 << bit)) != 0) {
47702- static const char *s;
47703+ const char *s;
47704
47705 s = pci_status_strings[bit];
47706 if (i == 7/*TARG*/ && bit == 3)
47707@@ -887,23 +887,15 @@ ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat)
47708
47709 for (bit = 0; bit < 8; bit++) {
47710
47711- if ((split_status[i] & (0x1 << bit)) != 0) {
47712- static const char *s;
47713-
47714- s = split_status_strings[bit];
47715- printk(s, ahd_name(ahd),
47716+ if ((split_status[i] & (0x1 << bit)) != 0)
47717+ printk(split_status_strings[bit], ahd_name(ahd),
47718 split_status_source[i]);
47719- }
47720
47721 if (i > 1)
47722 continue;
47723
47724- if ((sg_split_status[i] & (0x1 << bit)) != 0) {
47725- static const char *s;
47726-
47727- s = split_status_strings[bit];
47728- printk(s, ahd_name(ahd), "SG");
47729- }
47730+ if ((sg_split_status[i] & (0x1 << bit)) != 0)
47731+ printk(split_status_strings[bit], ahd_name(ahd), "SG");
47732 }
47733 }
47734 /*
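The aic79xx hunks remove a misplaced `static` from a loop-local pointer. A function-local static is one persistent slot shared by every call (and every CPU), not a fresh variable, so here it bought nothing and invited a harmless but real race. A demonstration that every call lands in the same slot:

```c
#include <stdio.h>

static const char **slot_of(int i)
{
	static const char *s;   /* one shared slot for every call */
	static const char *const names[] = { "zero", "one", "two" };

	s = names[i];           /* each call clobbers the previous value */
	return &s;
}

int main(void)
{
	const char **a = slot_of(1);
	const char **b = slot_of(2);

	printf("same slot: %s, now holds \"%s\"\n",
	       a == b ? "yes" : "no", *a);
	return 0;
}
```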
47735diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
47736index e693af6..2e525b6 100644
47737--- a/drivers/scsi/bfa/bfa_fcpim.h
47738+++ b/drivers/scsi/bfa/bfa_fcpim.h
47739@@ -36,7 +36,7 @@ struct bfa_iotag_s {
47740
47741 struct bfa_itn_s {
47742 bfa_isr_func_t isr;
47743-};
47744+} __no_const;
47745
47746 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
47747 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
47748diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
47749index a3ab5cc..8143622 100644
47750--- a/drivers/scsi/bfa/bfa_fcs.c
47751+++ b/drivers/scsi/bfa/bfa_fcs.c
47752@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
47753 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
47754
47755 static struct bfa_fcs_mod_s fcs_modules[] = {
47756- { bfa_fcs_port_attach, NULL, NULL },
47757- { bfa_fcs_uf_attach, NULL, NULL },
47758- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
47759- bfa_fcs_fabric_modexit },
47760+ {
47761+ .attach = bfa_fcs_port_attach,
47762+ .modinit = NULL,
47763+ .modexit = NULL
47764+ },
47765+ {
47766+ .attach = bfa_fcs_uf_attach,
47767+ .modinit = NULL,
47768+ .modexit = NULL
47769+ },
47770+ {
47771+ .attach = bfa_fcs_fabric_attach,
47772+ .modinit = bfa_fcs_fabric_modinit,
47773+ .modexit = bfa_fcs_fabric_modexit
47774+ },
47775 };
47776
47777 /*
47778diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
47779index f5e4e61..a0acaf6 100644
47780--- a/drivers/scsi/bfa/bfa_fcs_lport.c
47781+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
47782@@ -89,15 +89,26 @@ static struct {
47783 void (*offline) (struct bfa_fcs_lport_s *port);
47784 } __port_action[] = {
47785 {
47786- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
47787- bfa_fcs_lport_unknown_offline}, {
47788- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
47789- bfa_fcs_lport_fab_offline}, {
47790- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
47791- bfa_fcs_lport_n2n_offline}, {
47792- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
47793- bfa_fcs_lport_loop_offline},
47794- };
47795+ .init = bfa_fcs_lport_unknown_init,
47796+ .online = bfa_fcs_lport_unknown_online,
47797+ .offline = bfa_fcs_lport_unknown_offline
47798+ },
47799+ {
47800+ .init = bfa_fcs_lport_fab_init,
47801+ .online = bfa_fcs_lport_fab_online,
47802+ .offline = bfa_fcs_lport_fab_offline
47803+ },
47804+ {
47805+ .init = bfa_fcs_lport_n2n_init,
47806+ .online = bfa_fcs_lport_n2n_online,
47807+ .offline = bfa_fcs_lport_n2n_offline
47808+ },
47809+ {
47810+ .init = bfa_fcs_lport_loop_init,
47811+ .online = bfa_fcs_lport_loop_online,
47812+ .offline = bfa_fcs_lport_loop_offline
47813+ },
47814+};
47815
47816 /*
47817 * fcs_port_sm FCS logical port state machine
47818diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
47819index 90814fe..4384138 100644
47820--- a/drivers/scsi/bfa/bfa_ioc.h
47821+++ b/drivers/scsi/bfa/bfa_ioc.h
47822@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
47823 bfa_ioc_disable_cbfn_t disable_cbfn;
47824 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
47825 bfa_ioc_reset_cbfn_t reset_cbfn;
47826-};
47827+} __no_const;
47828
47829 /*
47830 * IOC event notification mechanism.
47831@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
47832 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
47833 enum bfi_ioc_state fwstate);
47834 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
47835-};
47836+} __no_const;
47837
47838 /*
47839 * Queue element to wait for room in request queue. FIFO order is
47840diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
47841index a14c784..6de6790 100644
47842--- a/drivers/scsi/bfa/bfa_modules.h
47843+++ b/drivers/scsi/bfa/bfa_modules.h
47844@@ -78,12 +78,12 @@ enum {
47845 \
47846 extern struct bfa_module_s hal_mod_ ## __mod; \
47847 struct bfa_module_s hal_mod_ ## __mod = { \
47848- bfa_ ## __mod ## _meminfo, \
47849- bfa_ ## __mod ## _attach, \
47850- bfa_ ## __mod ## _detach, \
47851- bfa_ ## __mod ## _start, \
47852- bfa_ ## __mod ## _stop, \
47853- bfa_ ## __mod ## _iocdisable, \
47854+ .meminfo = bfa_ ## __mod ## _meminfo, \
47855+ .attach = bfa_ ## __mod ## _attach, \
47856+ .detach = bfa_ ## __mod ## _detach, \
47857+ .start = bfa_ ## __mod ## _start, \
47858+ .stop = bfa_ ## __mod ## _stop, \
47859+ .iocdisable = bfa_ ## __mod ## _iocdisable, \
47860 }
47861
47862 #define BFA_CACHELINE_SZ (256)
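The bfa hunks (fcs_modules, __port_action and the BFA_MODULE() macro) all convert positional struct initializers to designated ones. The designated form stays correct if the structure's field order changes, which matters when the layout may be shuffled by the structure-randomization plugin, and it is plainly easier to audit. The macro case in miniature:

```c
#include <stdio.h>

struct bfa_module {
	void (*attach)(void);
	void (*start)(void);
};

static void fab_attach(void) { puts("fabric attach"); }
static void fab_start(void)  { puts("fabric start"); }

/* mirrors the designated form of BFA_MODULE() in the hunk */
#define BFA_MODULE(mod)					\
	static struct bfa_module hal_mod_##mod = {	\
		.attach = mod##_attach,			\
		.start  = mod##_start,			\
	}

BFA_MODULE(fab);

int main(void)
{
	hal_mod_fab.attach();
	hal_mod_fab.start();
	return 0;
}
```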
47863diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
47864index 045c4e1..13de803 100644
47865--- a/drivers/scsi/fcoe/fcoe_sysfs.c
47866+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
47867@@ -33,8 +33,8 @@
47868 */
47869 #include "libfcoe.h"
47870
47871-static atomic_t ctlr_num;
47872-static atomic_t fcf_num;
47873+static atomic_unchecked_t ctlr_num;
47874+static atomic_unchecked_t fcf_num;
47875
47876 /*
47877 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
47878@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
47879 if (!ctlr)
47880 goto out;
47881
47882- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
47883+ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
47884 ctlr->f = f;
47885 ctlr->mode = FIP_CONN_TYPE_FABRIC;
47886 INIT_LIST_HEAD(&ctlr->fcfs);
47887@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
47888 fcf->dev.parent = &ctlr->dev;
47889 fcf->dev.bus = &fcoe_bus_type;
47890 fcf->dev.type = &fcoe_fcf_device_type;
47891- fcf->id = atomic_inc_return(&fcf_num) - 1;
47892+ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
47893 fcf->state = FCOE_FCF_STATE_UNKNOWN;
47894
47895 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
47896@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
47897 {
47898 int error;
47899
47900- atomic_set(&ctlr_num, 0);
47901- atomic_set(&fcf_num, 0);
47902+ atomic_set_unchecked(&ctlr_num, 0);
47903+ atomic_set_unchecked(&fcf_num, 0);
47904
47905 error = bus_register(&fcoe_bus_type);
47906 if (error)
47907diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
47908index f2c5005..db36c02 100644
47909--- a/drivers/scsi/hosts.c
47910+++ b/drivers/scsi/hosts.c
47911@@ -42,7 +42,7 @@
47912 #include "scsi_logging.h"
47913
47914
47915-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
47916+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
47917
47918
47919 static void scsi_host_cls_release(struct device *dev)
47920@@ -367,7 +367,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
47921 * subtract one because we increment first then return, but we need to
47922 * know what the next host number was before increment
47923 */
47924- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
47925+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
47926 shost->dma_channel = 0xff;
47927
47928 /* These three are default values which can be overridden */
47929diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
47930index 20a5e6e..8b23cea 100644
47931--- a/drivers/scsi/hpsa.c
47932+++ b/drivers/scsi/hpsa.c
47933@@ -578,7 +578,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
47934 unsigned long flags;
47935
47936 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
47937- return h->access.command_completed(h, q);
47938+ return h->access->command_completed(h, q);
47939
47940 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
47941 a = rq->head[rq->current_entry];
47942@@ -3444,7 +3444,7 @@ static void start_io(struct ctlr_info *h)
47943 while (!list_empty(&h->reqQ)) {
47944 c = list_entry(h->reqQ.next, struct CommandList, list);
47945 /* can't do anything if fifo is full */
47946- if ((h->access.fifo_full(h))) {
47947+ if ((h->access->fifo_full(h))) {
47948 dev_warn(&h->pdev->dev, "fifo full\n");
47949 break;
47950 }
47951@@ -3466,7 +3466,7 @@ static void start_io(struct ctlr_info *h)
47952
47953 /* Tell the controller execute command */
47954 spin_unlock_irqrestore(&h->lock, flags);
47955- h->access.submit_command(h, c);
47956+ h->access->submit_command(h, c);
47957 spin_lock_irqsave(&h->lock, flags);
47958 }
47959 spin_unlock_irqrestore(&h->lock, flags);
47960@@ -3474,17 +3474,17 @@ static void start_io(struct ctlr_info *h)
47961
47962 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
47963 {
47964- return h->access.command_completed(h, q);
47965+ return h->access->command_completed(h, q);
47966 }
47967
47968 static inline bool interrupt_pending(struct ctlr_info *h)
47969 {
47970- return h->access.intr_pending(h);
47971+ return h->access->intr_pending(h);
47972 }
47973
47974 static inline long interrupt_not_for_us(struct ctlr_info *h)
47975 {
47976- return (h->access.intr_pending(h) == 0) ||
47977+ return (h->access->intr_pending(h) == 0) ||
47978 (h->interrupts_enabled == 0);
47979 }
47980
47981@@ -4386,7 +4386,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
47982 if (prod_index < 0)
47983 return -ENODEV;
47984 h->product_name = products[prod_index].product_name;
47985- h->access = *(products[prod_index].access);
47986+ h->access = products[prod_index].access;
47987
47988 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
47989 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
47990@@ -4668,7 +4668,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
47991
47992 assert_spin_locked(&lockup_detector_lock);
47993 remove_ctlr_from_lockup_detector_list(h);
47994- h->access.set_intr_mask(h, HPSA_INTR_OFF);
47995+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
47996 spin_lock_irqsave(&h->lock, flags);
47997 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
47998 spin_unlock_irqrestore(&h->lock, flags);
47999@@ -4845,7 +4845,7 @@ reinit_after_soft_reset:
48000 }
48001
48002 /* make sure the board interrupts are off */
48003- h->access.set_intr_mask(h, HPSA_INTR_OFF);
48004+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
48005
48006 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
48007 goto clean2;
48008@@ -4879,7 +4879,7 @@ reinit_after_soft_reset:
48009 * fake ones to scoop up any residual completions.
48010 */
48011 spin_lock_irqsave(&h->lock, flags);
48012- h->access.set_intr_mask(h, HPSA_INTR_OFF);
48013+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
48014 spin_unlock_irqrestore(&h->lock, flags);
48015 free_irqs(h);
48016 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
48017@@ -4898,9 +4898,9 @@ reinit_after_soft_reset:
48018 dev_info(&h->pdev->dev, "Board READY.\n");
48019 dev_info(&h->pdev->dev,
48020 "Waiting for stale completions to drain.\n");
48021- h->access.set_intr_mask(h, HPSA_INTR_ON);
48022+ h->access->set_intr_mask(h, HPSA_INTR_ON);
48023 msleep(10000);
48024- h->access.set_intr_mask(h, HPSA_INTR_OFF);
48025+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
48026
48027 rc = controller_reset_failed(h->cfgtable);
48028 if (rc)
48029@@ -4921,7 +4921,7 @@ reinit_after_soft_reset:
48030 }
48031
48032 /* Turn the interrupts on so we can service requests */
48033- h->access.set_intr_mask(h, HPSA_INTR_ON);
48034+ h->access->set_intr_mask(h, HPSA_INTR_ON);
48035
48036 hpsa_hba_inquiry(h);
48037 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
48038@@ -4976,7 +4976,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
48039 * To write all data in the battery backed cache to disks
48040 */
48041 hpsa_flush_cache(h);
48042- h->access.set_intr_mask(h, HPSA_INTR_OFF);
48043+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
48044 hpsa_free_irqs_and_disable_msix(h);
48045 }
48046
48047@@ -5143,7 +5143,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
48048 return;
48049 }
48050 /* Change the access methods to the performant access methods */
48051- h->access = SA5_performant_access;
48052+ h->access = &SA5_performant_access;
48053 h->transMethod = CFGTBL_Trans_Performant;
48054 }
48055
48056diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
48057index bc85e72..d463049 100644
48058--- a/drivers/scsi/hpsa.h
48059+++ b/drivers/scsi/hpsa.h
48060@@ -79,7 +79,7 @@ struct ctlr_info {
48061 unsigned int msix_vector;
48062 unsigned int msi_vector;
48063 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
48064- struct access_method access;
48065+ struct access_method *access;
48066
48067 /* queue and queue Info */
48068 struct list_head reqQ;
48069@@ -381,19 +381,19 @@ static bool SA5_performant_intr_pending(struct ctlr_info *h)
48070 }
48071
48072 static struct access_method SA5_access = {
48073- SA5_submit_command,
48074- SA5_intr_mask,
48075- SA5_fifo_full,
48076- SA5_intr_pending,
48077- SA5_completed,
48078+ .submit_command = SA5_submit_command,
48079+ .set_intr_mask = SA5_intr_mask,
48080+ .fifo_full = SA5_fifo_full,
48081+ .intr_pending = SA5_intr_pending,
48082+ .command_completed = SA5_completed,
48083 };
48084
48085 static struct access_method SA5_performant_access = {
48086- SA5_submit_command,
48087- SA5_performant_intr_mask,
48088- SA5_fifo_full,
48089- SA5_performant_intr_pending,
48090- SA5_performant_completed,
48091+ .submit_command = SA5_submit_command,
48092+ .set_intr_mask = SA5_performant_intr_mask,
48093+ .fifo_full = SA5_fifo_full,
48094+ .intr_pending = SA5_performant_intr_pending,
48095+ .command_completed = SA5_performant_completed,
48096 };
48097
48098 struct board_type {
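The hpsa change is larger but mechanical: struct ctlr_info used to copy an access_method table by value, leaving one writable copy per controller; it now holds a pointer to a single shared table, which can then live in read-only memory, and every call site moves from h->access.fn() to h->access->fn(). The shape of the change, with const added here to show where the shared table could end up:

```c
#include <stdio.h>

struct access_method {
	void (*submit_command)(const char *what);
};

static void sa5_submit(const char *what) { printf("submit: %s\n", what); }

static const struct access_method SA5_access = {
	.submit_command = sa5_submit,
};

struct ctlr_info {
	const struct access_method *access;  /* was: struct access_method access */
};

int main(void)
{
	struct ctlr_info h = { .access = &SA5_access };

	h.access->submit_command("test");    /* was: h.access.submit_command(...) */
	return 0;
}
```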
48099diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
48100index 1b3a094..068e683 100644
48101--- a/drivers/scsi/libfc/fc_exch.c
48102+++ b/drivers/scsi/libfc/fc_exch.c
48103@@ -101,12 +101,12 @@ struct fc_exch_mgr {
48104 u16 pool_max_index;
48105
48106 struct {
48107- atomic_t no_free_exch;
48108- atomic_t no_free_exch_xid;
48109- atomic_t xid_not_found;
48110- atomic_t xid_busy;
48111- atomic_t seq_not_found;
48112- atomic_t non_bls_resp;
48113+ atomic_unchecked_t no_free_exch;
48114+ atomic_unchecked_t no_free_exch_xid;
48115+ atomic_unchecked_t xid_not_found;
48116+ atomic_unchecked_t xid_busy;
48117+ atomic_unchecked_t seq_not_found;
48118+ atomic_unchecked_t non_bls_resp;
48119 } stats;
48120 };
48121
48122@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
48123 /* allocate memory for exchange */
48124 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
48125 if (!ep) {
48126- atomic_inc(&mp->stats.no_free_exch);
48127+ atomic_inc_unchecked(&mp->stats.no_free_exch);
48128 goto out;
48129 }
48130 memset(ep, 0, sizeof(*ep));
48131@@ -874,7 +874,7 @@ out:
48132 return ep;
48133 err:
48134 spin_unlock_bh(&pool->lock);
48135- atomic_inc(&mp->stats.no_free_exch_xid);
48136+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
48137 mempool_free(ep, mp->ep_pool);
48138 return NULL;
48139 }
48140@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
48141 xid = ntohs(fh->fh_ox_id); /* we originated exch */
48142 ep = fc_exch_find(mp, xid);
48143 if (!ep) {
48144- atomic_inc(&mp->stats.xid_not_found);
48145+ atomic_inc_unchecked(&mp->stats.xid_not_found);
48146 reject = FC_RJT_OX_ID;
48147 goto out;
48148 }
48149@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
48150 ep = fc_exch_find(mp, xid);
48151 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
48152 if (ep) {
48153- atomic_inc(&mp->stats.xid_busy);
48154+ atomic_inc_unchecked(&mp->stats.xid_busy);
48155 reject = FC_RJT_RX_ID;
48156 goto rel;
48157 }
48158@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
48159 }
48160 xid = ep->xid; /* get our XID */
48161 } else if (!ep) {
48162- atomic_inc(&mp->stats.xid_not_found);
48163+ atomic_inc_unchecked(&mp->stats.xid_not_found);
48164 reject = FC_RJT_RX_ID; /* XID not found */
48165 goto out;
48166 }
48167@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
48168 } else {
48169 sp = &ep->seq;
48170 if (sp->id != fh->fh_seq_id) {
48171- atomic_inc(&mp->stats.seq_not_found);
48172+ atomic_inc_unchecked(&mp->stats.seq_not_found);
48173 if (f_ctl & FC_FC_END_SEQ) {
48174 /*
48175 * Update sequence_id based on incoming last
48176@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
48177
48178 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
48179 if (!ep) {
48180- atomic_inc(&mp->stats.xid_not_found);
48181+ atomic_inc_unchecked(&mp->stats.xid_not_found);
48182 goto out;
48183 }
48184 if (ep->esb_stat & ESB_ST_COMPLETE) {
48185- atomic_inc(&mp->stats.xid_not_found);
48186+ atomic_inc_unchecked(&mp->stats.xid_not_found);
48187 goto rel;
48188 }
48189 if (ep->rxid == FC_XID_UNKNOWN)
48190 ep->rxid = ntohs(fh->fh_rx_id);
48191 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
48192- atomic_inc(&mp->stats.xid_not_found);
48193+ atomic_inc_unchecked(&mp->stats.xid_not_found);
48194 goto rel;
48195 }
48196 if (ep->did != ntoh24(fh->fh_s_id) &&
48197 ep->did != FC_FID_FLOGI) {
48198- atomic_inc(&mp->stats.xid_not_found);
48199+ atomic_inc_unchecked(&mp->stats.xid_not_found);
48200 goto rel;
48201 }
48202 sof = fr_sof(fp);
48203@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
48204 sp->ssb_stat |= SSB_ST_RESP;
48205 sp->id = fh->fh_seq_id;
48206 } else if (sp->id != fh->fh_seq_id) {
48207- atomic_inc(&mp->stats.seq_not_found);
48208+ atomic_inc_unchecked(&mp->stats.seq_not_found);
48209 goto rel;
48210 }
48211
48212@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
48213 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
48214
48215 if (!sp)
48216- atomic_inc(&mp->stats.xid_not_found);
48217+ atomic_inc_unchecked(&mp->stats.xid_not_found);
48218 else
48219- atomic_inc(&mp->stats.non_bls_resp);
48220+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
48221
48222 fc_frame_free(fp);
48223 }
48224@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
48225
48226 list_for_each_entry(ema, &lport->ema_list, ema_list) {
48227 mp = ema->mp;
48228- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
48229+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
48230 st->fc_no_free_exch_xid +=
48231- atomic_read(&mp->stats.no_free_exch_xid);
48232- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
48233- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
48234- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
48235- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
48236+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
48237+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
48238+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
48239+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
48240+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
48241 }
48242 }
48243 EXPORT_SYMBOL(fc_exch_update_stats);
48244diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
48245index d289583..b745eec 100644
48246--- a/drivers/scsi/libsas/sas_ata.c
48247+++ b/drivers/scsi/libsas/sas_ata.c
48248@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
48249 .postreset = ata_std_postreset,
48250 .error_handler = ata_std_error_handler,
48251 .post_internal_cmd = sas_ata_post_internal,
48252- .qc_defer = ata_std_qc_defer,
48253+ .qc_defer = ata_std_qc_defer,
48254 .qc_prep = ata_noop_qc_prep,
48255 .qc_issue = sas_ata_qc_issue,
48256 .qc_fill_rtf = sas_ata_qc_fill_rtf,
48257diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
48258index 4e1b75c..0bbdfa9 100644
48259--- a/drivers/scsi/lpfc/lpfc.h
48260+++ b/drivers/scsi/lpfc/lpfc.h
48261@@ -432,7 +432,7 @@ struct lpfc_vport {
48262 struct dentry *debug_nodelist;
48263 struct dentry *vport_debugfs_root;
48264 struct lpfc_debugfs_trc *disc_trc;
48265- atomic_t disc_trc_cnt;
48266+ atomic_unchecked_t disc_trc_cnt;
48267 #endif
48268 uint8_t stat_data_enabled;
48269 uint8_t stat_data_blocked;
48270@@ -865,8 +865,8 @@ struct lpfc_hba {
48271 struct timer_list fabric_block_timer;
48272 unsigned long bit_flags;
48273 #define FABRIC_COMANDS_BLOCKED 0
48274- atomic_t num_rsrc_err;
48275- atomic_t num_cmd_success;
48276+ atomic_unchecked_t num_rsrc_err;
48277+ atomic_unchecked_t num_cmd_success;
48278 unsigned long last_rsrc_error_time;
48279 unsigned long last_ramp_down_time;
48280 unsigned long last_ramp_up_time;
48281@@ -902,7 +902,7 @@ struct lpfc_hba {
48282
48283 struct dentry *debug_slow_ring_trc;
48284 struct lpfc_debugfs_trc *slow_ring_trc;
48285- atomic_t slow_ring_trc_cnt;
48286+ atomic_unchecked_t slow_ring_trc_cnt;
48287 /* iDiag debugfs sub-directory */
48288 struct dentry *idiag_root;
48289 struct dentry *idiag_pci_cfg;
48290diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
48291index 60084e6..0e2e700 100644
48292--- a/drivers/scsi/lpfc/lpfc_debugfs.c
48293+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
48294@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
48295
48296 #include <linux/debugfs.h>
48297
48298-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
48299+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
48300 static unsigned long lpfc_debugfs_start_time = 0L;
48301
48302 /* iDiag */
48303@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
48304 lpfc_debugfs_enable = 0;
48305
48306 len = 0;
48307- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
48308+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
48309 (lpfc_debugfs_max_disc_trc - 1);
48310 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
48311 dtp = vport->disc_trc + i;
48312@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
48313 lpfc_debugfs_enable = 0;
48314
48315 len = 0;
48316- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
48317+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
48318 (lpfc_debugfs_max_slow_ring_trc - 1);
48319 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
48320 dtp = phba->slow_ring_trc + i;
48321@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
48322 !vport || !vport->disc_trc)
48323 return;
48324
48325- index = atomic_inc_return(&vport->disc_trc_cnt) &
48326+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
48327 (lpfc_debugfs_max_disc_trc - 1);
48328 dtp = vport->disc_trc + index;
48329 dtp->fmt = fmt;
48330 dtp->data1 = data1;
48331 dtp->data2 = data2;
48332 dtp->data3 = data3;
48333- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
48334+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
48335 dtp->jif = jiffies;
48336 #endif
48337 return;
48338@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
48339 !phba || !phba->slow_ring_trc)
48340 return;
48341
48342- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
48343+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
48344 (lpfc_debugfs_max_slow_ring_trc - 1);
48345 dtp = phba->slow_ring_trc + index;
48346 dtp->fmt = fmt;
48347 dtp->data1 = data1;
48348 dtp->data2 = data2;
48349 dtp->data3 = data3;
48350- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
48351+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
48352 dtp->jif = jiffies;
48353 #endif
48354 return;
48355@@ -4168,7 +4168,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
48356 "slow_ring buffer\n");
48357 goto debug_failed;
48358 }
48359- atomic_set(&phba->slow_ring_trc_cnt, 0);
48360+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
48361 memset(phba->slow_ring_trc, 0,
48362 (sizeof(struct lpfc_debugfs_trc) *
48363 lpfc_debugfs_max_slow_ring_trc));
48364@@ -4214,7 +4214,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
48365 "buffer\n");
48366 goto debug_failed;
48367 }
48368- atomic_set(&vport->disc_trc_cnt, 0);
48369+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
48370
48371 snprintf(name, sizeof(name), "discovery_trace");
48372 vport->debug_disc_trc =
48373diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
48374index 68c94cc..8c27be5 100644
48375--- a/drivers/scsi/lpfc/lpfc_init.c
48376+++ b/drivers/scsi/lpfc/lpfc_init.c
48377@@ -10949,8 +10949,10 @@ lpfc_init(void)
48378 "misc_register returned with status %d", error);
48379
48380 if (lpfc_enable_npiv) {
48381- lpfc_transport_functions.vport_create = lpfc_vport_create;
48382- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
48383+ pax_open_kernel();
48384+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
48385+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
48386+ pax_close_kernel();
48387 }
48388 lpfc_transport_template =
48389 fc_attach_transport(&lpfc_transport_functions);
48390diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
48391index b2ede05..aaf482ca 100644
48392--- a/drivers/scsi/lpfc/lpfc_scsi.c
48393+++ b/drivers/scsi/lpfc/lpfc_scsi.c
48394@@ -353,7 +353,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
48395 uint32_t evt_posted;
48396
48397 spin_lock_irqsave(&phba->hbalock, flags);
48398- atomic_inc(&phba->num_rsrc_err);
48399+ atomic_inc_unchecked(&phba->num_rsrc_err);
48400 phba->last_rsrc_error_time = jiffies;
48401
48402 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
48403@@ -394,7 +394,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
48404 unsigned long flags;
48405 struct lpfc_hba *phba = vport->phba;
48406 uint32_t evt_posted;
48407- atomic_inc(&phba->num_cmd_success);
48408+ atomic_inc_unchecked(&phba->num_cmd_success);
48409
48410 if (vport->cfg_lun_queue_depth <= queue_depth)
48411 return;
48412@@ -438,8 +438,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
48413 unsigned long num_rsrc_err, num_cmd_success;
48414 int i;
48415
48416- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
48417- num_cmd_success = atomic_read(&phba->num_cmd_success);
48418+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
48419+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
48420
48421 /*
48422 * The error and success command counters are global per
48423@@ -467,8 +467,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
48424 }
48425 }
48426 lpfc_destroy_vport_work_array(phba, vports);
48427- atomic_set(&phba->num_rsrc_err, 0);
48428- atomic_set(&phba->num_cmd_success, 0);
48429+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
48430+ atomic_set_unchecked(&phba->num_cmd_success, 0);
48431 }
48432
48433 /**
48434@@ -502,8 +502,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
48435 }
48436 }
48437 lpfc_destroy_vport_work_array(phba, vports);
48438- atomic_set(&phba->num_rsrc_err, 0);
48439- atomic_set(&phba->num_cmd_success, 0);
48440+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
48441+ atomic_set_unchecked(&phba->num_cmd_success, 0);
48442 }
48443
48444 /**
48445diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
48446index 7f0af4f..193ac3e 100644
48447--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
48448+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
48449@@ -1557,7 +1557,7 @@ _scsih_get_resync(struct device *dev)
48450 {
48451 struct scsi_device *sdev = to_scsi_device(dev);
48452 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
48453- static struct _raid_device *raid_device;
48454+ struct _raid_device *raid_device;
48455 unsigned long flags;
48456 Mpi2RaidVolPage0_t vol_pg0;
48457 Mpi2ConfigReply_t mpi_reply;
48458@@ -1609,7 +1609,7 @@ _scsih_get_state(struct device *dev)
48459 {
48460 struct scsi_device *sdev = to_scsi_device(dev);
48461 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
48462- static struct _raid_device *raid_device;
48463+ struct _raid_device *raid_device;
48464 unsigned long flags;
48465 Mpi2RaidVolPage0_t vol_pg0;
48466 Mpi2ConfigReply_t mpi_reply;
48467@@ -6637,7 +6637,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
48468 struct fw_event_work *fw_event)
48469 {
48470 Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data;
48471- static struct _raid_device *raid_device;
48472+ struct _raid_device *raid_device;
48473 unsigned long flags;
48474 u16 handle;
48475
48476@@ -7108,7 +7108,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
48477 u64 sas_address;
48478 struct _sas_device *sas_device;
48479 struct _sas_node *expander_device;
48480- static struct _raid_device *raid_device;
48481+ struct _raid_device *raid_device;
48482 u8 retry_count;
48483 unsigned long flags;
48484
48485diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
48486index be8ce54..94ed33a 100644
48487--- a/drivers/scsi/pmcraid.c
48488+++ b/drivers/scsi/pmcraid.c
48489@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
48490 res->scsi_dev = scsi_dev;
48491 scsi_dev->hostdata = res;
48492 res->change_detected = 0;
48493- atomic_set(&res->read_failures, 0);
48494- atomic_set(&res->write_failures, 0);
48495+ atomic_set_unchecked(&res->read_failures, 0);
48496+ atomic_set_unchecked(&res->write_failures, 0);
48497 rc = 0;
48498 }
48499 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
48500@@ -2687,9 +2687,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
48501
48502 /* If this was a SCSI read/write command keep count of errors */
48503 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
48504- atomic_inc(&res->read_failures);
48505+ atomic_inc_unchecked(&res->read_failures);
48506 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
48507- atomic_inc(&res->write_failures);
48508+ atomic_inc_unchecked(&res->write_failures);
48509
48510 if (!RES_IS_GSCSI(res->cfg_entry) &&
48511 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
48512@@ -3545,7 +3545,7 @@ static int pmcraid_queuecommand_lck(
48513 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
48514 * hrrq_id assigned here in queuecommand
48515 */
48516- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
48517+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
48518 pinstance->num_hrrq;
48519 cmd->cmd_done = pmcraid_io_done;
48520
48521@@ -3857,7 +3857,7 @@ static long pmcraid_ioctl_passthrough(
48522 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
48523 * hrrq_id assigned here in queuecommand
48524 */
48525- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
48526+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
48527 pinstance->num_hrrq;
48528
48529 if (request_size) {
48530@@ -4495,7 +4495,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
48531
48532 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
48533 /* add resources only after host is added into system */
48534- if (!atomic_read(&pinstance->expose_resources))
48535+ if (!atomic_read_unchecked(&pinstance->expose_resources))
48536 return;
48537
48538 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
48539@@ -5322,8 +5322,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
48540 init_waitqueue_head(&pinstance->reset_wait_q);
48541
48542 atomic_set(&pinstance->outstanding_cmds, 0);
48543- atomic_set(&pinstance->last_message_id, 0);
48544- atomic_set(&pinstance->expose_resources, 0);
48545+ atomic_set_unchecked(&pinstance->last_message_id, 0);
48546+ atomic_set_unchecked(&pinstance->expose_resources, 0);
48547
48548 INIT_LIST_HEAD(&pinstance->free_res_q);
48549 INIT_LIST_HEAD(&pinstance->used_res_q);
48550@@ -6036,7 +6036,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
48551 /* Schedule worker thread to handle CCN and take care of adding and
48552 * removing devices to OS
48553 */
48554- atomic_set(&pinstance->expose_resources, 1);
48555+ atomic_set_unchecked(&pinstance->expose_resources, 1);
48556 schedule_work(&pinstance->worker_q);
48557 return rc;
48558
48559diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
48560index e1d150f..6c6df44 100644
48561--- a/drivers/scsi/pmcraid.h
48562+++ b/drivers/scsi/pmcraid.h
48563@@ -748,7 +748,7 @@ struct pmcraid_instance {
48564 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
48565
48566 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
48567- atomic_t last_message_id;
48568+ atomic_unchecked_t last_message_id;
48569
48570 /* configuration table */
48571 struct pmcraid_config_table *cfg_table;
48572@@ -777,7 +777,7 @@ struct pmcraid_instance {
48573 atomic_t outstanding_cmds;
48574
48575 /* should add/delete resources to mid-layer now ?*/
48576- atomic_t expose_resources;
48577+ atomic_unchecked_t expose_resources;
48578
48579
48580
48581@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
48582 struct pmcraid_config_table_entry_ext cfg_entry_ext;
48583 };
48584 struct scsi_device *scsi_dev; /* Link scsi_device structure */
48585- atomic_t read_failures; /* count of failed READ commands */
48586- atomic_t write_failures; /* count of failed WRITE commands */
48587+ atomic_unchecked_t read_failures; /* count of failed READ commands */
48588+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
48589
48590 /* To indicate add/delete/modify during CCN */
48591 u8 change_detected;
48592diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
48593index 5f174b8..98d32b0 100644
48594--- a/drivers/scsi/qla2xxx/qla_attr.c
48595+++ b/drivers/scsi/qla2xxx/qla_attr.c
48596@@ -2040,7 +2040,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
48597 return 0;
48598 }
48599
48600-struct fc_function_template qla2xxx_transport_functions = {
48601+fc_function_template_no_const qla2xxx_transport_functions = {
48602
48603 .show_host_node_name = 1,
48604 .show_host_port_name = 1,
48605@@ -2088,7 +2088,7 @@ struct fc_function_template qla2xxx_transport_functions = {
48606 .bsg_timeout = qla24xx_bsg_timeout,
48607 };
48608
48609-struct fc_function_template qla2xxx_transport_vport_functions = {
48610+fc_function_template_no_const qla2xxx_transport_vport_functions = {
48611
48612 .show_host_node_name = 1,
48613 .show_host_port_name = 1,
48614diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
48615index 4446bf5..9a3574d 100644
48616--- a/drivers/scsi/qla2xxx/qla_gbl.h
48617+++ b/drivers/scsi/qla2xxx/qla_gbl.h
48618@@ -538,8 +538,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
48619 struct device_attribute;
48620 extern struct device_attribute *qla2x00_host_attrs[];
48621 struct fc_function_template;
48622-extern struct fc_function_template qla2xxx_transport_functions;
48623-extern struct fc_function_template qla2xxx_transport_vport_functions;
48624+extern fc_function_template_no_const qla2xxx_transport_functions;
48625+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
48626 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
48627 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
48628 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
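
The qla2xxx hunks show the other half of the constification plugin's contract. Ops tables full of function pointers are normally forced const so an attacker cannot redirect them, but qla2xxx adjusts its fc_function_template objects at runtime, so the patch switches the two definitions and their externs to the *_no_const typedef, which is presumably defined elsewhere in the patch as the same struct type minus the forced const. A standalone sketch of the idea (the typedef name follows the patch's convention; the struct is simplified):

#include <stdio.h>

struct fc_function_template {
    void (*get_host_port_id)(void);
};

/* the non-constified alias the plugin's convention provides */
typedef struct fc_function_template fc_function_template_no_const;

static void port_id_v1(void) { puts("v1"); }
static void port_id_v2(void) { puts("v2"); }

/* read-only table: the common, hardened case */
static const struct fc_function_template fixed_tmpl = {
    .get_host_port_id = port_id_v1,
};

/* writable table: the qla2xxx case, adjusted at runtime */
static fc_function_template_no_const runtime_tmpl = {
    .get_host_port_id = port_id_v1,
};

int main(void)
{
    runtime_tmpl.get_host_port_id = port_id_v2;  /* legal: not const */
    /* fixed_tmpl.get_host_port_id = port_id_v2;    compile error */
    fixed_tmpl.get_host_port_id();
    runtime_tmpl.get_host_port_id();
    return 0;
}
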
48629diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
48630index 52be35e..b933907 100644
48631--- a/drivers/scsi/qla2xxx/qla_os.c
48632+++ b/drivers/scsi/qla2xxx/qla_os.c
48633@@ -1568,8 +1568,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
48634 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
48635 /* Ok, a 64bit DMA mask is applicable. */
48636 ha->flags.enable_64bit_addressing = 1;
48637- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
48638- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
48639+ pax_open_kernel();
48640+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
48641+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
48642+ pax_close_kernel();
48643 return;
48644 }
48645 }
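
Where a nominally read-only ops table really must be written once, as with the 64-bit DMA method pointers above, the patch brackets the store with pax_open_kernel()/pax_close_kernel(), which temporarily lift kernel write protection, and goes through a *(void **)& cast to defeat the const qualifier. A rough standalone model, using mprotect() in place of the real PaX machinery (the pax_* names here are stand-ins, not the kernel primitives):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct isp_ops {
    int (*calc_req_entries)(int);
};

static int calc_32(int n) { return n; }
static int calc_64(int n) { return n * 2; }

static struct isp_ops *ops;     /* lives in a page we flip RO/RW */

static void pax_open_kernel(void)  { mprotect(ops, getpagesize(), PROT_READ | PROT_WRITE); }
static void pax_close_kernel(void) { mprotect(ops, getpagesize(), PROT_READ); }

int main(void)
{
    ops = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (ops == MAP_FAILED)
        return 1;
    ops->calc_req_entries = calc_32;
    mprotect(ops, getpagesize(), PROT_READ);        /* table is now read-only */

    pax_open_kernel();
    *(void **)&ops->calc_req_entries = (void *)calc_64; /* the patched idiom */
    pax_close_kernel();

    printf("%d\n", ops->calc_req_entries(21));      /* prints 42 */
    return 0;
}
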
48646diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
48647index 084d1fd..9f939eb 100644
48648--- a/drivers/scsi/qla4xxx/ql4_def.h
48649+++ b/drivers/scsi/qla4xxx/ql4_def.h
48650@@ -296,7 +296,7 @@ struct ddb_entry {
48651 * (4000 only) */
48652 atomic_t relogin_timer; /* Max Time to wait for
48653 * relogin to complete */
48654- atomic_t relogin_retry_count; /* Num of times relogin has been
48655+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
48656 * retried */
48657 uint32_t default_time2wait; /* Default Min time between
48658 * relogins (+aens) */
48659diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
48660index cf174a4..128a420 100644
48661--- a/drivers/scsi/qla4xxx/ql4_os.c
48662+++ b/drivers/scsi/qla4xxx/ql4_os.c
48663@@ -3311,12 +3311,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
48664 */
48665 if (!iscsi_is_session_online(cls_sess)) {
48666 /* Reset retry relogin timer */
48667- atomic_inc(&ddb_entry->relogin_retry_count);
48668+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
48669 DEBUG2(ql4_printk(KERN_INFO, ha,
48670 "%s: index[%d] relogin timed out-retrying"
48671 " relogin (%d), retry (%d)\n", __func__,
48672 ddb_entry->fw_ddb_index,
48673- atomic_read(&ddb_entry->relogin_retry_count),
48674+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
48675 ddb_entry->default_time2wait + 4));
48676 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
48677 atomic_set(&ddb_entry->retry_relogin_timer,
48678@@ -5458,7 +5458,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
48679
48680 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
48681 atomic_set(&ddb_entry->relogin_timer, 0);
48682- atomic_set(&ddb_entry->relogin_retry_count, 0);
48683+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
48684 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
48685 ddb_entry->default_relogin_timeout =
48686 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
48687diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
48688index fe0bcb1..c9255be 100644
48689--- a/drivers/scsi/scsi.c
48690+++ b/drivers/scsi/scsi.c
48691@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
48692 struct Scsi_Host *host = cmd->device->host;
48693 int rtn = 0;
48694
48695- atomic_inc(&cmd->device->iorequest_cnt);
48696+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
48697
48698 /* check if the device is still usable */
48699 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
48700diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
48701index 7bd7f0d..44147bf 100644
48702--- a/drivers/scsi/scsi_lib.c
48703+++ b/drivers/scsi/scsi_lib.c
48704@@ -1474,7 +1474,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
48705 shost = sdev->host;
48706 scsi_init_cmd_errh(cmd);
48707 cmd->result = DID_NO_CONNECT << 16;
48708- atomic_inc(&cmd->device->iorequest_cnt);
48709+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
48710
48711 /*
48712 * SCSI request completion path will do scsi_device_unbusy(),
48713@@ -1500,9 +1500,9 @@ static void scsi_softirq_done(struct request *rq)
48714
48715 INIT_LIST_HEAD(&cmd->eh_entry);
48716
48717- atomic_inc(&cmd->device->iodone_cnt);
48718+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
48719 if (cmd->result)
48720- atomic_inc(&cmd->device->ioerr_cnt);
48721+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
48722
48723 disposition = scsi_decide_disposition(cmd);
48724 if (disposition != SUCCESS &&
48725diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
48726index 8ff62c2..693b6f7 100644
48727--- a/drivers/scsi/scsi_sysfs.c
48728+++ b/drivers/scsi/scsi_sysfs.c
48729@@ -725,7 +725,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
48730 char *buf) \
48731 { \
48732 struct scsi_device *sdev = to_scsi_device(dev); \
48733- unsigned long long count = atomic_read(&sdev->field); \
48734+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
48735 return snprintf(buf, 20, "0x%llx\n", count); \
48736 } \
48737 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
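
The show_iostat_##field macro above generates one sysfs read handler per I/O statistics field, and after this patch each handler reads through atomic_read_unchecked(). Hand-expanding the macro for field = iorequest_cnt gives the following (kernel context assumed; this is just the expansion of the code above, not additional code):

static ssize_t show_iostat_iorequest_cnt(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
    struct scsi_device *sdev = to_scsi_device(dev);
    unsigned long long count = atomic_read_unchecked(&sdev->iorequest_cnt);
    return snprintf(buf, 20, "0x%llx\n", count);
}
static DEVICE_ATTR(iorequest_cnt, S_IRUGO, show_iostat_iorequest_cnt, NULL);
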
48738diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
48739index 84a1fdf..693b0d6 100644
48740--- a/drivers/scsi/scsi_tgt_lib.c
48741+++ b/drivers/scsi/scsi_tgt_lib.c
48742@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
48743 int err;
48744
48745 dprintk("%lx %u\n", uaddr, len);
48746- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
48747+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
48748 if (err) {
48749 /*
48750 * TODO: need to fixup sg_tablesize, max_segment_size,
48751diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
48752index 4628fd5..a94a1c2 100644
48753--- a/drivers/scsi/scsi_transport_fc.c
48754+++ b/drivers/scsi/scsi_transport_fc.c
48755@@ -497,7 +497,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
48756 * Netlink Infrastructure
48757 */
48758
48759-static atomic_t fc_event_seq;
48760+static atomic_unchecked_t fc_event_seq;
48761
48762 /**
48763 * fc_get_event_number - Obtain the next sequential FC event number
48764@@ -510,7 +510,7 @@ static atomic_t fc_event_seq;
48765 u32
48766 fc_get_event_number(void)
48767 {
48768- return atomic_add_return(1, &fc_event_seq);
48769+ return atomic_add_return_unchecked(1, &fc_event_seq);
48770 }
48771 EXPORT_SYMBOL(fc_get_event_number);
48772
48773@@ -654,7 +654,7 @@ static __init int fc_transport_init(void)
48774 {
48775 int error;
48776
48777- atomic_set(&fc_event_seq, 0);
48778+ atomic_set_unchecked(&fc_event_seq, 0);
48779
48780 error = transport_class_register(&fc_host_class);
48781 if (error)
48782@@ -844,7 +844,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
48783 char *cp;
48784
48785 *val = simple_strtoul(buf, &cp, 0);
48786- if ((*cp && (*cp != '\n')) || (*val < 0))
48787+ if (*cp && (*cp != '\n'))
48788 return -EINVAL;
48789 /*
48790 * Check for overflow; dev_loss_tmo is u32
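
The fc_str_to_dev_loss fix above drops a test that could never fire: *val comes from simple_strtoul() and has type unsigned long, so (*val < 0) is constant false, which gcc's -Wtype-limits flags directly. A minimal illustration:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    unsigned long val = strtoul("-1", NULL, 0); /* wraps to ULONG_MAX */

    if (val < 0)    /* always false; -Wtype-limits warns about this */
        puts("unreachable");
    printf("val = %lu\n", val);
    return 0;
}
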
48791diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
48792index 63a6ca4..5d5cadd 100644
48793--- a/drivers/scsi/scsi_transport_iscsi.c
48794+++ b/drivers/scsi/scsi_transport_iscsi.c
48795@@ -79,7 +79,7 @@ struct iscsi_internal {
48796 struct transport_container session_cont;
48797 };
48798
48799-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
48800+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
48801 static struct workqueue_struct *iscsi_eh_timer_workq;
48802
48803 static DEFINE_IDA(iscsi_sess_ida);
48804@@ -1737,7 +1737,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
48805 int err;
48806
48807 ihost = shost->shost_data;
48808- session->sid = atomic_add_return(1, &iscsi_session_nr);
48809+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
48810
48811 if (target_id == ISCSI_MAX_TARGET) {
48812 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
48813@@ -4103,7 +4103,7 @@ static __init int iscsi_transport_init(void)
48814 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
48815 ISCSI_TRANSPORT_VERSION);
48816
48817- atomic_set(&iscsi_session_nr, 0);
48818+ atomic_set_unchecked(&iscsi_session_nr, 0);
48819
48820 err = class_register(&iscsi_transport_class);
48821 if (err)
48822diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
48823index 2700a5a..752ec38 100644
48824--- a/drivers/scsi/scsi_transport_srp.c
48825+++ b/drivers/scsi/scsi_transport_srp.c
48826@@ -36,7 +36,7 @@
48827 #include "scsi_transport_srp_internal.h"
48828
48829 struct srp_host_attrs {
48830- atomic_t next_port_id;
48831+ atomic_unchecked_t next_port_id;
48832 };
48833 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
48834
48835@@ -94,7 +94,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
48836 struct Scsi_Host *shost = dev_to_shost(dev);
48837 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
48838
48839- atomic_set(&srp_host->next_port_id, 0);
48840+ atomic_set_unchecked(&srp_host->next_port_id, 0);
48841 return 0;
48842 }
48843
48844@@ -730,7 +730,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
48845 rport_fast_io_fail_timedout);
48846 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
48847
48848- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
48849+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
48850 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
48851
48852 transport_setup_device(&rport->dev);
48853diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
48854index 69725f7..03aaee1 100644
48855--- a/drivers/scsi/sd.c
48856+++ b/drivers/scsi/sd.c
48857@@ -2964,7 +2964,7 @@ static int sd_probe(struct device *dev)
48858 sdkp->disk = gd;
48859 sdkp->index = index;
48860 atomic_set(&sdkp->openers, 0);
48861- atomic_set(&sdkp->device->ioerr_cnt, 0);
48862+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
48863
48864 if (!sdp->request_queue->rq_timeout) {
48865 if (sdp->type != TYPE_MOD)
48866diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
48867index df5e961..df6b97f 100644
48868--- a/drivers/scsi/sg.c
48869+++ b/drivers/scsi/sg.c
48870@@ -1102,7 +1102,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
48871 sdp->disk->disk_name,
48872 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
48873 NULL,
48874- (char *)arg);
48875+ (char __user *)arg);
48876 case BLKTRACESTART:
48877 return blk_trace_startstop(sdp->device->request_queue, 1);
48878 case BLKTRACESTOP:
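
The (void __user *) and (char __user *) casts in scsi_tgt_lib.c and sg.c above are sparse fixes: __user tags pointers into userspace so the static checker rejects direct dereferences and mismatched address spaces. Outside a sparse run the attribute compiles away. A self-contained sketch of how the annotation is wired up (copy_to_user_model is a hypothetical stand-in, not the kernel helper):

#include <stdio.h>
#include <string.h>

#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

/* hypothetical stand-in for copy_to_user(); returns bytes left uncopied */
static unsigned long copy_to_user_model(void __user *to, const void *from,
                                        unsigned long n)
{
    /* real kernel code must never memcpy() to a __user pointer directly */
    memcpy((__force void *)to, from, n);
    return 0;
}

int main(void)
{
    char dst[16];
    int resp = 42, out;

    /* the cast mirrors the hunks above: a raw pointer becomes __user */
    if (copy_to_user_model((void __user *)dst, &resp, sizeof(resp)))
        return 1;
    memcpy(&out, dst, sizeof(out));
    printf("%d\n", out);
    return 0;
}
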
48879diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
48880index 349ebba..ff2a249 100644
48881--- a/drivers/spi/spi.c
48882+++ b/drivers/spi/spi.c
48883@@ -1945,7 +1945,7 @@ int spi_bus_unlock(struct spi_master *master)
48884 EXPORT_SYMBOL_GPL(spi_bus_unlock);
48885
48886 /* portable code must never pass more than 32 bytes */
48887-#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
48888+#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
48889
48890 static u8 *buf;
48891
48892diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
48893index 2c61783..4d49e4e 100644
48894--- a/drivers/staging/android/timed_output.c
48895+++ b/drivers/staging/android/timed_output.c
48896@@ -25,7 +25,7 @@
48897 #include "timed_output.h"
48898
48899 static struct class *timed_output_class;
48900-static atomic_t device_count;
48901+static atomic_unchecked_t device_count;
48902
48903 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
48904 char *buf)
48905@@ -63,7 +63,7 @@ static int create_timed_output_class(void)
48906 timed_output_class = class_create(THIS_MODULE, "timed_output");
48907 if (IS_ERR(timed_output_class))
48908 return PTR_ERR(timed_output_class);
48909- atomic_set(&device_count, 0);
48910+ atomic_set_unchecked(&device_count, 0);
48911 timed_output_class->dev_groups = timed_output_groups;
48912 }
48913
48914@@ -81,7 +81,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
48915 if (ret < 0)
48916 return ret;
48917
48918- tdev->index = atomic_inc_return(&device_count);
48919+ tdev->index = atomic_inc_return_unchecked(&device_count);
48920 tdev->dev = device_create(timed_output_class, NULL,
48921 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
48922 if (IS_ERR(tdev->dev))
48923diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
48924index c0f7cd7..5424212 100644
48925--- a/drivers/staging/gdm724x/gdm_tty.c
48926+++ b/drivers/staging/gdm724x/gdm_tty.c
48927@@ -45,7 +45,7 @@
48928 #define gdm_tty_send_control(n, r, v, d, l) (\
48929 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
48930
48931-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
48932+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
48933
48934 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
48935 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
48936diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
48937index b7613c8..c302392 100644
48938--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
48939+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
48940@@ -487,13 +487,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
48941 return 0;
48942 }
48943
48944-sfw_test_client_ops_t brw_test_client;
48945-void brw_init_test_client(void)
48946-{
48947- brw_test_client.tso_init = brw_client_init;
48948- brw_test_client.tso_fini = brw_client_fini;
48949- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
48950- brw_test_client.tso_done_rpc = brw_client_done_rpc;
48951+sfw_test_client_ops_t brw_test_client = {
48952+ .tso_init = brw_client_init,
48953+ .tso_fini = brw_client_fini,
48954+ .tso_prep_rpc = brw_client_prep_rpc,
48955+ .tso_done_rpc = brw_client_done_rpc,
48956 };
48957
48958 srpc_service_t brw_test_service;
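
The lustre selftest changes here and in ping_test.c below replace a run-time "fill in the ops struct" helper with build-time designated initializers. Beyond deleting code, this is what lets a constified ops structure live in read-only data: the old pattern forced the object to stay writable until the init function had run. A standalone sketch of the two styles (types and names simplified):

#include <stdio.h>

struct test_client_ops {
    void (*tso_init)(void);
    void (*tso_fini)(void);
};

static void brw_client_init(void) { puts("init"); }
static void brw_client_fini(void) { puts("fini"); }

/* old style: writable object plus an init helper to call before use */
static struct test_client_ops brw_old;
static void brw_init_test_client(void)
{
    brw_old.tso_init = brw_client_init;
    brw_old.tso_fini = brw_client_fini;
}

/* new style: fully initialized at build time, so it can be const/.rodata */
static const struct test_client_ops brw_new = {
    .tso_init = brw_client_init,
    .tso_fini = brw_client_fini,
};

int main(void)
{
    brw_init_test_client();
    brw_old.tso_init();
    brw_new.tso_init();
    return 0;
}
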
48959diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
48960index 483c785..e1a2a7b 100644
48961--- a/drivers/staging/lustre/lnet/selftest/framework.c
48962+++ b/drivers/staging/lustre/lnet/selftest/framework.c
48963@@ -1635,12 +1635,10 @@ static srpc_service_t sfw_services[] =
48964
48965 extern sfw_test_client_ops_t ping_test_client;
48966 extern srpc_service_t ping_test_service;
48967-extern void ping_init_test_client(void);
48968 extern void ping_init_test_service(void);
48969
48970 extern sfw_test_client_ops_t brw_test_client;
48971 extern srpc_service_t brw_test_service;
48972-extern void brw_init_test_client(void);
48973 extern void brw_init_test_service(void);
48974
48975
48976@@ -1684,12 +1682,10 @@ sfw_startup (void)
48977 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
48978 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
48979
48980- brw_init_test_client();
48981 brw_init_test_service();
48982 rc = sfw_register_test(&brw_test_service, &brw_test_client);
48983 LASSERT (rc == 0);
48984
48985- ping_init_test_client();
48986 ping_init_test_service();
48987 rc = sfw_register_test(&ping_test_service, &ping_test_client);
48988 LASSERT (rc == 0);
48989diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
48990index f0f9194..b589047 100644
48991--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
48992+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
48993@@ -210,14 +210,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
48994 return 0;
48995 }
48996
48997-sfw_test_client_ops_t ping_test_client;
48998-void ping_init_test_client(void)
48999-{
49000- ping_test_client.tso_init = ping_client_init;
49001- ping_test_client.tso_fini = ping_client_fini;
49002- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
49003- ping_test_client.tso_done_rpc = ping_client_done_rpc;
49004-}
49005+sfw_test_client_ops_t ping_test_client = {
49006+ .tso_init = ping_client_init,
49007+ .tso_fini = ping_client_fini,
49008+ .tso_prep_rpc = ping_client_prep_rpc,
49009+ .tso_done_rpc = ping_client_done_rpc,
49010+};
49011
49012 srpc_service_t ping_test_service;
49013 void ping_init_test_service(void)
49014diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
49015index bc2b82f..67fd598 100644
49016--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
49017+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
49018@@ -1141,7 +1141,7 @@ struct ldlm_callback_suite {
49019 ldlm_completion_callback lcs_completion;
49020 ldlm_blocking_callback lcs_blocking;
49021 ldlm_glimpse_callback lcs_glimpse;
49022-};
49023+} __no_const;
49024
49025 /* ldlm_lockd.c */
49026 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
49027diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
49028index d0aea15..7af68e1 100644
49029--- a/drivers/staging/lustre/lustre/include/obd.h
49030+++ b/drivers/staging/lustre/lustre/include/obd.h
49031@@ -1417,7 +1417,7 @@ struct md_ops {
49032 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
49033 * wrapper function in include/linux/obd_class.h.
49034 */
49035-};
49036+} __no_const;
49037
49038 struct lsm_operations {
49039 void (*lsm_free)(struct lov_stripe_md *);
49040diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
49041index 39fcdac..222780f 100644
49042--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
49043+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
49044@@ -249,7 +249,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
49045 int added = (mode == LCK_NL);
49046 int overlaps = 0;
49047 int splitted = 0;
49048- const struct ldlm_callback_suite null_cbs = { NULL };
49049+ const struct ldlm_callback_suite null_cbs = { };
49050 int rc;
49051
49052 CDEBUG(D_DLMTRACE, "flags %#llx owner "LPU64" pid %u mode %u start "
49053diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
49054index fc6c977..df1f956 100644
49055--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
49056+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
49057@@ -219,7 +219,7 @@ DECLARE_PROC_HANDLER(proc_debug_mb)
49058 int LL_PROC_PROTO(proc_console_max_delay_cs)
49059 {
49060 int rc, max_delay_cs;
49061- ctl_table_t dummy = *table;
49062+ ctl_table_no_const dummy = *table;
49063 cfs_duration_t d;
49064
49065 dummy.data = &max_delay_cs;
49066@@ -250,7 +250,7 @@ int LL_PROC_PROTO(proc_console_max_delay_cs)
49067 int LL_PROC_PROTO(proc_console_min_delay_cs)
49068 {
49069 int rc, min_delay_cs;
49070- ctl_table_t dummy = *table;
49071+ ctl_table_no_const dummy = *table;
49072 cfs_duration_t d;
49073
49074 dummy.data = &min_delay_cs;
49075@@ -281,7 +281,7 @@ int LL_PROC_PROTO(proc_console_min_delay_cs)
49076 int LL_PROC_PROTO(proc_console_backoff)
49077 {
49078 int rc, backoff;
49079- ctl_table_t dummy = *table;
49080+ ctl_table_no_const dummy = *table;
49081
49082 dummy.data = &backoff;
49083 dummy.proc_handler = &proc_dointvec;
49084diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
49085index f3108c7..cd4f9da 100644
49086--- a/drivers/staging/lustre/lustre/libcfs/module.c
49087+++ b/drivers/staging/lustre/lustre/libcfs/module.c
49088@@ -348,11 +348,11 @@ out:
49089
49090
49091 struct cfs_psdev_ops libcfs_psdev_ops = {
49092- libcfs_psdev_open,
49093- libcfs_psdev_release,
49094- NULL,
49095- NULL,
49096- libcfs_ioctl
49097+ .p_open = libcfs_psdev_open,
49098+ .p_close = libcfs_psdev_release,
49099+ .p_read = NULL,
49100+ .p_write = NULL,
49101+ .p_ioctl = libcfs_ioctl
49102 };
49103
49104 extern int insert_proc(void);
49105diff --git a/drivers/staging/media/solo6x10/solo6x10-core.c b/drivers/staging/media/solo6x10/solo6x10-core.c
49106index 3675020..e80d92c 100644
49107--- a/drivers/staging/media/solo6x10/solo6x10-core.c
49108+++ b/drivers/staging/media/solo6x10/solo6x10-core.c
49109@@ -434,7 +434,7 @@ static void solo_device_release(struct device *dev)
49110
49111 static int solo_sysfs_init(struct solo_dev *solo_dev)
49112 {
49113- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
49114+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
49115 struct device *dev = &solo_dev->dev;
49116 const char *driver;
49117 int i;
49118diff --git a/drivers/staging/media/solo6x10/solo6x10-g723.c b/drivers/staging/media/solo6x10/solo6x10-g723.c
49119index 1db18c7..35e6afc 100644
49120--- a/drivers/staging/media/solo6x10/solo6x10-g723.c
49121+++ b/drivers/staging/media/solo6x10/solo6x10-g723.c
49122@@ -355,7 +355,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
49123
49124 int solo_g723_init(struct solo_dev *solo_dev)
49125 {
49126- static struct snd_device_ops ops = { NULL };
49127+ static struct snd_device_ops ops = { };
49128 struct snd_card *card;
49129 struct snd_kcontrol_new kctl;
49130 char name[32];
49131diff --git a/drivers/staging/media/solo6x10/solo6x10-p2m.c b/drivers/staging/media/solo6x10/solo6x10-p2m.c
49132index 7f2f247..d999137 100644
49133--- a/drivers/staging/media/solo6x10/solo6x10-p2m.c
49134+++ b/drivers/staging/media/solo6x10/solo6x10-p2m.c
49135@@ -77,7 +77,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
49136
49137 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
49138 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
49139- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
49140+ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
49141 if (p2m_id < 0)
49142 p2m_id = -p2m_id;
49143 }
49144diff --git a/drivers/staging/media/solo6x10/solo6x10.h b/drivers/staging/media/solo6x10/solo6x10.h
49145index f1bbb8c..a73eaba 100644
49146--- a/drivers/staging/media/solo6x10/solo6x10.h
49147+++ b/drivers/staging/media/solo6x10/solo6x10.h
49148@@ -237,7 +237,7 @@ struct solo_dev {
49149
49150 /* P2M DMA Engine */
49151 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
49152- atomic_t p2m_count;
49153+ atomic_unchecked_t p2m_count;
49154 int p2m_jiffies;
49155 unsigned int p2m_timeouts;
49156
49157diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
49158index 0315f60..2ecae10 100644
49159--- a/drivers/staging/octeon/ethernet-rx.c
49160+++ b/drivers/staging/octeon/ethernet-rx.c
49161@@ -418,11 +418,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
49162 /* Increment RX stats for virtual ports */
49163 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
49164 #ifdef CONFIG_64BIT
49165- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
49166- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
49167+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
49168+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
49169 #else
49170- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
49171- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
49172+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
49173+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
49174 #endif
49175 }
49176 netif_receive_skb(skb);
49177@@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
49178 dev->name);
49179 */
49180 #ifdef CONFIG_64BIT
49181- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
49182+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);

49183 #else
49184- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
49185+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
49186 #endif
49187 dev_kfree_skb_irq(skb);
49188 }
49189diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
49190index bd6ca71..8f0961e 100644
49191--- a/drivers/staging/octeon/ethernet.c
49192+++ b/drivers/staging/octeon/ethernet.c
49193@@ -254,11 +254,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
49194 * since the RX tasklet also increments it.
49195 */
49196 #ifdef CONFIG_64BIT
49197- atomic64_add(rx_status.dropped_packets,
49198- (atomic64_t *)&priv->stats.rx_dropped);
49199+ atomic64_add_unchecked(rx_status.dropped_packets,
49200+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
49201 #else
49202- atomic_add(rx_status.dropped_packets,
49203- (atomic_t *)&priv->stats.rx_dropped);
49204+ atomic_add_unchecked(rx_status.dropped_packets,
49205+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
49206 #endif
49207 }
49208
49209diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
49210index 439c3c9..2d74293 100644
49211--- a/drivers/staging/rtl8188eu/include/hal_intf.h
49212+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
49213@@ -271,7 +271,7 @@ struct hal_ops {
49214 s32 (*c2h_handler)(struct adapter *padapter,
49215 struct c2h_evt_hdr *c2h_evt);
49216 c2h_id_filter c2h_id_filter_ccx;
49217-};
49218+} __no_const;
49219
49220 enum rt_eeprom_type {
49221 EEPROM_93C46,
49222diff --git a/drivers/staging/rtl8188eu/include/rtw_io.h b/drivers/staging/rtl8188eu/include/rtw_io.h
49223index eb6f0e5..e6a0958 100644
49224--- a/drivers/staging/rtl8188eu/include/rtw_io.h
49225+++ b/drivers/staging/rtl8188eu/include/rtw_io.h
49226@@ -126,7 +126,7 @@ struct _io_ops {
49227 u32 (*_write_scsi)(struct intf_hdl *pintfhdl,u32 cnt, u8 *pmem);
49228 void (*_read_port_cancel)(struct intf_hdl *pintfhdl);
49229 void (*_write_port_cancel)(struct intf_hdl *pintfhdl);
49230-};
49231+} __no_const;
49232
49233 struct io_req {
49234 struct list_head list;
49235diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
49236index dc23395..cf7e9b1 100644
49237--- a/drivers/staging/rtl8712/rtl871x_io.h
49238+++ b/drivers/staging/rtl8712/rtl871x_io.h
49239@@ -108,7 +108,7 @@ struct _io_ops {
49240 u8 *pmem);
49241 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
49242 u8 *pmem);
49243-};
49244+} __no_const;
49245
49246 struct io_req {
49247 struct list_head list;
49248diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
49249index 1f5088b..0e59820 100644
49250--- a/drivers/staging/sbe-2t3e3/netdev.c
49251+++ b/drivers/staging/sbe-2t3e3/netdev.c
49252@@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
49253 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
49254
49255 if (rlen)
49256- if (copy_to_user(data, &resp, rlen))
49257+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
49258 return -EFAULT;
49259
49260 return 0;
49261diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
49262index a863a98..d272795 100644
49263--- a/drivers/staging/usbip/vhci.h
49264+++ b/drivers/staging/usbip/vhci.h
49265@@ -83,7 +83,7 @@ struct vhci_hcd {
49266 unsigned resuming:1;
49267 unsigned long re_timeout;
49268
49269- atomic_t seqnum;
49270+ atomic_unchecked_t seqnum;
49271
49272 /*
49273 * NOTE:
49274diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
49275index e810ad5..931336f 100644
49276--- a/drivers/staging/usbip/vhci_hcd.c
49277+++ b/drivers/staging/usbip/vhci_hcd.c
49278@@ -441,7 +441,7 @@ static void vhci_tx_urb(struct urb *urb)
49279
49280 spin_lock(&vdev->priv_lock);
49281
49282- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
49283+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
49284 if (priv->seqnum == 0xffff)
49285 dev_info(&urb->dev->dev, "seqnum max\n");
49286
49287@@ -687,7 +687,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
49288 return -ENOMEM;
49289 }
49290
49291- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
49292+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
49293 if (unlink->seqnum == 0xffff)
49294 pr_info("seqnum max\n");
49295
49296@@ -891,7 +891,7 @@ static int vhci_start(struct usb_hcd *hcd)
49297 vdev->rhport = rhport;
49298 }
49299
49300- atomic_set(&vhci->seqnum, 0);
49301+ atomic_set_unchecked(&vhci->seqnum, 0);
49302 spin_lock_init(&vhci->lock);
49303
49304 hcd->power_budget = 0; /* no limit */
49305diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
49306index d07fcb5..358e1e1 100644
49307--- a/drivers/staging/usbip/vhci_rx.c
49308+++ b/drivers/staging/usbip/vhci_rx.c
49309@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
49310 if (!urb) {
49311 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
49312 pr_info("max seqnum %d\n",
49313- atomic_read(&the_controller->seqnum));
49314+ atomic_read_unchecked(&the_controller->seqnum));
49315 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
49316 return;
49317 }
49318diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
49319index ab8b2ba..99184aa 100644
49320--- a/drivers/staging/vt6655/hostap.c
49321+++ b/drivers/staging/vt6655/hostap.c
49322@@ -69,14 +69,13 @@ static int msglevel = MSG_LEVEL_INFO;
49323 *
49324 */
49325
49326+static net_device_ops_no_const apdev_netdev_ops;
49327+
49328 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
49329 {
49330 PSDevice apdev_priv;
49331 struct net_device *dev = pDevice->dev;
49332 int ret;
49333- const struct net_device_ops apdev_netdev_ops = {
49334- .ndo_start_xmit = pDevice->tx_80211,
49335- };
49336
49337 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
49338
49339@@ -88,6 +87,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
49340 *apdev_priv = *pDevice;
49341 eth_hw_addr_inherit(pDevice->apdev, dev);
49342
49343+ /* only half broken now */
49344+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
49345 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
49346
49347 pDevice->apdev->type = ARPHRD_IEEE80211;
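
The vt6655 hostap change (mirrored for vt6656 just below) is the awkward case for constification: ndo_start_xmit is only known at runtime (pDevice->tx_80211), so a fully const per-device ops struct is impossible. The patch settles for one writable file-scope net_device_ops_no_const filled in at enable time, hence the wry "/* only half broken now */": every hostap device now shares a single mutable table. A sketch of the trade-off (struct and names simplified):

#include <stdio.h>

struct net_device_ops {
    int (*ndo_start_xmit)(const char *);
};
typedef struct net_device_ops net_device_ops_no_const;

static int tx_80211_a(const char *m) { printf("A: %s\n", m); return 0; }

/* shared and writable: accepts a runtime callback, but is global state */
static net_device_ops_no_const apdev_netdev_ops;

static void hostap_enable(int (*tx)(const char *))
{
    apdev_netdev_ops.ndo_start_xmit = tx;   /* only half broken now */
}

int main(void)
{
    hostap_enable(tx_80211_a);
    return apdev_netdev_ops.ndo_start_xmit("frame");
}
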
49348diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
49349index 67ba48b..24e602f 100644
49350--- a/drivers/staging/vt6656/hostap.c
49351+++ b/drivers/staging/vt6656/hostap.c
49352@@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
49353 *
49354 */
49355
49356+static net_device_ops_no_const apdev_netdev_ops;
49357+
49358 static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
49359 {
49360 struct vnt_private *apdev_priv;
49361 struct net_device *dev = pDevice->dev;
49362 int ret;
49363- const struct net_device_ops apdev_netdev_ops = {
49364- .ndo_start_xmit = pDevice->tx_80211,
49365- };
49366
49367 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
49368
49369@@ -79,6 +78,8 @@ static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
49370 *apdev_priv = *pDevice;
49371 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
49372
49373+ /* only half broken now */
49374+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
49375 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
49376
49377 pDevice->apdev->type = ARPHRD_IEEE80211;
49378diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
49379index 24884ca..26c8220 100644
49380--- a/drivers/target/sbp/sbp_target.c
49381+++ b/drivers/target/sbp/sbp_target.c
49382@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
49383
49384 #define SESSION_MAINTENANCE_INTERVAL HZ
49385
49386-static atomic_t login_id = ATOMIC_INIT(0);
49387+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
49388
49389 static void session_maintenance_work(struct work_struct *);
49390 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
49391@@ -444,7 +444,7 @@ static void sbp_management_request_login(
49392 login->lun = se_lun;
49393 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
49394 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
49395- login->login_id = atomic_inc_return(&login_id);
49396+ login->login_id = atomic_inc_return_unchecked(&login_id);
49397
49398 login->tgt_agt = sbp_target_agent_register(login);
49399 if (IS_ERR(login->tgt_agt)) {
49400diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
49401index d06de84..fd38c9b 100644
49402--- a/drivers/target/target_core_device.c
49403+++ b/drivers/target/target_core_device.c
49404@@ -1435,7 +1435,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
49405 spin_lock_init(&dev->se_tmr_lock);
49406 spin_lock_init(&dev->qf_cmd_lock);
49407 sema_init(&dev->caw_sem, 1);
49408- atomic_set(&dev->dev_ordered_id, 0);
49409+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
49410 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
49411 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
49412 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
49413diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
49414index 91953da..a842b90 100644
49415--- a/drivers/target/target_core_transport.c
49416+++ b/drivers/target/target_core_transport.c
49417@@ -1112,7 +1112,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
49418 * Used to determine when ORDERED commands should go from
49419 * Dormant to Active status.
49420 */
49421- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
49422+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
49423 smp_mb__after_atomic_inc();
49424 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
49425 cmd->se_ordered_id, cmd->sam_task_attr,
49426diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
49427index 33f83fe..d80f8e1 100644
49428--- a/drivers/tty/cyclades.c
49429+++ b/drivers/tty/cyclades.c
49430@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
49431 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
49432 info->port.count);
49433 #endif
49434- info->port.count++;
49435+ atomic_inc(&info->port.count);
49436 #ifdef CY_DEBUG_COUNT
49437 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
49438- current->pid, info->port.count);
49439+ current->pid, atomic_read(&info->port.count));
49440 #endif
49441
49442 /*
49443@@ -3972,7 +3972,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
49444 for (j = 0; j < cy_card[i].nports; j++) {
49445 info = &cy_card[i].ports[j];
49446
49447- if (info->port.count) {
49448+ if (atomic_read(&info->port.count)) {
49449 /* XXX is the ldisc num worth this? */
49450 struct tty_struct *tty;
49451 struct tty_ldisc *ld;
49452diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
49453index 9eba119..5070303 100644
49454--- a/drivers/tty/hvc/hvc_console.c
49455+++ b/drivers/tty/hvc/hvc_console.c
49456@@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
49457
49458 spin_lock_irqsave(&hp->port.lock, flags);
49459 /* Check and then increment for fast path open. */
49460- if (hp->port.count++ > 0) {
49461+ if (atomic_inc_return(&hp->port.count) > 1) {
49462 spin_unlock_irqrestore(&hp->port.lock, flags);
49463 hvc_kick();
49464 return 0;
49465@@ -393,7 +393,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
49466
49467 spin_lock_irqsave(&hp->port.lock, flags);
49468
49469- if (--hp->port.count == 0) {
49470+ if (atomic_dec_return(&hp->port.count) == 0) {
49471 spin_unlock_irqrestore(&hp->port.lock, flags);
49472 /* We are done with the tty pointer now. */
49473 tty_port_tty_set(&hp->port, NULL);
49474@@ -415,9 +415,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
49475 */
49476 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
49477 } else {
49478- if (hp->port.count < 0)
49479+ if (atomic_read(&hp->port.count) < 0)
49480 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
49481- hp->vtermno, hp->port.count);
49482+ hp->vtermno, atomic_read(&hp->port.count));
49483 spin_unlock_irqrestore(&hp->port.lock, flags);
49484 }
49485 }
49486@@ -447,12 +447,12 @@ static void hvc_hangup(struct tty_struct *tty)
49487 * open->hangup case this can be called after the final close so prevent
49488 * that from happening for now.
49489 */
49490- if (hp->port.count <= 0) {
49491+ if (atomic_read(&hp->port.count) <= 0) {
49492 spin_unlock_irqrestore(&hp->port.lock, flags);
49493 return;
49494 }
49495
49496- hp->port.count = 0;
49497+ atomic_set(&hp->port.count, 0);
49498 spin_unlock_irqrestore(&hp->port.lock, flags);
49499 tty_port_tty_set(&hp->port, NULL);
49500
49501@@ -500,7 +500,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
49502 return -EPIPE;
49503
49504 /* FIXME what's this (unprotected) check for? */
49505- if (hp->port.count <= 0)
49506+ if (atomic_read(&hp->port.count) <= 0)
49507 return -EIO;
49508
49509 spin_lock_irqsave(&hp->lock, flags);
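
From here through the rest of the tty hunks, tty_port.count moves from a lock-protected int to an atomic_t. The mechanical translations deserve spelling out: a post-increment test such as "if (hp->port.count++ > 0)" becomes "if (atomic_inc_return(&hp->port.count) > 1)", because the post-increment tests the old value while inc_return yields the new one, shifting the threshold by one. A userspace model of the first-open/last-close idioms (plain C11 atomics; function names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int count;

/* old: if (port.count++ == 0)  ->  new: if (atomic_inc_return(&count) == 1) */
static int first_open(void)
{
    return atomic_fetch_add(&count, 1) + 1 == 1;
}

/* old: if (--port.count == 0)  ->  new: if (atomic_dec_return(&count) == 0) */
static int last_close(void)
{
    return atomic_fetch_sub(&count, 1) - 1 == 0;
}

int main(void)
{
    printf("first open: %d\n", first_open());   /* 1 */
    printf("first open: %d\n", first_open());   /* 0 */
    printf("last close: %d\n", last_close());   /* 0 */
    printf("last close: %d\n", last_close());   /* 1 */
    return 0;
}
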
49510diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
49511index 81e939e..95ead10 100644
49512--- a/drivers/tty/hvc/hvcs.c
49513+++ b/drivers/tty/hvc/hvcs.c
49514@@ -83,6 +83,7 @@
49515 #include <asm/hvcserver.h>
49516 #include <asm/uaccess.h>
49517 #include <asm/vio.h>
49518+#include <asm/local.h>
49519
49520 /*
49521 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
49522@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
49523
49524 spin_lock_irqsave(&hvcsd->lock, flags);
49525
49526- if (hvcsd->port.count > 0) {
49527+ if (atomic_read(&hvcsd->port.count) > 0) {
49528 spin_unlock_irqrestore(&hvcsd->lock, flags);
49529 printk(KERN_INFO "HVCS: vterm state unchanged. "
49530 "The hvcs device node is still in use.\n");
49531@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
49532 }
49533 }
49534
49535- hvcsd->port.count = 0;
49536+ atomic_set(&hvcsd->port.count, 0);
49537 hvcsd->port.tty = tty;
49538 tty->driver_data = hvcsd;
49539
49540@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
49541 unsigned long flags;
49542
49543 spin_lock_irqsave(&hvcsd->lock, flags);
49544- hvcsd->port.count++;
49545+ atomic_inc(&hvcsd->port.count);
49546 hvcsd->todo_mask |= HVCS_SCHED_READ;
49547 spin_unlock_irqrestore(&hvcsd->lock, flags);
49548
49549@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
49550 hvcsd = tty->driver_data;
49551
49552 spin_lock_irqsave(&hvcsd->lock, flags);
49553- if (--hvcsd->port.count == 0) {
49554+ if (atomic_dec_and_test(&hvcsd->port.count)) {
49555
49556 vio_disable_interrupts(hvcsd->vdev);
49557
49558@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
49559
49560 free_irq(irq, hvcsd);
49561 return;
49562- } else if (hvcsd->port.count < 0) {
49563+ } else if (atomic_read(&hvcsd->port.count) < 0) {
49564 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
49565 " is missmanaged.\n",
49566- hvcsd->vdev->unit_address, hvcsd->port.count);
49567+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
49568 }
49569
49570 spin_unlock_irqrestore(&hvcsd->lock, flags);
49571@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
49572
49573 spin_lock_irqsave(&hvcsd->lock, flags);
49574 /* Preserve this so that we know how many kref refs to put */
49575- temp_open_count = hvcsd->port.count;
49576+ temp_open_count = atomic_read(&hvcsd->port.count);
49577
49578 /*
49579 * Don't kref put inside the spinlock because the destruction
49580@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
49581 tty->driver_data = NULL;
49582 hvcsd->port.tty = NULL;
49583
49584- hvcsd->port.count = 0;
49585+ atomic_set(&hvcsd->port.count, 0);
49586
49587 /* This will drop any buffered data on the floor which is OK in a hangup
49588 * scenario. */
49589@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
49590 * the middle of a write operation? This is a crummy place to do this
49591 * but we want to keep it all in the spinlock.
49592 */
49593- if (hvcsd->port.count <= 0) {
49594+ if (atomic_read(&hvcsd->port.count) <= 0) {
49595 spin_unlock_irqrestore(&hvcsd->lock, flags);
49596 return -ENODEV;
49597 }
49598@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
49599 {
49600 struct hvcs_struct *hvcsd = tty->driver_data;
49601
49602- if (!hvcsd || hvcsd->port.count <= 0)
49603+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
49604 return 0;
49605
49606 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
49607diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
49608index 4190199..48f2920 100644
49609--- a/drivers/tty/hvc/hvsi.c
49610+++ b/drivers/tty/hvc/hvsi.c
49611@@ -85,7 +85,7 @@ struct hvsi_struct {
49612 int n_outbuf;
49613 uint32_t vtermno;
49614 uint32_t virq;
49615- atomic_t seqno; /* HVSI packet sequence number */
49616+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
49617 uint16_t mctrl;
49618 uint8_t state; /* HVSI protocol state */
49619 uint8_t flags;
49620@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
49621
49622 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
49623 packet.hdr.len = sizeof(struct hvsi_query_response);
49624- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
49625+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
49626 packet.verb = VSV_SEND_VERSION_NUMBER;
49627 packet.u.version = HVSI_VERSION;
49628 packet.query_seqno = query_seqno+1;
49629@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
49630
49631 packet.hdr.type = VS_QUERY_PACKET_HEADER;
49632 packet.hdr.len = sizeof(struct hvsi_query);
49633- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
49634+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
49635 packet.verb = verb;
49636
49637 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
49638@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
49639 int wrote;
49640
49641 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
49642- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
49643+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
49644 packet.hdr.len = sizeof(struct hvsi_control);
49645 packet.verb = VSV_SET_MODEM_CTL;
49646 packet.mask = HVSI_TSDTR;
49647@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
49648 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
49649
49650 packet.hdr.type = VS_DATA_PACKET_HEADER;
49651- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
49652+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
49653 packet.hdr.len = count + sizeof(struct hvsi_header);
49654 memcpy(&packet.data, buf, count);
49655
49656@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
49657 struct hvsi_control packet __ALIGNED__;
49658
49659 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
49660- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
49661+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
49662 packet.hdr.len = 6;
49663 packet.verb = VSV_CLOSE_PROTOCOL;
49664
49665diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
49666index 347050e..14f8fbf 100644
49667--- a/drivers/tty/hvc/hvsi_lib.c
49668+++ b/drivers/tty/hvc/hvsi_lib.c
49669@@ -9,7 +9,7 @@
49670
49671 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
49672 {
49673- packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
49674+ packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
49675
49676 /* Assumes that always succeeds, works in practice */
49677 return pv->put_chars(pv->termno, (char *)packet, packet->len);
49678@@ -21,7 +21,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
49679
49680 /* Reset state */
49681 pv->established = 0;
49682- atomic_set(&pv->seqno, 0);
49683+ atomic_set_unchecked(&pv->seqno, 0);
49684
49685 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
49686
49687diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
49688index 8fd72ff..34a0bed 100644
49689--- a/drivers/tty/ipwireless/tty.c
49690+++ b/drivers/tty/ipwireless/tty.c
49691@@ -29,6 +29,7 @@
49692 #include <linux/tty_driver.h>
49693 #include <linux/tty_flip.h>
49694 #include <linux/uaccess.h>
49695+#include <asm/local.h>
49696
49697 #include "tty.h"
49698 #include "network.h"
49699@@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
49700 mutex_unlock(&tty->ipw_tty_mutex);
49701 return -ENODEV;
49702 }
49703- if (tty->port.count == 0)
49704+ if (atomic_read(&tty->port.count) == 0)
49705 tty->tx_bytes_queued = 0;
49706
49707- tty->port.count++;
49708+ atomic_inc(&tty->port.count);
49709
49710 tty->port.tty = linux_tty;
49711 linux_tty->driver_data = tty;
49712@@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
49713
49714 static void do_ipw_close(struct ipw_tty *tty)
49715 {
49716- tty->port.count--;
49717-
49718- if (tty->port.count == 0) {
49719+ if (atomic_dec_return(&tty->port.count) == 0) {
49720 struct tty_struct *linux_tty = tty->port.tty;
49721
49722 if (linux_tty != NULL) {
49723@@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
49724 return;
49725
49726 mutex_lock(&tty->ipw_tty_mutex);
49727- if (tty->port.count == 0) {
49728+ if (atomic_read(&tty->port.count) == 0) {
49729 mutex_unlock(&tty->ipw_tty_mutex);
49730 return;
49731 }
49732@@ -164,7 +163,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
49733
49734 mutex_lock(&tty->ipw_tty_mutex);
49735
49736- if (!tty->port.count) {
49737+ if (!atomic_read(&tty->port.count)) {
49738 mutex_unlock(&tty->ipw_tty_mutex);
49739 return;
49740 }
49741@@ -206,7 +205,7 @@ static int ipw_write(struct tty_struct *linux_tty,
49742 return -ENODEV;
49743
49744 mutex_lock(&tty->ipw_tty_mutex);
49745- if (!tty->port.count) {
49746+ if (!atomic_read(&tty->port.count)) {
49747 mutex_unlock(&tty->ipw_tty_mutex);
49748 return -EINVAL;
49749 }
49750@@ -246,7 +245,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
49751 if (!tty)
49752 return -ENODEV;
49753
49754- if (!tty->port.count)
49755+ if (!atomic_read(&tty->port.count))
49756 return -EINVAL;
49757
49758 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
49759@@ -288,7 +287,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
49760 if (!tty)
49761 return 0;
49762
49763- if (!tty->port.count)
49764+ if (!atomic_read(&tty->port.count))
49765 return 0;
49766
49767 return tty->tx_bytes_queued;
49768@@ -369,7 +368,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
49769 if (!tty)
49770 return -ENODEV;
49771
49772- if (!tty->port.count)
49773+ if (!atomic_read(&tty->port.count))
49774 return -EINVAL;
49775
49776 return get_control_lines(tty);
49777@@ -385,7 +384,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
49778 if (!tty)
49779 return -ENODEV;
49780
49781- if (!tty->port.count)
49782+ if (!atomic_read(&tty->port.count))
49783 return -EINVAL;
49784
49785 return set_control_lines(tty, set, clear);
49786@@ -399,7 +398,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
49787 if (!tty)
49788 return -ENODEV;
49789
49790- if (!tty->port.count)
49791+ if (!atomic_read(&tty->port.count))
49792 return -EINVAL;
49793
49794 /* FIXME: Exactly how is the tty object locked here .. */
49795@@ -555,7 +554,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
49796 * are gone */
49797 mutex_lock(&ttyj->ipw_tty_mutex);
49798 }
49799- while (ttyj->port.count)
49800+ while (atomic_read(&ttyj->port.count))
49801 do_ipw_close(ttyj);
49802 ipwireless_disassociate_network_ttys(network,
49803 ttyj->channel_idx);
49804diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
49805index 1deaca4..c8582d4 100644
49806--- a/drivers/tty/moxa.c
49807+++ b/drivers/tty/moxa.c
49808@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
49809 }
49810
49811 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
49812- ch->port.count++;
49813+ atomic_inc(&ch->port.count);
49814 tty->driver_data = ch;
49815 tty_port_tty_set(&ch->port, tty);
49816 mutex_lock(&ch->port.mutex);
49817diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
49818index c0f76da..d974c32 100644
49819--- a/drivers/tty/n_gsm.c
49820+++ b/drivers/tty/n_gsm.c
49821@@ -1632,7 +1632,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
49822 spin_lock_init(&dlci->lock);
49823 mutex_init(&dlci->mutex);
49824 dlci->fifo = &dlci->_fifo;
49825- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
49826+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
49827 kfree(dlci);
49828 return NULL;
49829 }
49830@@ -2935,7 +2935,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
49831 struct gsm_dlci *dlci = tty->driver_data;
49832 struct tty_port *port = &dlci->port;
49833
49834- port->count++;
49835+ atomic_inc(&port->count);
49836 dlci_get(dlci);
49837 dlci_get(dlci->gsm->dlci[0]);
49838 mux_get(dlci->gsm);
49839diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
49840index 34aacaa..dad073b 100644
49841--- a/drivers/tty/n_tty.c
49842+++ b/drivers/tty/n_tty.c
49843@@ -114,7 +114,7 @@ struct n_tty_data {
49844 int minimum_to_wake;
49845
49846 /* consumer-published */
49847- size_t read_tail;
49848+ size_t read_tail __intentional_overflow(-1);
49849 size_t line_start;
49850
49851 /* protected by output lock */
49852@@ -2502,6 +2502,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
49853 {
49854 *ops = tty_ldisc_N_TTY;
49855 ops->owner = NULL;
49856- ops->refcount = ops->flags = 0;
49857+ atomic_set(&ops->refcount, 0);
49858+ ops->flags = 0;
49859 }
49860 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
49861diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
49862index 25c9bc7..24077b7 100644
49863--- a/drivers/tty/pty.c
49864+++ b/drivers/tty/pty.c
49865@@ -790,8 +790,10 @@ static void __init unix98_pty_init(void)
49866 panic("Couldn't register Unix98 pts driver");
49867
49868 /* Now create the /dev/ptmx special device */
49869+ pax_open_kernel();
49870 tty_default_fops(&ptmx_fops);
49871- ptmx_fops.open = ptmx_open;
49872+ *(void **)&ptmx_fops.open = ptmx_open;
49873+ pax_close_kernel();
49874
49875 cdev_init(&ptmx_cdev, &ptmx_fops);
49876 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
49877diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
49878index 354564e..fe50d9a 100644
49879--- a/drivers/tty/rocket.c
49880+++ b/drivers/tty/rocket.c
49881@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
49882 tty->driver_data = info;
49883 tty_port_tty_set(port, tty);
49884
49885- if (port->count++ == 0) {
49886+ if (atomic_inc_return(&port->count) == 1) {
49887 atomic_inc(&rp_num_ports_open);
49888
49889 #ifdef ROCKET_DEBUG_OPEN
49890@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
49891 #endif
49892 }
49893 #ifdef ROCKET_DEBUG_OPEN
49894- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
49895+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));

49896 #endif
49897
49898 /*
49899@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
49900 spin_unlock_irqrestore(&info->port.lock, flags);
49901 return;
49902 }
49903- if (info->port.count)
49904+ if (atomic_read(&info->port.count))
49905 atomic_dec(&rp_num_ports_open);
49906 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
49907 spin_unlock_irqrestore(&info->port.lock, flags);
49908diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
49909index 1274499..f541382 100644
49910--- a/drivers/tty/serial/ioc4_serial.c
49911+++ b/drivers/tty/serial/ioc4_serial.c
49912@@ -437,7 +437,7 @@ struct ioc4_soft {
49913 } is_intr_info[MAX_IOC4_INTR_ENTS];
49914
49915 /* Number of entries active in the above array */
49916- atomic_t is_num_intrs;
49917+ atomic_unchecked_t is_num_intrs;
49918 } is_intr_type[IOC4_NUM_INTR_TYPES];
49919
49920 /* is_ir_lock must be held while
49921@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
49922 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
49923 || (type == IOC4_OTHER_INTR_TYPE)));
49924
49925- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
49926+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
49927 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
49928
49929 /* Save off the lower level interrupt handler */
49930@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
49931
49932 soft = arg;
49933 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
49934- num_intrs = (int)atomic_read(
49935+ num_intrs = (int)atomic_read_unchecked(
49936 &soft->is_intr_type[intr_type].is_num_intrs);
49937
49938 this_mir = this_ir = pending_intrs(soft, intr_type);
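
Here is_num_intrs becomes atomic_unchecked_t. Under PAX_REFCOUNT, ordinary atomic_t operations trap on overflow; counters whose wraparound is harmless (statistics, event and ID counters like this one) are converted to the *_unchecked variants to opt out of that check. A hedged C11 sketch of the distinction, with assert() standing in for the trap and all names invented:

#include <assert.h>
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static int checked_inc(atomic_int *v)        /* ~ atomic_inc_return() */
{
    int old = atomic_fetch_add(v, 1);
    assert(old != INT_MAX);                  /* overflow would be fatal */
    return old + 1;
}

static int unchecked_inc(atomic_int *v) /* ~ atomic_inc_return_unchecked() */
{
    unsigned int old = (unsigned int)atomic_fetch_add(v, 1);
    return (int)(old + 1u);                  /* wraps silently, by design */
}

int main(void)
{
    atomic_int refs = 0, events = INT_MAX;
    printf("refs   = %d\n", checked_inc(&refs));
    printf("events = %d\n", unchecked_inc(&events)); /* wraps to INT_MIN */
    return 0;
}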
49939diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
49940index a260cde..6b2b5ce 100644
49941--- a/drivers/tty/serial/kgdboc.c
49942+++ b/drivers/tty/serial/kgdboc.c
49943@@ -24,8 +24,9 @@
49944 #define MAX_CONFIG_LEN 40
49945
49946 static struct kgdb_io kgdboc_io_ops;
49947+static struct kgdb_io kgdboc_io_ops_console;
49948
49949-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
49950+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
49951 static int configured = -1;
49952
49953 static char config[MAX_CONFIG_LEN];
49954@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
49955 kgdboc_unregister_kbd();
49956 if (configured == 1)
49957 kgdb_unregister_io_module(&kgdboc_io_ops);
49958+ else if (configured == 2)
49959+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
49960 }
49961
49962 static int configure_kgdboc(void)
49963@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
49964 int err;
49965 char *cptr = config;
49966 struct console *cons;
49967+ int is_console = 0;
49968
49969 err = kgdboc_option_setup(config);
49970 if (err || !strlen(config) || isspace(config[0]))
49971 goto noconfig;
49972
49973 err = -ENODEV;
49974- kgdboc_io_ops.is_console = 0;
49975 kgdb_tty_driver = NULL;
49976
49977 kgdboc_use_kms = 0;
49978@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
49979 int idx;
49980 if (cons->device && cons->device(cons, &idx) == p &&
49981 idx == tty_line) {
49982- kgdboc_io_ops.is_console = 1;
49983+ is_console = 1;
49984 break;
49985 }
49986 cons = cons->next;
49987@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
49988 kgdb_tty_line = tty_line;
49989
49990 do_register:
49991- err = kgdb_register_io_module(&kgdboc_io_ops);
49992+ if (is_console) {
49993+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
49994+ configured = 2;
49995+ } else {
49996+ err = kgdb_register_io_module(&kgdboc_io_ops);
49997+ configured = 1;
49998+ }
49999 if (err)
50000 goto noconfig;
50001
50002@@ -205,8 +214,6 @@ do_register:
50003 if (err)
50004 goto nmi_con_failed;
50005
50006- configured = 1;
50007-
50008 return 0;
50009
50010 nmi_con_failed:
50011@@ -223,7 +230,7 @@ noconfig:
50012 static int __init init_kgdboc(void)
50013 {
50014 /* Already configured? */
50015- if (configured == 1)
50016+ if (configured >= 1)
50017 return 0;
50018
50019 return configure_kgdboc();
50020@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
50021 if (config[len - 1] == '\n')
50022 config[len - 1] = '\0';
50023
50024- if (configured == 1)
50025+ if (configured >= 1)
50026 cleanup_kgdboc();
50027
50028 /* Go and configure with the new params. */
50029@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
50030 .post_exception = kgdboc_post_exp_handler,
50031 };
50032
50033+static struct kgdb_io kgdboc_io_ops_console = {
50034+ .name = "kgdboc",
50035+ .read_char = kgdboc_get_char,
50036+ .write_char = kgdboc_put_char,
50037+ .pre_exception = kgdboc_pre_exp_handler,
50038+ .post_exception = kgdboc_post_exp_handler,
50039+ .is_console = 1
50040+};
50041+
50042 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
50043 /* This is only available if kgdboc is a built in for early debugging */
50044 static int __init kgdboc_early_init(char *opt)
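
Rather than flipping kgdboc_io_ops.is_console at runtime (which would force the struct to stay writable), the patch keeps two variants and records which one was registered via configured = 1 or 2. A small sketch of the shape of that change, with stand-in types and no real kgdb API:

#include <stdio.h>

struct io_ops { const char *name; int is_console; };

static const struct io_ops ops_plain   = { "kgdboc", 0 };
static const struct io_ops ops_console = { "kgdboc", 1 };

static const struct io_ops *registered;
static int configured;       /* 0 = none, 1 = plain, 2 = console */

static void do_register(int is_console)
{
    if (is_console) {
        registered = &ops_console;
        configured = 2;
    } else {
        registered = &ops_plain;
        configured = 1;
    }
}

static void do_unregister(void)
{
    if (configured >= 1)     /* either variant counts as configured */
        registered = NULL;
    configured = 0;
}

int main(void)
{
    do_register(1);
    printf("registered console ops: %d\n", registered->is_console);
    do_unregister();
    return 0;
}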
50045diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
50046index b5d779c..3622cfe 100644
50047--- a/drivers/tty/serial/msm_serial.c
50048+++ b/drivers/tty/serial/msm_serial.c
50049@@ -897,7 +897,7 @@ static struct uart_driver msm_uart_driver = {
50050 .cons = MSM_CONSOLE,
50051 };
50052
50053-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
50054+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
50055
50056 static const struct of_device_id msm_uartdm_table[] = {
50057 { .compatible = "qcom,msm-uartdm" },
50058@@ -912,7 +912,7 @@ static int __init msm_serial_probe(struct platform_device *pdev)
50059 int irq;
50060
50061 if (pdev->id == -1)
50062- pdev->id = atomic_inc_return(&msm_uart_next_id) - 1;
50063+ pdev->id = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
50064
50065 if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
50066 return -ENXIO;
50067diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
50068index c1af04d..0815c8a 100644
50069--- a/drivers/tty/serial/samsung.c
50070+++ b/drivers/tty/serial/samsung.c
50071@@ -463,11 +463,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
50072 }
50073 }
50074
50075+static int s3c64xx_serial_startup(struct uart_port *port);
50076 static int s3c24xx_serial_startup(struct uart_port *port)
50077 {
50078 struct s3c24xx_uart_port *ourport = to_ourport(port);
50079 int ret;
50080
50081+ /* Startup sequence is different for s3c64xx and higher SoC's */
50082+ if (s3c24xx_serial_has_interrupt_mask(port))
50083+ return s3c64xx_serial_startup(port);
50084+
50085 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
50086 port->mapbase, port->membase);
50087
50088@@ -1141,10 +1146,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
50089 /* setup info for port */
50090 port->dev = &platdev->dev;
50091
50092- /* Startup sequence is different for s3c64xx and higher SoC's */
50093- if (s3c24xx_serial_has_interrupt_mask(port))
50094- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
50095-
50096 port->uartclk = 1;
50097
50098 if (cfg->uart_flags & UPF_CONS_FLOW) {
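
The samsung.c change has the same constification motive: instead of patching s3c24xx_serial_ops.startup at probe time, the generic startup now tests the hardware variant and forwards to the s3c64xx path, so the shared ops table never needs to be written. An illustrative sketch, where the variant test is a made-up stand-in for s3c24xx_serial_has_interrupt_mask():

#include <stdio.h>

static int has_interrupt_mask(int variant) { return variant >= 64; }

static int s3c64xx_startup(int variant)
{
    printf("s3c64xx path for variant %d\n", variant);
    return 0;
}

static int s3c24xx_startup(int variant)
{
    /* startup differs on s3c64xx and later SoCs: forward at runtime
     * instead of rewriting a shared ops struct */
    if (has_interrupt_mask(variant))
        return s3c64xx_startup(variant);
    printf("s3c24xx path for variant %d\n", variant);
    return 0;
}

int main(void)
{
    s3c24xx_startup(24);
    s3c24xx_startup(64);
    return 0;
}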
50099diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
50100index 0f02351..07c59c5 100644
50101--- a/drivers/tty/serial/serial_core.c
50102+++ b/drivers/tty/serial/serial_core.c
50103@@ -1448,7 +1448,7 @@ static void uart_hangup(struct tty_struct *tty)
50104 uart_flush_buffer(tty);
50105 uart_shutdown(tty, state);
50106 spin_lock_irqsave(&port->lock, flags);
50107- port->count = 0;
50108+ atomic_set(&port->count, 0);
50109 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
50110 spin_unlock_irqrestore(&port->lock, flags);
50111 tty_port_tty_set(port, NULL);
50112@@ -1544,7 +1544,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
50113 goto end;
50114 }
50115
50116- port->count++;
50117+ atomic_inc(&port->count);
50118 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
50119 retval = -ENXIO;
50120 goto err_dec_count;
50121@@ -1572,7 +1572,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
50122 /*
50123 * Make sure the device is in D0 state.
50124 */
50125- if (port->count == 1)
50126+ if (atomic_read(&port->count) == 1)
50127 uart_change_pm(state, UART_PM_STATE_ON);
50128
50129 /*
50130@@ -1590,7 +1590,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
50131 end:
50132 return retval;
50133 err_dec_count:
50134- port->count--;
50135+ atomic_dec(&port->count);
50136 mutex_unlock(&port->mutex);
50137 goto end;
50138 }
50139diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
50140index e1ce141..6d4ed80 100644
50141--- a/drivers/tty/synclink.c
50142+++ b/drivers/tty/synclink.c
50143@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
50144
50145 if (debug_level >= DEBUG_LEVEL_INFO)
50146 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
50147- __FILE__,__LINE__, info->device_name, info->port.count);
50148+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
50149
50150 if (tty_port_close_start(&info->port, tty, filp) == 0)
50151 goto cleanup;
50152@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
50153 cleanup:
50154 if (debug_level >= DEBUG_LEVEL_INFO)
50155 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
50156- tty->driver->name, info->port.count);
50157+ tty->driver->name, atomic_read(&info->port.count));
50158
50159 } /* end of mgsl_close() */
50160
50161@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
50162
50163 mgsl_flush_buffer(tty);
50164 shutdown(info);
50165-
50166- info->port.count = 0;
50167+
50168+ atomic_set(&info->port.count, 0);
50169 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
50170 info->port.tty = NULL;
50171
50172@@ -3297,12 +3297,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
50173
50174 if (debug_level >= DEBUG_LEVEL_INFO)
50175 printk("%s(%d):block_til_ready before block on %s count=%d\n",
50176- __FILE__,__LINE__, tty->driver->name, port->count );
50177+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
50178
50179 spin_lock_irqsave(&info->irq_spinlock, flags);
50180 if (!tty_hung_up_p(filp)) {
50181 extra_count = true;
50182- port->count--;
50183+ atomic_dec(&port->count);
50184 }
50185 spin_unlock_irqrestore(&info->irq_spinlock, flags);
50186 port->blocked_open++;
50187@@ -3331,7 +3331,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
50188
50189 if (debug_level >= DEBUG_LEVEL_INFO)
50190 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
50191- __FILE__,__LINE__, tty->driver->name, port->count );
50192+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
50193
50194 tty_unlock(tty);
50195 schedule();
50196@@ -3343,12 +3343,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
50197
50198 /* FIXME: Racy on hangup during close wait */
50199 if (extra_count)
50200- port->count++;
50201+ atomic_inc(&port->count);
50202 port->blocked_open--;
50203
50204 if (debug_level >= DEBUG_LEVEL_INFO)
50205 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
50206- __FILE__,__LINE__, tty->driver->name, port->count );
50207+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
50208
50209 if (!retval)
50210 port->flags |= ASYNC_NORMAL_ACTIVE;
50211@@ -3400,7 +3400,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
50212
50213 if (debug_level >= DEBUG_LEVEL_INFO)
50214 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
50215- __FILE__,__LINE__,tty->driver->name, info->port.count);
50216+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
50217
50218 /* If port is closing, signal caller to try again */
50219 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
50220@@ -3419,10 +3419,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
50221 spin_unlock_irqrestore(&info->netlock, flags);
50222 goto cleanup;
50223 }
50224- info->port.count++;
50225+ atomic_inc(&info->port.count);
50226 spin_unlock_irqrestore(&info->netlock, flags);
50227
50228- if (info->port.count == 1) {
50229+ if (atomic_read(&info->port.count) == 1) {
50230 /* 1st open on this device, init hardware */
50231 retval = startup(info);
50232 if (retval < 0)
50233@@ -3446,8 +3446,8 @@ cleanup:
50234 if (retval) {
50235 if (tty->count == 1)
50236 info->port.tty = NULL; /* tty layer will release tty struct */
50237- if(info->port.count)
50238- info->port.count--;
50239+ if (atomic_read(&info->port.count))
50240+ atomic_dec(&info->port.count);
50241 }
50242
50243 return retval;
50244@@ -7665,7 +7665,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
50245 unsigned short new_crctype;
50246
50247 /* return error if TTY interface open */
50248- if (info->port.count)
50249+ if (atomic_read(&info->port.count))
50250 return -EBUSY;
50251
50252 switch (encoding)
50253@@ -7760,7 +7760,7 @@ static int hdlcdev_open(struct net_device *dev)
50254
50255 /* arbitrate between network and tty opens */
50256 spin_lock_irqsave(&info->netlock, flags);
50257- if (info->port.count != 0 || info->netcount != 0) {
50258+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
50259 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
50260 spin_unlock_irqrestore(&info->netlock, flags);
50261 return -EBUSY;
50262@@ -7846,7 +7846,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
50263 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
50264
50265 /* return error if TTY interface open */
50266- if (info->port.count)
50267+ if (atomic_read(&info->port.count))
50268 return -EBUSY;
50269
50270 if (cmd != SIOCWANDEV)
50271diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
50272index 1abf946..1ee34fc 100644
50273--- a/drivers/tty/synclink_gt.c
50274+++ b/drivers/tty/synclink_gt.c
50275@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
50276 tty->driver_data = info;
50277 info->port.tty = tty;
50278
50279- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
50280+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
50281
50282 /* If port is closing, signal caller to try again */
50283 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
50284@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
50285 mutex_unlock(&info->port.mutex);
50286 goto cleanup;
50287 }
50288- info->port.count++;
50289+ atomic_inc(&info->port.count);
50290 spin_unlock_irqrestore(&info->netlock, flags);
50291
50292- if (info->port.count == 1) {
50293+ if (atomic_read(&info->port.count) == 1) {
50294 /* 1st open on this device, init hardware */
50295 retval = startup(info);
50296 if (retval < 0) {
50297@@ -715,8 +715,8 @@ cleanup:
50298 if (retval) {
50299 if (tty->count == 1)
50300 info->port.tty = NULL; /* tty layer will release tty struct */
50301- if(info->port.count)
50302- info->port.count--;
50303+ if(atomic_read(&info->port.count))
50304+ atomic_dec(&info->port.count);
50305 }
50306
50307 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
50308@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
50309
50310 if (sanity_check(info, tty->name, "close"))
50311 return;
50312- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
50313+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
50314
50315 if (tty_port_close_start(&info->port, tty, filp) == 0)
50316 goto cleanup;
50317@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
50318 tty_port_close_end(&info->port, tty);
50319 info->port.tty = NULL;
50320 cleanup:
50321- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
50322+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
50323 }
50324
50325 static void hangup(struct tty_struct *tty)
50326@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
50327 shutdown(info);
50328
50329 spin_lock_irqsave(&info->port.lock, flags);
50330- info->port.count = 0;
50331+ atomic_set(&info->port.count, 0);
50332 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
50333 info->port.tty = NULL;
50334 spin_unlock_irqrestore(&info->port.lock, flags);
50335@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
50336 unsigned short new_crctype;
50337
50338 /* return error if TTY interface open */
50339- if (info->port.count)
50340+ if (atomic_read(&info->port.count))
50341 return -EBUSY;
50342
50343 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
50344@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
50345
50346 /* arbitrate between network and tty opens */
50347 spin_lock_irqsave(&info->netlock, flags);
50348- if (info->port.count != 0 || info->netcount != 0) {
50349+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
50350 DBGINFO(("%s hdlc_open busy\n", dev->name));
50351 spin_unlock_irqrestore(&info->netlock, flags);
50352 return -EBUSY;
50353@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
50354 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
50355
50356 /* return error if TTY interface open */
50357- if (info->port.count)
50358+ if (atomic_read(&info->port.count))
50359 return -EBUSY;
50360
50361 if (cmd != SIOCWANDEV)
50362@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
50363 if (port == NULL)
50364 continue;
50365 spin_lock(&port->lock);
50366- if ((port->port.count || port->netcount) &&
50367+ if ((atomic_read(&port->port.count) || port->netcount) &&
50368 port->pending_bh && !port->bh_running &&
50369 !port->bh_requested) {
50370 DBGISR(("%s bh queued\n", port->device_name));
50371@@ -3302,7 +3302,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
50372 spin_lock_irqsave(&info->lock, flags);
50373 if (!tty_hung_up_p(filp)) {
50374 extra_count = true;
50375- port->count--;
50376+ atomic_dec(&port->count);
50377 }
50378 spin_unlock_irqrestore(&info->lock, flags);
50379 port->blocked_open++;
50380@@ -3339,7 +3339,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
50381 remove_wait_queue(&port->open_wait, &wait);
50382
50383 if (extra_count)
50384- port->count++;
50385+ atomic_inc(&port->count);
50386 port->blocked_open--;
50387
50388 if (!retval)
50389diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
50390index dc6e969..5dc8786 100644
50391--- a/drivers/tty/synclinkmp.c
50392+++ b/drivers/tty/synclinkmp.c
50393@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
50394
50395 if (debug_level >= DEBUG_LEVEL_INFO)
50396 printk("%s(%d):%s open(), old ref count = %d\n",
50397- __FILE__,__LINE__,tty->driver->name, info->port.count);
50398+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
50399
50400 /* If port is closing, signal caller to try again */
50401 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
50402@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
50403 spin_unlock_irqrestore(&info->netlock, flags);
50404 goto cleanup;
50405 }
50406- info->port.count++;
50407+ atomic_inc(&info->port.count);
50408 spin_unlock_irqrestore(&info->netlock, flags);
50409
50410- if (info->port.count == 1) {
50411+ if (atomic_read(&info->port.count) == 1) {
50412 /* 1st open on this device, init hardware */
50413 retval = startup(info);
50414 if (retval < 0)
50415@@ -796,8 +796,8 @@ cleanup:
50416 if (retval) {
50417 if (tty->count == 1)
50418 info->port.tty = NULL; /* tty layer will release tty struct */
50419- if(info->port.count)
50420- info->port.count--;
50421+ if(atomic_read(&info->port.count))
50422+ atomic_dec(&info->port.count);
50423 }
50424
50425 return retval;
50426@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
50427
50428 if (debug_level >= DEBUG_LEVEL_INFO)
50429 printk("%s(%d):%s close() entry, count=%d\n",
50430- __FILE__,__LINE__, info->device_name, info->port.count);
50431+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
50432
50433 if (tty_port_close_start(&info->port, tty, filp) == 0)
50434 goto cleanup;
50435@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
50436 cleanup:
50437 if (debug_level >= DEBUG_LEVEL_INFO)
50438 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
50439- tty->driver->name, info->port.count);
50440+ tty->driver->name, atomic_read(&info->port.count));
50441 }
50442
50443 /* Called by tty_hangup() when a hangup is signaled.
50444@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
50445 shutdown(info);
50446
50447 spin_lock_irqsave(&info->port.lock, flags);
50448- info->port.count = 0;
50449+ atomic_set(&info->port.count, 0);
50450 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
50451 info->port.tty = NULL;
50452 spin_unlock_irqrestore(&info->port.lock, flags);
50453@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
50454 unsigned short new_crctype;
50455
50456 /* return error if TTY interface open */
50457- if (info->port.count)
50458+ if (atomic_read(&info->port.count))
50459 return -EBUSY;
50460
50461 switch (encoding)
50462@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
50463
50464 /* arbitrate between network and tty opens */
50465 spin_lock_irqsave(&info->netlock, flags);
50466- if (info->port.count != 0 || info->netcount != 0) {
50467+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
50468 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
50469 spin_unlock_irqrestore(&info->netlock, flags);
50470 return -EBUSY;
50471@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
50472 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
50473
50474 /* return error if TTY interface open */
50475- if (info->port.count)
50476+ if (atomic_read(&info->port.count))
50477 return -EBUSY;
50478
50479 if (cmd != SIOCWANDEV)
50480@@ -2620,7 +2620,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
50481 * do not request bottom half processing if the
50482 * device is not open in a normal mode.
50483 */
50484- if ( port && (port->port.count || port->netcount) &&
50485+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
50486 port->pending_bh && !port->bh_running &&
50487 !port->bh_requested ) {
50488 if ( debug_level >= DEBUG_LEVEL_ISR )
50489@@ -3318,12 +3318,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
50490
50491 if (debug_level >= DEBUG_LEVEL_INFO)
50492 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
50493- __FILE__,__LINE__, tty->driver->name, port->count );
50494+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
50495
50496 spin_lock_irqsave(&info->lock, flags);
50497 if (!tty_hung_up_p(filp)) {
50498 extra_count = true;
50499- port->count--;
50500+ atomic_dec(&port->count);
50501 }
50502 spin_unlock_irqrestore(&info->lock, flags);
50503 port->blocked_open++;
50504@@ -3352,7 +3352,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
50505
50506 if (debug_level >= DEBUG_LEVEL_INFO)
50507 printk("%s(%d):%s block_til_ready() count=%d\n",
50508- __FILE__,__LINE__, tty->driver->name, port->count );
50509+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
50510
50511 tty_unlock(tty);
50512 schedule();
50513@@ -3363,12 +3363,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
50514 remove_wait_queue(&port->open_wait, &wait);
50515
50516 if (extra_count)
50517- port->count++;
50518+ atomic_inc(&port->count);
50519 port->blocked_open--;
50520
50521 if (debug_level >= DEBUG_LEVEL_INFO)
50522 printk("%s(%d):%s block_til_ready() after, count=%d\n",
50523- __FILE__,__LINE__, tty->driver->name, port->count );
50524+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
50525
50526 if (!retval)
50527 port->flags |= ASYNC_NORMAL_ACTIVE;
50528diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
50529index ce396ec..04a37be 100644
50530--- a/drivers/tty/sysrq.c
50531+++ b/drivers/tty/sysrq.c
50532@@ -1075,7 +1075,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
50533 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
50534 size_t count, loff_t *ppos)
50535 {
50536- if (count) {
50537+ if (count && capable(CAP_SYS_ADMIN)) {
50538 char c;
50539
50540 if (get_user(c, buf))
50541diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
50542index c74a00a..02cf211a 100644
50543--- a/drivers/tty/tty_io.c
50544+++ b/drivers/tty/tty_io.c
50545@@ -3474,7 +3474,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
50546
50547 void tty_default_fops(struct file_operations *fops)
50548 {
50549- *fops = tty_fops;
50550+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
50551 }
50552
50553 /*
50554diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
50555index 6458e11..6cfc218 100644
50556--- a/drivers/tty/tty_ldisc.c
50557+++ b/drivers/tty/tty_ldisc.c
50558@@ -72,7 +72,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
50559 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
50560 tty_ldiscs[disc] = new_ldisc;
50561 new_ldisc->num = disc;
50562- new_ldisc->refcount = 0;
50563+ atomic_set(&new_ldisc->refcount, 0);
50564 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
50565
50566 return ret;
50567@@ -100,7 +100,7 @@ int tty_unregister_ldisc(int disc)
50568 return -EINVAL;
50569
50570 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
50571- if (tty_ldiscs[disc]->refcount)
50572+ if (atomic_read(&tty_ldiscs[disc]->refcount))
50573 ret = -EBUSY;
50574 else
50575 tty_ldiscs[disc] = NULL;
50576@@ -121,7 +121,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
50577 if (ldops) {
50578 ret = ERR_PTR(-EAGAIN);
50579 if (try_module_get(ldops->owner)) {
50580- ldops->refcount++;
50581+ atomic_inc(&ldops->refcount);
50582 ret = ldops;
50583 }
50584 }
50585@@ -134,7 +134,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
50586 unsigned long flags;
50587
50588 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
50589- ldops->refcount--;
50590+ atomic_dec(&ldops->refcount);
50591 module_put(ldops->owner);
50592 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
50593 }
50594diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
50595index c94d234..8210f2d 100644
50596--- a/drivers/tty/tty_port.c
50597+++ b/drivers/tty/tty_port.c
50598@@ -236,7 +236,7 @@ void tty_port_hangup(struct tty_port *port)
50599 unsigned long flags;
50600
50601 spin_lock_irqsave(&port->lock, flags);
50602- port->count = 0;
50603+ atomic_set(&port->count, 0);
50604 port->flags &= ~ASYNC_NORMAL_ACTIVE;
50605 tty = port->tty;
50606 if (tty)
50607@@ -394,7 +394,7 @@ int tty_port_block_til_ready(struct tty_port *port,
50608 /* The port lock protects the port counts */
50609 spin_lock_irqsave(&port->lock, flags);
50610 if (!tty_hung_up_p(filp))
50611- port->count--;
50612+ atomic_dec(&port->count);
50613 port->blocked_open++;
50614 spin_unlock_irqrestore(&port->lock, flags);
50615
50616@@ -436,7 +436,7 @@ int tty_port_block_til_ready(struct tty_port *port,
50617 we must not mess that up further */
50618 spin_lock_irqsave(&port->lock, flags);
50619 if (!tty_hung_up_p(filp))
50620- port->count++;
50621+ atomic_inc(&port->count);
50622 port->blocked_open--;
50623 if (retval == 0)
50624 port->flags |= ASYNC_NORMAL_ACTIVE;
50625@@ -470,19 +470,19 @@ int tty_port_close_start(struct tty_port *port,
50626 return 0;
50627 }
50628
50629- if (tty->count == 1 && port->count != 1) {
50630+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
50631 printk(KERN_WARNING
50632 "tty_port_close_start: tty->count = 1 port count = %d.\n",
50633- port->count);
50634- port->count = 1;
50635+ atomic_read(&port->count));
50636+ atomic_set(&port->count, 1);
50637 }
50638- if (--port->count < 0) {
50639+ if (atomic_dec_return(&port->count) < 0) {
50640 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
50641- port->count);
50642- port->count = 0;
50643+ atomic_read(&port->count));
50644+ atomic_set(&port->count, 0);
50645 }
50646
50647- if (port->count) {
50648+ if (atomic_read(&port->count)) {
50649 spin_unlock_irqrestore(&port->lock, flags);
50650 return 0;
50651 }
50652@@ -564,7 +564,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
50653 {
50654 spin_lock_irq(&port->lock);
50655 if (!tty_hung_up_p(filp))
50656- ++port->count;
50657+ atomic_inc(&port->count);
50658 spin_unlock_irq(&port->lock);
50659 tty_port_tty_set(port, tty);
50660
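
The tty_port_close_start() hunk above shows the dec-and-test idiom: `if (--port->count < 0)` becomes atomic_dec_return(), so the decrement and the value being tested are one atomic result rather than two separate accesses. A runnable sketch with C11 atomics; the clamp-to-zero mirrors what the patched code does on underflow, everything else is simplified:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int count = 1;

static int port_close_start(void)
{
    int v = atomic_fetch_sub(&count, 1) - 1; /* ~ atomic_dec_return() */
    if (v < 0) {
        fprintf(stderr, "close_start: count = %d\n", v);
        atomic_store(&count, 0);             /* clamp, as the code does */
    }
    return v <= 0;                           /* was this the last close? */
}

int main(void)
{
    printf("last close: %d\n", port_close_start());
    printf("last close: %d\n", port_close_start()); /* underflow clamped */
    return 0;
}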
50661diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
50662index d0e3a44..5f8b754 100644
50663--- a/drivers/tty/vt/keyboard.c
50664+++ b/drivers/tty/vt/keyboard.c
50665@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
50666 kbd->kbdmode == VC_OFF) &&
50667 value != KVAL(K_SAK))
50668 return; /* SAK is allowed even in raw mode */
50669+
50670+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
50671+ {
50672+ void *func = fn_handler[value];
50673+ if (func == fn_show_state || func == fn_show_ptregs ||
50674+ func == fn_show_mem)
50675+ return;
50676+ }
50677+#endif
50678+
50679 fn_handler[value](vc);
50680 }
50681
50682@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
50683 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
50684 return -EFAULT;
50685
50686- if (!capable(CAP_SYS_TTY_CONFIG))
50687- perm = 0;
50688-
50689 switch (cmd) {
50690 case KDGKBENT:
50691 /* Ensure another thread doesn't free it under us */
50692@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
50693 spin_unlock_irqrestore(&kbd_event_lock, flags);
50694 return put_user(val, &user_kbe->kb_value);
50695 case KDSKBENT:
50696+ if (!capable(CAP_SYS_TTY_CONFIG))
50697+ perm = 0;
50698+
50699 if (!perm)
50700 return -EPERM;
50701 if (!i && v == K_NOSUCHMAP) {
50702@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
50703 int i, j, k;
50704 int ret;
50705
50706- if (!capable(CAP_SYS_TTY_CONFIG))
50707- perm = 0;
50708-
50709 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
50710 if (!kbs) {
50711 ret = -ENOMEM;
50712@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
50713 kfree(kbs);
50714 return ((p && *p) ? -EOVERFLOW : 0);
50715 case KDSKBSENT:
50716+ if (!capable(CAP_SYS_TTY_CONFIG))
50717+ perm = 0;
50718+
50719 if (!perm) {
50720 ret = -EPERM;
50721 goto reterr;
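
Two things happen in the keyboard.c hunks: the k_spec filter blocks the Show-State/Show-Registers/Show-Memory console keys when GRKERNSEC_PROC restrictions are active, and the CAP_SYS_TTY_CONFIG requirement moves from the whole ioctl into the KDSKBENT/KDSKBSENT (set) cases, so unprivileged reads keep working while writes stay gated. A toy sketch of that per-command gating; capable() is stubbed and the command names are simplified:

#include <stdbool.h>
#include <stdio.h>

enum cmd { KB_GET, KB_SET };

static bool capable_tty_config(void) { return false; /* stub */ }

static int kb_ioctl(enum cmd cmd, int perm)
{
    switch (cmd) {
    case KB_GET:
        return 0;                  /* reading needs no capability */
    case KB_SET:
        if (!capable_tty_config()) /* check only on the write path */
            perm = 0;
        if (!perm)
            return -1;             /* ~ -EPERM */
        return 0;
    }
    return -1;
}

int main(void)
{
    printf("get: %d\n", kb_ioctl(KB_GET, 1));   /* 0: allowed  */
    printf("set: %d\n", kb_ioctl(KB_SET, 1));   /* -1: refused */
    return 0;
}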
50722diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
50723index a673e5b..36e5d32 100644
50724--- a/drivers/uio/uio.c
50725+++ b/drivers/uio/uio.c
50726@@ -25,6 +25,7 @@
50727 #include <linux/kobject.h>
50728 #include <linux/cdev.h>
50729 #include <linux/uio_driver.h>
50730+#include <asm/local.h>
50731
50732 #define UIO_MAX_DEVICES (1U << MINORBITS)
50733
50734@@ -32,7 +33,7 @@ struct uio_device {
50735 struct module *owner;
50736 struct device *dev;
50737 int minor;
50738- atomic_t event;
50739+ atomic_unchecked_t event;
50740 struct fasync_struct *async_queue;
50741 wait_queue_head_t wait;
50742 struct uio_info *info;
50743@@ -243,7 +244,7 @@ static ssize_t event_show(struct device *dev,
50744 struct device_attribute *attr, char *buf)
50745 {
50746 struct uio_device *idev = dev_get_drvdata(dev);
50747- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
50748+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
50749 }
50750 static DEVICE_ATTR_RO(event);
50751
50752@@ -405,7 +406,7 @@ void uio_event_notify(struct uio_info *info)
50753 {
50754 struct uio_device *idev = info->uio_dev;
50755
50756- atomic_inc(&idev->event);
50757+ atomic_inc_unchecked(&idev->event);
50758 wake_up_interruptible(&idev->wait);
50759 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
50760 }
50761@@ -458,7 +459,7 @@ static int uio_open(struct inode *inode, struct file *filep)
50762 }
50763
50764 listener->dev = idev;
50765- listener->event_count = atomic_read(&idev->event);
50766+ listener->event_count = atomic_read_unchecked(&idev->event);
50767 filep->private_data = listener;
50768
50769 if (idev->info->open) {
50770@@ -509,7 +510,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
50771 return -EIO;
50772
50773 poll_wait(filep, &idev->wait, wait);
50774- if (listener->event_count != atomic_read(&idev->event))
50775+ if (listener->event_count != atomic_read_unchecked(&idev->event))
50776 return POLLIN | POLLRDNORM;
50777 return 0;
50778 }
50779@@ -534,7 +535,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
50780 do {
50781 set_current_state(TASK_INTERRUPTIBLE);
50782
50783- event_count = atomic_read(&idev->event);
50784+ event_count = atomic_read_unchecked(&idev->event);
50785 if (event_count != listener->event_count) {
50786 if (copy_to_user(buf, &event_count, count))
50787 retval = -EFAULT;
50788@@ -591,9 +592,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
50789 static int uio_find_mem_index(struct vm_area_struct *vma)
50790 {
50791 struct uio_device *idev = vma->vm_private_data;
50792+ unsigned long size;
50793
50794 if (vma->vm_pgoff < MAX_UIO_MAPS) {
50795- if (idev->info->mem[vma->vm_pgoff].size == 0)
50796+ size = idev->info->mem[vma->vm_pgoff].size;
50797+ if (size == 0)
50798+ return -1;
50799+ if (vma->vm_end - vma->vm_start > size)
50800 return -1;
50801 return (int)vma->vm_pgoff;
50802 }
50803@@ -825,7 +830,7 @@ int __uio_register_device(struct module *owner,
50804 idev->owner = owner;
50805 idev->info = info;
50806 init_waitqueue_head(&idev->wait);
50807- atomic_set(&idev->event, 0);
50808+ atomic_set_unchecked(&idev->event, 0);
50809
50810 ret = uio_get_minor(idev);
50811 if (ret)
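
The uio_find_mem_index() hunk adds a second rejection: besides empty regions, a vma larger than the backing region is refused, so a caller cannot mmap past the end of it (the real check compares vma->vm_end - vma->vm_start against the region size). A simplified, runnable sketch with stand-in types and toy values:

#include <stdio.h>

struct mem_region { unsigned long size; };

static int find_mem_index(unsigned long pgoff, unsigned long vma_len,
                          const struct mem_region *mem, unsigned long nmaps)
{
    if (pgoff < nmaps) {
        unsigned long size = mem[pgoff].size;
        if (size == 0)
            return -1;
        if (vma_len > size)       /* the added bounds check */
            return -1;
        return (int)pgoff;
    }
    return -1;
}

int main(void)
{
    struct mem_region mem[2] = { { 4096 }, { 0 } };
    printf("%d\n", find_mem_index(0, 4096, mem, 2));  /*  0: fits      */
    printf("%d\n", find_mem_index(0, 8192, mem, 2));  /* -1: too large */
    printf("%d\n", find_mem_index(1, 4096, mem, 2));  /* -1: empty     */
    return 0;
}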
50812diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
50813index 8a7eb77..c00402f 100644
50814--- a/drivers/usb/atm/cxacru.c
50815+++ b/drivers/usb/atm/cxacru.c
50816@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
50817 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
50818 if (ret < 2)
50819 return -EINVAL;
50820- if (index < 0 || index > 0x7f)
50821+ if (index > 0x7f)
50822 return -EINVAL;
50823 pos += tmp;
50824
50825diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
50826index 25a7bfc..57f3cf5 100644
50827--- a/drivers/usb/atm/usbatm.c
50828+++ b/drivers/usb/atm/usbatm.c
50829@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
50830 if (printk_ratelimit())
50831 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
50832 __func__, vpi, vci);
50833- atomic_inc(&vcc->stats->rx_err);
50834+ atomic_inc_unchecked(&vcc->stats->rx_err);
50835 return;
50836 }
50837
50838@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
50839 if (length > ATM_MAX_AAL5_PDU) {
50840 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
50841 __func__, length, vcc);
50842- atomic_inc(&vcc->stats->rx_err);
50843+ atomic_inc_unchecked(&vcc->stats->rx_err);
50844 goto out;
50845 }
50846
50847@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
50848 if (sarb->len < pdu_length) {
50849 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
50850 __func__, pdu_length, sarb->len, vcc);
50851- atomic_inc(&vcc->stats->rx_err);
50852+ atomic_inc_unchecked(&vcc->stats->rx_err);
50853 goto out;
50854 }
50855
50856 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
50857 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
50858 __func__, vcc);
50859- atomic_inc(&vcc->stats->rx_err);
50860+ atomic_inc_unchecked(&vcc->stats->rx_err);
50861 goto out;
50862 }
50863
50864@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
50865 if (printk_ratelimit())
50866 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
50867 __func__, length);
50868- atomic_inc(&vcc->stats->rx_drop);
50869+ atomic_inc_unchecked(&vcc->stats->rx_drop);
50870 goto out;
50871 }
50872
50873@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
50874
50875 vcc->push(vcc, skb);
50876
50877- atomic_inc(&vcc->stats->rx);
50878+ atomic_inc_unchecked(&vcc->stats->rx);
50879 out:
50880 skb_trim(sarb, 0);
50881 }
50882@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
50883 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
50884
50885 usbatm_pop(vcc, skb);
50886- atomic_inc(&vcc->stats->tx);
50887+ atomic_inc_unchecked(&vcc->stats->tx);
50888
50889 skb = skb_dequeue(&instance->sndqueue);
50890 }
50891@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
50892 if (!left--)
50893 return sprintf(page,
50894 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
50895- atomic_read(&atm_dev->stats.aal5.tx),
50896- atomic_read(&atm_dev->stats.aal5.tx_err),
50897- atomic_read(&atm_dev->stats.aal5.rx),
50898- atomic_read(&atm_dev->stats.aal5.rx_err),
50899- atomic_read(&atm_dev->stats.aal5.rx_drop));
50900+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
50901+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
50902+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
50903+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
50904+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
50905
50906 if (!left--) {
50907 if (instance->disconnected)
50908diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
50909index 2a3bbdf..91d72cf 100644
50910--- a/drivers/usb/core/devices.c
50911+++ b/drivers/usb/core/devices.c
50912@@ -126,7 +126,7 @@ static const char format_endpt[] =
50913 * time it gets called.
50914 */
50915 static struct device_connect_event {
50916- atomic_t count;
50917+ atomic_unchecked_t count;
50918 wait_queue_head_t wait;
50919 } device_event = {
50920 .count = ATOMIC_INIT(1),
50921@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
50922
50923 void usbfs_conn_disc_event(void)
50924 {
50925- atomic_add(2, &device_event.count);
50926+ atomic_add_unchecked(2, &device_event.count);
50927 wake_up(&device_event.wait);
50928 }
50929
50930@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
50931
50932 poll_wait(file, &device_event.wait, wait);
50933
50934- event_count = atomic_read(&device_event.count);
50935+ event_count = atomic_read_unchecked(&device_event.count);
50936 if (file->f_version != event_count) {
50937 file->f_version = event_count;
50938 return POLLIN | POLLRDNORM;
50939diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
50940index 967152a..16fa2e5 100644
50941--- a/drivers/usb/core/devio.c
50942+++ b/drivers/usb/core/devio.c
50943@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
50944 struct dev_state *ps = file->private_data;
50945 struct usb_device *dev = ps->dev;
50946 ssize_t ret = 0;
50947- unsigned len;
50948+ size_t len;
50949 loff_t pos;
50950 int i;
50951
50952@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
50953 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
50954 struct usb_config_descriptor *config =
50955 (struct usb_config_descriptor *)dev->rawdescriptors[i];
50956- unsigned int length = le16_to_cpu(config->wTotalLength);
50957+ size_t length = le16_to_cpu(config->wTotalLength);
50958
50959 if (*ppos < pos + length) {
50960
50961 /* The descriptor may claim to be longer than it
50962 * really is. Here is the actual allocated length. */
50963- unsigned alloclen =
50964+ size_t alloclen =
50965 le16_to_cpu(dev->config[i].desc.wTotalLength);
50966
50967- len = length - (*ppos - pos);
50968+ len = length + pos - *ppos;
50969 if (len > nbytes)
50970 len = nbytes;
50971
50972 /* Simply don't write (skip over) unallocated parts */
50973 if (alloclen > (*ppos - pos)) {
50974- alloclen -= (*ppos - pos);
50975+ alloclen = alloclen + pos - *ppos;
50976 if (copy_to_user(buf,
50977 dev->rawdescriptors[i] + (*ppos - pos),
50978 min(len, alloclen))) {
50979diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
50980index 6bffb8c..b404e8b 100644
50981--- a/drivers/usb/core/hcd.c
50982+++ b/drivers/usb/core/hcd.c
50983@@ -1550,7 +1550,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
50984 */
50985 usb_get_urb(urb);
50986 atomic_inc(&urb->use_count);
50987- atomic_inc(&urb->dev->urbnum);
50988+ atomic_inc_unchecked(&urb->dev->urbnum);
50989 usbmon_urb_submit(&hcd->self, urb);
50990
50991 /* NOTE requirements on root-hub callers (usbfs and the hub
50992@@ -1577,7 +1577,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
50993 urb->hcpriv = NULL;
50994 INIT_LIST_HEAD(&urb->urb_list);
50995 atomic_dec(&urb->use_count);
50996- atomic_dec(&urb->dev->urbnum);
50997+ atomic_dec_unchecked(&urb->dev->urbnum);
50998 if (atomic_read(&urb->reject))
50999 wake_up(&usb_kill_urb_queue);
51000 usb_put_urb(urb);
51001diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
51002index 07e6654..6420edf 100644
51003--- a/drivers/usb/core/hub.c
51004+++ b/drivers/usb/core/hub.c
51005@@ -27,6 +27,7 @@
51006 #include <linux/freezer.h>
51007 #include <linux/random.h>
51008 #include <linux/pm_qos.h>
51009+#include <linux/grsecurity.h>
51010
51011 #include <asm/uaccess.h>
51012 #include <asm/byteorder.h>
51013@@ -4442,6 +4443,10 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
51014 goto done;
51015 return;
51016 }
51017+
51018+ if (gr_handle_new_usb())
51019+ goto done;
51020+
51021 if (hub_is_superspeed(hub->hdev))
51022 unit_load = 150;
51023 else
51024diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
51025index bb31597..6c5ef8b 100644
51026--- a/drivers/usb/core/message.c
51027+++ b/drivers/usb/core/message.c
51028@@ -129,7 +129,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
51029 * Return: If successful, the number of bytes transferred. Otherwise, a negative
51030 * error number.
51031 */
51032-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
51033+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
51034 __u8 requesttype, __u16 value, __u16 index, void *data,
51035 __u16 size, int timeout)
51036 {
51037@@ -181,7 +181,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
51038 * If successful, 0. Otherwise a negative error number. The number of actual
51039 * bytes transferred will be stored in the @actual_length paramater.
51040 */
51041-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
51042+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
51043 void *data, int len, int *actual_length, int timeout)
51044 {
51045 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
51046@@ -221,7 +221,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
51047 * bytes transferred will be stored in the @actual_length paramater.
51048 *
51049 */
51050-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
51051+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
51052 void *data, int len, int *actual_length, int timeout)
51053 {
51054 struct urb *urb;
51055diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
51056index 52a97ad..e73330f 100644
51057--- a/drivers/usb/core/sysfs.c
51058+++ b/drivers/usb/core/sysfs.c
51059@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
51060 struct usb_device *udev;
51061
51062 udev = to_usb_device(dev);
51063- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
51064+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
51065 }
51066 static DEVICE_ATTR_RO(urbnum);
51067
51068diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
51069index 4d11449..f4ccabf 100644
51070--- a/drivers/usb/core/usb.c
51071+++ b/drivers/usb/core/usb.c
51072@@ -433,7 +433,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
51073 set_dev_node(&dev->dev, dev_to_node(bus->controller));
51074 dev->state = USB_STATE_ATTACHED;
51075 dev->lpm_disable_count = 1;
51076- atomic_set(&dev->urbnum, 0);
51077+ atomic_set_unchecked(&dev->urbnum, 0);
51078
51079 INIT_LIST_HEAD(&dev->ep0.urb_list);
51080 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
51081diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
51082index 02e44fc..3c4fe64 100644
51083--- a/drivers/usb/dwc3/gadget.c
51084+++ b/drivers/usb/dwc3/gadget.c
51085@@ -532,8 +532,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
51086 if (!usb_endpoint_xfer_isoc(desc))
51087 return 0;
51088
51089- memset(&trb_link, 0, sizeof(trb_link));
51090-
51091 /* Link TRB for ISOC. The HWO bit is never reset */
51092 trb_st_hw = &dep->trb_pool[0];
51093
51094diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
51095index 8cfc319..4868255 100644
51096--- a/drivers/usb/early/ehci-dbgp.c
51097+++ b/drivers/usb/early/ehci-dbgp.c
51098@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
51099
51100 #ifdef CONFIG_KGDB
51101 static struct kgdb_io kgdbdbgp_io_ops;
51102-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
51103+static struct kgdb_io kgdbdbgp_io_ops_console;
51104+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
51105 #else
51106 #define dbgp_kgdb_mode (0)
51107 #endif
51108@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
51109 .write_char = kgdbdbgp_write_char,
51110 };
51111
51112+static struct kgdb_io kgdbdbgp_io_ops_console = {
51113+ .name = "kgdbdbgp",
51114+ .read_char = kgdbdbgp_read_char,
51115+ .write_char = kgdbdbgp_write_char,
51116+ .is_console = 1
51117+};
51118+
51119 static int kgdbdbgp_wait_time;
51120
51121 static int __init kgdbdbgp_parse_config(char *str)
51122@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
51123 ptr++;
51124 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
51125 }
51126- kgdb_register_io_module(&kgdbdbgp_io_ops);
51127- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
51128+ if (early_dbgp_console.index != -1)
51129+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
51130+ else
51131+ kgdb_register_io_module(&kgdbdbgp_io_ops);
51132
51133 return 0;
51134 }
51135diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
51136index b369292..9f3ba40 100644
51137--- a/drivers/usb/gadget/u_serial.c
51138+++ b/drivers/usb/gadget/u_serial.c
51139@@ -733,9 +733,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
51140 spin_lock_irq(&port->port_lock);
51141
51142 /* already open? Great. */
51143- if (port->port.count) {
51144+ if (atomic_read(&port->port.count)) {
51145 status = 0;
51146- port->port.count++;
51147+ atomic_inc(&port->port.count);
51148
51149 /* currently opening/closing? wait ... */
51150 } else if (port->openclose) {
51151@@ -794,7 +794,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
51152 tty->driver_data = port;
51153 port->port.tty = tty;
51154
51155- port->port.count = 1;
51156+ atomic_set(&port->port.count, 1);
51157 port->openclose = false;
51158
51159 /* if connected, start the I/O stream */
51160@@ -836,11 +836,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
51161
51162 spin_lock_irq(&port->port_lock);
51163
51164- if (port->port.count != 1) {
51165- if (port->port.count == 0)
51166+ if (atomic_read(&port->port.count) != 1) {
51167+ if (atomic_read(&port->port.count) == 0)
51168 WARN_ON(1);
51169 else
51170- --port->port.count;
51171+ atomic_dec(&port->port.count);
51172 goto exit;
51173 }
51174
51175@@ -850,7 +850,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
51176 * and sleep if necessary
51177 */
51178 port->openclose = true;
51179- port->port.count = 0;
51180+ atomic_set(&port->port.count, 0);
51181
51182 gser = port->port_usb;
51183 if (gser && gser->disconnect)
51184@@ -1066,7 +1066,7 @@ static int gs_closed(struct gs_port *port)
51185 int cond;
51186
51187 spin_lock_irq(&port->port_lock);
51188- cond = (port->port.count == 0) && !port->openclose;
51189+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
51190 spin_unlock_irq(&port->port_lock);
51191 return cond;
51192 }
51193@@ -1209,7 +1209,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
51194 /* if it's already open, start I/O ... and notify the serial
51195 * protocol about open/close status (connect/disconnect).
51196 */
51197- if (port->port.count) {
51198+ if (atomic_read(&port->port.count)) {
51199 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
51200 gs_start_io(port);
51201 if (gser->connect)
51202@@ -1256,7 +1256,7 @@ void gserial_disconnect(struct gserial *gser)
51203
51204 port->port_usb = NULL;
51205 gser->ioport = NULL;
51206- if (port->port.count > 0 || port->openclose) {
51207+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
51208 wake_up_interruptible(&port->drain_wait);
51209 if (port->port.tty)
51210 tty_hangup(port->port.tty);
51211@@ -1272,7 +1272,7 @@ void gserial_disconnect(struct gserial *gser)
51212
51213 /* finally, free any unused/unusable I/O buffers */
51214 spin_lock_irqsave(&port->port_lock, flags);
51215- if (port->port.count == 0 && !port->openclose)
51216+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
51217 gs_buf_free(&port->port_write_buf);
51218 gs_free_requests(gser->out, &port->read_pool, NULL);
51219 gs_free_requests(gser->out, &port->read_queue, NULL);
51220diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
51221index 835fc08..f8b22bf 100644
51222--- a/drivers/usb/host/ehci-hub.c
51223+++ b/drivers/usb/host/ehci-hub.c
51224@@ -762,7 +762,7 @@ static struct urb *request_single_step_set_feature_urb(
51225 urb->transfer_flags = URB_DIR_IN;
51226 usb_get_urb(urb);
51227 atomic_inc(&urb->use_count);
51228- atomic_inc(&urb->dev->urbnum);
51229+ atomic_inc_unchecked(&urb->dev->urbnum);
51230 urb->setup_dma = dma_map_single(
51231 hcd->self.controller,
51232 urb->setup_packet,
51233@@ -829,7 +829,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
51234 urb->status = -EINPROGRESS;
51235 usb_get_urb(urb);
51236 atomic_inc(&urb->use_count);
51237- atomic_inc(&urb->dev->urbnum);
51238+ atomic_inc_unchecked(&urb->dev->urbnum);
51239 retval = submit_single_step_set_feature(hcd, urb, 0);
51240 if (!retval && !wait_for_completion_timeout(&done,
51241 msecs_to_jiffies(2000))) {
51242diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
51243index ba6a5d6..f88f7f3 100644
51244--- a/drivers/usb/misc/appledisplay.c
51245+++ b/drivers/usb/misc/appledisplay.c
51246@@ -83,7 +83,7 @@ struct appledisplay {
51247 spinlock_t lock;
51248 };
51249
51250-static atomic_t count_displays = ATOMIC_INIT(0);
51251+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
51252 static struct workqueue_struct *wq;
51253
51254 static void appledisplay_complete(struct urb *urb)
51255@@ -281,7 +281,7 @@ static int appledisplay_probe(struct usb_interface *iface,
51256
51257 /* Register backlight device */
51258 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
51259- atomic_inc_return(&count_displays) - 1);
51260+ atomic_inc_return_unchecked(&count_displays) - 1);
51261 memset(&props, 0, sizeof(struct backlight_properties));
51262 props.type = BACKLIGHT_RAW;
51263 props.max_brightness = 0xff;
51264diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
51265index c69bb50..215ef37 100644
51266--- a/drivers/usb/serial/console.c
51267+++ b/drivers/usb/serial/console.c
51268@@ -124,7 +124,7 @@ static int usb_console_setup(struct console *co, char *options)
51269
51270 info->port = port;
51271
51272- ++port->port.count;
51273+ atomic_inc(&port->port.count);
51274 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
51275 if (serial->type->set_termios) {
51276 /*
51277@@ -170,7 +170,7 @@ static int usb_console_setup(struct console *co, char *options)
51278 }
51279 /* Now that any required fake tty operations are completed restore
51280 * the tty port count */
51281- --port->port.count;
51282+ atomic_dec(&port->port.count);
51283 /* The console is special in terms of closing the device so
51284 * indicate this port is now acting as a system console. */
51285 port->port.console = 1;
51286@@ -183,7 +183,7 @@ static int usb_console_setup(struct console *co, char *options)
51287 free_tty:
51288 kfree(tty);
51289 reset_open_count:
51290- port->port.count = 0;
51291+ atomic_set(&port->port.count, 0);
51292 usb_autopm_put_interface(serial->interface);
51293 error_get_interface:
51294 usb_serial_put(serial);
51295@@ -194,7 +194,7 @@ static int usb_console_setup(struct console *co, char *options)
51296 static void usb_console_write(struct console *co,
51297 const char *buf, unsigned count)
51298 {
51299- static struct usbcons_info *info = &usbcons_info;
51300+ struct usbcons_info *info = &usbcons_info;
51301 struct usb_serial_port *port = info->port;
51302 struct usb_serial *serial;
51303 int retval = -ENODEV;
51304diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
51305index 75f70f0..d467e1a 100644
51306--- a/drivers/usb/storage/usb.h
51307+++ b/drivers/usb/storage/usb.h
51308@@ -63,7 +63,7 @@ struct us_unusual_dev {
51309 __u8 useProtocol;
51310 __u8 useTransport;
51311 int (*initFunction)(struct us_data *);
51312-};
51313+} __do_const;
51314
51315
51316 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
51317diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
51318index e614f02..3fd60e2 100644
51319--- a/drivers/usb/wusbcore/wa-hc.h
51320+++ b/drivers/usb/wusbcore/wa-hc.h
51321@@ -225,7 +225,7 @@ struct wahc {
51322 spinlock_t xfer_list_lock;
51323 struct work_struct xfer_enqueue_work;
51324 struct work_struct xfer_error_work;
51325- atomic_t xfer_id_count;
51326+ atomic_unchecked_t xfer_id_count;
51327
51328 kernel_ulong_t quirks;
51329 };
51330@@ -287,7 +287,7 @@ static inline void wa_init(struct wahc *wa)
51331 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
51332 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
51333 wa->dto_in_use = 0;
51334- atomic_set(&wa->xfer_id_count, 1);
51335+ atomic_set_unchecked(&wa->xfer_id_count, 1);
51336 }
51337
51338 /**
51339diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
51340index ed5abe8..7036400 100644
51341--- a/drivers/usb/wusbcore/wa-xfer.c
51342+++ b/drivers/usb/wusbcore/wa-xfer.c
51343@@ -312,7 +312,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
51344 */
51345 static void wa_xfer_id_init(struct wa_xfer *xfer)
51346 {
51347- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
51348+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
51349 }
51350
51351 /* Return the xfer's ID. */
51352diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
51353index 1eab4ac..e21efc9 100644
51354--- a/drivers/vfio/vfio.c
51355+++ b/drivers/vfio/vfio.c
51356@@ -488,7 +488,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
51357 return 0;
51358
51359 /* TODO Prevent device auto probing */
51360- WARN("Device %s added to live group %d!\n", dev_name(dev),
51361+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
51362 iommu_group_id(group->iommu_group));
51363
51364 return 0;
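
The vfio.c change fixes a WARN() misuse: the macro takes a condition before the format string, and the original call passed the format as the condition (a non-null string literal, so always true) with dev_name() landing in the format slot. The fix supplies an explicit `1`. A toy macro makes the shape visible; this is a simplified stand-in, not the kernel's WARN, and uses the common GNU ##__VA_ARGS__ extension:

#include <stdio.h>

#define WARN(cond, fmt, ...)                                  \
    do {                                                      \
        if (cond)                                             \
            fprintf(stderr, "WARNING: " fmt, ##__VA_ARGS__);  \
    } while (0)

int main(void)
{
    const char *dev = "dev0";
    /* wrong: WARN("Device %s ...\n", dev, 7) would treat the string
     * as the condition and dev as the format string */
    WARN(1, "Device %s added to live group %d!\n", dev, 7);
    return 0;
}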
51365diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
51366index 5174eba..451e6bc 100644
51367--- a/drivers/vhost/vringh.c
51368+++ b/drivers/vhost/vringh.c
51369@@ -530,17 +530,17 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
51370 /* Userspace access helpers: in this case, addresses are really userspace. */
51371 static inline int getu16_user(u16 *val, const u16 *p)
51372 {
51373- return get_user(*val, (__force u16 __user *)p);
51374+ return get_user(*val, (u16 __force_user *)p);
51375 }
51376
51377 static inline int putu16_user(u16 *p, u16 val)
51378 {
51379- return put_user(val, (__force u16 __user *)p);
51380+ return put_user(val, (u16 __force_user *)p);
51381 }
51382
51383 static inline int copydesc_user(void *dst, const void *src, size_t len)
51384 {
51385- return copy_from_user(dst, (__force void __user *)src, len) ?
51386+ return copy_from_user(dst, (void __force_user *)src, len) ?
51387 -EFAULT : 0;
51388 }
51389
51390@@ -548,19 +548,19 @@ static inline int putused_user(struct vring_used_elem *dst,
51391 const struct vring_used_elem *src,
51392 unsigned int num)
51393 {
51394- return copy_to_user((__force void __user *)dst, src,
51395+ return copy_to_user((void __force_user *)dst, src,
51396 sizeof(*dst) * num) ? -EFAULT : 0;
51397 }
51398
51399 static inline int xfer_from_user(void *src, void *dst, size_t len)
51400 {
51401- return copy_from_user(dst, (__force void __user *)src, len) ?
51402+ return copy_from_user(dst, (void __force_user *)src, len) ?
51403 -EFAULT : 0;
51404 }
51405
51406 static inline int xfer_to_user(void *dst, void *src, size_t len)
51407 {
51408- return copy_to_user((__force void __user *)dst, src, len) ?
51409+ return copy_to_user((void __force_user *)dst, src, len) ?
51410 -EFAULT : 0;
51411 }
51412
51413@@ -596,9 +596,9 @@ int vringh_init_user(struct vringh *vrh, u32 features,
51414 vrh->last_used_idx = 0;
51415 vrh->vring.num = num;
51416 /* vring expects kernel addresses, but only used via accessors. */
51417- vrh->vring.desc = (__force struct vring_desc *)desc;
51418- vrh->vring.avail = (__force struct vring_avail *)avail;
51419- vrh->vring.used = (__force struct vring_used *)used;
51420+ vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
51421+ vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
51422+ vrh->vring.used = (__force_kernel struct vring_used *)used;
51423 return 0;
51424 }
51425 EXPORT_SYMBOL(vringh_init_user);
51426@@ -800,7 +800,7 @@ static inline int getu16_kern(u16 *val, const u16 *p)
51427
51428 static inline int putu16_kern(u16 *p, u16 val)
51429 {
51430- ACCESS_ONCE(*p) = val;
51431+ ACCESS_ONCE_RW(*p) = val;
51432 return 0;
51433 }
51434
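
The vringh changes replace open-coded `(__force u16 __user *)` casts with __force_user (and __force_kernel for the reverse direction): single annotations that tell sparse and the PaX checker these address-space conversions are deliberate. A plausible definition when no checker plugin is involved (sketch, assumption):

/* Sketch: fold the force-cast and the target address space into one
 * token, so (u16 __force_user *)p reads as a single declared intent. */
#ifndef __force_user
# define __force_user   __force __user
# define __force_kernel __force __kernel
#endif
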
51435diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c
51436index 1b0b233..6f34c2c 100644
51437--- a/drivers/video/arcfb.c
51438+++ b/drivers/video/arcfb.c
51439@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
51440 return -ENOSPC;
51441
51442 err = 0;
51443- if ((count + p) > fbmemlength) {
51444+ if (count > (fbmemlength - p)) {
51445 count = fbmemlength - p;
51446 err = -ENOSPC;
51447 }
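
The arcfb bound check is rewritten because `count + p` can wrap: with unsigned arithmetic a huge count overflows past the type's maximum and the old comparison falsely passes, while `count > fbmemlength - p` keeps every intermediate value in range (the -ENOSPC return visible above already guards the offset, so the subtraction cannot underflow). A standalone demonstration with made-up sizes:

#include <stdio.h>

/* Sketch: 32-bit unsigned count + p wraps, so the naive test accepts
 * an out-of-range write; the rearranged test rejects it. */
int main(void) {
        unsigned int fbmemlength = 0x1000;
        unsigned int p = 0x800;           /* file offset, < fbmemlength */
        unsigned int count = 0xFFFFFF00u; /* absurdly large request */

        printf("naive: %d\n", (count + p) > fbmemlength); /* 0: wrapped */
        printf("fixed: %d\n", count > (fbmemlength - p)); /* 1: caught  */
        return 0;
}
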
51448diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
51449index 12ca031..84a8a74 100644
51450--- a/drivers/video/aty/aty128fb.c
51451+++ b/drivers/video/aty/aty128fb.c
51452@@ -149,7 +149,7 @@ enum {
51453 };
51454
51455 /* Must match above enum */
51456-static char * const r128_family[] = {
51457+static const char * const r128_family[] = {
51458 "AGP",
51459 "PCI",
51460 "PRO AGP",
51461diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
51462index 28fafbf..ae91651 100644
51463--- a/drivers/video/aty/atyfb_base.c
51464+++ b/drivers/video/aty/atyfb_base.c
51465@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
51466 par->accel_flags = var->accel_flags; /* hack */
51467
51468 if (var->accel_flags) {
51469- info->fbops->fb_sync = atyfb_sync;
51470+ pax_open_kernel();
51471+ *(void **)&info->fbops->fb_sync = atyfb_sync;
51472+ pax_close_kernel();
51473 info->flags &= ~FBINFO_HWACCEL_DISABLED;
51474 } else {
51475- info->fbops->fb_sync = NULL;
51476+ pax_open_kernel();
51477+ *(void **)&info->fbops->fb_sync = NULL;
51478+ pax_close_kernel();
51479 info->flags |= FBINFO_HWACCEL_DISABLED;
51480 }
51481
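
Under PaX constification the fb_ops table becomes write-protected, so plain assignment to info->fbops->fb_sync would fault; the patch brackets such stores with pax_open_kernel()/pax_close_kernel() and routes them through a `void **` cast to bypass the const qualifier. The same idiom appears again below in mach64_cursor.c and fb_defio.c. On configurations without the feature the pair should cost nothing; a sketch of that fallback (assumption, not quoted from this patch):

/* Sketch: with nothing to unprotect, opening/closing the kernel's
 * write protection degenerates to a no-op. */
#ifndef CONFIG_PAX_KERNEXEC
static inline unsigned long pax_open_kernel(void)  { return 0; }
static inline unsigned long pax_close_kernel(void) { return 0; }
#endif
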
51482diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
51483index 95ec042..e6affdd 100644
51484--- a/drivers/video/aty/mach64_cursor.c
51485+++ b/drivers/video/aty/mach64_cursor.c
51486@@ -7,6 +7,7 @@
51487 #include <linux/string.h>
51488
51489 #include <asm/io.h>
51490+#include <asm/pgtable.h>
51491
51492 #ifdef __sparc__
51493 #include <asm/fbio.h>
51494@@ -208,7 +209,9 @@ int aty_init_cursor(struct fb_info *info)
51495 info->sprite.buf_align = 16; /* and 64 lines tall. */
51496 info->sprite.flags = FB_PIXMAP_IO;
51497
51498- info->fbops->fb_cursor = atyfb_cursor;
51499+ pax_open_kernel();
51500+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
51501+ pax_close_kernel();
51502
51503 return 0;
51504 }
51505diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
51506index 7592cc2..92feb56 100644
51507--- a/drivers/video/backlight/kb3886_bl.c
51508+++ b/drivers/video/backlight/kb3886_bl.c
51509@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
51510 static unsigned long kb3886bl_flags;
51511 #define KB3886BL_SUSPENDED 0x01
51512
51513-static struct dmi_system_id __initdata kb3886bl_device_table[] = {
51514+static const struct dmi_system_id __initconst kb3886bl_device_table[] = {
51515 {
51516 .ident = "Sahara Touch-iT",
51517 .matches = {
51518diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
51519index 900aa4e..6d49418 100644
51520--- a/drivers/video/fb_defio.c
51521+++ b/drivers/video/fb_defio.c
51522@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
51523
51524 BUG_ON(!fbdefio);
51525 mutex_init(&fbdefio->lock);
51526- info->fbops->fb_mmap = fb_deferred_io_mmap;
51527+ pax_open_kernel();
51528+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
51529+ pax_close_kernel();
51530 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
51531 INIT_LIST_HEAD(&fbdefio->pagelist);
51532 if (fbdefio->delay == 0) /* set a default of 1 s */
51533@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
51534 page->mapping = NULL;
51535 }
51536
51537- info->fbops->fb_mmap = NULL;
51538+ *(void **)&info->fbops->fb_mmap = NULL;
51539 mutex_destroy(&fbdefio->lock);
51540 }
51541 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
51542diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
51543index 010d191..7b8235a 100644
51544--- a/drivers/video/fbmem.c
51545+++ b/drivers/video/fbmem.c
51546@@ -433,7 +433,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
51547 image->dx += image->width + 8;
51548 }
51549 } else if (rotate == FB_ROTATE_UD) {
51550- for (x = 0; x < num && image->dx >= 0; x++) {
51551+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
51552 info->fbops->fb_imageblit(info, image);
51553 image->dx -= image->width + 8;
51554 }
51555@@ -445,7 +445,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
51556 image->dy += image->height + 8;
51557 }
51558 } else if (rotate == FB_ROTATE_CCW) {
51559- for (x = 0; x < num && image->dy >= 0; x++) {
51560+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
51561 info->fbops->fb_imageblit(info, image);
51562 image->dy -= image->height + 8;
51563 }
51564@@ -1179,7 +1179,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
51565 return -EFAULT;
51566 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
51567 return -EINVAL;
51568- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
51569+ if (con2fb.framebuffer >= FB_MAX)
51570 return -EINVAL;
51571 if (!registered_fb[con2fb.framebuffer])
51572 request_module("fb%d", con2fb.framebuffer);
51573@@ -1300,7 +1300,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
51574 __u32 data;
51575 int err;
51576
51577- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
51578+ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
51579
51580 data = (__u32) (unsigned long) fix->smem_start;
51581 err |= put_user(data, &fix32->smem_start);
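
Three of the fbmem fixes share one root cause: image->dx/image->dy and con2fb.framebuffer are unsigned, so `image->dx >= 0` is always true and `con2fb.framebuffer < 0` is always false, leaving the rotate loops unbounded on wrap-around and the range check half dead. Casting to __s32 restores the intended "went below zero" test. A compact standalone illustration:

#include <stdint.h>
#include <stdio.h>

/* Sketch: an unsigned counter never tests negative; the signed cast
 * detects the wrap to a huge value and stops the loop. */
int main(void) {
        uint32_t dx = 10;
        int steps = 0;

        while ((int32_t)dx >= 0 && steps < 100) { /* fixed form */
                dx -= 8;        /* mimics image->dx -= width + 8 */
                steps++;
        }
        printf("stopped after %d steps, dx=0x%x\n", steps, (unsigned)dx);
        return 0;
}
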
51582diff --git a/drivers/video/hyperv_fb.c b/drivers/video/hyperv_fb.c
51583index 130708f..cdac1a9 100644
51584--- a/drivers/video/hyperv_fb.c
51585+++ b/drivers/video/hyperv_fb.c
51586@@ -233,7 +233,7 @@ static uint screen_fb_size;
51587 static inline int synthvid_send(struct hv_device *hdev,
51588 struct synthvid_msg *msg)
51589 {
51590- static atomic64_t request_id = ATOMIC64_INIT(0);
51591+ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
51592 int ret;
51593
51594 msg->pipe_hdr.type = PIPE_MSG_DATA;
51595@@ -241,7 +241,7 @@ static inline int synthvid_send(struct hv_device *hdev,
51596
51597 ret = vmbus_sendpacket(hdev->channel, msg,
51598 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
51599- atomic64_inc_return(&request_id),
51600+ atomic64_inc_return_unchecked(&request_id),
51601 VM_PKT_DATA_INBAND, 0);
51602
51603 if (ret)
51604diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
51605index 7672d2e..b56437f 100644
51606--- a/drivers/video/i810/i810_accel.c
51607+++ b/drivers/video/i810/i810_accel.c
51608@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
51609 }
51610 }
51611 printk("ringbuffer lockup!!!\n");
51612+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
51613 i810_report_error(mmio);
51614 par->dev_flags |= LOCKUP;
51615 info->pixmap.scan_align = 1;
51616diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
51617index 3c14e43..2630570 100644
51618--- a/drivers/video/logo/logo_linux_clut224.ppm
51619+++ b/drivers/video/logo/logo_linux_clut224.ppm
51620@@ -2,1603 +2,1123 @@ P3
51621 # Standard 224-color Linux logo
51622 80 80
51623 255
- [1603 rows of raw RGB pixel triplets for the old 80x80, 224-color Linux boot logo elided; this hunk replaces the image wholesale and the numeric pixel data carries no reviewable content]
52946- 30 30 30 78 78 78 163 133 67 210 150 10
52947-236 178 12 246 186 14 246 190 14 246 190 14
52948-246 190 14 246 190 14 246 190 14 246 190 14
52949-246 190 14 246 190 14 246 190 14 246 190 14
52950-246 190 14 246 190 14 246 190 14 246 190 14
52951-241 196 14 215 174 15 190 178 144 253 253 253
52952-253 253 253 253 253 253 253 253 253 253 253 253
52953-253 253 253 253 253 253 253 253 253 253 253 253
52954-253 253 253 253 253 253 253 253 253 253 253 253
52955-253 253 253 253 253 253 253 253 253 218 218 218
52956- 58 58 58 2 2 6 22 18 6 167 114 7
52957-216 158 10 236 178 12 246 186 14 246 190 14
52958-246 190 14 246 190 14 246 190 14 246 190 14
52959-246 190 14 246 190 14 246 190 14 246 190 14
52960-246 190 14 246 190 14 246 190 14 246 190 14
52961-246 190 14 246 186 14 242 186 14 190 150 46
52962- 54 54 54 22 22 22 6 6 6 0 0 0
52963- 0 0 0 0 0 0 0 0 0 0 0 0
52964- 0 0 0 0 0 0 0 0 0 0 0 0
52965- 0 0 0 0 0 0 0 0 0 14 14 14
52966- 38 38 38 86 86 86 180 133 36 213 154 11
52967-236 178 12 246 186 14 246 190 14 246 190 14
52968-246 190 14 246 190 14 246 190 14 246 190 14
52969-246 190 14 246 190 14 246 190 14 246 190 14
52970-246 190 14 246 190 14 246 190 14 246 190 14
52971-246 190 14 232 195 16 190 146 13 214 214 214
52972-253 253 253 253 253 253 253 253 253 253 253 253
52973-253 253 253 253 253 253 253 253 253 253 253 253
52974-253 253 253 253 253 253 253 253 253 253 253 253
52975-253 253 253 250 250 250 170 170 170 26 26 26
52976- 2 2 6 2 2 6 37 26 9 163 110 8
52977-219 162 10 239 182 13 246 186 14 246 190 14
52978-246 190 14 246 190 14 246 190 14 246 190 14
52979-246 190 14 246 190 14 246 190 14 246 190 14
52980-246 190 14 246 190 14 246 190 14 246 190 14
52981-246 186 14 236 178 12 224 166 10 142 122 72
52982- 46 46 46 18 18 18 6 6 6 0 0 0
52983- 0 0 0 0 0 0 0 0 0 0 0 0
52984- 0 0 0 0 0 0 0 0 0 0 0 0
52985- 0 0 0 0 0 0 6 6 6 18 18 18
52986- 50 50 50 109 106 95 192 133 9 224 166 10
52987-242 186 14 246 190 14 246 190 14 246 190 14
52988-246 190 14 246 190 14 246 190 14 246 190 14
52989-246 190 14 246 190 14 246 190 14 246 190 14
52990-246 190 14 246 190 14 246 190 14 246 190 14
52991-242 186 14 226 184 13 210 162 10 142 110 46
52992-226 226 226 253 253 253 253 253 253 253 253 253
52993-253 253 253 253 253 253 253 253 253 253 253 253
52994-253 253 253 253 253 253 253 253 253 253 253 253
52995-198 198 198 66 66 66 2 2 6 2 2 6
52996- 2 2 6 2 2 6 50 34 6 156 107 11
52997-219 162 10 239 182 13 246 186 14 246 190 14
52998-246 190 14 246 190 14 246 190 14 246 190 14
52999-246 190 14 246 190 14 246 190 14 246 190 14
53000-246 190 14 246 190 14 246 190 14 242 186 14
53001-234 174 13 213 154 11 154 122 46 66 66 66
53002- 30 30 30 10 10 10 0 0 0 0 0 0
53003- 0 0 0 0 0 0 0 0 0 0 0 0
53004- 0 0 0 0 0 0 0 0 0 0 0 0
53005- 0 0 0 0 0 0 6 6 6 22 22 22
53006- 58 58 58 154 121 60 206 145 10 234 174 13
53007-242 186 14 246 186 14 246 190 14 246 190 14
53008-246 190 14 246 190 14 246 190 14 246 190 14
53009-246 190 14 246 190 14 246 190 14 246 190 14
53010-246 190 14 246 190 14 246 190 14 246 190 14
53011-246 186 14 236 178 12 210 162 10 163 110 8
53012- 61 42 6 138 138 138 218 218 218 250 250 250
53013-253 253 253 253 253 253 253 253 253 250 250 250
53014-242 242 242 210 210 210 144 144 144 66 66 66
53015- 6 6 6 2 2 6 2 2 6 2 2 6
53016- 2 2 6 2 2 6 61 42 6 163 110 8
53017-216 158 10 236 178 12 246 190 14 246 190 14
53018-246 190 14 246 190 14 246 190 14 246 190 14
53019-246 190 14 246 190 14 246 190 14 246 190 14
53020-246 190 14 239 182 13 230 174 11 216 158 10
53021-190 142 34 124 112 88 70 70 70 38 38 38
53022- 18 18 18 6 6 6 0 0 0 0 0 0
53023- 0 0 0 0 0 0 0 0 0 0 0 0
53024- 0 0 0 0 0 0 0 0 0 0 0 0
53025- 0 0 0 0 0 0 6 6 6 22 22 22
53026- 62 62 62 168 124 44 206 145 10 224 166 10
53027-236 178 12 239 182 13 242 186 14 242 186 14
53028-246 186 14 246 190 14 246 190 14 246 190 14
53029-246 190 14 246 190 14 246 190 14 246 190 14
53030-246 190 14 246 190 14 246 190 14 246 190 14
53031-246 190 14 236 178 12 216 158 10 175 118 6
53032- 80 54 7 2 2 6 6 6 6 30 30 30
53033- 54 54 54 62 62 62 50 50 50 38 38 38
53034- 14 14 14 2 2 6 2 2 6 2 2 6
53035- 2 2 6 2 2 6 2 2 6 2 2 6
53036- 2 2 6 6 6 6 80 54 7 167 114 7
53037-213 154 11 236 178 12 246 190 14 246 190 14
53038-246 190 14 246 190 14 246 190 14 246 190 14
53039-246 190 14 242 186 14 239 182 13 239 182 13
53040-230 174 11 210 150 10 174 135 50 124 112 88
53041- 82 82 82 54 54 54 34 34 34 18 18 18
53042- 6 6 6 0 0 0 0 0 0 0 0 0
53043- 0 0 0 0 0 0 0 0 0 0 0 0
53044- 0 0 0 0 0 0 0 0 0 0 0 0
53045- 0 0 0 0 0 0 6 6 6 18 18 18
53046- 50 50 50 158 118 36 192 133 9 200 144 11
53047-216 158 10 219 162 10 224 166 10 226 170 11
53048-230 174 11 236 178 12 239 182 13 239 182 13
53049-242 186 14 246 186 14 246 190 14 246 190 14
53050-246 190 14 246 190 14 246 190 14 246 190 14
53051-246 186 14 230 174 11 210 150 10 163 110 8
53052-104 69 6 10 10 10 2 2 6 2 2 6
53053- 2 2 6 2 2 6 2 2 6 2 2 6
53054- 2 2 6 2 2 6 2 2 6 2 2 6
53055- 2 2 6 2 2 6 2 2 6 2 2 6
53056- 2 2 6 6 6 6 91 60 6 167 114 7
53057-206 145 10 230 174 11 242 186 14 246 190 14
53058-246 190 14 246 190 14 246 186 14 242 186 14
53059-239 182 13 230 174 11 224 166 10 213 154 11
53060-180 133 36 124 112 88 86 86 86 58 58 58
53061- 38 38 38 22 22 22 10 10 10 6 6 6
53062- 0 0 0 0 0 0 0 0 0 0 0 0
53063- 0 0 0 0 0 0 0 0 0 0 0 0
53064- 0 0 0 0 0 0 0 0 0 0 0 0
53065- 0 0 0 0 0 0 0 0 0 14 14 14
53066- 34 34 34 70 70 70 138 110 50 158 118 36
53067-167 114 7 180 123 7 192 133 9 197 138 11
53068-200 144 11 206 145 10 213 154 11 219 162 10
53069-224 166 10 230 174 11 239 182 13 242 186 14
53070-246 186 14 246 186 14 246 186 14 246 186 14
53071-239 182 13 216 158 10 185 133 11 152 99 6
53072-104 69 6 18 14 6 2 2 6 2 2 6
53073- 2 2 6 2 2 6 2 2 6 2 2 6
53074- 2 2 6 2 2 6 2 2 6 2 2 6
53075- 2 2 6 2 2 6 2 2 6 2 2 6
53076- 2 2 6 6 6 6 80 54 7 152 99 6
53077-192 133 9 219 162 10 236 178 12 239 182 13
53078-246 186 14 242 186 14 239 182 13 236 178 12
53079-224 166 10 206 145 10 192 133 9 154 121 60
53080- 94 94 94 62 62 62 42 42 42 22 22 22
53081- 14 14 14 6 6 6 0 0 0 0 0 0
53082- 0 0 0 0 0 0 0 0 0 0 0 0
53083- 0 0 0 0 0 0 0 0 0 0 0 0
53084- 0 0 0 0 0 0 0 0 0 0 0 0
53085- 0 0 0 0 0 0 0 0 0 6 6 6
53086- 18 18 18 34 34 34 58 58 58 78 78 78
53087-101 98 89 124 112 88 142 110 46 156 107 11
53088-163 110 8 167 114 7 175 118 6 180 123 7
53089-185 133 11 197 138 11 210 150 10 219 162 10
53090-226 170 11 236 178 12 236 178 12 234 174 13
53091-219 162 10 197 138 11 163 110 8 130 83 6
53092- 91 60 6 10 10 10 2 2 6 2 2 6
53093- 18 18 18 38 38 38 38 38 38 38 38 38
53094- 38 38 38 38 38 38 38 38 38 38 38 38
53095- 38 38 38 38 38 38 26 26 26 2 2 6
53096- 2 2 6 6 6 6 70 47 6 137 92 6
53097-175 118 6 200 144 11 219 162 10 230 174 11
53098-234 174 13 230 174 11 219 162 10 210 150 10
53099-192 133 9 163 110 8 124 112 88 82 82 82
53100- 50 50 50 30 30 30 14 14 14 6 6 6
53101- 0 0 0 0 0 0 0 0 0 0 0 0
53102- 0 0 0 0 0 0 0 0 0 0 0 0
53103- 0 0 0 0 0 0 0 0 0 0 0 0
53104- 0 0 0 0 0 0 0 0 0 0 0 0
53105- 0 0 0 0 0 0 0 0 0 0 0 0
53106- 6 6 6 14 14 14 22 22 22 34 34 34
53107- 42 42 42 58 58 58 74 74 74 86 86 86
53108-101 98 89 122 102 70 130 98 46 121 87 25
53109-137 92 6 152 99 6 163 110 8 180 123 7
53110-185 133 11 197 138 11 206 145 10 200 144 11
53111-180 123 7 156 107 11 130 83 6 104 69 6
53112- 50 34 6 54 54 54 110 110 110 101 98 89
53113- 86 86 86 82 82 82 78 78 78 78 78 78
53114- 78 78 78 78 78 78 78 78 78 78 78 78
53115- 78 78 78 82 82 82 86 86 86 94 94 94
53116-106 106 106 101 101 101 86 66 34 124 80 6
53117-156 107 11 180 123 7 192 133 9 200 144 11
53118-206 145 10 200 144 11 192 133 9 175 118 6
53119-139 102 15 109 106 95 70 70 70 42 42 42
53120- 22 22 22 10 10 10 0 0 0 0 0 0
53121- 0 0 0 0 0 0 0 0 0 0 0 0
53122- 0 0 0 0 0 0 0 0 0 0 0 0
53123- 0 0 0 0 0 0 0 0 0 0 0 0
53124- 0 0 0 0 0 0 0 0 0 0 0 0
53125- 0 0 0 0 0 0 0 0 0 0 0 0
53126- 0 0 0 0 0 0 6 6 6 10 10 10
53127- 14 14 14 22 22 22 30 30 30 38 38 38
53128- 50 50 50 62 62 62 74 74 74 90 90 90
53129-101 98 89 112 100 78 121 87 25 124 80 6
53130-137 92 6 152 99 6 152 99 6 152 99 6
53131-138 86 6 124 80 6 98 70 6 86 66 30
53132-101 98 89 82 82 82 58 58 58 46 46 46
53133- 38 38 38 34 34 34 34 34 34 34 34 34
53134- 34 34 34 34 34 34 34 34 34 34 34 34
53135- 34 34 34 34 34 34 38 38 38 42 42 42
53136- 54 54 54 82 82 82 94 86 76 91 60 6
53137-134 86 6 156 107 11 167 114 7 175 118 6
53138-175 118 6 167 114 7 152 99 6 121 87 25
53139-101 98 89 62 62 62 34 34 34 18 18 18
53140- 6 6 6 0 0 0 0 0 0 0 0 0
53141- 0 0 0 0 0 0 0 0 0 0 0 0
53142- 0 0 0 0 0 0 0 0 0 0 0 0
53143- 0 0 0 0 0 0 0 0 0 0 0 0
53144- 0 0 0 0 0 0 0 0 0 0 0 0
53145- 0 0 0 0 0 0 0 0 0 0 0 0
53146- 0 0 0 0 0 0 0 0 0 0 0 0
53147- 0 0 0 6 6 6 6 6 6 10 10 10
53148- 18 18 18 22 22 22 30 30 30 42 42 42
53149- 50 50 50 66 66 66 86 86 86 101 98 89
53150-106 86 58 98 70 6 104 69 6 104 69 6
53151-104 69 6 91 60 6 82 62 34 90 90 90
53152- 62 62 62 38 38 38 22 22 22 14 14 14
53153- 10 10 10 10 10 10 10 10 10 10 10 10
53154- 10 10 10 10 10 10 6 6 6 10 10 10
53155- 10 10 10 10 10 10 10 10 10 14 14 14
53156- 22 22 22 42 42 42 70 70 70 89 81 66
53157- 80 54 7 104 69 6 124 80 6 137 92 6
53158-134 86 6 116 81 8 100 82 52 86 86 86
53159- 58 58 58 30 30 30 14 14 14 6 6 6
53160- 0 0 0 0 0 0 0 0 0 0 0 0
53161- 0 0 0 0 0 0 0 0 0 0 0 0
53162- 0 0 0 0 0 0 0 0 0 0 0 0
53163- 0 0 0 0 0 0 0 0 0 0 0 0
53164- 0 0 0 0 0 0 0 0 0 0 0 0
53165- 0 0 0 0 0 0 0 0 0 0 0 0
53166- 0 0 0 0 0 0 0 0 0 0 0 0
53167- 0 0 0 0 0 0 0 0 0 0 0 0
53168- 0 0 0 6 6 6 10 10 10 14 14 14
53169- 18 18 18 26 26 26 38 38 38 54 54 54
53170- 70 70 70 86 86 86 94 86 76 89 81 66
53171- 89 81 66 86 86 86 74 74 74 50 50 50
53172- 30 30 30 14 14 14 6 6 6 0 0 0
53173- 0 0 0 0 0 0 0 0 0 0 0 0
53174- 0 0 0 0 0 0 0 0 0 0 0 0
53175- 0 0 0 0 0 0 0 0 0 0 0 0
53176- 6 6 6 18 18 18 34 34 34 58 58 58
53177- 82 82 82 89 81 66 89 81 66 89 81 66
53178- 94 86 66 94 86 76 74 74 74 50 50 50
53179- 26 26 26 14 14 14 6 6 6 0 0 0
53180- 0 0 0 0 0 0 0 0 0 0 0 0
53181- 0 0 0 0 0 0 0 0 0 0 0 0
53182- 0 0 0 0 0 0 0 0 0 0 0 0
53183- 0 0 0 0 0 0 0 0 0 0 0 0
53184- 0 0 0 0 0 0 0 0 0 0 0 0
53185- 0 0 0 0 0 0 0 0 0 0 0 0
53186- 0 0 0 0 0 0 0 0 0 0 0 0
53187- 0 0 0 0 0 0 0 0 0 0 0 0
53188- 0 0 0 0 0 0 0 0 0 0 0 0
53189- 6 6 6 6 6 6 14 14 14 18 18 18
53190- 30 30 30 38 38 38 46 46 46 54 54 54
53191- 50 50 50 42 42 42 30 30 30 18 18 18
53192- 10 10 10 0 0 0 0 0 0 0 0 0
53193- 0 0 0 0 0 0 0 0 0 0 0 0
53194- 0 0 0 0 0 0 0 0 0 0 0 0
53195- 0 0 0 0 0 0 0 0 0 0 0 0
53196- 0 0 0 6 6 6 14 14 14 26 26 26
53197- 38 38 38 50 50 50 58 58 58 58 58 58
53198- 54 54 54 42 42 42 30 30 30 18 18 18
53199- 10 10 10 0 0 0 0 0 0 0 0 0
53200- 0 0 0 0 0 0 0 0 0 0 0 0
53201- 0 0 0 0 0 0 0 0 0 0 0 0
53202- 0 0 0 0 0 0 0 0 0 0 0 0
53203- 0 0 0 0 0 0 0 0 0 0 0 0
53204- 0 0 0 0 0 0 0 0 0 0 0 0
53205- 0 0 0 0 0 0 0 0 0 0 0 0
53206- 0 0 0 0 0 0 0 0 0 0 0 0
53207- 0 0 0 0 0 0 0 0 0 0 0 0
53208- 0 0 0 0 0 0 0 0 0 0 0 0
53209- 0 0 0 0 0 0 0 0 0 6 6 6
53210- 6 6 6 10 10 10 14 14 14 18 18 18
53211- 18 18 18 14 14 14 10 10 10 6 6 6
53212- 0 0 0 0 0 0 0 0 0 0 0 0
53213- 0 0 0 0 0 0 0 0 0 0 0 0
53214- 0 0 0 0 0 0 0 0 0 0 0 0
53215- 0 0 0 0 0 0 0 0 0 0 0 0
53216- 0 0 0 0 0 0 0 0 0 6 6 6
53217- 14 14 14 18 18 18 22 22 22 22 22 22
53218- 18 18 18 14 14 14 10 10 10 6 6 6
53219- 0 0 0 0 0 0 0 0 0 0 0 0
53220- 0 0 0 0 0 0 0 0 0 0 0 0
53221- 0 0 0 0 0 0 0 0 0 0 0 0
53222- 0 0 0 0 0 0 0 0 0 0 0 0
53223- 0 0 0 0 0 0 0 0 0 0 0 0
53224+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53226+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53227+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53228+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53229+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53230+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53231+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53235+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53236+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53237+4 4 4 4 4 4
53238+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53240+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53241+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53242+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53243+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53244+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53245+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53250+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53251+4 4 4 4 4 4
53252+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53254+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53255+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53256+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53257+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53258+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53259+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53263+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53264+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53265+4 4 4 4 4 4
53266+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53268+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53269+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53270+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53271+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53272+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53273+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53277+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53278+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53279+4 4 4 4 4 4
53280+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53281+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53282+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53283+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53284+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53285+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53286+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53287+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53291+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53292+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53293+4 4 4 4 4 4
53294+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53295+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53296+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53297+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53298+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53299+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53300+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53301+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53305+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53306+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53307+4 4 4 4 4 4
53308+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53309+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53310+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53311+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53312+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
53313+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
53314+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53315+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53316+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53317+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
53318+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
53319+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
53320+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53321+4 4 4 4 4 4
53322+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53323+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53324+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53325+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53326+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
53327+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
53328+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53329+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53330+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53331+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
53332+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
53333+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
53334+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53335+4 4 4 4 4 4
53336+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53337+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53338+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53339+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53340+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
53341+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
53342+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
53343+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53344+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53345+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
53346+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
53347+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
53348+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
53349+4 4 4 4 4 4
53350+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53351+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53352+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53353+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
53354+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
53355+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
53356+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
53357+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53358+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
53359+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
53360+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
53361+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
53362+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
53363+4 4 4 4 4 4
53364+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53365+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53366+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53367+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
53368+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
53369+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
53370+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
53371+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
53372+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
53373+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
53374+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
53375+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
53376+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
53377+4 4 4 4 4 4
53378+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53379+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53380+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
53381+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
53382+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
53383+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
53384+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
53385+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
53386+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
53387+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
53388+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
53389+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
53390+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
53391+4 4 4 4 4 4
53392+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53393+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53394+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
53395+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
53396+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
53397+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
53398+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
53399+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
53400+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
53401+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
53402+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
53403+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
53404+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
53405+4 4 4 4 4 4
53406+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53407+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53408+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
53409+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
53410+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
53411+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
53412+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
53413+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
53414+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
53415+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
53416+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
53417+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
53418+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
53419+4 4 4 4 4 4
53420+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53421+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53422+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
53423+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
53424+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
53425+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
53426+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
53427+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
53428+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
53429+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
53430+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
53431+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
53432+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
53433+4 4 4 4 4 4
53434+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53435+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53436+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
53437+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
53438+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
53439+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
53440+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
53441+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
53442+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
53443+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
53444+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
53445+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
53446+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
53447+4 4 4 4 4 4
53448+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53449+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
53450+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
53451+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
53452+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
53453+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
53454+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
53455+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
53456+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
53457+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
53458+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
53459+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
53460+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
53461+4 4 4 4 4 4
53462+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53463+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
53464+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
53465+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
53466+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
53467+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
53468+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
53469+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
53470+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
53471+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
53472+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
53473+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
53474+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
53475+0 0 0 4 4 4
53476+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
53477+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
53478+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
53479+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
53480+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
53481+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
53482+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
53483+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
53484+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
53485+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
53486+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
53487+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
53488+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
53489+2 0 0 0 0 0
53490+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
53491+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
53492+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
53493+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
53494+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
53495+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
53496+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
53497+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
53498+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
53499+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
53500+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
53501+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
53502+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
53503+37 38 37 0 0 0
53504+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
53505+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
53506+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
53507+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
53508+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
53509+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
53510+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
53511+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
53512+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
53513+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
53514+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
53515+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
53516+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
53517+85 115 134 4 0 0
53518+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
53519+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
53520+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
53521+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
53522+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
53523+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
53524+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
53525+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
53526+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
53527+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
53528+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
53529+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
53530+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
53531+60 73 81 4 0 0
53532+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
53533+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
53534+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
53535+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
53536+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
53537+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
53538+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
53539+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
53540+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
53541+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
53542+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
53543+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
53544+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
53545+16 19 21 4 0 0
53546+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
53547+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
53548+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
53549+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
53550+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
53551+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
53552+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
53553+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
53554+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
53555+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
53556+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
53557+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
53558+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
53559+4 0 0 4 3 3
53560+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
53561+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
53562+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
53563+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
53564+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
53565+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
53566+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
53567+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
53568+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
53569+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
53570+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
53571+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
53572+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
53573+3 2 2 4 4 4
53574+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
53575+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
53576+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
53577+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
53578+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
53579+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
53580+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
53581+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
53582+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
53583+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
53584+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
53585+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
53586+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
53587+4 4 4 4 4 4
53588+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
53589+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
53590+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
53591+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
53592+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
53593+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
53594+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
53595+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
53596+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
53597+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
53598+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
53599+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
53600+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
53601+4 4 4 4 4 4
53602+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
53603+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
53604+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
53605+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
53606+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
53607+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
53608+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
53609+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
53610+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
53611+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
53612+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
53613+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
53614+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
53615+5 5 5 5 5 5
53616+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
53617+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
53618+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
53619+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
53620+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
53621+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
53622+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
53623+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
53624+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
53625+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
53626+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
53627+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
53628+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
53629+5 5 5 4 4 4
53630+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
53631+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
53632+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
53633+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
53634+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
53635+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
53636+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
53637+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
53638+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
53639+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
53640+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
53641+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
53642+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53643+4 4 4 4 4 4
53644+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
53645+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
53646+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
53647+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
53648+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
53649+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
53650+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
53651+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
53652+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
53653+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
53654+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
53655+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
53656+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53657+4 4 4 4 4 4
53658+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
53659+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
53660+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
53661+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
53662+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
53663+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
53664+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
53665+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
53666+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
53667+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
53668+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
53669+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53670+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53671+4 4 4 4 4 4
53672+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
53673+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
53674+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
53675+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
53676+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
53677+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
53678+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
53679+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
53680+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
53681+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
53682+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
53683+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53684+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53685+4 4 4 4 4 4
53686+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
53687+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
53688+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
53689+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
53690+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
53691+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
53692+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
53693+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
53694+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
53695+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
53696+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53697+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53698+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53699+4 4 4 4 4 4
53700+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
53701+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
53702+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
53703+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
53704+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
53705+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
53706+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
53707+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
53708+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
53709+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
53710+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
53711+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53712+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53713+4 4 4 4 4 4
53714+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
53715+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
53716+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
53717+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
53718+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
53719+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
53720+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
53721+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
53722+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
53723+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
53724+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
53725+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53726+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53727+4 4 4 4 4 4
53728+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
53729+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
53730+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
53731+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
53732+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
53733+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
53734+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
53735+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
53736+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
53737+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
53738+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53739+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53740+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53741+4 4 4 4 4 4
53742+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
53743+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
53744+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
53745+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
53746+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
53747+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
53748+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
53749+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
53750+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
53751+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
53752+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53753+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53754+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53755+4 4 4 4 4 4
53756+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
53757+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
53758+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
53759+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
53760+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
53761+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
53762+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
53763+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
53764+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
53765+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
53766+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53767+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53768+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53769+4 4 4 4 4 4
53770+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
53771+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
53772+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
53773+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
53774+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
53775+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
53776+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
53777+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
53778+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
53779+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
53780+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53781+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53782+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53783+4 4 4 4 4 4
53784+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
53785+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
53786+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
53787+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
53788+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
53789+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
53790+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
53791+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
53792+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
53793+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
53794+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53795+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53796+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53797+4 4 4 4 4 4
53798+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
53799+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
53800+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
53801+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
53802+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
53803+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
53804+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
53805+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
53806+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
53807+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
53808+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53809+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53810+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53811+4 4 4 4 4 4
53812+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
53813+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
53814+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
53815+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
53816+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
53817+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
53818+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
53819+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
53820+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
53821+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
53822+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53823+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53824+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53825+4 4 4 4 4 4
53826+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
53827+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
53828+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
53829+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
53830+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
53831+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
53832+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
53833+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
53834+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
53835+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
53836+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53838+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53839+4 4 4 4 4 4
53840+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
53841+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
53842+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
53843+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
53844+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
53845+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
53846+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
53847+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
53848+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
53849+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
53850+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53852+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53853+4 4 4 4 4 4
53854+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
53855+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
53856+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
53857+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
53858+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
53859+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
53860+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
53861+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
53862+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
53863+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
53864+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53865+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53866+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53867+4 4 4 4 4 4
53868+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
53869+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
53870+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
53871+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
53872+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
53873+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
53874+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
53875+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
53876+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
53877+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
53878+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53879+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53880+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53881+4 4 4 4 4 4
53882+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
53883+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
53884+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
53885+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
53886+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
53887+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
53888+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
53889+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
53890+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
53891+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
53892+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53893+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53894+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53895+4 4 4 4 4 4
53896+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
53897+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
53898+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
53899+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
53900+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
53901+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
53902+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
53903+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
53904+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
53905+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
53906+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53907+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53908+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53909+4 4 4 4 4 4
53910+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
53911+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
53912+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
53913+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
53914+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
53915+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
53916+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
53917+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
53918+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
53919+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
53920+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53921+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53922+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53923+4 4 4 4 4 4
53924+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
53925+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
53926+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
53927+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
53928+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
53929+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
53930+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
53931+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
53932+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
53933+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
53934+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53935+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53936+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53937+4 4 4 4 4 4
53938+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
53939+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
53940+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
53941+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
53942+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
[about 400 added lines of raw RGB pixel triplets (image data for a patched framebuffer logo) elided here]
54344diff --git a/drivers/video/matrox/matroxfb_DAC1064.c b/drivers/video/matrox/matroxfb_DAC1064.c
54345index a01147f..5d896f8 100644
54346--- a/drivers/video/matrox/matroxfb_DAC1064.c
54347+++ b/drivers/video/matrox/matroxfb_DAC1064.c
54348@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
54349
54350 #ifdef CONFIG_FB_MATROX_MYSTIQUE
54351 struct matrox_switch matrox_mystique = {
54352- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
54353+ .preinit = MGA1064_preinit,
54354+ .reset = MGA1064_reset,
54355+ .init = MGA1064_init,
54356+ .restore = MGA1064_restore,
54357 };
54358 EXPORT_SYMBOL(matrox_mystique);
54359 #endif
54360
54361 #ifdef CONFIG_FB_MATROX_G
54362 struct matrox_switch matrox_G100 = {
54363- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
54364+ .preinit = MGAG100_preinit,
54365+ .reset = MGAG100_reset,
54366+ .init = MGAG100_init,
54367+ .restore = MGAG100_restore,
54368 };
54369 EXPORT_SYMBOL(matrox_G100);
54370 #endif
54371diff --git a/drivers/video/matrox/matroxfb_Ti3026.c b/drivers/video/matrox/matroxfb_Ti3026.c
54372index 195ad7c..09743fc 100644
54373--- a/drivers/video/matrox/matroxfb_Ti3026.c
54374+++ b/drivers/video/matrox/matroxfb_Ti3026.c
54375@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
54376 }
54377
54378 struct matrox_switch matrox_millennium = {
54379- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
54380+ .preinit = Ti3026_preinit,
54381+ .reset = Ti3026_reset,
54382+ .init = Ti3026_init,
54383+ .restore = Ti3026_restore
54384 };
54385 EXPORT_SYMBOL(matrox_millennium);
54386 #endif
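
The two matrox hunks above replace positional struct initializers with designated ones. This is what makes the structs safe to constify or reorder: fields are bound by name, not by position. A minimal user-space sketch of the failure mode the designated form avoids (all names here are hypothetical stand-ins, not the kernel's definitions):

#include <stdio.h>

struct ops {
	int (*preinit)(void);
	int (*init)(void);   /* imagine this field was moved above reset */
	int (*reset)(void);
};

static int my_preinit(void) { return 0; }
static int my_reset(void)   { return 1; }
static int my_init(void)    { return 2; }

/* A positional initializer written for the old field order would now
 * silently wire my_reset into .init; the designated form below stays
 * correct regardless of field order. */
static const struct ops safe_ops = {
	.preinit = my_preinit,
	.reset   = my_reset,
	.init    = my_init,
};

int main(void)
{
	printf("init slot returns %d (expect 2)\n", safe_ops.init());
	return 0;
}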
54387diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c
54388index fe92eed..106e085 100644
54389--- a/drivers/video/mb862xx/mb862xxfb_accel.c
54390+++ b/drivers/video/mb862xx/mb862xxfb_accel.c
54391@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
54392 struct mb862xxfb_par *par = info->par;
54393
54394 if (info->var.bits_per_pixel == 32) {
54395- info->fbops->fb_fillrect = cfb_fillrect;
54396- info->fbops->fb_copyarea = cfb_copyarea;
54397- info->fbops->fb_imageblit = cfb_imageblit;
54398+ pax_open_kernel();
54399+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
54400+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
54401+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
54402+ pax_close_kernel();
54403 } else {
54404 outreg(disp, GC_L0EM, 3);
54405- info->fbops->fb_fillrect = mb86290fb_fillrect;
54406- info->fbops->fb_copyarea = mb86290fb_copyarea;
54407- info->fbops->fb_imageblit = mb86290fb_imageblit;
54408+ pax_open_kernel();
54409+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
54410+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
54411+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
54412+ pax_close_kernel();
54413 }
54414 outreg(draw, GDC_REG_DRAW_BASE, 0);
54415 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
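
The pax_open_kernel()/pax_close_kernel() bracket seen in this and the following framebuffer hunks exists because PaX places function-pointer tables such as fbops in read-only memory; the *(void **)& cast defeats the const qualifier the constify plugin adds, and the open/close pair is the narrow window in which the store is permitted. A user-space analogue of the idea, using mmap/mprotect instead of the kernel's mechanism (names are hypothetical):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct fb_ops_like { void (*fb_fillrect)(void); };

static void sw_fillrect(void) { puts("software fillrect"); }
static void hw_fillrect(void) { puts("accelerated fillrect"); }

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	struct fb_ops_like *ops = mmap(NULL, pg, PROT_READ | PROT_WRITE,
				       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ops == MAP_FAILED)
		return 1;
	ops->fb_fillrect = sw_fillrect;
	mprotect(ops, pg, PROT_READ);                /* table sealed */

	mprotect(ops, pg, PROT_READ | PROT_WRITE);   /* pax_open_kernel() */
	*(void **)&ops->fb_fillrect = (void *)hw_fillrect;
	mprotect(ops, pg, PROT_READ);                /* pax_close_kernel() */

	ops->fb_fillrect();
	return 0;
}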
54416diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
54417index ff22871..b129bed 100644
54418--- a/drivers/video/nvidia/nvidia.c
54419+++ b/drivers/video/nvidia/nvidia.c
54420@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
54421 info->fix.line_length = (info->var.xres_virtual *
54422 info->var.bits_per_pixel) >> 3;
54423 if (info->var.accel_flags) {
54424- info->fbops->fb_imageblit = nvidiafb_imageblit;
54425- info->fbops->fb_fillrect = nvidiafb_fillrect;
54426- info->fbops->fb_copyarea = nvidiafb_copyarea;
54427- info->fbops->fb_sync = nvidiafb_sync;
54428+ pax_open_kernel();
54429+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
54430+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
54431+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
54432+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
54433+ pax_close_kernel();
54434 info->pixmap.scan_align = 4;
54435 info->flags &= ~FBINFO_HWACCEL_DISABLED;
54436 info->flags |= FBINFO_READS_FAST;
54437 NVResetGraphics(info);
54438 } else {
54439- info->fbops->fb_imageblit = cfb_imageblit;
54440- info->fbops->fb_fillrect = cfb_fillrect;
54441- info->fbops->fb_copyarea = cfb_copyarea;
54442- info->fbops->fb_sync = NULL;
54443+ pax_open_kernel();
54444+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
54445+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
54446+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
54447+ *(void **)&info->fbops->fb_sync = NULL;
54448+ pax_close_kernel();
54449 info->pixmap.scan_align = 1;
54450 info->flags |= FBINFO_HWACCEL_DISABLED;
54451 info->flags &= ~FBINFO_READS_FAST;
54452@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
54453 info->pixmap.size = 8 * 1024;
54454 info->pixmap.flags = FB_PIXMAP_SYSTEM;
54455
54456- if (!hwcur)
54457- info->fbops->fb_cursor = NULL;
54458+ if (!hwcur) {
54459+ pax_open_kernel();
54460+ *(void **)&info->fbops->fb_cursor = NULL;
54461+ pax_close_kernel();
54462+ }
54463
54464 info->var.accel_flags = (!noaccel);
54465
54466diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
54467index 669a81f..e216d76 100644
54468--- a/drivers/video/omap2/dss/display.c
54469+++ b/drivers/video/omap2/dss/display.c
54470@@ -137,12 +137,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
54471 snprintf(dssdev->alias, sizeof(dssdev->alias),
54472 "display%d", disp_num_counter++);
54473
54474+ pax_open_kernel();
54475 if (drv && drv->get_resolution == NULL)
54476- drv->get_resolution = omapdss_default_get_resolution;
54477+ *(void **)&drv->get_resolution = omapdss_default_get_resolution;
54478 if (drv && drv->get_recommended_bpp == NULL)
54479- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
54480+ *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
54481 if (drv && drv->get_timings == NULL)
54482- drv->get_timings = omapdss_default_get_timings;
54483+ *(void **)&drv->get_timings = omapdss_default_get_timings;
54484+ pax_close_kernel();
54485
54486 mutex_lock(&panel_list_mutex);
54487 list_add_tail(&dssdev->panel_list, &panel_list);
54488diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
54489index 83433cb..71e9b98 100644
54490--- a/drivers/video/s1d13xxxfb.c
54491+++ b/drivers/video/s1d13xxxfb.c
54492@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
54493
54494 switch(prod_id) {
54495 case S1D13506_PROD_ID: /* activate acceleration */
54496- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
54497- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
54498+ pax_open_kernel();
54499+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
54500+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
54501+ pax_close_kernel();
54502 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
54503 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
54504 break;
54505diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
54506index d513ed6..90b0de9 100644
54507--- a/drivers/video/smscufx.c
54508+++ b/drivers/video/smscufx.c
54509@@ -1175,7 +1175,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
54510 fb_deferred_io_cleanup(info);
54511 kfree(info->fbdefio);
54512 info->fbdefio = NULL;
54513- info->fbops->fb_mmap = ufx_ops_mmap;
54514+ pax_open_kernel();
54515+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
54516+ pax_close_kernel();
54517 }
54518
54519 pr_debug("released /dev/fb%d user=%d count=%d",
54520diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
54521index 025f14e..20eb4db 100644
54522--- a/drivers/video/udlfb.c
54523+++ b/drivers/video/udlfb.c
54524@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
54525 dlfb_urb_completion(urb);
54526
54527 error:
54528- atomic_add(bytes_sent, &dev->bytes_sent);
54529- atomic_add(bytes_identical, &dev->bytes_identical);
54530- atomic_add(width*height*2, &dev->bytes_rendered);
54531+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
54532+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
54533+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
54534 end_cycles = get_cycles();
54535- atomic_add(((unsigned int) ((end_cycles - start_cycles)
54536+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
54537 >> 10)), /* Kcycles */
54538 &dev->cpu_kcycles_used);
54539
54540@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
54541 dlfb_urb_completion(urb);
54542
54543 error:
54544- atomic_add(bytes_sent, &dev->bytes_sent);
54545- atomic_add(bytes_identical, &dev->bytes_identical);
54546- atomic_add(bytes_rendered, &dev->bytes_rendered);
54547+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
54548+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
54549+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
54550 end_cycles = get_cycles();
54551- atomic_add(((unsigned int) ((end_cycles - start_cycles)
54552+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
54553 >> 10)), /* Kcycles */
54554 &dev->cpu_kcycles_used);
54555 }
54556@@ -993,7 +993,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
54557 fb_deferred_io_cleanup(info);
54558 kfree(info->fbdefio);
54559 info->fbdefio = NULL;
54560- info->fbops->fb_mmap = dlfb_ops_mmap;
54561+ pax_open_kernel();
54562+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
54563+ pax_close_kernel();
54564 }
54565
54566 pr_warn("released /dev/fb%d user=%d count=%d\n",
54567@@ -1376,7 +1378,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
54568 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54569 struct dlfb_data *dev = fb_info->par;
54570 return snprintf(buf, PAGE_SIZE, "%u\n",
54571- atomic_read(&dev->bytes_rendered));
54572+ atomic_read_unchecked(&dev->bytes_rendered));
54573 }
54574
54575 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
54576@@ -1384,7 +1386,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
54577 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54578 struct dlfb_data *dev = fb_info->par;
54579 return snprintf(buf, PAGE_SIZE, "%u\n",
54580- atomic_read(&dev->bytes_identical));
54581+ atomic_read_unchecked(&dev->bytes_identical));
54582 }
54583
54584 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
54585@@ -1392,7 +1394,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
54586 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54587 struct dlfb_data *dev = fb_info->par;
54588 return snprintf(buf, PAGE_SIZE, "%u\n",
54589- atomic_read(&dev->bytes_sent));
54590+ atomic_read_unchecked(&dev->bytes_sent));
54591 }
54592
54593 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
54594@@ -1400,7 +1402,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
54595 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54596 struct dlfb_data *dev = fb_info->par;
54597 return snprintf(buf, PAGE_SIZE, "%u\n",
54598- atomic_read(&dev->cpu_kcycles_used));
54599+ atomic_read_unchecked(&dev->cpu_kcycles_used));
54600 }
54601
54602 static ssize_t edid_show(
54603@@ -1460,10 +1462,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
54604 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54605 struct dlfb_data *dev = fb_info->par;
54606
54607- atomic_set(&dev->bytes_rendered, 0);
54608- atomic_set(&dev->bytes_identical, 0);
54609- atomic_set(&dev->bytes_sent, 0);
54610- atomic_set(&dev->cpu_kcycles_used, 0);
54611+ atomic_set_unchecked(&dev->bytes_rendered, 0);
54612+ atomic_set_unchecked(&dev->bytes_identical, 0);
54613+ atomic_set_unchecked(&dev->bytes_sent, 0);
54614+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
54615
54616 return count;
54617 }
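
The udlfb counters above become *_unchecked because under PaX REFCOUNT, plain atomic_t arithmetic traps on overflow to catch reference-count bugs; throughput statistics like bytes_sent may wrap harmlessly, so they are moved to the unchecked variants. A C11 sketch of such a wrap-tolerant counter (hypothetical names):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint bytes_sent;   /* unsigned: wraparound is defined */

static void record_send(unsigned int n)
{
	atomic_fetch_add_explicit(&bytes_sent, n, memory_order_relaxed);
}

int main(void)
{
	atomic_store(&bytes_sent, 0xffffffffu); /* force a wrap */
	record_send(16);
	printf("bytes_sent wrapped to %u\n",
	       atomic_load(&bytes_sent));       /* prints 15, no trap */
	return 0;
}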
54618diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
54619index 256fba7..6e75516 100644
54620--- a/drivers/video/uvesafb.c
54621+++ b/drivers/video/uvesafb.c
54622@@ -19,6 +19,7 @@
54623 #include <linux/io.h>
54624 #include <linux/mutex.h>
54625 #include <linux/slab.h>
54626+#include <linux/moduleloader.h>
54627 #include <video/edid.h>
54628 #include <video/uvesafb.h>
54629 #ifdef CONFIG_X86
54630@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
54631 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
54632 par->pmi_setpal = par->ypan = 0;
54633 } else {
54634+
54635+#ifdef CONFIG_PAX_KERNEXEC
54636+#ifdef CONFIG_MODULES
54637+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
54638+#endif
54639+ if (!par->pmi_code) {
54640+ par->pmi_setpal = par->ypan = 0;
54641+ return 0;
54642+ }
54643+#endif
54644+
54645 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
54646 + task->t.regs.edi);
54647+
54648+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
54649+ pax_open_kernel();
54650+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
54651+ pax_close_kernel();
54652+
54653+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
54654+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
54655+#else
54656 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
54657 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
54658+#endif
54659+
54660 printk(KERN_INFO "uvesafb: protected mode interface info at "
54661 "%04x:%04x\n",
54662 (u16)task->t.regs.es, (u16)task->t.regs.edi);
54663@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
54664 par->ypan = ypan;
54665
54666 if (par->pmi_setpal || par->ypan) {
54667+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
54668 if (__supported_pte_mask & _PAGE_NX) {
54669 par->pmi_setpal = par->ypan = 0;
54670 printk(KERN_WARNING "uvesafb: NX protection is active, "
54671 "better not use the PMI.\n");
54672- } else {
54673+ } else
54674+#endif
54675 uvesafb_vbe_getpmi(task, par);
54676- }
54677 }
54678 #else
54679 /* The protected mode interface is not available on non-x86. */
54680@@ -1453,8 +1477,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
54681 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
54682
54683 /* Disable blanking if the user requested so. */
54684- if (!blank)
54685- info->fbops->fb_blank = NULL;
54686+ if (!blank) {
54687+ pax_open_kernel();
54688+ *(void **)&info->fbops->fb_blank = NULL;
54689+ pax_close_kernel();
54690+ }
54691
54692 /*
54693 * Find out how much IO memory is required for the mode with
54694@@ -1530,8 +1557,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
54695 info->flags = FBINFO_FLAG_DEFAULT |
54696 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
54697
54698- if (!par->ypan)
54699- info->fbops->fb_pan_display = NULL;
54700+ if (!par->ypan) {
54701+ pax_open_kernel();
54702+ *(void **)&info->fbops->fb_pan_display = NULL;
54703+ pax_close_kernel();
54704+ }
54705 }
54706
54707 static void uvesafb_init_mtrr(struct fb_info *info)
54708@@ -1792,6 +1822,11 @@ out_mode:
54709 out:
54710 kfree(par->vbe_modes);
54711
54712+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
54713+ if (par->pmi_code)
54714+ module_free_exec(NULL, par->pmi_code);
54715+#endif
54716+
54717 framebuffer_release(info);
54718 return err;
54719 }
54720@@ -1816,6 +1851,12 @@ static int uvesafb_remove(struct platform_device *dev)
54721 kfree(par->vbe_modes);
54722 kfree(par->vbe_state_orig);
54723 kfree(par->vbe_state_saved);
54724+
54725+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
54726+ if (par->pmi_code)
54727+ module_free_exec(NULL, par->pmi_code);
54728+#endif
54729+
54730 }
54731
54732 framebuffer_release(info);
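
Under KERNEXEC, kernel data is non-executable, so the VESA protected-mode interface code fetched from the BIOS can no longer be called in place; the uvesafb hunks above allocate an executable buffer with module_alloc_exec(), copy the PMI stub into it inside an open/close window, and free it on the error and remove paths. A user-space analogue of that staging discipline (write, then execute, never both at once); the stub bytes are a hypothetical example and are deliberately never called here:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	unsigned char pmi_stub[] = { 0xc3 };   /* x86 "ret", as an example */
	long pg = sysconf(_SC_PAGESIZE);

	void *exec_buf = mmap(NULL, pg, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (exec_buf == MAP_FAILED)
		return 1;
	memcpy(exec_buf, pmi_stub, sizeof(pmi_stub));      /* stage as data */
	if (mprotect(exec_buf, pg, PROT_READ | PROT_EXEC)) /* flip to exec */
		return 1;
	printf("stub staged at %p, now read+exec only\n", exec_buf);
	munmap(exec_buf, pg);                  /* mirrors module_free_exec */
	return 0;
}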
54733diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
54734index 1c7da3b..56ea0bd 100644
54735--- a/drivers/video/vesafb.c
54736+++ b/drivers/video/vesafb.c
54737@@ -9,6 +9,7 @@
54738 */
54739
54740 #include <linux/module.h>
54741+#include <linux/moduleloader.h>
54742 #include <linux/kernel.h>
54743 #include <linux/errno.h>
54744 #include <linux/string.h>
54745@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
54746 static int vram_total; /* Set total amount of memory */
54747 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
54748 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
54749-static void (*pmi_start)(void) __read_mostly;
54750-static void (*pmi_pal) (void) __read_mostly;
54751+static void (*pmi_start)(void) __read_only;
54752+static void (*pmi_pal) (void) __read_only;
54753 static int depth __read_mostly;
54754 static int vga_compat __read_mostly;
54755 /* --------------------------------------------------------------------- */
54756@@ -234,6 +235,7 @@ static int vesafb_probe(struct platform_device *dev)
54757 unsigned int size_remap;
54758 unsigned int size_total;
54759 char *option = NULL;
54760+ void *pmi_code = NULL;
54761
54762 /* ignore error return of fb_get_options */
54763 fb_get_options("vesafb", &option);
54764@@ -280,10 +282,6 @@ static int vesafb_probe(struct platform_device *dev)
54765 size_remap = size_total;
54766 vesafb_fix.smem_len = size_remap;
54767
54768-#ifndef __i386__
54769- screen_info.vesapm_seg = 0;
54770-#endif
54771-
54772 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
54773 printk(KERN_WARNING
54774 "vesafb: cannot reserve video memory at 0x%lx\n",
54775@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
54776 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
54777 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
54778
54779+#ifdef __i386__
54780+
54781+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
54782+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
54783+ if (!pmi_code)
54784+#elif !defined(CONFIG_PAX_KERNEXEC)
54785+ if (0)
54786+#endif
54787+
54788+#endif
54789+ screen_info.vesapm_seg = 0;
54790+
54791 if (screen_info.vesapm_seg) {
54792- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
54793- screen_info.vesapm_seg,screen_info.vesapm_off);
54794+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
54795+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
54796 }
54797
54798 if (screen_info.vesapm_seg < 0xc000)
54799@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
54800
54801 if (ypan || pmi_setpal) {
54802 unsigned short *pmi_base;
54803+
54804 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
54805- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
54806- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
54807+
54808+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
54809+ pax_open_kernel();
54810+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
54811+#else
54812+ pmi_code = pmi_base;
54813+#endif
54814+
54815+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
54816+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
54817+
54818+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
54819+ pmi_start = ktva_ktla(pmi_start);
54820+ pmi_pal = ktva_ktla(pmi_pal);
54821+ pax_close_kernel();
54822+#endif
54823+
54824 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
54825 if (pmi_base[3]) {
54826 printk(KERN_INFO "vesafb: pmi: ports = ");
54827@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
54828 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
54829 (ypan ? FBINFO_HWACCEL_YPAN : 0);
54830
54831- if (!ypan)
54832- info->fbops->fb_pan_display = NULL;
54833+ if (!ypan) {
54834+ pax_open_kernel();
54835+ *(void **)&info->fbops->fb_pan_display = NULL;
54836+ pax_close_kernel();
54837+ }
54838
54839 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
54840 err = -ENOMEM;
54841@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
54842 fb_info(info, "%s frame buffer device\n", info->fix.id);
54843 return 0;
54844 err:
54845+
54846+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
54847+ module_free_exec(NULL, pmi_code);
54848+#endif
54849+
54850 if (info->screen_base)
54851 iounmap(info->screen_base);
54852 framebuffer_release(info);
54853diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
54854index 88714ae..16c2e11 100644
54855--- a/drivers/video/via/via_clock.h
54856+++ b/drivers/video/via/via_clock.h
54857@@ -56,7 +56,7 @@ struct via_clock {
54858
54859 void (*set_engine_pll_state)(u8 state);
54860 void (*set_engine_pll)(struct via_pll_config config);
54861-};
54862+} __no_const;
54863
54864
54865 static inline u32 get_pll_internal_frequency(u32 ref_freq,
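
The __no_const added to via_clock above is the opt-out: the constify plugin forces structs made solely of function pointers into read-only memory, but this one is assembled at run time by the driver, so it must stay writable. A sketch of how such an annotation can be modeled in a build without the plugin, where the attribute must expand to nothing (names are illustrative):

#include <stdio.h>

#ifndef __no_const
#define __no_const /* plugin attribute absent in a plain build */
#endif

struct via_clock_like {
	void (*set_state)(int state);
} __no_const;

static void real_set_state(int state) { printf("state=%d\n", state); }

int main(void)
{
	struct via_clock_like clk; /* writable, legal thanks to __no_const */
	clk.set_state = real_set_state;
	clk.set_state(1);
	return 0;
}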
54866diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
54867index fef20db..d28b1ab 100644
54868--- a/drivers/xen/xenfs/xenstored.c
54869+++ b/drivers/xen/xenfs/xenstored.c
54870@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
54871 static int xsd_kva_open(struct inode *inode, struct file *file)
54872 {
54873 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
54874+#ifdef CONFIG_GRKERNSEC_HIDESYM
54875+ NULL);
54876+#else
54877 xen_store_interface);
54878+#endif
54879+
54880 if (!file->private_data)
54881 return -ENOMEM;
54882 return 0;
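
The xenstored hunk above swaps a real kernel virtual address for NULL when GRKERNSEC_HIDESYM is set, since any pointer shown to user space leaks kernel layout to exploits. A user-space sketch of the same redaction idea (hypothetical names; glibc renders the NULL case as "(nil)"):

#include <stdio.h>

#define HIDESYM 1

static void show_handle(const void *kva, char *buf, size_t len)
{
#if HIDESYM
	snprintf(buf, len, "0x%p", (void *)NULL); /* stable, uninformative */
#else
	snprintf(buf, len, "0x%p", kva);          /* leaks the address */
#endif
}

int main(void)
{
	char name[32];
	int secret;
	show_handle(&secret, name, sizeof(name));
	printf("exported name: %s\n", name);
	return 0;
}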
54883diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
54884index 9ff073f..05cef23 100644
54885--- a/fs/9p/vfs_addr.c
54886+++ b/fs/9p/vfs_addr.c
54887@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
54888
54889 retval = v9fs_file_write_internal(inode,
54890 v9inode->writeback_fid,
54891- (__force const char __user *)buffer,
54892+ (const char __force_user *)buffer,
54893 len, &offset, 0);
54894 if (retval > 0)
54895 retval = 0;
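
The cast rewritten above, and the similar one in the autofs4 hunk further down, concerns sparse address spaces: __user pointers live in a separate address space from kernel pointers, and __force_user (apparently grsecurity shorthand for __force __user) documents a deliberate crossing so that accidental ones still get flagged. A sketch of the underlying annotations; plain compilers see empty macros:

#include <stdio.h>

#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

static long fake_write(const char __user *buf, long len)
{
	/* a real kernel would copy_from_user() here */
	return len;
}

int main(void)
{
	const char *kbuf = "kernel buffer";
	/* the explicit __force cast is the audited crossing point */
	long n = fake_write((__force const char __user *)kbuf, 13);
	printf("wrote %ld bytes\n", n);
	return 0;
}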
54896diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
54897index 4e65aa9..043dc9a 100644
54898--- a/fs/9p/vfs_inode.c
54899+++ b/fs/9p/vfs_inode.c
54900@@ -1306,7 +1306,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
54901 void
54902 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
54903 {
54904- char *s = nd_get_link(nd);
54905+ const char *s = nd_get_link(nd);
54906
54907 p9_debug(P9_DEBUG_VFS, " %s %s\n",
54908 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
54909diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
54910index 370b24c..ff0be7b 100644
54911--- a/fs/Kconfig.binfmt
54912+++ b/fs/Kconfig.binfmt
54913@@ -103,7 +103,7 @@ config HAVE_AOUT
54914
54915 config BINFMT_AOUT
54916 tristate "Kernel support for a.out and ECOFF binaries"
54917- depends on HAVE_AOUT
54918+ depends on HAVE_AOUT && BROKEN
54919 ---help---
54920 A.out (Assembler.OUTput) is a set of formats for libraries and
54921 executables used in the earliest versions of UNIX. Linux used
54922diff --git a/fs/afs/inode.c b/fs/afs/inode.c
54923index ce25d75..dc09eeb 100644
54924--- a/fs/afs/inode.c
54925+++ b/fs/afs/inode.c
54926@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
54927 struct afs_vnode *vnode;
54928 struct super_block *sb;
54929 struct inode *inode;
54930- static atomic_t afs_autocell_ino;
54931+ static atomic_unchecked_t afs_autocell_ino;
54932
54933 _enter("{%x:%u},%*.*s,",
54934 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
54935@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
54936 data.fid.unique = 0;
54937 data.fid.vnode = 0;
54938
54939- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
54940+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
54941 afs_iget5_autocell_test, afs_iget5_set,
54942 &data);
54943 if (!inode) {
54944diff --git a/fs/aio.c b/fs/aio.c
54945index 062a5f6..e5618e0 100644
54946--- a/fs/aio.c
54947+++ b/fs/aio.c
54948@@ -374,7 +374,7 @@ static int aio_setup_ring(struct kioctx *ctx)
54949 size += sizeof(struct io_event) * nr_events;
54950
54951 nr_pages = PFN_UP(size);
54952- if (nr_pages < 0)
54953+ if (nr_pages <= 0)
54954 return -EINVAL;
54955
54956 file = aio_private_file(ctx, nr_pages);
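
The aio hunk tightens "< 0" to "<= 0" because a size that rounds or wraps to zero pages would pass the old test and reach the allocator with a bogus request. A small worked example with hypothetical numbers:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PFN_UP(x) (((x) + PAGE_SIZE - 1) / PAGE_SIZE)

static int setup_ring(unsigned long size)
{
	long nr_pages = (long)PFN_UP(size);

	if (nr_pages <= 0)          /* the patched check */
		return -22;         /* -EINVAL */
	printf("allocating %ld page(s)\n", nr_pages);
	return 0;
}

int main(void)
{
	printf("size 0     -> %d\n", setup_ring(0));      /* rejected */
	printf("size 12288 -> %d\n", setup_ring(12288));  /* 3 pages */
	return 0;
}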
54957diff --git a/fs/attr.c b/fs/attr.c
54958index 267968d..5dd8f96 100644
54959--- a/fs/attr.c
54960+++ b/fs/attr.c
54961@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
54962 unsigned long limit;
54963
54964 limit = rlimit(RLIMIT_FSIZE);
54965+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
54966 if (limit != RLIM_INFINITY && offset > limit)
54967 goto out_sig;
54968 if (offset > inode->i_sb->s_maxbytes)
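diff hunks in attr.c and binfmt_aout insert gr_learn_resource() just before the rlimit comparison: in grsecurity's RBAC learning mode, the requested amount is recorded before the limit is enforced, so a policy can later be generated from the observed maxima. A minimal user-space sketch of that log-before-enforce shape (names are hypothetical):

#include <stdio.h>

static unsigned long observed_max_fsize;

static void learn_resource(unsigned long wanted)
{
	if (wanted > observed_max_fsize)
		observed_max_fsize = wanted;  /* would be logged in-kernel */
}

static int check_fsize(unsigned long offset, unsigned long limit)
{
	learn_resource(offset);               /* record first... */
	return offset > limit ? -1 : 0;       /* ...then enforce */
}

int main(void)
{
	check_fsize(1UL << 20, 1UL << 24);
	check_fsize(1UL << 22, 1UL << 24);
	printf("learned max file size: %lu bytes\n", observed_max_fsize);
	return 0;
}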
54969diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
54970index 689e40d..515cac5 100644
54971--- a/fs/autofs4/waitq.c
54972+++ b/fs/autofs4/waitq.c
54973@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
54974 {
54975 unsigned long sigpipe, flags;
54976 mm_segment_t fs;
54977- const char *data = (const char *)addr;
54978+ const char __user *data = (const char __force_user *)addr;
54979 ssize_t wr = 0;
54980
54981 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
54982@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
54983 return 1;
54984 }
54985
54986+#ifdef CONFIG_GRKERNSEC_HIDESYM
54987+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
54988+#endif
54989+
54990 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
54991 enum autofs_notify notify)
54992 {
54993@@ -373,7 +377,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
54994
54995 /* If this is a direct mount request create a dummy name */
54996 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
54997+#ifdef CONFIG_GRKERNSEC_HIDESYM
54998+ /* this name does get written to userland via autofs4_write() */
54999+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
55000+#else
55001 qstr.len = sprintf(name, "%p", dentry);
55002+#endif
55003 else {
55004 qstr.len = autofs4_getpath(sbi, dentry, &name);
55005 if (!qstr.len) {
55006diff --git a/fs/befs/endian.h b/fs/befs/endian.h
55007index 2722387..56059b5 100644
55008--- a/fs/befs/endian.h
55009+++ b/fs/befs/endian.h
55010@@ -11,7 +11,7 @@
55011
55012 #include <asm/byteorder.h>
55013
55014-static inline u64
55015+static inline u64 __intentional_overflow(-1)
55016 fs64_to_cpu(const struct super_block *sb, fs64 n)
55017 {
55018 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
55019@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
55020 return (__force fs64)cpu_to_be64(n);
55021 }
55022
55023-static inline u32
55024+static inline u32 __intentional_overflow(-1)
55025 fs32_to_cpu(const struct super_block *sb, fs32 n)
55026 {
55027 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
55028@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
55029 return (__force fs32)cpu_to_be32(n);
55030 }
55031
55032-static inline u16
55033+static inline u16 __intentional_overflow(-1)
55034 fs16_to_cpu(const struct super_block *sb, fs16 n)
55035 {
55036 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
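
The __intentional_overflow(-1) annotations above tie into the PaX size_overflow plugin, which instruments integer expressions and traps unexpected wraparound; byte-order conversions legitimately produce any bit pattern, so they opt out. A sketch of the pattern with a fallback macro for plain builds (function name is illustrative):

#include <stdint.h>
#include <stdio.h>

#ifndef __intentional_overflow
#define __intentional_overflow(...) /* plugin-only attribute */
#endif

static inline uint32_t __intentional_overflow(-1)
fs32_to_cpu_sketch(uint32_t n)
{
	/* byte swap: every output bit pattern is legal here */
	return (n >> 24) | ((n >> 8) & 0xff00) |
	       ((n << 8) & 0xff0000) | (n << 24);
}

int main(void)
{
	printf("0x%08x\n", fs32_to_cpu_sketch(0x12345678u)); /* 0x78563412 */
	return 0;
}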
55037diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
55038index ca0ba15..0fa3257 100644
55039--- a/fs/binfmt_aout.c
55040+++ b/fs/binfmt_aout.c
55041@@ -16,6 +16,7 @@
55042 #include <linux/string.h>
55043 #include <linux/fs.h>
55044 #include <linux/file.h>
55045+#include <linux/security.h>
55046 #include <linux/stat.h>
55047 #include <linux/fcntl.h>
55048 #include <linux/ptrace.h>
55049@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
55050 #endif
55051 # define START_STACK(u) ((void __user *)u.start_stack)
55052
55053+ memset(&dump, 0, sizeof(dump));
55054+
55055 fs = get_fs();
55056 set_fs(KERNEL_DS);
55057 has_dumped = 1;
55058@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
55059
55060 /* If the size of the dump file exceeds the rlimit, then see what would happen
55061 if we wrote the stack, but not the data area. */
55062+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
55063 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
55064 dump.u_dsize = 0;
55065
55066 /* Make sure we have enough room to write the stack and data areas. */
55067+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
55068 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
55069 dump.u_ssize = 0;
55070
55071@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
55072 rlim = rlimit(RLIMIT_DATA);
55073 if (rlim >= RLIM_INFINITY)
55074 rlim = ~0;
55075+
55076+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
55077 if (ex.a_data + ex.a_bss > rlim)
55078 return -ENOMEM;
55079
55080@@ -264,6 +271,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
55081
55082 install_exec_creds(bprm);
55083
55084+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
55085+ current->mm->pax_flags = 0UL;
55086+#endif
55087+
55088+#ifdef CONFIG_PAX_PAGEEXEC
55089+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
55090+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
55091+
55092+#ifdef CONFIG_PAX_EMUTRAMP
55093+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
55094+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
55095+#endif
55096+
55097+#ifdef CONFIG_PAX_MPROTECT
55098+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
55099+ current->mm->pax_flags |= MF_PAX_MPROTECT;
55100+#endif
55101+
55102+ }
55103+#endif
55104+
55105 if (N_MAGIC(ex) == OMAGIC) {
55106 unsigned long text_addr, map_size;
55107 loff_t pos;
55108@@ -321,7 +349,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
55109 }
55110
55111 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
55112- PROT_READ | PROT_WRITE | PROT_EXEC,
55113+ PROT_READ | PROT_WRITE,
55114 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
55115 fd_offset + ex.a_text);
55116 if (error != N_DATADDR(ex)) {
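
Note the opt-out semantics in the a.out loader change above: each F_PAX_* header bit disables a hardening feature, so a legacy binary with no bits set gets full protection by default, and the data segment additionally loses PROT_EXEC. A small sketch of that inverted flag parsing (constants are hypothetical stand-ins):

#include <stdio.h>

#define F_PAX_PAGEEXEC  0x01   /* set in header => opt out of PAGEEXEC */
#define F_PAX_MPROTECT  0x02   /* set in header => opt out of MPROTECT */

#define MF_PAX_PAGEEXEC 0x01
#define MF_PAX_MPROTECT 0x02

static unsigned long parse_pax(unsigned int header_flags)
{
	unsigned long pax = 0;

	if (!(header_flags & F_PAX_PAGEEXEC))
		pax |= MF_PAX_PAGEEXEC;    /* enabled unless opted out */
	if (!(header_flags & F_PAX_MPROTECT))
		pax |= MF_PAX_MPROTECT;
	return pax;
}

int main(void)
{
	printf("no opt-outs: %#lx\n", parse_pax(0));           /* 0x3 */
	printf("PAGEEXEC opted out: %#lx\n",
	       parse_pax(F_PAX_PAGEEXEC));                     /* 0x2 */
	return 0;
}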
55117diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
55118index 571a423..dbb9c6c 100644
55119--- a/fs/binfmt_elf.c
55120+++ b/fs/binfmt_elf.c
55121@@ -34,6 +34,7 @@
55122 #include <linux/utsname.h>
55123 #include <linux/coredump.h>
55124 #include <linux/sched.h>
55125+#include <linux/xattr.h>
55126 #include <asm/uaccess.h>
55127 #include <asm/param.h>
55128 #include <asm/page.h>
55129@@ -60,6 +61,14 @@ static int elf_core_dump(struct coredump_params *cprm);
55130 #define elf_core_dump NULL
55131 #endif
55132
55133+#ifdef CONFIG_PAX_MPROTECT
55134+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
55135+#endif
55136+
55137+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55138+static void elf_handle_mmap(struct file *file);
55139+#endif
55140+
55141 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
55142 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
55143 #else
55144@@ -79,6 +88,15 @@ static struct linux_binfmt elf_format = {
55145 .load_binary = load_elf_binary,
55146 .load_shlib = load_elf_library,
55147 .core_dump = elf_core_dump,
55148+
55149+#ifdef CONFIG_PAX_MPROTECT
55150+ .handle_mprotect= elf_handle_mprotect,
55151+#endif
55152+
55153+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55154+ .handle_mmap = elf_handle_mmap,
55155+#endif
55156+
55157 .min_coredump = ELF_EXEC_PAGESIZE,
55158 };
55159
55160@@ -86,6 +104,8 @@ static struct linux_binfmt elf_format = {
55161
55162 static int set_brk(unsigned long start, unsigned long end)
55163 {
55164+ unsigned long e = end;
55165+
55166 start = ELF_PAGEALIGN(start);
55167 end = ELF_PAGEALIGN(end);
55168 if (end > start) {
55169@@ -94,7 +114,7 @@ static int set_brk(unsigned long start, unsigned long end)
55170 if (BAD_ADDR(addr))
55171 return addr;
55172 }
55173- current->mm->start_brk = current->mm->brk = end;
55174+ current->mm->start_brk = current->mm->brk = e;
55175 return 0;
55176 }
55177
55178@@ -155,12 +175,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
55179 elf_addr_t __user *u_rand_bytes;
55180 const char *k_platform = ELF_PLATFORM;
55181 const char *k_base_platform = ELF_BASE_PLATFORM;
55182- unsigned char k_rand_bytes[16];
55183+ u32 k_rand_bytes[4];
55184 int items;
55185 elf_addr_t *elf_info;
55186 int ei_index = 0;
55187 const struct cred *cred = current_cred();
55188 struct vm_area_struct *vma;
55189+ unsigned long saved_auxv[AT_VECTOR_SIZE];
55190
55191 /*
55192 * In some cases (e.g. Hyper-Threading), we want to avoid L1
55193@@ -202,8 +223,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
55194 * Generate 16 random bytes for userspace PRNG seeding.
55195 */
55196 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
55197- u_rand_bytes = (elf_addr_t __user *)
55198- STACK_ALLOC(p, sizeof(k_rand_bytes));
55199+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
55200+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
55201+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
55202+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
55203+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
55204+ u_rand_bytes = (elf_addr_t __user *) p;
55205 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
55206 return -EFAULT;
55207
55208@@ -318,9 +343,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
55209 return -EFAULT;
55210 current->mm->env_end = p;
55211
55212+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
55213+
55214 /* Put the elf_info on the stack in the right place. */
55215 sp = (elf_addr_t __user *)envp + 1;
55216- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
55217+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
55218 return -EFAULT;
55219 return 0;
55220 }
55221@@ -388,15 +415,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
55222 an ELF header */
55223
55224 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
55225- struct file *interpreter, unsigned long *interp_map_addr,
55226- unsigned long no_base)
55227+ struct file *interpreter, unsigned long no_base)
55228 {
55229 struct elf_phdr *elf_phdata;
55230 struct elf_phdr *eppnt;
55231- unsigned long load_addr = 0;
55232+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
55233 int load_addr_set = 0;
55234 unsigned long last_bss = 0, elf_bss = 0;
55235- unsigned long error = ~0UL;
55236+ unsigned long error = -EINVAL;
55237 unsigned long total_size;
55238 int retval, i, size;
55239
55240@@ -442,6 +468,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
55241 goto out_close;
55242 }
55243
55244+#ifdef CONFIG_PAX_SEGMEXEC
55245+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
55246+ pax_task_size = SEGMEXEC_TASK_SIZE;
55247+#endif
55248+
55249 eppnt = elf_phdata;
55250 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
55251 if (eppnt->p_type == PT_LOAD) {
55252@@ -465,8 +496,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
55253 map_addr = elf_map(interpreter, load_addr + vaddr,
55254 eppnt, elf_prot, elf_type, total_size);
55255 total_size = 0;
55256- if (!*interp_map_addr)
55257- *interp_map_addr = map_addr;
55258 error = map_addr;
55259 if (BAD_ADDR(map_addr))
55260 goto out_close;
55261@@ -485,8 +514,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
55262 k = load_addr + eppnt->p_vaddr;
55263 if (BAD_ADDR(k) ||
55264 eppnt->p_filesz > eppnt->p_memsz ||
55265- eppnt->p_memsz > TASK_SIZE ||
55266- TASK_SIZE - eppnt->p_memsz < k) {
55267+ eppnt->p_memsz > pax_task_size ||
55268+ pax_task_size - eppnt->p_memsz < k) {
55269 error = -ENOMEM;
55270 goto out_close;
55271 }
55272@@ -525,9 +554,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
55273 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
55274
55275 /* Map the last of the bss segment */
55276- error = vm_brk(elf_bss, last_bss - elf_bss);
55277- if (BAD_ADDR(error))
55278- goto out_close;
55279+ if (last_bss > elf_bss) {
55280+ error = vm_brk(elf_bss, last_bss - elf_bss);
55281+ if (BAD_ADDR(error))
55282+ goto out_close;
55283+ }
55284 }
55285
55286 error = load_addr;
55287@@ -538,6 +569,336 @@ out:
55288 return error;
55289 }
55290
55291+#ifdef CONFIG_PAX_PT_PAX_FLAGS
55292+#ifdef CONFIG_PAX_SOFTMODE
55293+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
55294+{
55295+ unsigned long pax_flags = 0UL;
55296+
55297+#ifdef CONFIG_PAX_PAGEEXEC
55298+ if (elf_phdata->p_flags & PF_PAGEEXEC)
55299+ pax_flags |= MF_PAX_PAGEEXEC;
55300+#endif
55301+
55302+#ifdef CONFIG_PAX_SEGMEXEC
55303+ if (elf_phdata->p_flags & PF_SEGMEXEC)
55304+ pax_flags |= MF_PAX_SEGMEXEC;
55305+#endif
55306+
55307+#ifdef CONFIG_PAX_EMUTRAMP
55308+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
55309+ pax_flags |= MF_PAX_EMUTRAMP;
55310+#endif
55311+
55312+#ifdef CONFIG_PAX_MPROTECT
55313+ if (elf_phdata->p_flags & PF_MPROTECT)
55314+ pax_flags |= MF_PAX_MPROTECT;
55315+#endif
55316+
55317+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
55318+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
55319+ pax_flags |= MF_PAX_RANDMMAP;
55320+#endif
55321+
55322+ return pax_flags;
55323+}
55324+#endif
55325+
55326+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
55327+{
55328+ unsigned long pax_flags = 0UL;
55329+
55330+#ifdef CONFIG_PAX_PAGEEXEC
55331+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
55332+ pax_flags |= MF_PAX_PAGEEXEC;
55333+#endif
55334+
55335+#ifdef CONFIG_PAX_SEGMEXEC
55336+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
55337+ pax_flags |= MF_PAX_SEGMEXEC;
55338+#endif
55339+
55340+#ifdef CONFIG_PAX_EMUTRAMP
55341+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
55342+ pax_flags |= MF_PAX_EMUTRAMP;
55343+#endif
55344+
55345+#ifdef CONFIG_PAX_MPROTECT
55346+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
55347+ pax_flags |= MF_PAX_MPROTECT;
55348+#endif
55349+
55350+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
55351+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
55352+ pax_flags |= MF_PAX_RANDMMAP;
55353+#endif
55354+
55355+ return pax_flags;
55356+}
55357+#endif
55358+
55359+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
55360+#ifdef CONFIG_PAX_SOFTMODE
55361+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
55362+{
55363+ unsigned long pax_flags = 0UL;
55364+
55365+#ifdef CONFIG_PAX_PAGEEXEC
55366+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
55367+ pax_flags |= MF_PAX_PAGEEXEC;
55368+#endif
55369+
55370+#ifdef CONFIG_PAX_SEGMEXEC
55371+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
55372+ pax_flags |= MF_PAX_SEGMEXEC;
55373+#endif
55374+
55375+#ifdef CONFIG_PAX_EMUTRAMP
55376+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
55377+ pax_flags |= MF_PAX_EMUTRAMP;
55378+#endif
55379+
55380+#ifdef CONFIG_PAX_MPROTECT
55381+ if (pax_flags_softmode & MF_PAX_MPROTECT)
55382+ pax_flags |= MF_PAX_MPROTECT;
55383+#endif
55384+
55385+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
55386+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
55387+ pax_flags |= MF_PAX_RANDMMAP;
55388+#endif
55389+
55390+ return pax_flags;
55391+}
55392+#endif
55393+
55394+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
55395+{
55396+ unsigned long pax_flags = 0UL;
55397+
55398+#ifdef CONFIG_PAX_PAGEEXEC
55399+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
55400+ pax_flags |= MF_PAX_PAGEEXEC;
55401+#endif
55402+
55403+#ifdef CONFIG_PAX_SEGMEXEC
55404+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
55405+ pax_flags |= MF_PAX_SEGMEXEC;
55406+#endif
55407+
55408+#ifdef CONFIG_PAX_EMUTRAMP
55409+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
55410+ pax_flags |= MF_PAX_EMUTRAMP;
55411+#endif
55412+
55413+#ifdef CONFIG_PAX_MPROTECT
55414+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
55415+ pax_flags |= MF_PAX_MPROTECT;
55416+#endif
55417+
55418+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
55419+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
55420+ pax_flags |= MF_PAX_RANDMMAP;
55421+#endif
55422+
55423+ return pax_flags;
55424+}
55425+#endif
55426+
55427+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
55428+static unsigned long pax_parse_defaults(void)
55429+{
55430+ unsigned long pax_flags = 0UL;
55431+
55432+#ifdef CONFIG_PAX_SOFTMODE
55433+ if (pax_softmode)
55434+ return pax_flags;
55435+#endif
55436+
55437+#ifdef CONFIG_PAX_PAGEEXEC
55438+ pax_flags |= MF_PAX_PAGEEXEC;
55439+#endif
55440+
55441+#ifdef CONFIG_PAX_SEGMEXEC
55442+ pax_flags |= MF_PAX_SEGMEXEC;
55443+#endif
55444+
55445+#ifdef CONFIG_PAX_MPROTECT
55446+ pax_flags |= MF_PAX_MPROTECT;
55447+#endif
55448+
55449+#ifdef CONFIG_PAX_RANDMMAP
55450+ if (randomize_va_space)
55451+ pax_flags |= MF_PAX_RANDMMAP;
55452+#endif
55453+
55454+ return pax_flags;
55455+}
55456+
55457+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
55458+{
55459+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
55460+
55461+#ifdef CONFIG_PAX_EI_PAX
55462+
55463+#ifdef CONFIG_PAX_SOFTMODE
55464+ if (pax_softmode)
55465+ return pax_flags;
55466+#endif
55467+
55468+ pax_flags = 0UL;
55469+
55470+#ifdef CONFIG_PAX_PAGEEXEC
55471+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
55472+ pax_flags |= MF_PAX_PAGEEXEC;
55473+#endif
55474+
55475+#ifdef CONFIG_PAX_SEGMEXEC
55476+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
55477+ pax_flags |= MF_PAX_SEGMEXEC;
55478+#endif
55479+
55480+#ifdef CONFIG_PAX_EMUTRAMP
55481+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
55482+ pax_flags |= MF_PAX_EMUTRAMP;
55483+#endif
55484+
55485+#ifdef CONFIG_PAX_MPROTECT
55486+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
55487+ pax_flags |= MF_PAX_MPROTECT;
55488+#endif
55489+
55490+#ifdef CONFIG_PAX_ASLR
55491+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
55492+ pax_flags |= MF_PAX_RANDMMAP;
55493+#endif
55494+
55495+#endif
55496+
55497+ return pax_flags;
55498+
55499+}
55500+
55501+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
55502+{
55503+
55504+#ifdef CONFIG_PAX_PT_PAX_FLAGS
55505+ unsigned long i;
55506+
55507+ for (i = 0UL; i < elf_ex->e_phnum; i++)
55508+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
55509+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
55510+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
55511+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
55512+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
55513+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
55514+ return PAX_PARSE_FLAGS_FALLBACK;
55515+
55516+#ifdef CONFIG_PAX_SOFTMODE
55517+ if (pax_softmode)
55518+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
55519+ else
55520+#endif
55521+
55522+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
55523+ break;
55524+ }
55525+#endif
55526+
55527+ return PAX_PARSE_FLAGS_FALLBACK;
55528+}
55529+
55530+static unsigned long pax_parse_xattr_pax(struct file * const file)
55531+{
55532+
55533+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
55534+ ssize_t xattr_size, i;
55535+ unsigned char xattr_value[sizeof("pemrs") - 1];
55536+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
55537+
55538+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
55539+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
55540+ return PAX_PARSE_FLAGS_FALLBACK;
55541+
55542+ for (i = 0; i < xattr_size; i++)
55543+ switch (xattr_value[i]) {
55544+ default:
55545+ return PAX_PARSE_FLAGS_FALLBACK;
55546+
55547+#define parse_flag(option1, option2, flag) \
55548+ case option1: \
55549+ if (pax_flags_hardmode & MF_PAX_##flag) \
55550+ return PAX_PARSE_FLAGS_FALLBACK;\
55551+ pax_flags_hardmode |= MF_PAX_##flag; \
55552+ break; \
55553+ case option2: \
55554+ if (pax_flags_softmode & MF_PAX_##flag) \
55555+ return PAX_PARSE_FLAGS_FALLBACK;\
55556+ pax_flags_softmode |= MF_PAX_##flag; \
55557+ break;
55558+
55559+ parse_flag('p', 'P', PAGEEXEC);
55560+ parse_flag('e', 'E', EMUTRAMP);
55561+ parse_flag('m', 'M', MPROTECT);
55562+ parse_flag('r', 'R', RANDMMAP);
55563+ parse_flag('s', 'S', SEGMEXEC);
55564+
55565+#undef parse_flag
55566+ }
55567+
55568+ if (pax_flags_hardmode & pax_flags_softmode)
55569+ return PAX_PARSE_FLAGS_FALLBACK;
55570+
55571+#ifdef CONFIG_PAX_SOFTMODE
55572+ if (pax_softmode)
55573+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
55574+ else
55575+#endif
55576+
55577+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
55578+#else
55579+ return PAX_PARSE_FLAGS_FALLBACK;
55580+#endif
55581+
55582+}
55583+
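
A minimal userspace model of the xattr parser above, assuming the same letter
alphabet: a lowercase letter files the flag under the hardmode set, an
uppercase letter under the softmode set, and any duplicate letter or a flag
present in both sets invalidates the whole attribute. The F_* bit values are
illustrative stand-ins for the kernel's MF_PAX_* definitions; only the
hardmode interpretation (listed features are switched off, the rest stay on)
is shown, and the softmode and randomize_va_space gating is omitted.

#include <stdio.h>

#define F_PAGEEXEC (1UL << 0)
#define F_EMUTRAMP (1UL << 1)
#define F_MPROTECT (1UL << 2)
#define F_RANDMMAP (1UL << 3)
#define F_SEGMEXEC (1UL << 4)

#define FALLBACK (~0UL)

static unsigned long parse_xattr(const char *val)
{
	unsigned long hard = 0, soft = 0;
	size_t i;

	for (i = 0; val[i]; i++) {
		unsigned long bit;

		switch (val[i] | 0x20) {	/* fold to lowercase */
		case 'p': bit = F_PAGEEXEC; break;
		case 'e': bit = F_EMUTRAMP; break;
		case 'm': bit = F_MPROTECT; break;
		case 'r': bit = F_RANDMMAP; break;
		case 's': bit = F_SEGMEXEC; break;
		default: return FALLBACK;	/* unknown letter */
		}
		if (val[i] >= 'a') {		/* lowercase: hardmode set */
			if (hard & bit)
				return FALLBACK;	/* duplicate */
			hard |= bit;
		} else {			/* uppercase: softmode set */
			if (soft & bit)
				return FALLBACK;
			soft |= bit;
		}
	}
	if (hard & soft)			/* same flag in both sets */
		return FALLBACK;
	/* hardmode semantics: listed features are disabled, the rest stay on */
	return (F_PAGEEXEC | F_EMUTRAMP | F_MPROTECT | F_RANDMMAP | F_SEGMEXEC) & ~hard;
}

int main(void)
{
	printf("\"em\" -> %#lx\n", parse_xattr("em"));	/* 0x19: EMUTRAMP+MPROTECT off */
	printf("\"mM\" -> %#lx\n", parse_xattr("mM"));	/* conflict: fallback */
	printf("\"x\"  -> %#lx\n", parse_xattr("x"));	/* unknown: fallback */
	return 0;
}

Feeding it "em" yields 0x19, while "mM" and unknown letters fall back, matching
the PAX_PARSE_FLAGS_FALLBACK paths in the hunk above.
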
55584+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
55585+{
55586+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
55587+
55588+ pax_flags = pax_parse_defaults();
55589+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
55590+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
55591+ xattr_pax_flags = pax_parse_xattr_pax(file);
55592+
55593+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
55594+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
55595+ pt_pax_flags != xattr_pax_flags)
55596+ return -EINVAL;
55597+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
55598+ pax_flags = xattr_pax_flags;
55599+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
55600+ pax_flags = pt_pax_flags;
55601+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
55602+ pax_flags = ei_pax_flags;
55603+
55604+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
55605+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
55606+ if ((__supported_pte_mask & _PAGE_NX))
55607+ pax_flags &= ~MF_PAX_SEGMEXEC;
55608+ else
55609+ pax_flags &= ~MF_PAX_PAGEEXEC;
55610+ }
55611+#endif
55612+
55613+ if (0 > pax_check_flags(&pax_flags))
55614+ return -EINVAL;
55615+
55616+ current->mm->pax_flags = pax_flags;
55617+ return 0;
55618+}
55619+#endif
55620+
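
The selection logic of pax_parse_pax_flags() above, reduced to a pure function
for clarity; FALLBACK stands in for PAX_PARSE_FLAGS_FALLBACK and the flag
values below are arbitrary. The only error case is two explicit markings
(program header and xattr) that disagree; otherwise the xattr wins over
PT_PAX_FLAGS, which wins over the legacy EI_PAX bits, which win over the
compiled-in defaults.

#include <stdio.h>
#include <errno.h>

#define FALLBACK (~0UL)

static long pick_flags(unsigned long def, unsigned long ei,
		       unsigned long pt, unsigned long xattr,
		       unsigned long *out)
{
	if (pt != FALLBACK && xattr != FALLBACK && pt != xattr)
		return -EINVAL;		/* two explicit markings disagree */
	if (xattr != FALLBACK)
		*out = xattr;		/* highest priority */
	else if (pt != FALLBACK)
		*out = pt;
	else if (ei != FALLBACK)
		*out = ei;		/* legacy EI_PAX header bits */
	else
		*out = def;		/* compiled-in defaults */
	return 0;
}

int main(void)
{
	unsigned long f;

	if (pick_flags(0x15, FALLBACK, 0x04, FALLBACK, &f) == 0)
		printf("PT only:     %#lx\n", f);	/* 0x4 */
	if (pick_flags(0x15, 0x01, FALLBACK, FALLBACK, &f) == 0)
		printf("EI_PAX only: %#lx\n", f);	/* 0x1 */
	printf("conflict rc: %d\n",
	       (int)pick_flags(0x15, FALLBACK, 0x04, 0x08, &f));	/* -22 */
	return 0;
}
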
55621 /*
55622 * These are the functions used to load ELF style executables and shared
55623 * libraries. There is no binary dependent code anywhere else.
55624@@ -554,6 +915,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
55625 {
55626 unsigned int random_variable = 0;
55627
55628+#ifdef CONFIG_PAX_RANDUSTACK
55629+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
55630+ return stack_top - current->mm->delta_stack;
55631+#endif
55632+
55633 if ((current->flags & PF_RANDOMIZE) &&
55634 !(current->personality & ADDR_NO_RANDOMIZE)) {
55635 random_variable = get_random_int() & STACK_RND_MASK;
55636@@ -572,7 +938,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
55637 unsigned long load_addr = 0, load_bias = 0;
55638 int load_addr_set = 0;
55639 char * elf_interpreter = NULL;
55640- unsigned long error;
55641+ unsigned long error = 0;
55642 struct elf_phdr *elf_ppnt, *elf_phdata;
55643 unsigned long elf_bss, elf_brk;
55644 int retval, i;
55645@@ -582,12 +948,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
55646 unsigned long start_code, end_code, start_data, end_data;
55647 unsigned long reloc_func_desc __maybe_unused = 0;
55648 int executable_stack = EXSTACK_DEFAULT;
55649- unsigned long def_flags = 0;
55650 struct pt_regs *regs = current_pt_regs();
55651 struct {
55652 struct elfhdr elf_ex;
55653 struct elfhdr interp_elf_ex;
55654 } *loc;
55655+ unsigned long pax_task_size;
55656
55657 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
55658 if (!loc) {
55659@@ -723,11 +1089,82 @@ static int load_elf_binary(struct linux_binprm *bprm)
55660 goto out_free_dentry;
55661
55662 /* OK, This is the point of no return */
55663- current->mm->def_flags = def_flags;
55664+ current->mm->def_flags = 0;
55665
55666 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
55667 may depend on the personality. */
55668 SET_PERSONALITY(loc->elf_ex);
55669+
55670+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
55671+ current->mm->pax_flags = 0UL;
55672+#endif
55673+
55674+#ifdef CONFIG_PAX_DLRESOLVE
55675+ current->mm->call_dl_resolve = 0UL;
55676+#endif
55677+
55678+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
55679+ current->mm->call_syscall = 0UL;
55680+#endif
55681+
55682+#ifdef CONFIG_PAX_ASLR
55683+ current->mm->delta_mmap = 0UL;
55684+ current->mm->delta_stack = 0UL;
55685+#endif
55686+
55687+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
55688+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
55689+ send_sig(SIGKILL, current, 0);
55690+ goto out_free_dentry;
55691+ }
55692+#endif
55693+
55694+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
55695+ pax_set_initial_flags(bprm);
55696+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
55697+ if (pax_set_initial_flags_func)
55698+ (pax_set_initial_flags_func)(bprm);
55699+#endif
55700+
55701+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
55702+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
55703+ current->mm->context.user_cs_limit = PAGE_SIZE;
55704+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
55705+ }
55706+#endif
55707+
55708+#ifdef CONFIG_PAX_SEGMEXEC
55709+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
55710+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
55711+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
55712+ pax_task_size = SEGMEXEC_TASK_SIZE;
55713+ current->mm->def_flags |= VM_NOHUGEPAGE;
55714+ } else
55715+#endif
55716+
55717+ pax_task_size = TASK_SIZE;
55718+
55719+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
55720+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
55721+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
55722+ put_cpu();
55723+ }
55724+#endif
55725+
55726+#ifdef CONFIG_PAX_ASLR
55727+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
55728+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
55729+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
55730+ }
55731+#endif
55732+
55733+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
55734+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
55735+ executable_stack = EXSTACK_DISABLE_X;
55736+ current->personality &= ~READ_IMPLIES_EXEC;
55737+ } else
55738+#endif
55739+
55740 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
55741 current->personality |= READ_IMPLIES_EXEC;
55742
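
The ASLR setup in the hunk above derives both deltas the same way: take
PAX_DELTA_*_LEN random bits and scale them to page granularity. A standalone
sketch of the arithmetic, with mrand48() standing in for pax_get_random_long()
and 16/12 as illustrative values for the delta lengths and PAGE_SHIFT (the
real values are per-architecture):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT          12
#define PAX_DELTA_MMAP_LEN  16
#define PAX_DELTA_STACK_LEN 16

static unsigned long random_long(void)
{
	return (unsigned long)mrand48();	/* stand-in for pax_get_random_long() */
}

int main(void)
{
	unsigned long delta_mmap, delta_stack;

	srand48(1);
	delta_mmap  = (random_long() & ((1UL << PAX_DELTA_MMAP_LEN)  - 1)) << PAGE_SHIFT;
	delta_stack = (random_long() & ((1UL << PAX_DELTA_STACK_LEN) - 1)) << PAGE_SHIFT;

	/* each delta is page aligned and strictly below 1 << (LEN + PAGE_SHIFT) */
	printf("delta_mmap  = %#lx\n", delta_mmap);
	printf("delta_stack = %#lx (max %#lx)\n", delta_stack,
	       ((1UL << PAX_DELTA_STACK_LEN) - 1) << PAGE_SHIFT);
	return 0;
}
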
55743@@ -817,6 +1254,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
55744 #else
55745 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
55746 #endif
55747+
55748+#ifdef CONFIG_PAX_RANDMMAP
55749+ /* PaX: randomize base address at the default exe base if requested */
55750+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
55751+#ifdef CONFIG_SPARC64
55752+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
55753+#else
55754+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
55755+#endif
55756+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
55757+ elf_flags |= MAP_FIXED;
55758+ }
55759+#endif
55760+
55761 }
55762
55763 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
55764@@ -849,9 +1300,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
55765 * allowed task size. Note that p_filesz must always be
55766 * <= p_memsz so it is only necessary to check p_memsz.
55767 */
55768- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
55769- elf_ppnt->p_memsz > TASK_SIZE ||
55770- TASK_SIZE - elf_ppnt->p_memsz < k) {
55771+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
55772+ elf_ppnt->p_memsz > pax_task_size ||
55773+ pax_task_size - elf_ppnt->p_memsz < k) {
55774 /* set_brk can never work. Avoid overflows. */
55775 send_sig(SIGKILL, current, 0);
55776 retval = -EINVAL;
55777@@ -890,17 +1341,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
55778 goto out_free_dentry;
55779 }
55780 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
55781- send_sig(SIGSEGV, current, 0);
55782- retval = -EFAULT; /* Nobody gets to see this, but.. */
55783- goto out_free_dentry;
55784+ /*
55785+ * This bss-zeroing can fail if the ELF
55786+ * file specifies odd protections. So
55787+ * we don't check the return value
55788+ */
55789 }
55790
55791+#ifdef CONFIG_PAX_RANDMMAP
55792+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
55793+ unsigned long start, size, flags;
55794+ vm_flags_t vm_flags;
55795+
55796+ start = ELF_PAGEALIGN(elf_brk);
55797+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
55798+ flags = MAP_FIXED | MAP_PRIVATE;
55799+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
55800+
55801+ down_write(&current->mm->mmap_sem);
55802+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
55803+ retval = -ENOMEM;
55804+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
55805+// if (current->personality & ADDR_NO_RANDOMIZE)
55806+// vm_flags |= VM_READ | VM_MAYREAD;
55807+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
55808+ retval = IS_ERR_VALUE(start) ? start : 0;
55809+ }
55810+ up_write(&current->mm->mmap_sem);
55811+ if (retval == 0)
55812+ retval = set_brk(start + size, start + size + PAGE_SIZE);
55813+ if (retval < 0) {
55814+ send_sig(SIGKILL, current, 0);
55815+ goto out_free_dentry;
55816+ }
55817+ }
55818+#endif
55819+
55820 if (elf_interpreter) {
55821- unsigned long interp_map_addr = 0;
55822-
55823 elf_entry = load_elf_interp(&loc->interp_elf_ex,
55824 interpreter,
55825- &interp_map_addr,
55826 load_bias);
55827 if (!IS_ERR((void *)elf_entry)) {
55828 /*
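
The randomized gap reserved after the brk in the hunk above is sized as
PAGE_SIZE plus 22 random bits shifted left by 4, i.e. one page plus up to just
under 64 MiB, later rounded by PAGE_ALIGN(). A quick standalone check of those
bounds, with 4096 standing in for PAGE_SIZE:

#include <stdio.h>

int main(void)
{
	unsigned long page = 4096UL;
	unsigned long min = page + (0UL << 4);
	unsigned long max = page + (((1UL << 22) - 1UL) << 4);

	printf("gap range: [%lu, %lu] bytes (max ~%lu MiB)\n",
	       min, max, max >> 20);
	return 0;
}
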
55829@@ -1122,7 +1601,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
55830 * Decide what to dump of a segment, part, all or none.
55831 */
55832 static unsigned long vma_dump_size(struct vm_area_struct *vma,
55833- unsigned long mm_flags)
55834+ unsigned long mm_flags, long signr)
55835 {
55836 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
55837
55838@@ -1160,7 +1639,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
55839 if (vma->vm_file == NULL)
55840 return 0;
55841
55842- if (FILTER(MAPPED_PRIVATE))
55843+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
55844 goto whole;
55845
55846 /*
55847@@ -1367,9 +1846,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
55848 {
55849 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
55850 int i = 0;
55851- do
55852+ do {
55853 i += 2;
55854- while (auxv[i - 2] != AT_NULL);
55855+ } while (auxv[i - 2] != AT_NULL);
55856 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
55857 }
55858
55859@@ -1378,7 +1857,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
55860 {
55861 mm_segment_t old_fs = get_fs();
55862 set_fs(KERNEL_DS);
55863- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
55864+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
55865 set_fs(old_fs);
55866 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
55867 }
55868@@ -2002,14 +2481,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
55869 }
55870
55871 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
55872- unsigned long mm_flags)
55873+ struct coredump_params *cprm)
55874 {
55875 struct vm_area_struct *vma;
55876 size_t size = 0;
55877
55878 for (vma = first_vma(current, gate_vma); vma != NULL;
55879 vma = next_vma(vma, gate_vma))
55880- size += vma_dump_size(vma, mm_flags);
55881+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
55882 return size;
55883 }
55884
55885@@ -2100,7 +2579,7 @@ static int elf_core_dump(struct coredump_params *cprm)
55886
55887 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
55888
55889- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
55890+ offset += elf_core_vma_data_size(gate_vma, cprm);
55891 offset += elf_core_extra_data_size();
55892 e_shoff = offset;
55893
55894@@ -2128,7 +2607,7 @@ static int elf_core_dump(struct coredump_params *cprm)
55895 phdr.p_offset = offset;
55896 phdr.p_vaddr = vma->vm_start;
55897 phdr.p_paddr = 0;
55898- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
55899+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
55900 phdr.p_memsz = vma->vm_end - vma->vm_start;
55901 offset += phdr.p_filesz;
55902 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
55903@@ -2161,7 +2640,7 @@ static int elf_core_dump(struct coredump_params *cprm)
55904 unsigned long addr;
55905 unsigned long end;
55906
55907- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
55908+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
55909
55910 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
55911 struct page *page;
55912@@ -2202,6 +2681,167 @@ out:
55913
55914 #endif /* CONFIG_ELF_CORE */
55915
55916+#ifdef CONFIG_PAX_MPROTECT
55917+/* PaX: non-PIC ELF libraries need relocations on their executable segments,
55918+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
55919+ * we'll remove VM_MAYWRITE for good on RELRO segments.
55920+ *
55921+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
55922+ * basis because we want to allow the common case and not the special ones.
55923+ */
55924+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
55925+{
55926+ struct elfhdr elf_h;
55927+ struct elf_phdr elf_p;
55928+ unsigned long i;
55929+ unsigned long oldflags;
55930+ bool is_textrel_rw, is_textrel_rx, is_relro;
55931+
55932+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
55933+ return;
55934+
55935+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
55936+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
55937+
55938+#ifdef CONFIG_PAX_ELFRELOCS
55939+ /* possible TEXTREL */
55940+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
55941+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
55942+#else
55943+ is_textrel_rw = false;
55944+ is_textrel_rx = false;
55945+#endif
55946+
55947+ /* possible RELRO */
55948+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
55949+
55950+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
55951+ return;
55952+
55953+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
55954+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
55955+
55956+#ifdef CONFIG_PAX_ETEXECRELOCS
55957+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
55958+#else
55959+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
55960+#endif
55961+
55962+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
55963+ !elf_check_arch(&elf_h) ||
55964+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
55965+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
55966+ return;
55967+
55968+ for (i = 0UL; i < elf_h.e_phnum; i++) {
55969+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
55970+ return;
55971+ switch (elf_p.p_type) {
55972+ case PT_DYNAMIC:
55973+ if (!is_textrel_rw && !is_textrel_rx)
55974+ continue;
55975+ i = 0UL;
55976+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
55977+ elf_dyn dyn;
55978+
55979+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
55980+ break;
55981+ if (dyn.d_tag == DT_NULL)
55982+ break;
55983+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
55984+ gr_log_textrel(vma);
55985+ if (is_textrel_rw)
55986+ vma->vm_flags |= VM_MAYWRITE;
55987+ else
55988+ /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
55989+ vma->vm_flags &= ~VM_MAYWRITE;
55990+ break;
55991+ }
55992+ i++;
55993+ }
55994+ is_textrel_rw = false;
55995+ is_textrel_rx = false;
55996+ continue;
55997+
55998+ case PT_GNU_RELRO:
55999+ if (!is_relro)
56000+ continue;
56001+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
56002+ vma->vm_flags &= ~VM_MAYWRITE;
56003+ is_relro = false;
56004+ continue;
56005+
56006+#ifdef CONFIG_PAX_PT_PAX_FLAGS
56007+ case PT_PAX_FLAGS: {
56008+ const char *msg_mprotect = "", *msg_emutramp = "";
56009+ char *buffer_lib, *buffer_exe;
56010+
56011+ if (elf_p.p_flags & PF_NOMPROTECT)
56012+ msg_mprotect = "MPROTECT disabled";
56013+
56014+#ifdef CONFIG_PAX_EMUTRAMP
56015+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
56016+ msg_emutramp = "EMUTRAMP enabled";
56017+#endif
56018+
56019+ if (!msg_mprotect[0] && !msg_emutramp[0])
56020+ continue;
56021+
56022+ if (!printk_ratelimit())
56023+ continue;
56024+
56025+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
56026+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
56027+ if (buffer_lib && buffer_exe) {
56028+ char *path_lib, *path_exe;
56029+
56030+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
56031+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
56032+
56033+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
56034+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
56035+
56036+ }
56037+ free_page((unsigned long)buffer_exe);
56038+ free_page((unsigned long)buffer_lib);
56039+ continue;
56040+ }
56041+#endif
56042+
56043+ }
56044+ }
56045+}
56046+#endif
56047+
56048+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56049+
56050+extern int grsec_enable_log_rwxmaps;
56051+
56052+static void elf_handle_mmap(struct file *file)
56053+{
56054+ struct elfhdr elf_h;
56055+ struct elf_phdr elf_p;
56056+ unsigned long i;
56057+
56058+ if (!grsec_enable_log_rwxmaps)
56059+ return;
56060+
56061+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
56062+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
56063+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
56064+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
56065+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
56066+ return;
56067+
56068+ for (i = 0UL; i < elf_h.e_phnum; i++) {
56069+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
56070+ return;
56071+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
56072+ gr_log_ptgnustack(file);
56073+ }
56074+}
56075+#endif
56076+
56077 static int __init init_elf_binfmt(void)
56078 {
56079 register_binfmt(&elf_format);
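
elf_handle_mprotect() above decides TEXTREL the same way a userspace tool
would: read the program headers from the backing file and look for
DT_TEXTREL, or DF_TEXTREL inside DT_FLAGS, in the PT_DYNAMIC segment. A
simplified 64-bit-only analogue using <elf.h>; it skips the p_filesz bounding
and most of the validation the kernel version performs, so it is a diagnostic
sketch rather than the kernel logic:

#include <elf.h>
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	FILE *f;
	Elf64_Ehdr eh;
	Elf64_Phdr ph;
	Elf64_Dyn dyn;
	int i, textrel = 0;

	if (argc != 2 || !(f = fopen(argv[1], "rb")))
		return 1;
	if (fread(&eh, sizeof(eh), 1, f) != 1 ||
	    memcmp(eh.e_ident, ELFMAG, SELFMAG))
		return 1;

	for (i = 0; i < eh.e_phnum; i++) {
		fseek(f, (long)(eh.e_phoff + i * sizeof(ph)), SEEK_SET);
		if (fread(&ph, sizeof(ph), 1, f) != 1 || ph.p_type != PT_DYNAMIC)
			continue;
		fseek(f, (long)ph.p_offset, SEEK_SET);
		/* walk the dynamic section until DT_NULL */
		while (fread(&dyn, sizeof(dyn), 1, f) == 1 && dyn.d_tag != DT_NULL) {
			if (dyn.d_tag == DT_TEXTREL ||
			    (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL)))
				textrel = 1;	/* the condition that earns VM_MAYWRITE */
		}
	}
	printf("%s: %s\n", argv[1], textrel ? "TEXTREL" : "no textrel");
	fclose(f);
	return 0;
}
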
56080diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
56081index d50bbe5..af3b649 100644
56082--- a/fs/binfmt_flat.c
56083+++ b/fs/binfmt_flat.c
56084@@ -566,7 +566,9 @@ static int load_flat_file(struct linux_binprm * bprm,
56085 realdatastart = (unsigned long) -ENOMEM;
56086 printk("Unable to allocate RAM for process data, errno %d\n",
56087 (int)-realdatastart);
56088+ down_write(&current->mm->mmap_sem);
56089 vm_munmap(textpos, text_len);
56090+ up_write(&current->mm->mmap_sem);
56091 ret = realdatastart;
56092 goto err;
56093 }
56094@@ -590,8 +592,10 @@ static int load_flat_file(struct linux_binprm * bprm,
56095 }
56096 if (IS_ERR_VALUE(result)) {
56097 printk("Unable to read data+bss, errno %d\n", (int)-result);
56098+ down_write(&current->mm->mmap_sem);
56099 vm_munmap(textpos, text_len);
56100 vm_munmap(realdatastart, len);
56101+ up_write(&current->mm->mmap_sem);
56102 ret = result;
56103 goto err;
56104 }
56105@@ -653,8 +657,10 @@ static int load_flat_file(struct linux_binprm * bprm,
56106 }
56107 if (IS_ERR_VALUE(result)) {
56108 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
56109+ down_write(&current->mm->mmap_sem);
56110 vm_munmap(textpos, text_len + data_len + extra +
56111 MAX_SHARED_LIBS * sizeof(unsigned long));
56112+ up_write(&current->mm->mmap_sem);
56113 ret = result;
56114 goto err;
56115 }
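
The three binfmt_flat error paths above now take mmap_sem around vm_munmap().
Mainline's vm_munmap() acquires the semaphore itself, so this only makes sense
if vm_munmap() in this tree expects the caller to hold it, presumably
converted elsewhere in the patch. A userspace model of that caller-locked
convention, with a pthread rwlock standing in for mmap_sem (link with
-lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t map_sem = PTHREAD_RWLOCK_INITIALIZER;

/* must be entered with map_sem held for writing */
static void __unmap(unsigned long start, unsigned long len)
{
	printf("unmapping [%#lx, %#lx)\n", start, start + len);
}

static void load_error_path(unsigned long textpos, unsigned long text_len)
{
	pthread_rwlock_wrlock(&map_sem);	/* down_write(&mm->mmap_sem) */
	__unmap(textpos, text_len);		/* undo the partial load */
	pthread_rwlock_unlock(&map_sem);	/* up_write(&mm->mmap_sem) */
}

int main(void)
{
	load_error_path(0x10000, 0x2000);
	return 0;
}
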
56116diff --git a/fs/bio.c b/fs/bio.c
56117index 33d79a4..c3c9893 100644
56118--- a/fs/bio.c
56119+++ b/fs/bio.c
56120@@ -1106,7 +1106,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
56121 /*
56122 * Overflow, abort
56123 */
56124- if (end < start)
56125+ if (end < start || end - start > INT_MAX - nr_pages)
56126 return ERR_PTR(-EINVAL);
56127
56128 nr_pages += end - start;
56129@@ -1240,7 +1240,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
56130 /*
56131 * Overflow, abort
56132 */
56133- if (end < start)
56134+ if (end < start || end - start > INT_MAX - nr_pages)
56135 return ERR_PTR(-EINVAL);
56136
56137 nr_pages += end - start;
56138@@ -1502,7 +1502,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
56139 const int read = bio_data_dir(bio) == READ;
56140 struct bio_map_data *bmd = bio->bi_private;
56141 int i;
56142- char *p = bmd->sgvecs[0].iov_base;
56143+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
56144
56145 bio_for_each_segment_all(bvec, bio, i) {
56146 char *addr = page_address(bvec->bv_page);
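
Both bio hunks above extend the overflow check because nr_pages is accumulated
in an int: `end < start` only catches a wrapped range, while a large
non-wrapping range can still push the sum past INT_MAX. A standalone
demonstration with arbitrary values:

#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned long start = 0, end = (unsigned long)INT_MAX + 10UL;
	int nr_pages = 100;

	/* old check: passes, since the range does not wrap */
	printf("end < start: %d\n", end < start);

	/* new check: rejects the range before nr_pages can overflow */
	printf("end - start > INT_MAX - nr_pages: %d\n",
	       end - start > (unsigned long)(INT_MAX - nr_pages));
	return 0;
}
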
56147diff --git a/fs/block_dev.c b/fs/block_dev.c
56148index 1e86823..8e34695 100644
56149--- a/fs/block_dev.c
56150+++ b/fs/block_dev.c
56151@@ -637,7 +637,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
56152 else if (bdev->bd_contains == bdev)
56153 return true; /* is a whole device which isn't held */
56154
56155- else if (whole->bd_holder == bd_may_claim)
56156+ else if (whole->bd_holder == (void *)bd_may_claim)
56157 return true; /* is a partition of a device that is being partitioned */
56158 else if (whole->bd_holder != NULL)
56159 return false; /* is a partition of a held device */
56160diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
56161index 316136b..e7a3a50 100644
56162--- a/fs/btrfs/ctree.c
56163+++ b/fs/btrfs/ctree.c
56164@@ -1028,9 +1028,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
56165 free_extent_buffer(buf);
56166 add_root_to_dirty_list(root);
56167 } else {
56168- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
56169- parent_start = parent->start;
56170- else
56171+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
56172+ if (parent)
56173+ parent_start = parent->start;
56174+ else
56175+ parent_start = 0;
56176+ } else
56177 parent_start = 0;
56178
56179 WARN_ON(trans->transid != btrfs_header_generation(parent));
56180diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
56181index 8d292fb..bc205c2 100644
56182--- a/fs/btrfs/delayed-inode.c
56183+++ b/fs/btrfs/delayed-inode.c
56184@@ -459,7 +459,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
56185
56186 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
56187 {
56188- int seq = atomic_inc_return(&delayed_root->items_seq);
56189+ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
56190 if ((atomic_dec_return(&delayed_root->items) <
56191 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
56192 waitqueue_active(&delayed_root->wait))
56193@@ -1379,7 +1379,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
56194 static int refs_newer(struct btrfs_delayed_root *delayed_root,
56195 int seq, int count)
56196 {
56197- int val = atomic_read(&delayed_root->items_seq);
56198+ int val = atomic_read_unchecked(&delayed_root->items_seq);
56199
56200 if (val < seq || val >= seq + count)
56201 return 1;
56202@@ -1396,7 +1396,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
56203 if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
56204 return;
56205
56206- seq = atomic_read(&delayed_root->items_seq);
56207+ seq = atomic_read_unchecked(&delayed_root->items_seq);
56208
56209 if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
56210 int ret;
56211diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
56212index a4b38f9..f86a509 100644
56213--- a/fs/btrfs/delayed-inode.h
56214+++ b/fs/btrfs/delayed-inode.h
56215@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
56216 */
56217 struct list_head prepare_list;
56218 atomic_t items; /* for delayed items */
56219- atomic_t items_seq; /* for delayed items */
56220+ atomic_unchecked_t items_seq; /* for delayed items */
56221 int nodes; /* for delayed nodes */
56222 wait_queue_head_t wait;
56223 };
56224@@ -87,7 +87,7 @@ static inline void btrfs_init_delayed_root(
56225 struct btrfs_delayed_root *delayed_root)
56226 {
56227 atomic_set(&delayed_root->items, 0);
56228- atomic_set(&delayed_root->items_seq, 0);
56229+ atomic_set_unchecked(&delayed_root->items_seq, 0);
56230 delayed_root->nodes = 0;
56231 spin_lock_init(&delayed_root->lock);
56232 init_waitqueue_head(&delayed_root->wait);
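
items_seq is a wrapping sequence counter, not a reference count, so the
conversion in these delayed-inode hunks opts it out of PaX's atomic overflow
detection via atomic_unchecked_t. A userspace sketch of the distinction using
C11 atomics; the real checked and unchecked primitives are arch-specific
assembly elsewhere in this patch, and the hardened variant there catches the
overflow in the asm itself rather than after the fact as this sketch does:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

typedef struct { atomic_int counter; } atomic_t;
typedef struct { atomic_int counter; } atomic_unchecked_t;

/* hardened flavour: refuse to let a refcount wrap */
static void atomic_inc(atomic_t *v)
{
	if (atomic_fetch_add(&v->counter, 1) == INT_MAX) {
		fprintf(stderr, "refcount overflow detected\n");
		abort();	/* the kernel version unwinds and kills the task */
	}
}

/* unchecked flavour: plain wrapping arithmetic for statistics/sequences */
static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	atomic_fetch_add(&v->counter, 1);
}

int main(void)
{
	atomic_t ref = { ATOMIC_VAR_INIT(1) };
	atomic_unchecked_t seq = { ATOMIC_VAR_INIT(INT_MAX) };

	atomic_inc(&ref);		/* fine: 1 -> 2 */
	atomic_inc_unchecked(&seq);	/* wraps silently, by design */
	printf("seq wrapped to %d\n", atomic_load(&seq.counter));
	return 0;
}
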
56233diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
56234index 9f831bb..14afde5 100644
56235--- a/fs/btrfs/ioctl.c
56236+++ b/fs/btrfs/ioctl.c
56237@@ -3457,9 +3457,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
56238 for (i = 0; i < num_types; i++) {
56239 struct btrfs_space_info *tmp;
56240
56241+ /* Don't copy in more than we allocated */
56242 if (!slot_count)
56243 break;
56244
56245+ slot_count--;
56246+
56247 info = NULL;
56248 rcu_read_lock();
56249 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
56250@@ -3481,10 +3484,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
56251 memcpy(dest, &space, sizeof(space));
56252 dest++;
56253 space_args.total_spaces++;
56254- slot_count--;
56255 }
56256- if (!slot_count)
56257- break;
56258 }
56259 up_read(&info->groups_sem);
56260 }
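
The btrfs_ioctl_space_info() fix above moves the slot_count decrement ahead of
the copy, so the loop reserves its output slot before producing an entry and
can never emit more records than the user buffer was sized for. The shape of
the fix in a simplified standalone model:

#include <stdio.h>

#define NENTRIES 10

int main(void)
{
	int slot_count = 3;	/* user allocated 3 slots */
	int produced = 0, i;

	for (i = 0; i < NENTRIES; i++) {
		if (!slot_count)
			break;		/* budget exhausted */
		slot_count--;		/* reserve a slot up front */
		produced++;		/* stands in for memcpy(dest, &space, ...) */
	}
	printf("produced %d entries for 3 slots\n", produced);
	return 0;
}
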
56261diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
56262index d71a11d..384e2c4 100644
56263--- a/fs/btrfs/super.c
56264+++ b/fs/btrfs/super.c
56265@@ -265,7 +265,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
56266 function, line, errstr);
56267 return;
56268 }
56269- ACCESS_ONCE(trans->transaction->aborted) = errno;
56270+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
56271 /* Wake up anybody who may be waiting on this transaction */
56272 wake_up(&root->fs_info->transaction_wait);
56273 wake_up(&root->fs_info->transaction_blocked_wait);
56274diff --git a/fs/buffer.c b/fs/buffer.c
56275index 6024877..7bd000a 100644
56276--- a/fs/buffer.c
56277+++ b/fs/buffer.c
56278@@ -3426,7 +3426,7 @@ void __init buffer_init(void)
56279 bh_cachep = kmem_cache_create("buffer_head",
56280 sizeof(struct buffer_head), 0,
56281 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
56282- SLAB_MEM_SPREAD),
56283+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
56284 NULL);
56285
56286 /*
56287diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
56288index 622f469..e8d2d55 100644
56289--- a/fs/cachefiles/bind.c
56290+++ b/fs/cachefiles/bind.c
56291@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
56292 args);
56293
56294 /* start by checking things over */
56295- ASSERT(cache->fstop_percent >= 0 &&
56296- cache->fstop_percent < cache->fcull_percent &&
56297+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
56298 cache->fcull_percent < cache->frun_percent &&
56299 cache->frun_percent < 100);
56300
56301- ASSERT(cache->bstop_percent >= 0 &&
56302- cache->bstop_percent < cache->bcull_percent &&
56303+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
56304 cache->bcull_percent < cache->brun_percent &&
56305 cache->brun_percent < 100);
56306
56307diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
56308index 0a1467b..6a53245 100644
56309--- a/fs/cachefiles/daemon.c
56310+++ b/fs/cachefiles/daemon.c
56311@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
56312 if (n > buflen)
56313 return -EMSGSIZE;
56314
56315- if (copy_to_user(_buffer, buffer, n) != 0)
56316+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
56317 return -EFAULT;
56318
56319 return n;
56320@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
56321 if (test_bit(CACHEFILES_DEAD, &cache->flags))
56322 return -EIO;
56323
56324- if (datalen < 0 || datalen > PAGE_SIZE - 1)
56325+ if (datalen > PAGE_SIZE - 1)
56326 return -EOPNOTSUPP;
56327
56328 /* drag the command string into the kernel so we can parse it */
56329@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
56330 if (args[0] != '%' || args[1] != '\0')
56331 return -EINVAL;
56332
56333- if (fstop < 0 || fstop >= cache->fcull_percent)
56334+ if (fstop >= cache->fcull_percent)
56335 return cachefiles_daemon_range_error(cache, args);
56336
56337 cache->fstop_percent = fstop;
56338@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
56339 if (args[0] != '%' || args[1] != '\0')
56340 return -EINVAL;
56341
56342- if (bstop < 0 || bstop >= cache->bcull_percent)
56343+ if (bstop >= cache->bcull_percent)
56344 return cachefiles_daemon_range_error(cache, args);
56345
56346 cache->bstop_percent = bstop;
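
The dropped `< 0` comparisons in the cachefiles hunks above were dead code:
datalen is a size_t and the percentages are unsigned, so a negative input has
already wrapped to a huge positive value, and only the upper-bound test can
catch it (compilers warn that the signed test is always false). A standalone
illustration:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t datalen = (size_t)-5;	/* a "negative" length after wrap */

	/* always false for an unsigned type (and a compiler warning) */
	printf("datalen < 0:        %d\n", datalen < 0);
	/* the surviving upper-bound test is what actually rejects it */
	printf("datalen > 4096 - 1: %d\n", datalen > 4096 - 1);
	return 0;
}
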
56347diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
56348index 5349473..d6c0b93 100644
56349--- a/fs/cachefiles/internal.h
56350+++ b/fs/cachefiles/internal.h
56351@@ -59,7 +59,7 @@ struct cachefiles_cache {
56352 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
56353 struct rb_root active_nodes; /* active nodes (can't be culled) */
56354 rwlock_t active_lock; /* lock for active_nodes */
56355- atomic_t gravecounter; /* graveyard uniquifier */
56356+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
56357 unsigned frun_percent; /* when to stop culling (% files) */
56358 unsigned fcull_percent; /* when to start culling (% files) */
56359 unsigned fstop_percent; /* when to stop allocating (% files) */
56360@@ -171,19 +171,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
56361 * proc.c
56362 */
56363 #ifdef CONFIG_CACHEFILES_HISTOGRAM
56364-extern atomic_t cachefiles_lookup_histogram[HZ];
56365-extern atomic_t cachefiles_mkdir_histogram[HZ];
56366-extern atomic_t cachefiles_create_histogram[HZ];
56367+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
56368+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
56369+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
56370
56371 extern int __init cachefiles_proc_init(void);
56372 extern void cachefiles_proc_cleanup(void);
56373 static inline
56374-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
56375+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
56376 {
56377 unsigned long jif = jiffies - start_jif;
56378 if (jif >= HZ)
56379 jif = HZ - 1;
56380- atomic_inc(&histogram[jif]);
56381+ atomic_inc_unchecked(&histogram[jif]);
56382 }
56383
56384 #else
56385diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
56386index ca65f39..48921e3 100644
56387--- a/fs/cachefiles/namei.c
56388+++ b/fs/cachefiles/namei.c
56389@@ -317,7 +317,7 @@ try_again:
56390 /* first step is to make up a grave dentry in the graveyard */
56391 sprintf(nbuffer, "%08x%08x",
56392 (uint32_t) get_seconds(),
56393- (uint32_t) atomic_inc_return(&cache->gravecounter));
56394+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
56395
56396 /* do the multiway lock magic */
56397 trap = lock_rename(cache->graveyard, dir);
56398diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
56399index eccd339..4c1d995 100644
56400--- a/fs/cachefiles/proc.c
56401+++ b/fs/cachefiles/proc.c
56402@@ -14,9 +14,9 @@
56403 #include <linux/seq_file.h>
56404 #include "internal.h"
56405
56406-atomic_t cachefiles_lookup_histogram[HZ];
56407-atomic_t cachefiles_mkdir_histogram[HZ];
56408-atomic_t cachefiles_create_histogram[HZ];
56409+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
56410+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
56411+atomic_unchecked_t cachefiles_create_histogram[HZ];
56412
56413 /*
56414 * display the latency histogram
56415@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
56416 return 0;
56417 default:
56418 index = (unsigned long) v - 3;
56419- x = atomic_read(&cachefiles_lookup_histogram[index]);
56420- y = atomic_read(&cachefiles_mkdir_histogram[index]);
56421- z = atomic_read(&cachefiles_create_histogram[index]);
56422+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
56423+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
56424+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
56425 if (x == 0 && y == 0 && z == 0)
56426 return 0;
56427
56428diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
56429index ebaff36..7e3ea26 100644
56430--- a/fs/cachefiles/rdwr.c
56431+++ b/fs/cachefiles/rdwr.c
56432@@ -950,7 +950,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
56433 old_fs = get_fs();
56434 set_fs(KERNEL_DS);
56435 ret = file->f_op->write(
56436- file, (const void __user *) data, len, &pos);
56437+ file, (const void __force_user *) data, len, &pos);
56438 set_fs(old_fs);
56439 kunmap(page);
56440 file_end_write(file);
56441diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
56442index 2a0bcae..34ec24e 100644
56443--- a/fs/ceph/dir.c
56444+++ b/fs/ceph/dir.c
56445@@ -240,7 +240,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
56446 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
56447 struct ceph_mds_client *mdsc = fsc->mdsc;
56448 unsigned frag = fpos_frag(ctx->pos);
56449- int off = fpos_off(ctx->pos);
56450+ unsigned int off = fpos_off(ctx->pos);
56451 int err;
56452 u32 ftype;
56453 struct ceph_mds_reply_info_parsed *rinfo;
56454diff --git a/fs/ceph/super.c b/fs/ceph/super.c
56455index 6a0951e..03fac6d 100644
56456--- a/fs/ceph/super.c
56457+++ b/fs/ceph/super.c
56458@@ -870,7 +870,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
56459 /*
56460 * construct our own bdi so we can control readahead, etc.
56461 */
56462-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
56463+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
56464
56465 static int ceph_register_bdi(struct super_block *sb,
56466 struct ceph_fs_client *fsc)
56467@@ -887,7 +887,7 @@ static int ceph_register_bdi(struct super_block *sb,
56468 default_backing_dev_info.ra_pages;
56469
56470 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
56471- atomic_long_inc_return(&bdi_seq));
56472+ atomic_long_inc_return_unchecked(&bdi_seq));
56473 if (!err)
56474 sb->s_bdi = &fsc->backing_dev_info;
56475 return err;
56476diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
56477index f3ac415..3d2420c 100644
56478--- a/fs/cifs/cifs_debug.c
56479+++ b/fs/cifs/cifs_debug.c
56480@@ -286,8 +286,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
56481
56482 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
56483 #ifdef CONFIG_CIFS_STATS2
56484- atomic_set(&totBufAllocCount, 0);
56485- atomic_set(&totSmBufAllocCount, 0);
56486+ atomic_set_unchecked(&totBufAllocCount, 0);
56487+ atomic_set_unchecked(&totSmBufAllocCount, 0);
56488 #endif /* CONFIG_CIFS_STATS2 */
56489 spin_lock(&cifs_tcp_ses_lock);
56490 list_for_each(tmp1, &cifs_tcp_ses_list) {
56491@@ -300,7 +300,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
56492 tcon = list_entry(tmp3,
56493 struct cifs_tcon,
56494 tcon_list);
56495- atomic_set(&tcon->num_smbs_sent, 0);
56496+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
56497 if (server->ops->clear_stats)
56498 server->ops->clear_stats(tcon);
56499 }
56500@@ -332,8 +332,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
56501 smBufAllocCount.counter, cifs_min_small);
56502 #ifdef CONFIG_CIFS_STATS2
56503 seq_printf(m, "Total Large %d Small %d Allocations\n",
56504- atomic_read(&totBufAllocCount),
56505- atomic_read(&totSmBufAllocCount));
56506+ atomic_read_unchecked(&totBufAllocCount),
56507+ atomic_read_unchecked(&totSmBufAllocCount));
56508 #endif /* CONFIG_CIFS_STATS2 */
56509
56510 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
56511@@ -362,7 +362,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
56512 if (tcon->need_reconnect)
56513 seq_puts(m, "\tDISCONNECTED ");
56514 seq_printf(m, "\nSMBs: %d",
56515- atomic_read(&tcon->num_smbs_sent));
56516+ atomic_read_unchecked(&tcon->num_smbs_sent));
56517 if (server->ops->print_stats)
56518 server->ops->print_stats(m, tcon);
56519 }
56520diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
56521index 849f613..eae6dec 100644
56522--- a/fs/cifs/cifsfs.c
56523+++ b/fs/cifs/cifsfs.c
56524@@ -1056,7 +1056,7 @@ cifs_init_request_bufs(void)
56525 */
56526 cifs_req_cachep = kmem_cache_create("cifs_request",
56527 CIFSMaxBufSize + max_hdr_size, 0,
56528- SLAB_HWCACHE_ALIGN, NULL);
56529+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
56530 if (cifs_req_cachep == NULL)
56531 return -ENOMEM;
56532
56533@@ -1083,7 +1083,7 @@ cifs_init_request_bufs(void)
56534 efficient to alloc 1 per page off the slab compared to 17K (5page)
56535 alloc of large cifs buffers even when page debugging is on */
56536 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
56537- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
56538+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
56539 NULL);
56540 if (cifs_sm_req_cachep == NULL) {
56541 mempool_destroy(cifs_req_poolp);
56542@@ -1168,8 +1168,8 @@ init_cifs(void)
56543 atomic_set(&bufAllocCount, 0);
56544 atomic_set(&smBufAllocCount, 0);
56545 #ifdef CONFIG_CIFS_STATS2
56546- atomic_set(&totBufAllocCount, 0);
56547- atomic_set(&totSmBufAllocCount, 0);
56548+ atomic_set_unchecked(&totBufAllocCount, 0);
56549+ atomic_set_unchecked(&totSmBufAllocCount, 0);
56550 #endif /* CONFIG_CIFS_STATS2 */
56551
56552 atomic_set(&midCount, 0);
56553diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
56554index f918a99..bb300d5 100644
56555--- a/fs/cifs/cifsglob.h
56556+++ b/fs/cifs/cifsglob.h
56557@@ -787,35 +787,35 @@ struct cifs_tcon {
56558 __u16 Flags; /* optional support bits */
56559 enum statusEnum tidStatus;
56560 #ifdef CONFIG_CIFS_STATS
56561- atomic_t num_smbs_sent;
56562+ atomic_unchecked_t num_smbs_sent;
56563 union {
56564 struct {
56565- atomic_t num_writes;
56566- atomic_t num_reads;
56567- atomic_t num_flushes;
56568- atomic_t num_oplock_brks;
56569- atomic_t num_opens;
56570- atomic_t num_closes;
56571- atomic_t num_deletes;
56572- atomic_t num_mkdirs;
56573- atomic_t num_posixopens;
56574- atomic_t num_posixmkdirs;
56575- atomic_t num_rmdirs;
56576- atomic_t num_renames;
56577- atomic_t num_t2renames;
56578- atomic_t num_ffirst;
56579- atomic_t num_fnext;
56580- atomic_t num_fclose;
56581- atomic_t num_hardlinks;
56582- atomic_t num_symlinks;
56583- atomic_t num_locks;
56584- atomic_t num_acl_get;
56585- atomic_t num_acl_set;
56586+ atomic_unchecked_t num_writes;
56587+ atomic_unchecked_t num_reads;
56588+ atomic_unchecked_t num_flushes;
56589+ atomic_unchecked_t num_oplock_brks;
56590+ atomic_unchecked_t num_opens;
56591+ atomic_unchecked_t num_closes;
56592+ atomic_unchecked_t num_deletes;
56593+ atomic_unchecked_t num_mkdirs;
56594+ atomic_unchecked_t num_posixopens;
56595+ atomic_unchecked_t num_posixmkdirs;
56596+ atomic_unchecked_t num_rmdirs;
56597+ atomic_unchecked_t num_renames;
56598+ atomic_unchecked_t num_t2renames;
56599+ atomic_unchecked_t num_ffirst;
56600+ atomic_unchecked_t num_fnext;
56601+ atomic_unchecked_t num_fclose;
56602+ atomic_unchecked_t num_hardlinks;
56603+ atomic_unchecked_t num_symlinks;
56604+ atomic_unchecked_t num_locks;
56605+ atomic_unchecked_t num_acl_get;
56606+ atomic_unchecked_t num_acl_set;
56607 } cifs_stats;
56608 #ifdef CONFIG_CIFS_SMB2
56609 struct {
56610- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
56611- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
56612+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
56613+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
56614 } smb2_stats;
56615 #endif /* CONFIG_CIFS_SMB2 */
56616 } stats;
56617@@ -1145,7 +1145,7 @@ convert_delimiter(char *path, char delim)
56618 }
56619
56620 #ifdef CONFIG_CIFS_STATS
56621-#define cifs_stats_inc atomic_inc
56622+#define cifs_stats_inc atomic_inc_unchecked
56623
56624 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
56625 unsigned int bytes)
56626@@ -1511,8 +1511,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
56627 /* Various Debug counters */
56628 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
56629 #ifdef CONFIG_CIFS_STATS2
56630-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
56631-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
56632+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
56633+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
56634 #endif
56635 GLOBAL_EXTERN atomic_t smBufAllocCount;
56636 GLOBAL_EXTERN atomic_t midCount;
56637diff --git a/fs/cifs/file.c b/fs/cifs/file.c
56638index 5a5a872..92c3210 100644
56639--- a/fs/cifs/file.c
56640+++ b/fs/cifs/file.c
56641@@ -1900,10 +1900,14 @@ static int cifs_writepages(struct address_space *mapping,
56642 index = mapping->writeback_index; /* Start from prev offset */
56643 end = -1;
56644 } else {
56645- index = wbc->range_start >> PAGE_CACHE_SHIFT;
56646- end = wbc->range_end >> PAGE_CACHE_SHIFT;
56647- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
56648+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
56649 range_whole = true;
56650+ index = 0;
56651+ end = ULONG_MAX;
56652+ } else {
56653+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
56654+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
56655+ }
56656 scanned = true;
56657 }
56658 retry:
56659diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
56660index 2f9f379..43f8025 100644
56661--- a/fs/cifs/misc.c
56662+++ b/fs/cifs/misc.c
56663@@ -170,7 +170,7 @@ cifs_buf_get(void)
56664 memset(ret_buf, 0, buf_size + 3);
56665 atomic_inc(&bufAllocCount);
56666 #ifdef CONFIG_CIFS_STATS2
56667- atomic_inc(&totBufAllocCount);
56668+ atomic_inc_unchecked(&totBufAllocCount);
56669 #endif /* CONFIG_CIFS_STATS2 */
56670 }
56671
56672@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
56673 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
56674 atomic_inc(&smBufAllocCount);
56675 #ifdef CONFIG_CIFS_STATS2
56676- atomic_inc(&totSmBufAllocCount);
56677+ atomic_inc_unchecked(&totSmBufAllocCount);
56678 #endif /* CONFIG_CIFS_STATS2 */
56679
56680 }
56681diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
56682index 5f5ba0d..8d6ef7d 100644
56683--- a/fs/cifs/smb1ops.c
56684+++ b/fs/cifs/smb1ops.c
56685@@ -609,27 +609,27 @@ static void
56686 cifs_clear_stats(struct cifs_tcon *tcon)
56687 {
56688 #ifdef CONFIG_CIFS_STATS
56689- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
56690- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
56691- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
56692- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
56693- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
56694- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
56695- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
56696- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
56697- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
56698- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
56699- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
56700- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
56701- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
56702- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
56703- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
56704- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
56705- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
56706- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
56707- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
56708- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
56709- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
56710+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
56711+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
56712+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
56713+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
56714+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
56715+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
56716+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
56717+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
56718+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
56719+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
56720+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
56721+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
56722+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
56723+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
56724+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
56725+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
56726+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
56727+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
56728+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
56729+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
56730+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
56731 #endif
56732 }
56733
56734@@ -638,36 +638,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
56735 {
56736 #ifdef CONFIG_CIFS_STATS
56737 seq_printf(m, " Oplocks breaks: %d",
56738- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
56739+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
56740 seq_printf(m, "\nReads: %d Bytes: %llu",
56741- atomic_read(&tcon->stats.cifs_stats.num_reads),
56742+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
56743 (long long)(tcon->bytes_read));
56744 seq_printf(m, "\nWrites: %d Bytes: %llu",
56745- atomic_read(&tcon->stats.cifs_stats.num_writes),
56746+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
56747 (long long)(tcon->bytes_written));
56748 seq_printf(m, "\nFlushes: %d",
56749- atomic_read(&tcon->stats.cifs_stats.num_flushes));
56750+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
56751 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
56752- atomic_read(&tcon->stats.cifs_stats.num_locks),
56753- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
56754- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
56755+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
56756+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
56757+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
56758 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
56759- atomic_read(&tcon->stats.cifs_stats.num_opens),
56760- atomic_read(&tcon->stats.cifs_stats.num_closes),
56761- atomic_read(&tcon->stats.cifs_stats.num_deletes));
56762+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
56763+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
56764+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
56765 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
56766- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
56767- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
56768+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
56769+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
56770 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
56771- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
56772- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
56773+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
56774+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
56775 seq_printf(m, "\nRenames: %d T2 Renames %d",
56776- atomic_read(&tcon->stats.cifs_stats.num_renames),
56777- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
56778+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
56779+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
56780 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
56781- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
56782- atomic_read(&tcon->stats.cifs_stats.num_fnext),
56783- atomic_read(&tcon->stats.cifs_stats.num_fclose));
56784+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
56785+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
56786+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
56787 #endif
56788 }
56789
56790diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
56791index 757da3e..07bf1ed 100644
56792--- a/fs/cifs/smb2ops.c
56793+++ b/fs/cifs/smb2ops.c
56794@@ -370,8 +370,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
56795 #ifdef CONFIG_CIFS_STATS
56796 int i;
56797 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
56798- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
56799- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
56800+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
56801+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
56802 }
56803 #endif
56804 }
56805@@ -411,65 +411,65 @@ static void
56806 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
56807 {
56808 #ifdef CONFIG_CIFS_STATS
56809- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
56810- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
56811+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
56812+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
56813 seq_printf(m, "\nNegotiates: %d sent %d failed",
56814- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
56815- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
56816+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
56817+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
56818 seq_printf(m, "\nSessionSetups: %d sent %d failed",
56819- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
56820- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
56821+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
56822+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
56823 seq_printf(m, "\nLogoffs: %d sent %d failed",
56824- atomic_read(&sent[SMB2_LOGOFF_HE]),
56825- atomic_read(&failed[SMB2_LOGOFF_HE]));
56826+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
56827+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
56828 seq_printf(m, "\nTreeConnects: %d sent %d failed",
56829- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
56830- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
56831+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
56832+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
56833 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
56834- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
56835- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
56836+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
56837+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
56838 seq_printf(m, "\nCreates: %d sent %d failed",
56839- atomic_read(&sent[SMB2_CREATE_HE]),
56840- atomic_read(&failed[SMB2_CREATE_HE]));
56841+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
56842+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
56843 seq_printf(m, "\nCloses: %d sent %d failed",
56844- atomic_read(&sent[SMB2_CLOSE_HE]),
56845- atomic_read(&failed[SMB2_CLOSE_HE]));
56846+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
56847+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
56848 seq_printf(m, "\nFlushes: %d sent %d failed",
56849- atomic_read(&sent[SMB2_FLUSH_HE]),
56850- atomic_read(&failed[SMB2_FLUSH_HE]));
56851+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
56852+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
56853 seq_printf(m, "\nReads: %d sent %d failed",
56854- atomic_read(&sent[SMB2_READ_HE]),
56855- atomic_read(&failed[SMB2_READ_HE]));
56856+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
56857+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
56858 seq_printf(m, "\nWrites: %d sent %d failed",
56859- atomic_read(&sent[SMB2_WRITE_HE]),
56860- atomic_read(&failed[SMB2_WRITE_HE]));
56861+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
56862+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
56863 seq_printf(m, "\nLocks: %d sent %d failed",
56864- atomic_read(&sent[SMB2_LOCK_HE]),
56865- atomic_read(&failed[SMB2_LOCK_HE]));
56866+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
56867+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
56868 seq_printf(m, "\nIOCTLs: %d sent %d failed",
56869- atomic_read(&sent[SMB2_IOCTL_HE]),
56870- atomic_read(&failed[SMB2_IOCTL_HE]));
56871+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
56872+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
56873 seq_printf(m, "\nCancels: %d sent %d failed",
56874- atomic_read(&sent[SMB2_CANCEL_HE]),
56875- atomic_read(&failed[SMB2_CANCEL_HE]));
56876+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
56877+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
56878 seq_printf(m, "\nEchos: %d sent %d failed",
56879- atomic_read(&sent[SMB2_ECHO_HE]),
56880- atomic_read(&failed[SMB2_ECHO_HE]));
56881+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
56882+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
56883 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
56884- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
56885- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
56886+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
56887+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
56888 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
56889- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
56890- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
56891+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
56892+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
56893 seq_printf(m, "\nQueryInfos: %d sent %d failed",
56894- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
56895- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
56896+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
56897+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
56898 seq_printf(m, "\nSetInfos: %d sent %d failed",
56899- atomic_read(&sent[SMB2_SET_INFO_HE]),
56900- atomic_read(&failed[SMB2_SET_INFO_HE]));
56901+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
56902+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
56903 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
56904- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
56905- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
56906+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
56907+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
56908 #endif
56909 }
56910
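The hunks above (and many below) convert per-command statistics counters from atomic_t to the patch's atomic_unchecked_t. Under PaX REFCOUNT hardening, plain atomic_t arithmetic is instrumented to detect overflow, which would be a false positive for counters that may legitimately wrap; the _unchecked variants opt such counters out. A minimal user-space model of the split, assuming C11 atomics — the names and the saturation check are illustrative, not the kernel's real implementation:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

/* Checked counters trap on overflow, as the REFCOUNT instrumentation
 * would; unchecked counters are plain modular arithmetic, suitable
 * for statistics that may legitimately wrap. */
static void checked_inc(atomic_int *v)
{
	if (atomic_fetch_add(v, 1) == INT_MAX)
		fprintf(stderr, "refcount overflow detected\n"); /* PaX would kill here */
}

static void unchecked_inc(atomic_int *v)
{
	atomic_fetch_add(v, 1); /* no instrumentation: wrap is acceptable */
}

int main(void)
{
	atomic_int stat = INT_MAX; /* statistic at the wrap point */
	unchecked_inc(&stat);      /* wraps silently, by design */
	printf("stat is now %d\n", atomic_load(&stat));

	atomic_int ref = INT_MAX;  /* refcount at the wrap point */
	checked_inc(&ref);         /* the hardened path flags this */
	return 0;
}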
56911diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
56912index 2013234..a720734 100644
56913--- a/fs/cifs/smb2pdu.c
56914+++ b/fs/cifs/smb2pdu.c
56915@@ -2091,8 +2091,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
56916 default:
56917 cifs_dbg(VFS, "info level %u isn't supported\n",
56918 srch_inf->info_level);
56919- rc = -EINVAL;
56920- goto qdir_exit;
56921+ return -EINVAL;
56922 }
56923
56924 req->FileIndex = cpu_to_le32(index);
56925diff --git a/fs/coda/cache.c b/fs/coda/cache.c
56926index 1da168c..8bc7ff6 100644
56927--- a/fs/coda/cache.c
56928+++ b/fs/coda/cache.c
56929@@ -24,7 +24,7 @@
56930 #include "coda_linux.h"
56931 #include "coda_cache.h"
56932
56933-static atomic_t permission_epoch = ATOMIC_INIT(0);
56934+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
56935
56936 /* replace or extend an acl cache hit */
56937 void coda_cache_enter(struct inode *inode, int mask)
56938@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
56939 struct coda_inode_info *cii = ITOC(inode);
56940
56941 spin_lock(&cii->c_lock);
56942- cii->c_cached_epoch = atomic_read(&permission_epoch);
56943+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
56944 if (!uid_eq(cii->c_uid, current_fsuid())) {
56945 cii->c_uid = current_fsuid();
56946 cii->c_cached_perm = mask;
56947@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
56948 {
56949 struct coda_inode_info *cii = ITOC(inode);
56950 spin_lock(&cii->c_lock);
56951- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
56952+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
56953 spin_unlock(&cii->c_lock);
56954 }
56955
56956 /* remove all acl caches */
56957 void coda_cache_clear_all(struct super_block *sb)
56958 {
56959- atomic_inc(&permission_epoch);
56960+ atomic_inc_unchecked(&permission_epoch);
56961 }
56962
56963
56964@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
56965 spin_lock(&cii->c_lock);
56966 hit = (mask & cii->c_cached_perm) == mask &&
56967 uid_eq(cii->c_uid, current_fsuid()) &&
56968- cii->c_cached_epoch == atomic_read(&permission_epoch);
56969+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
56970 spin_unlock(&cii->c_lock);
56971
56972 return hit;
56973diff --git a/fs/compat.c b/fs/compat.c
56974index 6af20de..fec3fbb 100644
56975--- a/fs/compat.c
56976+++ b/fs/compat.c
56977@@ -54,7 +54,7 @@
56978 #include <asm/ioctls.h>
56979 #include "internal.h"
56980
56981-int compat_log = 1;
56982+int compat_log = 0;
56983
56984 int compat_printk(const char *fmt, ...)
56985 {
56986@@ -488,7 +488,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
56987
56988 set_fs(KERNEL_DS);
56989 /* The __user pointer cast is valid because of the set_fs() */
56990- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
56991+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
56992 set_fs(oldfs);
56993 /* truncating is ok because it's a user address */
56994 if (!ret)
56995@@ -546,7 +546,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
56996 goto out;
56997
56998 ret = -EINVAL;
56999- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
57000+ if (nr_segs > UIO_MAXIOV)
57001 goto out;
57002 if (nr_segs > fast_segs) {
57003 ret = -ENOMEM;
57004@@ -834,6 +834,7 @@ struct compat_old_linux_dirent {
57005 struct compat_readdir_callback {
57006 struct dir_context ctx;
57007 struct compat_old_linux_dirent __user *dirent;
57008+ struct file * file;
57009 int result;
57010 };
57011
57012@@ -851,6 +852,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
57013 buf->result = -EOVERFLOW;
57014 return -EOVERFLOW;
57015 }
57016+
57017+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
57018+ return 0;
57019+
57020 buf->result++;
57021 dirent = buf->dirent;
57022 if (!access_ok(VERIFY_WRITE, dirent,
57023@@ -882,6 +887,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
57024 if (!f.file)
57025 return -EBADF;
57026
57027+ buf.file = f.file;
57028 error = iterate_dir(f.file, &buf.ctx);
57029 if (buf.result)
57030 error = buf.result;
57031@@ -901,6 +907,7 @@ struct compat_getdents_callback {
57032 struct dir_context ctx;
57033 struct compat_linux_dirent __user *current_dir;
57034 struct compat_linux_dirent __user *previous;
57035+ struct file * file;
57036 int count;
57037 int error;
57038 };
57039@@ -922,6 +929,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
57040 buf->error = -EOVERFLOW;
57041 return -EOVERFLOW;
57042 }
57043+
57044+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
57045+ return 0;
57046+
57047 dirent = buf->previous;
57048 if (dirent) {
57049 if (__put_user(offset, &dirent->d_off))
57050@@ -967,6 +978,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
57051 if (!f.file)
57052 return -EBADF;
57053
57054+ buf.file = f.file;
57055 error = iterate_dir(f.file, &buf.ctx);
57056 if (error >= 0)
57057 error = buf.error;
57058@@ -987,6 +999,7 @@ struct compat_getdents_callback64 {
57059 struct dir_context ctx;
57060 struct linux_dirent64 __user *current_dir;
57061 struct linux_dirent64 __user *previous;
57062+ struct file * file;
57063 int count;
57064 int error;
57065 };
57066@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
57067 buf->error = -EINVAL; /* only used if we fail.. */
57068 if (reclen > buf->count)
57069 return -EINVAL;
57070+
57071+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
57072+ return 0;
57073+
57074 dirent = buf->previous;
57075
57076 if (dirent) {
57077@@ -1052,6 +1069,7 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
57078 if (!f.file)
57079 return -EBADF;
57080
57081+ buf.file = f.file;
57082 error = iterate_dir(f.file, &buf.ctx);
57083 if (error >= 0)
57084 error = buf.error;
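The fs/compat.c hunks all follow one pattern: each getdents callback context gains a struct file *, and the per-entry callback asks gr_acl_handle_filldir() whether the entry may be shown, returning 0 (skip, but keep iterating) when policy hides it. A self-contained sketch of that filter shape — policy_allows() and the context layout are stand-ins, not the grsecurity API:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct dir_ctx {
	const char *hidden_prefix; /* stand-in for the RBAC subject state */
};

static bool policy_allows(const struct dir_ctx *ctx, const char *name)
{
	return strncmp(name, ctx->hidden_prefix,
		       strlen(ctx->hidden_prefix)) != 0;
}

/* Returning 0 without emitting the entry hides it while letting the
 * directory walk continue -- the same contract as the early return
 * added before each copy-out sequence above. */
static int filldir(struct dir_ctx *ctx, const char *name)
{
	if (!policy_allows(ctx, name))
		return 0;          /* skip silently */
	printf("%s\n", name);      /* stand-in for copying to the user buffer */
	return 0;
}

int main(void)
{
	struct dir_ctx ctx = { .hidden_prefix = "secret" };
	const char *entries[] = { "a.txt", "secret.key", "b.txt" };
	for (size_t i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
		filldir(&ctx, entries[i]);
	return 0;
}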
57085diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
57086index a81147e..20bf2b5 100644
57087--- a/fs/compat_binfmt_elf.c
57088+++ b/fs/compat_binfmt_elf.c
57089@@ -30,11 +30,13 @@
57090 #undef elf_phdr
57091 #undef elf_shdr
57092 #undef elf_note
57093+#undef elf_dyn
57094 #undef elf_addr_t
57095 #define elfhdr elf32_hdr
57096 #define elf_phdr elf32_phdr
57097 #define elf_shdr elf32_shdr
57098 #define elf_note elf32_note
57099+#define elf_dyn Elf32_Dyn
57100 #define elf_addr_t Elf32_Addr
57101
57102 /*
57103diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
57104index dc52e13..ec61057 100644
57105--- a/fs/compat_ioctl.c
57106+++ b/fs/compat_ioctl.c
57107@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
57108 return -EFAULT;
57109 if (__get_user(udata, &ss32->iomem_base))
57110 return -EFAULT;
57111- ss.iomem_base = compat_ptr(udata);
57112+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
57113 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
57114 __get_user(ss.port_high, &ss32->port_high))
57115 return -EFAULT;
57116@@ -702,8 +702,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
57117 for (i = 0; i < nmsgs; i++) {
57118 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
57119 return -EFAULT;
57120- if (get_user(datap, &umsgs[i].buf) ||
57121- put_user(compat_ptr(datap), &tmsgs[i].buf))
57122+ if (get_user(datap, (u8 __user * __user *)&umsgs[i].buf) ||
57123+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
57124 return -EFAULT;
57125 }
57126 return sys_ioctl(fd, cmd, (unsigned long)tdata);
57127@@ -796,7 +796,7 @@ static int compat_ioctl_preallocate(struct file *file,
57128 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
57129 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
57130 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
57131- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
57132+ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
57133 return -EFAULT;
57134
57135 return ioctl_preallocate(file, p);
57136@@ -1616,8 +1616,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
57137 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
57138 {
57139 unsigned int a, b;
57140- a = *(unsigned int *)p;
57141- b = *(unsigned int *)q;
57142+ a = *(const unsigned int *)p;
57143+ b = *(const unsigned int *)q;
57144 if (a > b)
57145 return 1;
57146 if (a < b)
57147diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
57148index e081acb..911df21 100644
57149--- a/fs/configfs/dir.c
57150+++ b/fs/configfs/dir.c
57151@@ -1548,7 +1548,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
57152 }
57153 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
57154 struct configfs_dirent *next;
57155- const char *name;
57156+ const unsigned char * name;
57157+ char d_name[sizeof(next->s_dentry->d_iname)];
57158 int len;
57159 struct inode *inode = NULL;
57160
57161@@ -1557,7 +1558,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
57162 continue;
57163
57164 name = configfs_get_name(next);
57165- len = strlen(name);
57166+ if (next->s_dentry && name == next->s_dentry->d_iname) {
57167+ len = next->s_dentry->d_name.len;
57168+ memcpy(d_name, name, len);
57169+ name = d_name;
57170+ } else
57171+ len = strlen(name);
57172
57173 /*
57174 * We'll have a dentry and an inode for
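The configfs_readdir hunk stops calling strlen() directly on a dentry's inline d_iname: it takes the recorded d_name.len and memcpy()s the bytes into a local buffer first, so a concurrent rename rewriting the inline name can at worst yield a stale name rather than an over-read. A user-space sketch of the snapshot-before-use idiom, with a simplified dentry stand-in:

#include <stdio.h>
#include <string.h>

#define DNAME_INLINE_LEN 32

struct dentry_like {
	unsigned len;                 /* kept in sync with the name */
	char iname[DNAME_INLINE_LEN]; /* may be rewritten by a rename */
};

/* Snapshot the inline name using the recorded length instead of
 * strlen(), so a racing writer yields at worst a stale-but-bounded
 * name, never an over-read past the inline buffer. */
static void emit_name(const struct dentry_like *d)
{
	char copy[DNAME_INLINE_LEN];
	unsigned len = d->len < DNAME_INLINE_LEN ? d->len : DNAME_INLINE_LEN - 1;

	memcpy(copy, d->iname, len);
	copy[len] = '\0';
	printf("%s\n", copy);
}

int main(void)
{
	struct dentry_like d = { .len = 5 };
	memcpy(d.iname, "hello", 5);
	emit_name(&d);
	return 0;
}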
57175diff --git a/fs/coredump.c b/fs/coredump.c
57176index bc3fbcd..6031650 100644
57177--- a/fs/coredump.c
57178+++ b/fs/coredump.c
57179@@ -438,8 +438,8 @@ static void wait_for_dump_helpers(struct file *file)
57180 struct pipe_inode_info *pipe = file->private_data;
57181
57182 pipe_lock(pipe);
57183- pipe->readers++;
57184- pipe->writers--;
57185+ atomic_inc(&pipe->readers);
57186+ atomic_dec(&pipe->writers);
57187 wake_up_interruptible_sync(&pipe->wait);
57188 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
57189 pipe_unlock(pipe);
57190@@ -448,11 +448,11 @@ static void wait_for_dump_helpers(struct file *file)
57191 * We actually want wait_event_freezable() but then we need
57192 * to clear TIF_SIGPENDING and improve dump_interrupted().
57193 */
57194- wait_event_interruptible(pipe->wait, pipe->readers == 1);
57195+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
57196
57197 pipe_lock(pipe);
57198- pipe->readers--;
57199- pipe->writers++;
57200+ atomic_dec(&pipe->readers);
57201+ atomic_inc(&pipe->writers);
57202 pipe_unlock(pipe);
57203 }
57204
57205@@ -499,7 +499,9 @@ void do_coredump(const siginfo_t *siginfo)
57206 struct files_struct *displaced;
57207 bool need_nonrelative = false;
57208 bool core_dumped = false;
57209- static atomic_t core_dump_count = ATOMIC_INIT(0);
57210+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
57211+ long signr = siginfo->si_signo;
57212+ int dumpable;
57213 struct coredump_params cprm = {
57214 .siginfo = siginfo,
57215 .regs = signal_pt_regs(),
57216@@ -512,12 +514,17 @@ void do_coredump(const siginfo_t *siginfo)
57217 .mm_flags = mm->flags,
57218 };
57219
57220- audit_core_dumps(siginfo->si_signo);
57221+ audit_core_dumps(signr);
57222+
57223+ dumpable = __get_dumpable(cprm.mm_flags);
57224+
57225+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
57226+ gr_handle_brute_attach(dumpable);
57227
57228 binfmt = mm->binfmt;
57229 if (!binfmt || !binfmt->core_dump)
57230 goto fail;
57231- if (!__get_dumpable(cprm.mm_flags))
57232+ if (!dumpable)
57233 goto fail;
57234
57235 cred = prepare_creds();
57236@@ -536,7 +543,7 @@ void do_coredump(const siginfo_t *siginfo)
57237 need_nonrelative = true;
57238 }
57239
57240- retval = coredump_wait(siginfo->si_signo, &core_state);
57241+ retval = coredump_wait(signr, &core_state);
57242 if (retval < 0)
57243 goto fail_creds;
57244
57245@@ -579,7 +586,7 @@ void do_coredump(const siginfo_t *siginfo)
57246 }
57247 cprm.limit = RLIM_INFINITY;
57248
57249- dump_count = atomic_inc_return(&core_dump_count);
57250+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
57251 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
57252 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
57253 task_tgid_vnr(current), current->comm);
57254@@ -611,6 +618,8 @@ void do_coredump(const siginfo_t *siginfo)
57255 } else {
57256 struct inode *inode;
57257
57258+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
57259+
57260 if (cprm.limit < binfmt->min_coredump)
57261 goto fail_unlock;
57262
57263@@ -669,7 +678,7 @@ close_fail:
57264 filp_close(cprm.file, NULL);
57265 fail_dropcount:
57266 if (ispipe)
57267- atomic_dec(&core_dump_count);
57268+ atomic_dec_unchecked(&core_dump_count);
57269 fail_unlock:
57270 kfree(cn.corename);
57271 coredump_finish(mm, core_dumped);
57272@@ -690,6 +699,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
57273 struct file *file = cprm->file;
57274 loff_t pos = file->f_pos;
57275 ssize_t n;
57276+
57277+ gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
57278 if (cprm->written + nr > cprm->limit)
57279 return 0;
57280 while (nr) {
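In do_coredump() the patch caches __get_dumpable() once, and on crash-type signals (SIGSEGV, SIGBUS, SIGKILL, SIGILL) calls gr_handle_brute_attach(), grsecurity's deterrence against brute-forcing ASLR through repeated crashes; the pipe reader/writer counts and the dump counter become atomic variants as elsewhere. A toy model of the escalating-response idea only — the threshold and the response are invented for illustration; the real handler lives in grsecurity's own sources:

#include <stdio.h>

static unsigned crash_count;

/* Called on each crash-type signal; past a threshold, slow down or
 * refuse respawns so address-space guessing stops being cheap. */
static int handle_brute_attach(void)
{
	if (++crash_count > 4) {
		fprintf(stderr, "crash #%u: throttling further execs\n",
			crash_count);
		return -1; /* caller would delay or deny the next exec */
	}
	return 0;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		if (handle_brute_attach())
			fprintf(stderr, "respawn delayed\n");
	return 0;
}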
57281diff --git a/fs/dcache.c b/fs/dcache.c
57282index fdbe230..ba17c1f 100644
57283--- a/fs/dcache.c
57284+++ b/fs/dcache.c
57285@@ -1495,7 +1495,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
57286 */
57287 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
57288 if (name->len > DNAME_INLINE_LEN-1) {
57289- dname = kmalloc(name->len + 1, GFP_KERNEL);
57290+ dname = kmalloc(round_up(name->len + 1, sizeof(unsigned long)), GFP_KERNEL);
57291 if (!dname) {
57292 kmem_cache_free(dentry_cache, dentry);
57293 return NULL;
57294@@ -3428,7 +3428,8 @@ void __init vfs_caches_init(unsigned long mempages)
57295 mempages -= reserve;
57296
57297 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
57298- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
57299+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
57300+ SLAB_NO_SANITIZE, NULL);
57301
57302 dcache_init();
57303 inode_init();
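The __d_alloc() hunk rounds out-of-line name allocations up to sizeof(unsigned long); plausibly this keeps the kernel's word-at-a-time name accesses inside the allocation under the patch's stricter heap-object size checks (an inference from context, not stated in the hunk). The arithmetic of the round_up() used there, for a power-of-two step:

#include <stdio.h>

/* Power-of-two round_up, matching the arithmetic of the kernel macro
 * used in the hunk above. */
#define ROUND_UP(x, step) (((x) + (step) - 1) & ~((step) - 1))

int main(void)
{
	for (unsigned long len = 5; len <= 9; len++)
		printf("name len %lu -> alloc %lu bytes\n", len,
		       (unsigned long)ROUND_UP(len + 1, sizeof(unsigned long)));
	return 0;
}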
57304diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
57305index 9c0444c..628490c 100644
57306--- a/fs/debugfs/inode.c
57307+++ b/fs/debugfs/inode.c
57308@@ -415,7 +415,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
57309 */
57310 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
57311 {
57312+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
57313+ return __create_file(name, S_IFDIR | S_IRWXU,
57314+#else
57315 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
57316+#endif
57317 parent, NULL, NULL);
57318 }
57319 EXPORT_SYMBOL_GPL(debugfs_create_dir);
57320diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
57321index c36c448..fc96710 100644
57322--- a/fs/ecryptfs/inode.c
57323+++ b/fs/ecryptfs/inode.c
57324@@ -675,7 +675,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
57325 old_fs = get_fs();
57326 set_fs(get_ds());
57327 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
57328- (char __user *)lower_buf,
57329+ (char __force_user *)lower_buf,
57330 PATH_MAX);
57331 set_fs(old_fs);
57332 if (rc < 0)
57333diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
57334index e4141f2..d8263e8 100644
57335--- a/fs/ecryptfs/miscdev.c
57336+++ b/fs/ecryptfs/miscdev.c
57337@@ -304,7 +304,7 @@ check_list:
57338 goto out_unlock_msg_ctx;
57339 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
57340 if (msg_ctx->msg) {
57341- if (copy_to_user(&buf[i], packet_length, packet_length_size))
57342+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
57343 goto out_unlock_msg_ctx;
57344 i += packet_length_size;
57345 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
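The ecryptfs miscdev hunk adds a source-bounds check: the length about to be copied to user space must not exceed the local packet_length buffer it is read from. The shape of that guard, modeled with memcpy standing in for copy_to_user():

#include <stdio.h>
#include <string.h>

/* Reject the claimed length before copying, exactly as the added
 * packet_length_size > sizeof(packet_length) check does above. */
static int send_packet(char *dst, const char src[8], size_t claimed_len)
{
	if (claimed_len > 8)
		return -1;
	memcpy(dst, src, claimed_len);
	return 0;
}

int main(void)
{
	char out[64], pkt[8] = "pkt";
	printf("len 4:  %d\n", send_packet(out, pkt, 4));  /* 0  */
	printf("len 99: %d\n", send_packet(out, pkt, 99)); /* -1 */
	return 0;
}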
57346diff --git a/fs/exec.c b/fs/exec.c
57347index 7ea097f..0158d8a 100644
57348--- a/fs/exec.c
57349+++ b/fs/exec.c
57350@@ -55,8 +55,20 @@
57351 #include <linux/pipe_fs_i.h>
57352 #include <linux/oom.h>
57353 #include <linux/compat.h>
57354+#include <linux/random.h>
57355+#include <linux/seq_file.h>
57356+#include <linux/coredump.h>
57357+#include <linux/mman.h>
57358+
57359+#ifdef CONFIG_PAX_REFCOUNT
57360+#include <linux/kallsyms.h>
57361+#include <linux/kdebug.h>
57362+#endif
57363+
57364+#include <trace/events/fs.h>
57365
57366 #include <asm/uaccess.h>
57367+#include <asm/sections.h>
57368 #include <asm/mmu_context.h>
57369 #include <asm/tlb.h>
57370
57371@@ -66,19 +78,34 @@
57372
57373 #include <trace/events/sched.h>
57374
57375+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
57376+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
57377+{
57378+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
57379+}
57380+#endif
57381+
57382+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
57383+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
57384+EXPORT_SYMBOL(pax_set_initial_flags_func);
57385+#endif
57386+
57387 int suid_dumpable = 0;
57388
57389 static LIST_HEAD(formats);
57390 static DEFINE_RWLOCK(binfmt_lock);
57391
57392+extern int gr_process_kernel_exec_ban(void);
57393+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
57394+
57395 void __register_binfmt(struct linux_binfmt * fmt, int insert)
57396 {
57397 BUG_ON(!fmt);
57398 if (WARN_ON(!fmt->load_binary))
57399 return;
57400 write_lock(&binfmt_lock);
57401- insert ? list_add(&fmt->lh, &formats) :
57402- list_add_tail(&fmt->lh, &formats);
57403+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
57404+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
57405 write_unlock(&binfmt_lock);
57406 }
57407
57408@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
57409 void unregister_binfmt(struct linux_binfmt * fmt)
57410 {
57411 write_lock(&binfmt_lock);
57412- list_del(&fmt->lh);
57413+ pax_list_del((struct list_head *)&fmt->lh);
57414 write_unlock(&binfmt_lock);
57415 }
57416
57417@@ -181,18 +208,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
57418 int write)
57419 {
57420 struct page *page;
57421- int ret;
57422
57423-#ifdef CONFIG_STACK_GROWSUP
57424- if (write) {
57425- ret = expand_downwards(bprm->vma, pos);
57426- if (ret < 0)
57427- return NULL;
57428- }
57429-#endif
57430- ret = get_user_pages(current, bprm->mm, pos,
57431- 1, write, 1, &page, NULL);
57432- if (ret <= 0)
57433+ if (0 > expand_downwards(bprm->vma, pos))
57434+ return NULL;
57435+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
57436 return NULL;
57437
57438 if (write) {
57439@@ -208,6 +227,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
57440 if (size <= ARG_MAX)
57441 return page;
57442
57443+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57444+ // only allow 512KB for argv+env on suid/sgid binaries
57445+ // to prevent easy ASLR exhaustion
57446+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
57447+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
57448+ (size > (512 * 1024))) {
57449+ put_page(page);
57450+ return NULL;
57451+ }
57452+#endif
57453+
57454 /*
57455 * Limit to 1/4-th the stack size for the argv+env strings.
57456 * This ensures that:
57457@@ -267,6 +297,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
57458 vma->vm_end = STACK_TOP_MAX;
57459 vma->vm_start = vma->vm_end - PAGE_SIZE;
57460 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
57461+
57462+#ifdef CONFIG_PAX_SEGMEXEC
57463+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
57464+#endif
57465+
57466 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
57467 INIT_LIST_HEAD(&vma->anon_vma_chain);
57468
57469@@ -277,6 +312,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
57470 mm->stack_vm = mm->total_vm = 1;
57471 up_write(&mm->mmap_sem);
57472 bprm->p = vma->vm_end - sizeof(void *);
57473+
57474+#ifdef CONFIG_PAX_RANDUSTACK
57475+ if (randomize_va_space)
57476+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
57477+#endif
57478+
57479 return 0;
57480 err:
57481 up_write(&mm->mmap_sem);
57482@@ -397,7 +438,7 @@ struct user_arg_ptr {
57483 } ptr;
57484 };
57485
57486-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
57487+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
57488 {
57489 const char __user *native;
57490
57491@@ -406,14 +447,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
57492 compat_uptr_t compat;
57493
57494 if (get_user(compat, argv.ptr.compat + nr))
57495- return ERR_PTR(-EFAULT);
57496+ return (const char __force_user *)ERR_PTR(-EFAULT);
57497
57498 return compat_ptr(compat);
57499 }
57500 #endif
57501
57502 if (get_user(native, argv.ptr.native + nr))
57503- return ERR_PTR(-EFAULT);
57504+ return (const char __force_user *)ERR_PTR(-EFAULT);
57505
57506 return native;
57507 }
57508@@ -432,7 +473,7 @@ static int count(struct user_arg_ptr argv, int max)
57509 if (!p)
57510 break;
57511
57512- if (IS_ERR(p))
57513+ if (IS_ERR((const char __force_kernel *)p))
57514 return -EFAULT;
57515
57516 if (i >= max)
57517@@ -467,7 +508,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
57518
57519 ret = -EFAULT;
57520 str = get_user_arg_ptr(argv, argc);
57521- if (IS_ERR(str))
57522+ if (IS_ERR((const char __force_kernel *)str))
57523 goto out;
57524
57525 len = strnlen_user(str, MAX_ARG_STRLEN);
57526@@ -549,7 +590,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
57527 int r;
57528 mm_segment_t oldfs = get_fs();
57529 struct user_arg_ptr argv = {
57530- .ptr.native = (const char __user *const __user *)__argv,
57531+ .ptr.native = (const char __user * const __force_user *)__argv,
57532 };
57533
57534 set_fs(KERNEL_DS);
57535@@ -584,7 +625,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
57536 unsigned long new_end = old_end - shift;
57537 struct mmu_gather tlb;
57538
57539- BUG_ON(new_start > new_end);
57540+ if (new_start >= new_end || new_start < mmap_min_addr)
57541+ return -ENOMEM;
57542
57543 /*
57544 * ensure there are no vmas between where we want to go
57545@@ -593,6 +635,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
57546 if (vma != find_vma(mm, new_start))
57547 return -EFAULT;
57548
57549+#ifdef CONFIG_PAX_SEGMEXEC
57550+ BUG_ON(pax_find_mirror_vma(vma));
57551+#endif
57552+
57553 /*
57554 * cover the whole range: [new_start, old_end)
57555 */
57556@@ -673,10 +719,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
57557 stack_top = arch_align_stack(stack_top);
57558 stack_top = PAGE_ALIGN(stack_top);
57559
57560- if (unlikely(stack_top < mmap_min_addr) ||
57561- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
57562- return -ENOMEM;
57563-
57564 stack_shift = vma->vm_end - stack_top;
57565
57566 bprm->p -= stack_shift;
57567@@ -688,8 +730,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
57568 bprm->exec -= stack_shift;
57569
57570 down_write(&mm->mmap_sem);
57571+
57572+ /* Move stack pages down in memory. */
57573+ if (stack_shift) {
57574+ ret = shift_arg_pages(vma, stack_shift);
57575+ if (ret)
57576+ goto out_unlock;
57577+ }
57578+
57579 vm_flags = VM_STACK_FLAGS;
57580
57581+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
57582+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
57583+ vm_flags &= ~VM_EXEC;
57584+
57585+#ifdef CONFIG_PAX_MPROTECT
57586+ if (mm->pax_flags & MF_PAX_MPROTECT)
57587+ vm_flags &= ~VM_MAYEXEC;
57588+#endif
57589+
57590+ }
57591+#endif
57592+
57593 /*
57594 * Adjust stack execute permissions; explicitly enable for
57595 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
57596@@ -708,13 +770,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
57597 goto out_unlock;
57598 BUG_ON(prev != vma);
57599
57600- /* Move stack pages down in memory. */
57601- if (stack_shift) {
57602- ret = shift_arg_pages(vma, stack_shift);
57603- if (ret)
57604- goto out_unlock;
57605- }
57606-
57607 /* mprotect_fixup is overkill to remove the temporary stack flags */
57608 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
57609
57610@@ -738,6 +793,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
57611 #endif
57612 current->mm->start_stack = bprm->p;
57613 ret = expand_stack(vma, stack_base);
57614+
57615+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
57616+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
57617+ unsigned long size;
57618+ vm_flags_t vm_flags;
57619+
57620+ size = STACK_TOP - vma->vm_end;
57621+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
57622+
57623+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
57624+
57625+#ifdef CONFIG_X86
57626+ if (!ret) {
57627+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
57628+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
57629+ }
57630+#endif
57631+
57632+ }
57633+#endif
57634+
57635 if (ret)
57636 ret = -EFAULT;
57637
57638@@ -774,6 +850,8 @@ struct file *open_exec(const char *name)
57639
57640 fsnotify_open(file);
57641
57642+ trace_open_exec(name);
57643+
57644 err = deny_write_access(file);
57645 if (err)
57646 goto exit;
57647@@ -797,7 +875,7 @@ int kernel_read(struct file *file, loff_t offset,
57648 old_fs = get_fs();
57649 set_fs(get_ds());
57650 /* The cast to a user pointer is valid due to the set_fs() */
57651- result = vfs_read(file, (void __user *)addr, count, &pos);
57652+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
57653 set_fs(old_fs);
57654 return result;
57655 }
57656@@ -1253,7 +1331,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
57657 }
57658 rcu_read_unlock();
57659
57660- if (p->fs->users > n_fs) {
57661+ if (atomic_read(&p->fs->users) > n_fs) {
57662 bprm->unsafe |= LSM_UNSAFE_SHARE;
57663 } else {
57664 res = -EAGAIN;
57665@@ -1443,6 +1521,31 @@ static int exec_binprm(struct linux_binprm *bprm)
57666 return ret;
57667 }
57668
57669+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57670+static DEFINE_PER_CPU(u64, exec_counter);
57671+static int __init init_exec_counters(void)
57672+{
57673+ unsigned int cpu;
57674+
57675+ for_each_possible_cpu(cpu) {
57676+ per_cpu(exec_counter, cpu) = (u64)cpu;
57677+ }
57678+
57679+ return 0;
57680+}
57681+early_initcall(init_exec_counters);
57682+static inline void increment_exec_counter(void)
57683+{
57684+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
57685+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
57686+}
57687+#else
57688+static inline void increment_exec_counter(void) {}
57689+#endif
57690+
57691+extern void gr_handle_exec_args(struct linux_binprm *bprm,
57692+ struct user_arg_ptr argv);
57693+
57694 /*
57695 * sys_execve() executes a new program.
57696 */
57697@@ -1450,12 +1553,19 @@ static int do_execve_common(const char *filename,
57698 struct user_arg_ptr argv,
57699 struct user_arg_ptr envp)
57700 {
57701+#ifdef CONFIG_GRKERNSEC
57702+ struct file *old_exec_file;
57703+ struct acl_subject_label *old_acl;
57704+ struct rlimit old_rlim[RLIM_NLIMITS];
57705+#endif
57706 struct linux_binprm *bprm;
57707 struct file *file;
57708 struct files_struct *displaced;
57709 bool clear_in_exec;
57710 int retval;
57711
57712+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
57713+
57714 /*
57715 * We move the actual failure in case of RLIMIT_NPROC excess from
57716 * set*uid() to execve() because too many poorly written programs
57717@@ -1496,12 +1606,22 @@ static int do_execve_common(const char *filename,
57718 if (IS_ERR(file))
57719 goto out_unmark;
57720
57721+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
57722+ retval = -EPERM;
57723+ goto out_file;
57724+ }
57725+
57726 sched_exec();
57727
57728 bprm->file = file;
57729 bprm->filename = filename;
57730 bprm->interp = filename;
57731
57732+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
57733+ retval = -EACCES;
57734+ goto out_file;
57735+ }
57736+
57737 retval = bprm_mm_init(bprm);
57738 if (retval)
57739 goto out_file;
57740@@ -1518,24 +1638,70 @@ static int do_execve_common(const char *filename,
57741 if (retval < 0)
57742 goto out;
57743
57744+#ifdef CONFIG_GRKERNSEC
57745+ old_acl = current->acl;
57746+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
57747+ old_exec_file = current->exec_file;
57748+ get_file(file);
57749+ current->exec_file = file;
57750+#endif
57751+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57752+ /* limit suid stack to 8MB
57753+ * we saved the old limits above and will restore them if this exec fails
57754+ */
57755+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
57756+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
57757+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
57758+#endif
57759+
57760+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
57761+ retval = -EPERM;
57762+ goto out_fail;
57763+ }
57764+
57765+ if (!gr_tpe_allow(file)) {
57766+ retval = -EACCES;
57767+ goto out_fail;
57768+ }
57769+
57770+ if (gr_check_crash_exec(file)) {
57771+ retval = -EACCES;
57772+ goto out_fail;
57773+ }
57774+
57775+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
57776+ bprm->unsafe);
57777+ if (retval < 0)
57778+ goto out_fail;
57779+
57780 retval = copy_strings_kernel(1, &bprm->filename, bprm);
57781 if (retval < 0)
57782- goto out;
57783+ goto out_fail;
57784
57785 bprm->exec = bprm->p;
57786 retval = copy_strings(bprm->envc, envp, bprm);
57787 if (retval < 0)
57788- goto out;
57789+ goto out_fail;
57790
57791 retval = copy_strings(bprm->argc, argv, bprm);
57792 if (retval < 0)
57793- goto out;
57794+ goto out_fail;
57795+
57796+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
57797+
57798+ gr_handle_exec_args(bprm, argv);
57799
57800 retval = exec_binprm(bprm);
57801 if (retval < 0)
57802- goto out;
57803+ goto out_fail;
57804+#ifdef CONFIG_GRKERNSEC
57805+ if (old_exec_file)
57806+ fput(old_exec_file);
57807+#endif
57808
57809 /* execve succeeded */
57810+
57811+ increment_exec_counter();
57812 current->fs->in_exec = 0;
57813 current->in_execve = 0;
57814 acct_update_integrals(current);
57815@@ -1545,6 +1711,14 @@ static int do_execve_common(const char *filename,
57816 put_files_struct(displaced);
57817 return retval;
57818
57819+out_fail:
57820+#ifdef CONFIG_GRKERNSEC
57821+ current->acl = old_acl;
57822+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
57823+ fput(current->exec_file);
57824+ current->exec_file = old_exec_file;
57825+#endif
57826+
57827 out:
57828 if (bprm->mm) {
57829 acct_arg_size(bprm, 0);
57830@@ -1699,3 +1873,295 @@ asmlinkage long compat_sys_execve(const char __user * filename,
57831 return error;
57832 }
57833 #endif
57834+
57835+int pax_check_flags(unsigned long *flags)
57836+{
57837+ int retval = 0;
57838+
57839+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
57840+ if (*flags & MF_PAX_SEGMEXEC)
57841+ {
57842+ *flags &= ~MF_PAX_SEGMEXEC;
57843+ retval = -EINVAL;
57844+ }
57845+#endif
57846+
57847+ if ((*flags & MF_PAX_PAGEEXEC)
57848+
57849+#ifdef CONFIG_PAX_PAGEEXEC
57850+ && (*flags & MF_PAX_SEGMEXEC)
57851+#endif
57852+
57853+ )
57854+ {
57855+ *flags &= ~MF_PAX_PAGEEXEC;
57856+ retval = -EINVAL;
57857+ }
57858+
57859+ if ((*flags & MF_PAX_MPROTECT)
57860+
57861+#ifdef CONFIG_PAX_MPROTECT
57862+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
57863+#endif
57864+
57865+ )
57866+ {
57867+ *flags &= ~MF_PAX_MPROTECT;
57868+ retval = -EINVAL;
57869+ }
57870+
57871+ if ((*flags & MF_PAX_EMUTRAMP)
57872+
57873+#ifdef CONFIG_PAX_EMUTRAMP
57874+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
57875+#endif
57876+
57877+ )
57878+ {
57879+ *flags &= ~MF_PAX_EMUTRAMP;
57880+ retval = -EINVAL;
57881+ }
57882+
57883+ return retval;
57884+}
57885+
57886+EXPORT_SYMBOL(pax_check_flags);
57887+
57888+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
57889+char *pax_get_path(const struct path *path, char *buf, int buflen)
57890+{
57891+ char *pathname = d_path(path, buf, buflen);
57892+
57893+ if (IS_ERR(pathname))
57894+ goto toolong;
57895+
57896+ pathname = mangle_path(buf, pathname, "\t\n\\");
57897+ if (!pathname)
57898+ goto toolong;
57899+
57900+ *pathname = 0;
57901+ return buf;
57902+
57903+toolong:
57904+ return "<path too long>";
57905+}
57906+EXPORT_SYMBOL(pax_get_path);
57907+
57908+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
57909+{
57910+ struct task_struct *tsk = current;
57911+ struct mm_struct *mm = current->mm;
57912+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
57913+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
57914+ char *path_exec = NULL;
57915+ char *path_fault = NULL;
57916+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
57917+ siginfo_t info = { };
57918+
57919+ if (buffer_exec && buffer_fault) {
57920+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
57921+
57922+ down_read(&mm->mmap_sem);
57923+ vma = mm->mmap;
57924+ while (vma && (!vma_exec || !vma_fault)) {
57925+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
57926+ vma_exec = vma;
57927+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
57928+ vma_fault = vma;
57929+ vma = vma->vm_next;
57930+ }
57931+ if (vma_exec)
57932+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
57933+ if (vma_fault) {
57934+ start = vma_fault->vm_start;
57935+ end = vma_fault->vm_end;
57936+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
57937+ if (vma_fault->vm_file)
57938+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
57939+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
57940+ path_fault = "<heap>";
57941+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
57942+ path_fault = "<stack>";
57943+ else
57944+ path_fault = "<anonymous mapping>";
57945+ }
57946+ up_read(&mm->mmap_sem);
57947+ }
57948+ if (tsk->signal->curr_ip)
57949+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
57950+ else
57951+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
57952+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
57953+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
57954+ free_page((unsigned long)buffer_exec);
57955+ free_page((unsigned long)buffer_fault);
57956+ pax_report_insns(regs, pc, sp);
57957+ info.si_signo = SIGKILL;
57958+ info.si_errno = 0;
57959+ info.si_code = SI_KERNEL;
57960+ info.si_pid = 0;
57961+ info.si_uid = 0;
57962+ do_coredump(&info);
57963+}
57964+#endif
57965+
57966+#ifdef CONFIG_PAX_REFCOUNT
57967+void pax_report_refcount_overflow(struct pt_regs *regs)
57968+{
57969+ if (current->signal->curr_ip)
57970+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
57971+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
57972+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
57973+ else
57974+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
57975+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
57976+	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
57977+ preempt_disable();
57978+ show_regs(regs);
57979+ preempt_enable();
57980+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
57981+}
57982+#endif
57983+
57984+#ifdef CONFIG_PAX_USERCOPY
57985+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
57986+static noinline int check_stack_object(const void *obj, unsigned long len)
57987+{
57988+ const void * const stack = task_stack_page(current);
57989+ const void * const stackend = stack + THREAD_SIZE;
57990+
57991+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
57992+ const void *frame = NULL;
57993+ const void *oldframe;
57994+#endif
57995+
57996+ if (obj + len < obj)
57997+ return -1;
57998+
57999+ if (obj + len <= stack || stackend <= obj)
58000+ return 0;
58001+
58002+ if (obj < stack || stackend < obj + len)
58003+ return -1;
58004+
58005+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
58006+ oldframe = __builtin_frame_address(1);
58007+ if (oldframe)
58008+ frame = __builtin_frame_address(2);
58009+ /*
58010+ low ----------------------------------------------> high
58011+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
58012+ ^----------------^
58013+ allow copies only within here
58014+ */
58015+ while (stack <= frame && frame < stackend) {
58016+ /* if obj + len extends past the last frame, this
58017+ check won't pass and the next frame will be 0,
58018+ causing us to bail out and correctly report
58019+ the copy as invalid
58020+ */
58021+ if (obj + len <= frame)
58022+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
58023+ oldframe = frame;
58024+ frame = *(const void * const *)frame;
58025+ }
58026+ return -1;
58027+#else
58028+ return 1;
58029+#endif
58030+}
58031+
58032+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
58033+{
58034+ if (current->signal->curr_ip)
58035+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
58036+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
58037+ else
58038+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
58039+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
58040+ dump_stack();
58041+ gr_handle_kernel_exploit();
58042+ do_group_exit(SIGKILL);
58043+}
58044+#endif
58045+
58046+#ifdef CONFIG_PAX_USERCOPY
58047+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
58048+{
58049+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
58050+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
58051+#ifdef CONFIG_MODULES
58052+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
58053+#else
58054+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
58055+#endif
58056+
58057+#else
58058+ unsigned long textlow = (unsigned long)_stext;
58059+ unsigned long texthigh = (unsigned long)_etext;
58060+
58061+#ifdef CONFIG_X86_64
58062+ /* check against linear mapping as well */
58063+ if (high > (unsigned long)__va(__pa(textlow)) &&
58064+ low <= (unsigned long)__va(__pa(texthigh)))
58065+ return true;
58066+#endif
58067+
58068+#endif
58069+
58070+ if (high <= textlow || low > texthigh)
58071+ return false;
58072+ else
58073+ return true;
58074+}
58075+#endif
58076+
58077+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
58078+{
58079+
58080+#ifdef CONFIG_PAX_USERCOPY
58081+ const char *type;
58082+
58083+ if (!n)
58084+ return;
58085+
58086+ type = check_heap_object(ptr, n);
58087+ if (!type) {
58088+ int ret = check_stack_object(ptr, n);
58089+ if (ret == 1 || ret == 2)
58090+ return;
58091+ if (ret == 0) {
58092+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
58093+ type = "<kernel text>";
58094+ else
58095+ return;
58096+ } else
58097+ type = "<process stack>";
58098+ }
58099+
58100+ pax_report_usercopy(ptr, n, to_user, type);
58101+#endif
58102+
58103+}
58104+EXPORT_SYMBOL(__check_object_size);
58105+
58106+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
58107+void pax_track_stack(void)
58108+{
58109+ unsigned long sp = (unsigned long)&sp;
58110+ if (sp < current_thread_info()->lowest_stack &&
58111+ sp > (unsigned long)task_stack_page(current))
58112+ current_thread_info()->lowest_stack = sp;
58113+}
58114+EXPORT_SYMBOL(pax_track_stack);
58115+#endif
58116+
58117+#ifdef CONFIG_PAX_SIZE_OVERFLOW
58118+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
58119+{
58120+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
58121+ dump_stack();
58122+ do_group_exit(SIGKILL);
58123+}
58124+EXPORT_SYMBOL(report_size_overflow);
58125+#endif
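check_stack_object() above classifies a candidate usercopy span against the current task's stack: 0 if entirely elsewhere, 1 if fully inside, -1 if it wraps or straddles a boundary (with an optional frame-pointer walk on x86 confining copies to the live frames). The same interval logic, extracted into a compilable user-space test:

#include <stdint.h>
#include <stdio.h>

/* 0: not a stack object, 1: fully inside, -1: wraps or straddles --
 * mirroring the contract of check_stack_object() in the hunk above. */
static int check_stack_object(uintptr_t stack, uintptr_t stackend,
			      uintptr_t obj, unsigned long len)
{
	if (obj + len < obj)
		return -1;                        /* arithmetic wrap: reject */
	if (obj + len <= stack || stackend <= obj)
		return 0;                         /* entirely elsewhere */
	if (obj < stack || stackend < obj + len)
		return -1;                        /* straddles the boundary */
	return 1;
}

int main(void)
{
	char buf[256];
	uintptr_t s = (uintptr_t)buf, e = s + sizeof(buf);

	printf("inside:    %d\n", check_stack_object(s, e, s + 16, 32));  /* 1  */
	printf("straddles: %d\n", check_stack_object(s, e, s + 240, 32)); /* -1 */
	printf("elsewhere: %d\n", check_stack_object(s, e, e + 64, 8));   /* 0  */
	return 0;
}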
58126diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
58127index 9f9992b..8b59411 100644
58128--- a/fs/ext2/balloc.c
58129+++ b/fs/ext2/balloc.c
58130@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
58131
58132 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
58133 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
58134- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
58135+ if (free_blocks < root_blocks + 1 &&
58136 !uid_eq(sbi->s_resuid, current_fsuid()) &&
58137 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
58138- !in_group_p (sbi->s_resgid))) {
58139+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
58140 return 0;
58141 }
58142 return 1;
58143diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
58144index 2d7557d..14e38f94 100644
58145--- a/fs/ext2/xattr.c
58146+++ b/fs/ext2/xattr.c
58147@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
58148 struct buffer_head *bh = NULL;
58149 struct ext2_xattr_entry *entry;
58150 char *end;
58151- size_t rest = buffer_size;
58152+ size_t rest = buffer_size, total_size = 0;
58153 int error;
58154
58155 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
58156@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
58157 buffer += size;
58158 }
58159 rest -= size;
58160+ total_size += size;
58161 }
58162 }
58163- error = buffer_size - rest; /* total size */
58164+ error = total_size;
58165
58166 cleanup:
58167 brelse(bh);
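The ext2 xattr list hunk (and the matching ext3/ext4 hunks below) stops deriving the return value from buffer_size - rest and keeps an explicit running total instead: in the size-query path the unsigned rest counter wraps below zero, and while modular arithmetic happens to recover the total, the explicit counter avoids the wraparound entirely — which the patch's size-overflow instrumentation would otherwise flag. A simplified model of the fixed approach:

#include <stdio.h>
#include <string.h>

/* Returns the total bytes needed; with buffer == NULL the caller is
 * only sizing, so the answer comes from an explicit running total
 * rather than buffer_size - rest. */
static size_t list_names(char *buffer, size_t buffer_size,
			 const char *const *names, size_t n)
{
	size_t rest = buffer_size, total_size = 0;

	for (size_t i = 0; i < n; i++) {
		size_t size = strlen(names[i]) + 1;

		if (buffer) {
			if (size > rest)
				return (size_t)-1; /* the kernel returns -ERANGE */
			memcpy(buffer, names[i], size);
			buffer += size;
			rest -= size;
		}
		total_size += size;                /* the fix: count explicitly */
	}
	return total_size;
}

int main(void)
{
	const char *names[] = { "user.a", "user.b" };
	printf("need %zu bytes\n", list_names(NULL, 0, names, 2));
	return 0;
}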
58168diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
58169index 22548f5..41521d8 100644
58170--- a/fs/ext3/balloc.c
58171+++ b/fs/ext3/balloc.c
58172@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
58173
58174 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
58175 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
58176- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
58177+ if (free_blocks < root_blocks + 1 &&
58178 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
58179 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
58180- !in_group_p (sbi->s_resgid))) {
58181+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
58182 return 0;
58183 }
58184 return 1;
58185diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
58186index b1fc963..881228c 100644
58187--- a/fs/ext3/xattr.c
58188+++ b/fs/ext3/xattr.c
58189@@ -330,7 +330,7 @@ static int
58190 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
58191 char *buffer, size_t buffer_size)
58192 {
58193- size_t rest = buffer_size;
58194+ size_t rest = buffer_size, total_size = 0;
58195
58196 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
58197 const struct xattr_handler *handler =
58198@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
58199 buffer += size;
58200 }
58201 rest -= size;
58202+ total_size += size;
58203 }
58204 }
58205- return buffer_size - rest;
58206+ return total_size;
58207 }
58208
58209 static int
58210diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
58211index 6ea7b14..8fa16d9 100644
58212--- a/fs/ext4/balloc.c
58213+++ b/fs/ext4/balloc.c
58214@@ -534,8 +534,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
58215 /* Hm, nope. Are (enough) root reserved clusters available? */
58216 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
58217 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
58218- capable(CAP_SYS_RESOURCE) ||
58219- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
58220+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
58221+ capable_nolog(CAP_SYS_RESOURCE)) {
58222
58223 if (free_clusters >= (nclusters + dirty_clusters +
58224 resv_clusters))
58225diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
58226index ece5556..e39d3a8 100644
58227--- a/fs/ext4/ext4.h
58228+++ b/fs/ext4/ext4.h
58229@@ -1267,19 +1267,19 @@ struct ext4_sb_info {
58230 unsigned long s_mb_last_start;
58231
58232 /* stats for buddy allocator */
58233- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
58234- atomic_t s_bal_success; /* we found long enough chunks */
58235- atomic_t s_bal_allocated; /* in blocks */
58236- atomic_t s_bal_ex_scanned; /* total extents scanned */
58237- atomic_t s_bal_goals; /* goal hits */
58238- atomic_t s_bal_breaks; /* too long searches */
58239- atomic_t s_bal_2orders; /* 2^order hits */
58240+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
58241+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
58242+ atomic_unchecked_t s_bal_allocated; /* in blocks */
58243+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
58244+ atomic_unchecked_t s_bal_goals; /* goal hits */
58245+ atomic_unchecked_t s_bal_breaks; /* too long searches */
58246+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
58247 spinlock_t s_bal_lock;
58248 unsigned long s_mb_buddies_generated;
58249 unsigned long long s_mb_generation_time;
58250- atomic_t s_mb_lost_chunks;
58251- atomic_t s_mb_preallocated;
58252- atomic_t s_mb_discarded;
58253+ atomic_unchecked_t s_mb_lost_chunks;
58254+ atomic_unchecked_t s_mb_preallocated;
58255+ atomic_unchecked_t s_mb_discarded;
58256 atomic_t s_lock_busy;
58257
58258 /* locality groups */
58259diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
58260index 04a5c75..09894fa 100644
58261--- a/fs/ext4/mballoc.c
58262+++ b/fs/ext4/mballoc.c
58263@@ -1880,7 +1880,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
58264 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
58265
58266 if (EXT4_SB(sb)->s_mb_stats)
58267- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
58268+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
58269
58270 break;
58271 }
58272@@ -2189,7 +2189,7 @@ repeat:
58273 ac->ac_status = AC_STATUS_CONTINUE;
58274 ac->ac_flags |= EXT4_MB_HINT_FIRST;
58275 cr = 3;
58276- atomic_inc(&sbi->s_mb_lost_chunks);
58277+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
58278 goto repeat;
58279 }
58280 }
58281@@ -2697,25 +2697,25 @@ int ext4_mb_release(struct super_block *sb)
58282 if (sbi->s_mb_stats) {
58283 ext4_msg(sb, KERN_INFO,
58284 "mballoc: %u blocks %u reqs (%u success)",
58285- atomic_read(&sbi->s_bal_allocated),
58286- atomic_read(&sbi->s_bal_reqs),
58287- atomic_read(&sbi->s_bal_success));
58288+ atomic_read_unchecked(&sbi->s_bal_allocated),
58289+ atomic_read_unchecked(&sbi->s_bal_reqs),
58290+ atomic_read_unchecked(&sbi->s_bal_success));
58291 ext4_msg(sb, KERN_INFO,
58292 "mballoc: %u extents scanned, %u goal hits, "
58293 "%u 2^N hits, %u breaks, %u lost",
58294- atomic_read(&sbi->s_bal_ex_scanned),
58295- atomic_read(&sbi->s_bal_goals),
58296- atomic_read(&sbi->s_bal_2orders),
58297- atomic_read(&sbi->s_bal_breaks),
58298- atomic_read(&sbi->s_mb_lost_chunks));
58299+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
58300+ atomic_read_unchecked(&sbi->s_bal_goals),
58301+ atomic_read_unchecked(&sbi->s_bal_2orders),
58302+ atomic_read_unchecked(&sbi->s_bal_breaks),
58303+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
58304 ext4_msg(sb, KERN_INFO,
58305 "mballoc: %lu generated and it took %Lu",
58306 sbi->s_mb_buddies_generated,
58307 sbi->s_mb_generation_time);
58308 ext4_msg(sb, KERN_INFO,
58309 "mballoc: %u preallocated, %u discarded",
58310- atomic_read(&sbi->s_mb_preallocated),
58311- atomic_read(&sbi->s_mb_discarded));
58312+ atomic_read_unchecked(&sbi->s_mb_preallocated),
58313+ atomic_read_unchecked(&sbi->s_mb_discarded));
58314 }
58315
58316 free_percpu(sbi->s_locality_groups);
58317@@ -3169,16 +3169,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
58318 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
58319
58320 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
58321- atomic_inc(&sbi->s_bal_reqs);
58322- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
58323+ atomic_inc_unchecked(&sbi->s_bal_reqs);
58324+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
58325 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
58326- atomic_inc(&sbi->s_bal_success);
58327- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
58328+ atomic_inc_unchecked(&sbi->s_bal_success);
58329+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
58330 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
58331 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
58332- atomic_inc(&sbi->s_bal_goals);
58333+ atomic_inc_unchecked(&sbi->s_bal_goals);
58334 if (ac->ac_found > sbi->s_mb_max_to_scan)
58335- atomic_inc(&sbi->s_bal_breaks);
58336+ atomic_inc_unchecked(&sbi->s_bal_breaks);
58337 }
58338
58339 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
58340@@ -3583,7 +3583,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
58341 trace_ext4_mb_new_inode_pa(ac, pa);
58342
58343 ext4_mb_use_inode_pa(ac, pa);
58344- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
58345+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
58346
58347 ei = EXT4_I(ac->ac_inode);
58348 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
58349@@ -3643,7 +3643,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
58350 trace_ext4_mb_new_group_pa(ac, pa);
58351
58352 ext4_mb_use_group_pa(ac, pa);
58353- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
58354+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
58355
58356 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
58357 lg = ac->ac_lg;
58358@@ -3732,7 +3732,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
58359 * from the bitmap and continue.
58360 */
58361 }
58362- atomic_add(free, &sbi->s_mb_discarded);
58363+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
58364
58365 return err;
58366 }
58367@@ -3750,7 +3750,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
58368 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
58369 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
58370 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
58371- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
58372+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
58373 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
58374
58375 return 0;
58376diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
58377index 04434ad..6404663 100644
58378--- a/fs/ext4/mmp.c
58379+++ b/fs/ext4/mmp.c
58380@@ -113,7 +113,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
58381 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
58382 const char *function, unsigned int line, const char *msg)
58383 {
58384- __ext4_warning(sb, function, line, msg);
58385+ __ext4_warning(sb, function, line, "%s", msg);
58386 __ext4_warning(sb, function, line,
58387 "MMP failure info: last update time: %llu, last update "
58388 "node: %s, last update device: %s\n",
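The mmp.c hunk is the classic printf format-string fix: __ext4_warning() takes a printf-style format, so passing the caller-supplied msg directly would interpret any %-sequences inside it; "%s", msg treats it as data. A minimal demonstration, with warn() as a stand-in for __ext4_warning():

#include <stdarg.h>
#include <stdio.h>

__attribute__((format(printf, 1, 2)))
static void warn(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}

int main(void)
{
	const char *msg = "update %n failed"; /* attacker-influenced text */

	/* warn(msg);       BAD: %n and friends would be interpreted */
	warn("%s\n", msg);  /* GOOD: msg is data, not a format */
	return 0;
}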
58389diff --git a/fs/ext4/super.c b/fs/ext4/super.c
58390index 1f7784d..5d8bbad 100644
58391--- a/fs/ext4/super.c
58392+++ b/fs/ext4/super.c
58393@@ -1270,7 +1270,7 @@ static ext4_fsblk_t get_sb_block(void **data)
58394 }
58395
58396 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
58397-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
58398+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
58399 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
58400
58401 #ifdef CONFIG_QUOTA
58402@@ -2450,7 +2450,7 @@ struct ext4_attr {
58403 int offset;
58404 int deprecated_val;
58405 } u;
58406-};
58407+} __do_const;
58408
58409 static int parse_strtoull(const char *buf,
58410 unsigned long long max, unsigned long long *value)
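__do_const marks struct ext4_attr for the PaX constify plugin, which forces such attribute/ops tables into read-only memory so their embedded function pointers cannot be redirected at runtime. The plain-C analogue is simply declaring the table const; a miniature example with an invented ops struct:

#include <stdio.h>

struct ops {
	int (*show)(void);
};

static int show_impl(void) { return 42; }

/* const placement puts the pointer table in .rodata, so an attacker
 * with a kernel write primitive can't retarget ->show. */
static const struct ops ext4_like_ops = { .show = show_impl };

int main(void)
{
	printf("%d\n", ext4_like_ops.show());
	return 0;
}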
58411diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
58412index 1423c48..9c0c6dc 100644
58413--- a/fs/ext4/xattr.c
58414+++ b/fs/ext4/xattr.c
58415@@ -381,7 +381,7 @@ static int
58416 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
58417 char *buffer, size_t buffer_size)
58418 {
58419- size_t rest = buffer_size;
58420+ size_t rest = buffer_size, total_size = 0;
58421
58422 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
58423 const struct xattr_handler *handler =
58424@@ -398,9 +398,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
58425 buffer += size;
58426 }
58427 rest -= size;
58428+ total_size += size;
58429 }
58430 }
58431- return buffer_size - rest;
58432+ return total_size;
58433 }
58434
58435 static int
58436diff --git a/fs/fcntl.c b/fs/fcntl.c
58437index ef68665..5deacdc 100644
58438--- a/fs/fcntl.c
58439+++ b/fs/fcntl.c
58440@@ -106,6 +106,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
58441 if (err)
58442 return err;
58443
58444+ if (gr_handle_chroot_fowner(pid, type))
58445+ return -ENOENT;
58446+ if (gr_check_protected_task_fowner(pid, type))
58447+ return -EACCES;
58448+
58449 f_modown(filp, pid, type, force);
58450 return 0;
58451 }
58452diff --git a/fs/fhandle.c b/fs/fhandle.c
58453index 999ff5c..41f4109 100644
58454--- a/fs/fhandle.c
58455+++ b/fs/fhandle.c
58456@@ -67,8 +67,7 @@ static long do_sys_name_to_handle(struct path *path,
58457 } else
58458 retval = 0;
58459 /* copy the mount id */
58460- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
58461- sizeof(*mnt_id)) ||
58462+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
58463 copy_to_user(ufh, handle,
58464 sizeof(struct file_handle) + handle_bytes))
58465 retval = -EFAULT;
58466diff --git a/fs/file.c b/fs/file.c
58467index 4a78f98..f9a6d25 100644
58468--- a/fs/file.c
58469+++ b/fs/file.c
58470@@ -16,6 +16,7 @@
58471 #include <linux/slab.h>
58472 #include <linux/vmalloc.h>
58473 #include <linux/file.h>
58474+#include <linux/security.h>
58475 #include <linux/fdtable.h>
58476 #include <linux/bitops.h>
58477 #include <linux/interrupt.h>
58478@@ -141,7 +142,7 @@ out:
58479 * Return <0 error code on error; 1 on successful completion.
58480 * The files->file_lock should be held on entry, and will be held on exit.
58481 */
58482-static int expand_fdtable(struct files_struct *files, int nr)
58483+static int expand_fdtable(struct files_struct *files, unsigned int nr)
58484 __releases(files->file_lock)
58485 __acquires(files->file_lock)
58486 {
58487@@ -186,7 +187,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
58488 * expanded and execution may have blocked.
58489 * The files->file_lock should be held on entry, and will be held on exit.
58490 */
58491-static int expand_files(struct files_struct *files, int nr)
58492+static int expand_files(struct files_struct *files, unsigned int nr)
58493 {
58494 struct fdtable *fdt;
58495
58496@@ -828,6 +829,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
58497 if (!file)
58498 return __close_fd(files, fd);
58499
58500+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
58501 if (fd >= rlimit(RLIMIT_NOFILE))
58502 return -EBADF;
58503
58504@@ -854,6 +856,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
58505 if (unlikely(oldfd == newfd))
58506 return -EINVAL;
58507
58508+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
58509 if (newfd >= rlimit(RLIMIT_NOFILE))
58510 return -EBADF;
58511
58512@@ -909,6 +912,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
58513 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
58514 {
58515 int err;
58516+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
58517 if (from >= rlimit(RLIMIT_NOFILE))
58518 return -EINVAL;
58519 err = alloc_fd(from, flags);
58520diff --git a/fs/filesystems.c b/fs/filesystems.c
58521index 92567d9..fcd8cbf 100644
58522--- a/fs/filesystems.c
58523+++ b/fs/filesystems.c
58524@@ -273,7 +273,11 @@ struct file_system_type *get_fs_type(const char *name)
58525 int len = dot ? dot - name : strlen(name);
58526
58527 fs = __get_fs_type(name, len);
58528+#ifdef CONFIG_GRKERNSEC_MODHARDEN
58529+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
58530+#else
58531 if (!fs && (request_module("fs-%.*s", len, name) == 0))
58532+#endif
58533 fs = __get_fs_type(name, len);
58534
58535 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
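
With CONFIG_GRKERNSEC_MODHARDEN, get_fs_type() above routes filesystem module auto-loading through ___request_module(true, "grsec_modharden_fs", ...) so the request is subject to grsecurity's module-hardening policy instead of firing unconditionally on behalf of an unprivileged mount. In both branches the request uses the "fs-" alias prefix, which confines what a filesystem name can pull in. A sketch of the alias expansion, assuming "ext4" as the example type (the kernel formats this inside request_module() itself):

#include <stdio.h>

int main(void)
{
    const char *name = "ext4";   /* invented example fs type */
    int len = 4;                 /* dot-truncated length, as in get_fs_type() */
    char alias[32];

    snprintf(alias, sizeof(alias), "fs-%.*s", len, name);
    printf("%s\n", alias);       /* -> "fs-ext4" */
    return 0;
}
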
58536diff --git a/fs/fs_struct.c b/fs/fs_struct.c
58537index 7dca743..543d620 100644
58538--- a/fs/fs_struct.c
58539+++ b/fs/fs_struct.c
58540@@ -4,6 +4,7 @@
58541 #include <linux/path.h>
58542 #include <linux/slab.h>
58543 #include <linux/fs_struct.h>
58544+#include <linux/grsecurity.h>
58545 #include "internal.h"
58546
58547 /*
58548@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
58549 write_seqcount_begin(&fs->seq);
58550 old_root = fs->root;
58551 fs->root = *path;
58552+ gr_set_chroot_entries(current, path);
58553 write_seqcount_end(&fs->seq);
58554 spin_unlock(&fs->lock);
58555 if (old_root.dentry)
58556@@ -67,6 +69,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
58557 int hits = 0;
58558 spin_lock(&fs->lock);
58559 write_seqcount_begin(&fs->seq);
58560+ /* this root replacement is only done by pivot_root,
58561+ leave grsec's chroot tagging alone for this task
58562+ so that a pivoted root isn't treated as a chroot
58563+ */
58564 hits += replace_path(&fs->root, old_root, new_root);
58565 hits += replace_path(&fs->pwd, old_root, new_root);
58566 write_seqcount_end(&fs->seq);
58567@@ -99,7 +105,8 @@ void exit_fs(struct task_struct *tsk)
58568 task_lock(tsk);
58569 spin_lock(&fs->lock);
58570 tsk->fs = NULL;
58571- kill = !--fs->users;
58572+ gr_clear_chroot_entries(tsk);
58573+ kill = !atomic_dec_return(&fs->users);
58574 spin_unlock(&fs->lock);
58575 task_unlock(tsk);
58576 if (kill)
58577@@ -112,7 +119,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
58578 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
58579 /* We don't need to lock fs - think why ;-) */
58580 if (fs) {
58581- fs->users = 1;
58582+ atomic_set(&fs->users, 1);
58583 fs->in_exec = 0;
58584 spin_lock_init(&fs->lock);
58585 seqcount_init(&fs->seq);
58586@@ -121,6 +128,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
58587 spin_lock(&old->lock);
58588 fs->root = old->root;
58589 path_get(&fs->root);
58590+ /* instead of calling gr_set_chroot_entries here,
58591+ we call it from every caller of this function
58592+ */
58593 fs->pwd = old->pwd;
58594 path_get(&fs->pwd);
58595 spin_unlock(&old->lock);
58596@@ -139,8 +149,9 @@ int unshare_fs_struct(void)
58597
58598 task_lock(current);
58599 spin_lock(&fs->lock);
58600- kill = !--fs->users;
58601+ kill = !atomic_dec_return(&fs->users);
58602 current->fs = new_fs;
58603+ gr_set_chroot_entries(current, &new_fs->root);
58604 spin_unlock(&fs->lock);
58605 task_unlock(current);
58606
58607@@ -153,13 +164,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
58608
58609 int current_umask(void)
58610 {
58611- return current->fs->umask;
58612+ return current->fs->umask | gr_acl_umask();
58613 }
58614 EXPORT_SYMBOL(current_umask);
58615
58616 /* to be mentioned only in INIT_TASK */
58617 struct fs_struct init_fs = {
58618- .users = 1,
58619+ .users = ATOMIC_INIT(1),
58620 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
58621 .seq = SEQCNT_ZERO(init_fs.seq),
58622 .umask = 0022,
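
The fs_struct conversion above changes the plain int users into an atomic_t (ATOMIC_INIT(1) in init_fs), so kill = !--fs->users becomes kill = !atomic_dec_return(&fs->users): the decrement and the zero test are now one atomic operation, and with PAX_REFCOUNT the counter is also overflow-protected. A userspace analogue of the decrement-and-test using C11 atomics (the two-sharer scenario is invented):

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
    atomic_int users = 2;   /* two sharers, e.g. after CLONE_FS */

    /* analogue of !atomic_dec_return(): decrement, then test the result */
    int kill_first  = (atomic_fetch_sub(&users, 1) - 1 == 0);
    int kill_second = (atomic_fetch_sub(&users, 1) - 1 == 0);

    /* only the last dropper sees zero and may free the structure */
    printf("first=%d second=%d\n", kill_first, kill_second);   /* 0 1 */
    return 0;
}
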
58623diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
58624index 29d7feb..303644d 100644
58625--- a/fs/fscache/cookie.c
58626+++ b/fs/fscache/cookie.c
58627@@ -19,7 +19,7 @@
58628
58629 struct kmem_cache *fscache_cookie_jar;
58630
58631-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
58632+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
58633
58634 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
58635 static int fscache_alloc_object(struct fscache_cache *cache,
58636@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
58637 parent ? (char *) parent->def->name : "<no-parent>",
58638 def->name, netfs_data, enable);
58639
58640- fscache_stat(&fscache_n_acquires);
58641+ fscache_stat_unchecked(&fscache_n_acquires);
58642
58643 /* if there's no parent cookie, then we don't create one here either */
58644 if (!parent) {
58645- fscache_stat(&fscache_n_acquires_null);
58646+ fscache_stat_unchecked(&fscache_n_acquires_null);
58647 _leave(" [no parent]");
58648 return NULL;
58649 }
58650@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
58651 /* allocate and initialise a cookie */
58652 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
58653 if (!cookie) {
58654- fscache_stat(&fscache_n_acquires_oom);
58655+ fscache_stat_unchecked(&fscache_n_acquires_oom);
58656 _leave(" [ENOMEM]");
58657 return NULL;
58658 }
58659@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
58660
58661 switch (cookie->def->type) {
58662 case FSCACHE_COOKIE_TYPE_INDEX:
58663- fscache_stat(&fscache_n_cookie_index);
58664+ fscache_stat_unchecked(&fscache_n_cookie_index);
58665 break;
58666 case FSCACHE_COOKIE_TYPE_DATAFILE:
58667- fscache_stat(&fscache_n_cookie_data);
58668+ fscache_stat_unchecked(&fscache_n_cookie_data);
58669 break;
58670 default:
58671- fscache_stat(&fscache_n_cookie_special);
58672+ fscache_stat_unchecked(&fscache_n_cookie_special);
58673 break;
58674 }
58675
58676@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
58677 } else {
58678 atomic_dec(&parent->n_children);
58679 __fscache_cookie_put(cookie);
58680- fscache_stat(&fscache_n_acquires_nobufs);
58681+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
58682 _leave(" = NULL");
58683 return NULL;
58684 }
58685@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
58686 }
58687 }
58688
58689- fscache_stat(&fscache_n_acquires_ok);
58690+ fscache_stat_unchecked(&fscache_n_acquires_ok);
58691 _leave(" = %p", cookie);
58692 return cookie;
58693 }
58694@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
58695 cache = fscache_select_cache_for_object(cookie->parent);
58696 if (!cache) {
58697 up_read(&fscache_addremove_sem);
58698- fscache_stat(&fscache_n_acquires_no_cache);
58699+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
58700 _leave(" = -ENOMEDIUM [no cache]");
58701 return -ENOMEDIUM;
58702 }
58703@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
58704 object = cache->ops->alloc_object(cache, cookie);
58705 fscache_stat_d(&fscache_n_cop_alloc_object);
58706 if (IS_ERR(object)) {
58707- fscache_stat(&fscache_n_object_no_alloc);
58708+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
58709 ret = PTR_ERR(object);
58710 goto error;
58711 }
58712
58713- fscache_stat(&fscache_n_object_alloc);
58714+ fscache_stat_unchecked(&fscache_n_object_alloc);
58715
58716- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
58717+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
58718
58719 _debug("ALLOC OBJ%x: %s {%lx}",
58720 object->debug_id, cookie->def->name, object->events);
58721@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
58722
58723 _enter("{%s}", cookie->def->name);
58724
58725- fscache_stat(&fscache_n_invalidates);
58726+ fscache_stat_unchecked(&fscache_n_invalidates);
58727
58728 /* Only permit invalidation of data files. Invalidating an index will
58729 * require the caller to release all its attachments to the tree rooted
58730@@ -477,10 +477,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
58731 {
58732 struct fscache_object *object;
58733
58734- fscache_stat(&fscache_n_updates);
58735+ fscache_stat_unchecked(&fscache_n_updates);
58736
58737 if (!cookie) {
58738- fscache_stat(&fscache_n_updates_null);
58739+ fscache_stat_unchecked(&fscache_n_updates_null);
58740 _leave(" [no cookie]");
58741 return;
58742 }
58743@@ -581,12 +581,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
58744 */
58745 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
58746 {
58747- fscache_stat(&fscache_n_relinquishes);
58748+ fscache_stat_unchecked(&fscache_n_relinquishes);
58749 if (retire)
58750- fscache_stat(&fscache_n_relinquishes_retire);
58751+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
58752
58753 if (!cookie) {
58754- fscache_stat(&fscache_n_relinquishes_null);
58755+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
58756 _leave(" [no cookie]");
58757 return;
58758 }
58759@@ -687,7 +687,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
58760 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
58761 goto inconsistent;
58762
58763- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
58764+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
58765
58766 __fscache_use_cookie(cookie);
58767 if (fscache_submit_op(object, op) < 0)
58768diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
58769index 4226f66..0fb3f45 100644
58770--- a/fs/fscache/internal.h
58771+++ b/fs/fscache/internal.h
58772@@ -133,8 +133,8 @@ extern void fscache_operation_gc(struct work_struct *);
58773 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
58774 extern int fscache_wait_for_operation_activation(struct fscache_object *,
58775 struct fscache_operation *,
58776- atomic_t *,
58777- atomic_t *,
58778+ atomic_unchecked_t *,
58779+ atomic_unchecked_t *,
58780 void (*)(struct fscache_operation *));
58781 extern void fscache_invalidate_writes(struct fscache_cookie *);
58782
58783@@ -153,101 +153,101 @@ extern void fscache_proc_cleanup(void);
58784 * stats.c
58785 */
58786 #ifdef CONFIG_FSCACHE_STATS
58787-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
58788-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
58789+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
58790+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
58791
58792-extern atomic_t fscache_n_op_pend;
58793-extern atomic_t fscache_n_op_run;
58794-extern atomic_t fscache_n_op_enqueue;
58795-extern atomic_t fscache_n_op_deferred_release;
58796-extern atomic_t fscache_n_op_release;
58797-extern atomic_t fscache_n_op_gc;
58798-extern atomic_t fscache_n_op_cancelled;
58799-extern atomic_t fscache_n_op_rejected;
58800+extern atomic_unchecked_t fscache_n_op_pend;
58801+extern atomic_unchecked_t fscache_n_op_run;
58802+extern atomic_unchecked_t fscache_n_op_enqueue;
58803+extern atomic_unchecked_t fscache_n_op_deferred_release;
58804+extern atomic_unchecked_t fscache_n_op_release;
58805+extern atomic_unchecked_t fscache_n_op_gc;
58806+extern atomic_unchecked_t fscache_n_op_cancelled;
58807+extern atomic_unchecked_t fscache_n_op_rejected;
58808
58809-extern atomic_t fscache_n_attr_changed;
58810-extern atomic_t fscache_n_attr_changed_ok;
58811-extern atomic_t fscache_n_attr_changed_nobufs;
58812-extern atomic_t fscache_n_attr_changed_nomem;
58813-extern atomic_t fscache_n_attr_changed_calls;
58814+extern atomic_unchecked_t fscache_n_attr_changed;
58815+extern atomic_unchecked_t fscache_n_attr_changed_ok;
58816+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
58817+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
58818+extern atomic_unchecked_t fscache_n_attr_changed_calls;
58819
58820-extern atomic_t fscache_n_allocs;
58821-extern atomic_t fscache_n_allocs_ok;
58822-extern atomic_t fscache_n_allocs_wait;
58823-extern atomic_t fscache_n_allocs_nobufs;
58824-extern atomic_t fscache_n_allocs_intr;
58825-extern atomic_t fscache_n_allocs_object_dead;
58826-extern atomic_t fscache_n_alloc_ops;
58827-extern atomic_t fscache_n_alloc_op_waits;
58828+extern atomic_unchecked_t fscache_n_allocs;
58829+extern atomic_unchecked_t fscache_n_allocs_ok;
58830+extern atomic_unchecked_t fscache_n_allocs_wait;
58831+extern atomic_unchecked_t fscache_n_allocs_nobufs;
58832+extern atomic_unchecked_t fscache_n_allocs_intr;
58833+extern atomic_unchecked_t fscache_n_allocs_object_dead;
58834+extern atomic_unchecked_t fscache_n_alloc_ops;
58835+extern atomic_unchecked_t fscache_n_alloc_op_waits;
58836
58837-extern atomic_t fscache_n_retrievals;
58838-extern atomic_t fscache_n_retrievals_ok;
58839-extern atomic_t fscache_n_retrievals_wait;
58840-extern atomic_t fscache_n_retrievals_nodata;
58841-extern atomic_t fscache_n_retrievals_nobufs;
58842-extern atomic_t fscache_n_retrievals_intr;
58843-extern atomic_t fscache_n_retrievals_nomem;
58844-extern atomic_t fscache_n_retrievals_object_dead;
58845-extern atomic_t fscache_n_retrieval_ops;
58846-extern atomic_t fscache_n_retrieval_op_waits;
58847+extern atomic_unchecked_t fscache_n_retrievals;
58848+extern atomic_unchecked_t fscache_n_retrievals_ok;
58849+extern atomic_unchecked_t fscache_n_retrievals_wait;
58850+extern atomic_unchecked_t fscache_n_retrievals_nodata;
58851+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
58852+extern atomic_unchecked_t fscache_n_retrievals_intr;
58853+extern atomic_unchecked_t fscache_n_retrievals_nomem;
58854+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
58855+extern atomic_unchecked_t fscache_n_retrieval_ops;
58856+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
58857
58858-extern atomic_t fscache_n_stores;
58859-extern atomic_t fscache_n_stores_ok;
58860-extern atomic_t fscache_n_stores_again;
58861-extern atomic_t fscache_n_stores_nobufs;
58862-extern atomic_t fscache_n_stores_oom;
58863-extern atomic_t fscache_n_store_ops;
58864-extern atomic_t fscache_n_store_calls;
58865-extern atomic_t fscache_n_store_pages;
58866-extern atomic_t fscache_n_store_radix_deletes;
58867-extern atomic_t fscache_n_store_pages_over_limit;
58868+extern atomic_unchecked_t fscache_n_stores;
58869+extern atomic_unchecked_t fscache_n_stores_ok;
58870+extern atomic_unchecked_t fscache_n_stores_again;
58871+extern atomic_unchecked_t fscache_n_stores_nobufs;
58872+extern atomic_unchecked_t fscache_n_stores_oom;
58873+extern atomic_unchecked_t fscache_n_store_ops;
58874+extern atomic_unchecked_t fscache_n_store_calls;
58875+extern atomic_unchecked_t fscache_n_store_pages;
58876+extern atomic_unchecked_t fscache_n_store_radix_deletes;
58877+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
58878
58879-extern atomic_t fscache_n_store_vmscan_not_storing;
58880-extern atomic_t fscache_n_store_vmscan_gone;
58881-extern atomic_t fscache_n_store_vmscan_busy;
58882-extern atomic_t fscache_n_store_vmscan_cancelled;
58883-extern atomic_t fscache_n_store_vmscan_wait;
58884+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
58885+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
58886+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
58887+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
58888+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
58889
58890-extern atomic_t fscache_n_marks;
58891-extern atomic_t fscache_n_uncaches;
58892+extern atomic_unchecked_t fscache_n_marks;
58893+extern atomic_unchecked_t fscache_n_uncaches;
58894
58895-extern atomic_t fscache_n_acquires;
58896-extern atomic_t fscache_n_acquires_null;
58897-extern atomic_t fscache_n_acquires_no_cache;
58898-extern atomic_t fscache_n_acquires_ok;
58899-extern atomic_t fscache_n_acquires_nobufs;
58900-extern atomic_t fscache_n_acquires_oom;
58901+extern atomic_unchecked_t fscache_n_acquires;
58902+extern atomic_unchecked_t fscache_n_acquires_null;
58903+extern atomic_unchecked_t fscache_n_acquires_no_cache;
58904+extern atomic_unchecked_t fscache_n_acquires_ok;
58905+extern atomic_unchecked_t fscache_n_acquires_nobufs;
58906+extern atomic_unchecked_t fscache_n_acquires_oom;
58907
58908-extern atomic_t fscache_n_invalidates;
58909-extern atomic_t fscache_n_invalidates_run;
58910+extern atomic_unchecked_t fscache_n_invalidates;
58911+extern atomic_unchecked_t fscache_n_invalidates_run;
58912
58913-extern atomic_t fscache_n_updates;
58914-extern atomic_t fscache_n_updates_null;
58915-extern atomic_t fscache_n_updates_run;
58916+extern atomic_unchecked_t fscache_n_updates;
58917+extern atomic_unchecked_t fscache_n_updates_null;
58918+extern atomic_unchecked_t fscache_n_updates_run;
58919
58920-extern atomic_t fscache_n_relinquishes;
58921-extern atomic_t fscache_n_relinquishes_null;
58922-extern atomic_t fscache_n_relinquishes_waitcrt;
58923-extern atomic_t fscache_n_relinquishes_retire;
58924+extern atomic_unchecked_t fscache_n_relinquishes;
58925+extern atomic_unchecked_t fscache_n_relinquishes_null;
58926+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
58927+extern atomic_unchecked_t fscache_n_relinquishes_retire;
58928
58929-extern atomic_t fscache_n_cookie_index;
58930-extern atomic_t fscache_n_cookie_data;
58931-extern atomic_t fscache_n_cookie_special;
58932+extern atomic_unchecked_t fscache_n_cookie_index;
58933+extern atomic_unchecked_t fscache_n_cookie_data;
58934+extern atomic_unchecked_t fscache_n_cookie_special;
58935
58936-extern atomic_t fscache_n_object_alloc;
58937-extern atomic_t fscache_n_object_no_alloc;
58938-extern atomic_t fscache_n_object_lookups;
58939-extern atomic_t fscache_n_object_lookups_negative;
58940-extern atomic_t fscache_n_object_lookups_positive;
58941-extern atomic_t fscache_n_object_lookups_timed_out;
58942-extern atomic_t fscache_n_object_created;
58943-extern atomic_t fscache_n_object_avail;
58944-extern atomic_t fscache_n_object_dead;
58945+extern atomic_unchecked_t fscache_n_object_alloc;
58946+extern atomic_unchecked_t fscache_n_object_no_alloc;
58947+extern atomic_unchecked_t fscache_n_object_lookups;
58948+extern atomic_unchecked_t fscache_n_object_lookups_negative;
58949+extern atomic_unchecked_t fscache_n_object_lookups_positive;
58950+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
58951+extern atomic_unchecked_t fscache_n_object_created;
58952+extern atomic_unchecked_t fscache_n_object_avail;
58953+extern atomic_unchecked_t fscache_n_object_dead;
58954
58955-extern atomic_t fscache_n_checkaux_none;
58956-extern atomic_t fscache_n_checkaux_okay;
58957-extern atomic_t fscache_n_checkaux_update;
58958-extern atomic_t fscache_n_checkaux_obsolete;
58959+extern atomic_unchecked_t fscache_n_checkaux_none;
58960+extern atomic_unchecked_t fscache_n_checkaux_okay;
58961+extern atomic_unchecked_t fscache_n_checkaux_update;
58962+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
58963
58964 extern atomic_t fscache_n_cop_alloc_object;
58965 extern atomic_t fscache_n_cop_lookup_object;
58966@@ -272,6 +272,11 @@ static inline void fscache_stat(atomic_t *stat)
58967 atomic_inc(stat);
58968 }
58969
58970+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
58971+{
58972+ atomic_inc_unchecked(stat);
58973+}
58974+
58975 static inline void fscache_stat_d(atomic_t *stat)
58976 {
58977 atomic_dec(stat);
58978@@ -284,6 +289,7 @@ extern const struct file_operations fscache_stats_fops;
58979
58980 #define __fscache_stat(stat) (NULL)
58981 #define fscache_stat(stat) do {} while (0)
58982+#define fscache_stat_unchecked(stat) do {} while (0)
58983 #define fscache_stat_d(stat) do {} while (0)
58984 #endif
58985
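
The blanket atomic_t -> atomic_unchecked_t conversion in this and the surrounding fscache files is the standard PAX_REFCOUNT accommodation: with that feature enabled, ordinary atomic_t arithmetic traps on overflow to stop reference-count-overflow exploits, which would false-positive on statistics counters that are allowed to wrap. atomic_unchecked_t opts such counters out of the trap while leaving real refcounts protected. Roughly how the type degrades when the hardening is compiled out (a condensed sketch; the exact definitions live in the atomic headers elsewhere in this patch):

#ifdef CONFIG_PAX_REFCOUNT
typedef struct { int counter; } atomic_unchecked_t;
/* the *_unchecked() ops do plain, untrapped arithmetic on ->counter */
#else
typedef atomic_t atomic_unchecked_t;
#define atomic_inc_unchecked(v)         atomic_inc(v)
#define atomic_read_unchecked(v)        atomic_read(v)
#define atomic_inc_return_unchecked(v)  atomic_inc_return(v)
#endif
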
58986diff --git a/fs/fscache/object.c b/fs/fscache/object.c
58987index 53d35c5..5d68ed4 100644
58988--- a/fs/fscache/object.c
58989+++ b/fs/fscache/object.c
58990@@ -451,7 +451,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
58991 _debug("LOOKUP \"%s\" in \"%s\"",
58992 cookie->def->name, object->cache->tag->name);
58993
58994- fscache_stat(&fscache_n_object_lookups);
58995+ fscache_stat_unchecked(&fscache_n_object_lookups);
58996 fscache_stat(&fscache_n_cop_lookup_object);
58997 ret = object->cache->ops->lookup_object(object);
58998 fscache_stat_d(&fscache_n_cop_lookup_object);
58999@@ -461,7 +461,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
59000 if (ret == -ETIMEDOUT) {
59001 /* probably stuck behind another object, so move this one to
59002 * the back of the queue */
59003- fscache_stat(&fscache_n_object_lookups_timed_out);
59004+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
59005 _leave(" [timeout]");
59006 return NO_TRANSIT;
59007 }
59008@@ -489,7 +489,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
59009 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
59010
59011 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
59012- fscache_stat(&fscache_n_object_lookups_negative);
59013+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
59014
59015 /* Allow write requests to begin stacking up and read requests to begin
59016 * returning ENODATA.
59017@@ -524,7 +524,7 @@ void fscache_obtained_object(struct fscache_object *object)
59018 /* if we were still looking up, then we must have a positive lookup
59019 * result, in which case there may be data available */
59020 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
59021- fscache_stat(&fscache_n_object_lookups_positive);
59022+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
59023
59024 /* We do (presumably) have data */
59025 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
59026@@ -536,7 +536,7 @@ void fscache_obtained_object(struct fscache_object *object)
59027 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
59028 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
59029 } else {
59030- fscache_stat(&fscache_n_object_created);
59031+ fscache_stat_unchecked(&fscache_n_object_created);
59032 }
59033
59034 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
59035@@ -572,7 +572,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
59036 fscache_stat_d(&fscache_n_cop_lookup_complete);
59037
59038 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
59039- fscache_stat(&fscache_n_object_avail);
59040+ fscache_stat_unchecked(&fscache_n_object_avail);
59041
59042 _leave("");
59043 return transit_to(JUMPSTART_DEPS);
59044@@ -719,7 +719,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
59045
59046 /* this just shifts the object release to the work processor */
59047 fscache_put_object(object);
59048- fscache_stat(&fscache_n_object_dead);
59049+ fscache_stat_unchecked(&fscache_n_object_dead);
59050
59051 _leave("");
59052 return transit_to(OBJECT_DEAD);
59053@@ -884,7 +884,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
59054 enum fscache_checkaux result;
59055
59056 if (!object->cookie->def->check_aux) {
59057- fscache_stat(&fscache_n_checkaux_none);
59058+ fscache_stat_unchecked(&fscache_n_checkaux_none);
59059 return FSCACHE_CHECKAUX_OKAY;
59060 }
59061
59062@@ -893,17 +893,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
59063 switch (result) {
59064 /* entry okay as is */
59065 case FSCACHE_CHECKAUX_OKAY:
59066- fscache_stat(&fscache_n_checkaux_okay);
59067+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
59068 break;
59069
59070 /* entry requires update */
59071 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
59072- fscache_stat(&fscache_n_checkaux_update);
59073+ fscache_stat_unchecked(&fscache_n_checkaux_update);
59074 break;
59075
59076 /* entry requires deletion */
59077 case FSCACHE_CHECKAUX_OBSOLETE:
59078- fscache_stat(&fscache_n_checkaux_obsolete);
59079+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
59080 break;
59081
59082 default:
59083@@ -989,7 +989,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
59084 {
59085 const struct fscache_state *s;
59086
59087- fscache_stat(&fscache_n_invalidates_run);
59088+ fscache_stat_unchecked(&fscache_n_invalidates_run);
59089 fscache_stat(&fscache_n_cop_invalidate_object);
59090 s = _fscache_invalidate_object(object, event);
59091 fscache_stat_d(&fscache_n_cop_invalidate_object);
59092@@ -1004,7 +1004,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
59093 {
59094 _enter("{OBJ%x},%d", object->debug_id, event);
59095
59096- fscache_stat(&fscache_n_updates_run);
59097+ fscache_stat_unchecked(&fscache_n_updates_run);
59098 fscache_stat(&fscache_n_cop_update_object);
59099 object->cache->ops->update_object(object);
59100 fscache_stat_d(&fscache_n_cop_update_object);
59101diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
59102index 318071a..379938b 100644
59103--- a/fs/fscache/operation.c
59104+++ b/fs/fscache/operation.c
59105@@ -17,7 +17,7 @@
59106 #include <linux/slab.h>
59107 #include "internal.h"
59108
59109-atomic_t fscache_op_debug_id;
59110+atomic_unchecked_t fscache_op_debug_id;
59111 EXPORT_SYMBOL(fscache_op_debug_id);
59112
59113 /**
59114@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
59115 ASSERTCMP(atomic_read(&op->usage), >, 0);
59116 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
59117
59118- fscache_stat(&fscache_n_op_enqueue);
59119+ fscache_stat_unchecked(&fscache_n_op_enqueue);
59120 switch (op->flags & FSCACHE_OP_TYPE) {
59121 case FSCACHE_OP_ASYNC:
59122 _debug("queue async");
59123@@ -73,7 +73,7 @@ static void fscache_run_op(struct fscache_object *object,
59124 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
59125 if (op->processor)
59126 fscache_enqueue_operation(op);
59127- fscache_stat(&fscache_n_op_run);
59128+ fscache_stat_unchecked(&fscache_n_op_run);
59129 }
59130
59131 /*
59132@@ -105,11 +105,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
59133 if (object->n_in_progress > 0) {
59134 atomic_inc(&op->usage);
59135 list_add_tail(&op->pend_link, &object->pending_ops);
59136- fscache_stat(&fscache_n_op_pend);
59137+ fscache_stat_unchecked(&fscache_n_op_pend);
59138 } else if (!list_empty(&object->pending_ops)) {
59139 atomic_inc(&op->usage);
59140 list_add_tail(&op->pend_link, &object->pending_ops);
59141- fscache_stat(&fscache_n_op_pend);
59142+ fscache_stat_unchecked(&fscache_n_op_pend);
59143 fscache_start_operations(object);
59144 } else {
59145 ASSERTCMP(object->n_in_progress, ==, 0);
59146@@ -125,7 +125,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
59147 object->n_exclusive++; /* reads and writes must wait */
59148 atomic_inc(&op->usage);
59149 list_add_tail(&op->pend_link, &object->pending_ops);
59150- fscache_stat(&fscache_n_op_pend);
59151+ fscache_stat_unchecked(&fscache_n_op_pend);
59152 ret = 0;
59153 } else {
59154 /* If we're in any other state, there must have been an I/O
59155@@ -212,11 +212,11 @@ int fscache_submit_op(struct fscache_object *object,
59156 if (object->n_exclusive > 0) {
59157 atomic_inc(&op->usage);
59158 list_add_tail(&op->pend_link, &object->pending_ops);
59159- fscache_stat(&fscache_n_op_pend);
59160+ fscache_stat_unchecked(&fscache_n_op_pend);
59161 } else if (!list_empty(&object->pending_ops)) {
59162 atomic_inc(&op->usage);
59163 list_add_tail(&op->pend_link, &object->pending_ops);
59164- fscache_stat(&fscache_n_op_pend);
59165+ fscache_stat_unchecked(&fscache_n_op_pend);
59166 fscache_start_operations(object);
59167 } else {
59168 ASSERTCMP(object->n_exclusive, ==, 0);
59169@@ -228,10 +228,10 @@ int fscache_submit_op(struct fscache_object *object,
59170 object->n_ops++;
59171 atomic_inc(&op->usage);
59172 list_add_tail(&op->pend_link, &object->pending_ops);
59173- fscache_stat(&fscache_n_op_pend);
59174+ fscache_stat_unchecked(&fscache_n_op_pend);
59175 ret = 0;
59176 } else if (fscache_object_is_dying(object)) {
59177- fscache_stat(&fscache_n_op_rejected);
59178+ fscache_stat_unchecked(&fscache_n_op_rejected);
59179 op->state = FSCACHE_OP_ST_CANCELLED;
59180 ret = -ENOBUFS;
59181 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
59182@@ -310,7 +310,7 @@ int fscache_cancel_op(struct fscache_operation *op,
59183 ret = -EBUSY;
59184 if (op->state == FSCACHE_OP_ST_PENDING) {
59185 ASSERT(!list_empty(&op->pend_link));
59186- fscache_stat(&fscache_n_op_cancelled);
59187+ fscache_stat_unchecked(&fscache_n_op_cancelled);
59188 list_del_init(&op->pend_link);
59189 if (do_cancel)
59190 do_cancel(op);
59191@@ -342,7 +342,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
59192 while (!list_empty(&object->pending_ops)) {
59193 op = list_entry(object->pending_ops.next,
59194 struct fscache_operation, pend_link);
59195- fscache_stat(&fscache_n_op_cancelled);
59196+ fscache_stat_unchecked(&fscache_n_op_cancelled);
59197 list_del_init(&op->pend_link);
59198
59199 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
59200@@ -414,7 +414,7 @@ void fscache_put_operation(struct fscache_operation *op)
59201 op->state, ==, FSCACHE_OP_ST_CANCELLED);
59202 op->state = FSCACHE_OP_ST_DEAD;
59203
59204- fscache_stat(&fscache_n_op_release);
59205+ fscache_stat_unchecked(&fscache_n_op_release);
59206
59207 if (op->release) {
59208 op->release(op);
59209@@ -433,7 +433,7 @@ void fscache_put_operation(struct fscache_operation *op)
59210 * lock, and defer it otherwise */
59211 if (!spin_trylock(&object->lock)) {
59212 _debug("defer put");
59213- fscache_stat(&fscache_n_op_deferred_release);
59214+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
59215
59216 cache = object->cache;
59217 spin_lock(&cache->op_gc_list_lock);
59218@@ -486,7 +486,7 @@ void fscache_operation_gc(struct work_struct *work)
59219
59220 _debug("GC DEFERRED REL OBJ%x OP%x",
59221 object->debug_id, op->debug_id);
59222- fscache_stat(&fscache_n_op_gc);
59223+ fscache_stat_unchecked(&fscache_n_op_gc);
59224
59225 ASSERTCMP(atomic_read(&op->usage), ==, 0);
59226 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
59227diff --git a/fs/fscache/page.c b/fs/fscache/page.c
59228index 7f5c658..6c1e164 100644
59229--- a/fs/fscache/page.c
59230+++ b/fs/fscache/page.c
59231@@ -61,7 +61,7 @@ try_again:
59232 val = radix_tree_lookup(&cookie->stores, page->index);
59233 if (!val) {
59234 rcu_read_unlock();
59235- fscache_stat(&fscache_n_store_vmscan_not_storing);
59236+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
59237 __fscache_uncache_page(cookie, page);
59238 return true;
59239 }
59240@@ -91,11 +91,11 @@ try_again:
59241 spin_unlock(&cookie->stores_lock);
59242
59243 if (xpage) {
59244- fscache_stat(&fscache_n_store_vmscan_cancelled);
59245- fscache_stat(&fscache_n_store_radix_deletes);
59246+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
59247+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
59248 ASSERTCMP(xpage, ==, page);
59249 } else {
59250- fscache_stat(&fscache_n_store_vmscan_gone);
59251+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
59252 }
59253
59254 wake_up_bit(&cookie->flags, 0);
59255@@ -110,11 +110,11 @@ page_busy:
59256 * sleeping on memory allocation, so we may need to impose a timeout
59257 * too. */
59258 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
59259- fscache_stat(&fscache_n_store_vmscan_busy);
59260+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
59261 return false;
59262 }
59263
59264- fscache_stat(&fscache_n_store_vmscan_wait);
59265+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
59266 __fscache_wait_on_page_write(cookie, page);
59267 gfp &= ~__GFP_WAIT;
59268 goto try_again;
59269@@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
59270 FSCACHE_COOKIE_STORING_TAG);
59271 if (!radix_tree_tag_get(&cookie->stores, page->index,
59272 FSCACHE_COOKIE_PENDING_TAG)) {
59273- fscache_stat(&fscache_n_store_radix_deletes);
59274+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
59275 xpage = radix_tree_delete(&cookie->stores, page->index);
59276 }
59277 spin_unlock(&cookie->stores_lock);
59278@@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
59279
59280 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
59281
59282- fscache_stat(&fscache_n_attr_changed_calls);
59283+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
59284
59285 if (fscache_object_is_active(object)) {
59286 fscache_stat(&fscache_n_cop_attr_changed);
59287@@ -188,11 +188,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
59288
59289 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
59290
59291- fscache_stat(&fscache_n_attr_changed);
59292+ fscache_stat_unchecked(&fscache_n_attr_changed);
59293
59294 op = kzalloc(sizeof(*op), GFP_KERNEL);
59295 if (!op) {
59296- fscache_stat(&fscache_n_attr_changed_nomem);
59297+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
59298 _leave(" = -ENOMEM");
59299 return -ENOMEM;
59300 }
59301@@ -214,7 +214,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
59302 if (fscache_submit_exclusive_op(object, op) < 0)
59303 goto nobufs;
59304 spin_unlock(&cookie->lock);
59305- fscache_stat(&fscache_n_attr_changed_ok);
59306+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
59307 fscache_put_operation(op);
59308 _leave(" = 0");
59309 return 0;
59310@@ -225,7 +225,7 @@ nobufs:
59311 kfree(op);
59312 if (wake_cookie)
59313 __fscache_wake_unused_cookie(cookie);
59314- fscache_stat(&fscache_n_attr_changed_nobufs);
59315+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
59316 _leave(" = %d", -ENOBUFS);
59317 return -ENOBUFS;
59318 }
59319@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
59320 /* allocate a retrieval operation and attempt to submit it */
59321 op = kzalloc(sizeof(*op), GFP_NOIO);
59322 if (!op) {
59323- fscache_stat(&fscache_n_retrievals_nomem);
59324+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
59325 return NULL;
59326 }
59327
59328@@ -294,13 +294,13 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
59329 return 0;
59330 }
59331
59332- fscache_stat(&fscache_n_retrievals_wait);
59333+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
59334
59335 jif = jiffies;
59336 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
59337 fscache_wait_bit_interruptible,
59338 TASK_INTERRUPTIBLE) != 0) {
59339- fscache_stat(&fscache_n_retrievals_intr);
59340+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
59341 _leave(" = -ERESTARTSYS");
59342 return -ERESTARTSYS;
59343 }
59344@@ -329,8 +329,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
59345 */
59346 int fscache_wait_for_operation_activation(struct fscache_object *object,
59347 struct fscache_operation *op,
59348- atomic_t *stat_op_waits,
59349- atomic_t *stat_object_dead,
59350+ atomic_unchecked_t *stat_op_waits,
59351+ atomic_unchecked_t *stat_object_dead,
59352 void (*do_cancel)(struct fscache_operation *))
59353 {
59354 int ret;
59355@@ -340,7 +340,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
59356
59357 _debug(">>> WT");
59358 if (stat_op_waits)
59359- fscache_stat(stat_op_waits);
59360+ fscache_stat_unchecked(stat_op_waits);
59361 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
59362 fscache_wait_bit_interruptible,
59363 TASK_INTERRUPTIBLE) != 0) {
59364@@ -358,7 +358,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
59365 check_if_dead:
59366 if (op->state == FSCACHE_OP_ST_CANCELLED) {
59367 if (stat_object_dead)
59368- fscache_stat(stat_object_dead);
59369+ fscache_stat_unchecked(stat_object_dead);
59370 _leave(" = -ENOBUFS [cancelled]");
59371 return -ENOBUFS;
59372 }
59373@@ -366,7 +366,7 @@ check_if_dead:
59374 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
59375 fscache_cancel_op(op, do_cancel);
59376 if (stat_object_dead)
59377- fscache_stat(stat_object_dead);
59378+ fscache_stat_unchecked(stat_object_dead);
59379 return -ENOBUFS;
59380 }
59381 return 0;
59382@@ -394,7 +394,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
59383
59384 _enter("%p,%p,,,", cookie, page);
59385
59386- fscache_stat(&fscache_n_retrievals);
59387+ fscache_stat_unchecked(&fscache_n_retrievals);
59388
59389 if (hlist_empty(&cookie->backing_objects))
59390 goto nobufs;
59391@@ -436,7 +436,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
59392 goto nobufs_unlock_dec;
59393 spin_unlock(&cookie->lock);
59394
59395- fscache_stat(&fscache_n_retrieval_ops);
59396+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
59397
59398 /* pin the netfs read context in case we need to do the actual netfs
59399 * read because we've encountered a cache read failure */
59400@@ -467,15 +467,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
59401
59402 error:
59403 if (ret == -ENOMEM)
59404- fscache_stat(&fscache_n_retrievals_nomem);
59405+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
59406 else if (ret == -ERESTARTSYS)
59407- fscache_stat(&fscache_n_retrievals_intr);
59408+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
59409 else if (ret == -ENODATA)
59410- fscache_stat(&fscache_n_retrievals_nodata);
59411+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
59412 else if (ret < 0)
59413- fscache_stat(&fscache_n_retrievals_nobufs);
59414+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
59415 else
59416- fscache_stat(&fscache_n_retrievals_ok);
59417+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
59418
59419 fscache_put_retrieval(op);
59420 _leave(" = %d", ret);
59421@@ -490,7 +490,7 @@ nobufs_unlock:
59422 __fscache_wake_unused_cookie(cookie);
59423 kfree(op);
59424 nobufs:
59425- fscache_stat(&fscache_n_retrievals_nobufs);
59426+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
59427 _leave(" = -ENOBUFS");
59428 return -ENOBUFS;
59429 }
59430@@ -529,7 +529,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
59431
59432 _enter("%p,,%d,,,", cookie, *nr_pages);
59433
59434- fscache_stat(&fscache_n_retrievals);
59435+ fscache_stat_unchecked(&fscache_n_retrievals);
59436
59437 if (hlist_empty(&cookie->backing_objects))
59438 goto nobufs;
59439@@ -567,7 +567,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
59440 goto nobufs_unlock_dec;
59441 spin_unlock(&cookie->lock);
59442
59443- fscache_stat(&fscache_n_retrieval_ops);
59444+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
59445
59446 /* pin the netfs read context in case we need to do the actual netfs
59447 * read because we've encountered a cache read failure */
59448@@ -598,15 +598,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
59449
59450 error:
59451 if (ret == -ENOMEM)
59452- fscache_stat(&fscache_n_retrievals_nomem);
59453+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
59454 else if (ret == -ERESTARTSYS)
59455- fscache_stat(&fscache_n_retrievals_intr);
59456+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
59457 else if (ret == -ENODATA)
59458- fscache_stat(&fscache_n_retrievals_nodata);
59459+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
59460 else if (ret < 0)
59461- fscache_stat(&fscache_n_retrievals_nobufs);
59462+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
59463 else
59464- fscache_stat(&fscache_n_retrievals_ok);
59465+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
59466
59467 fscache_put_retrieval(op);
59468 _leave(" = %d", ret);
59469@@ -621,7 +621,7 @@ nobufs_unlock:
59470 if (wake_cookie)
59471 __fscache_wake_unused_cookie(cookie);
59472 nobufs:
59473- fscache_stat(&fscache_n_retrievals_nobufs);
59474+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
59475 _leave(" = -ENOBUFS");
59476 return -ENOBUFS;
59477 }
59478@@ -646,7 +646,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
59479
59480 _enter("%p,%p,,,", cookie, page);
59481
59482- fscache_stat(&fscache_n_allocs);
59483+ fscache_stat_unchecked(&fscache_n_allocs);
59484
59485 if (hlist_empty(&cookie->backing_objects))
59486 goto nobufs;
59487@@ -680,7 +680,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
59488 goto nobufs_unlock_dec;
59489 spin_unlock(&cookie->lock);
59490
59491- fscache_stat(&fscache_n_alloc_ops);
59492+ fscache_stat_unchecked(&fscache_n_alloc_ops);
59493
59494 ret = fscache_wait_for_operation_activation(
59495 object, &op->op,
59496@@ -697,11 +697,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
59497
59498 error:
59499 if (ret == -ERESTARTSYS)
59500- fscache_stat(&fscache_n_allocs_intr);
59501+ fscache_stat_unchecked(&fscache_n_allocs_intr);
59502 else if (ret < 0)
59503- fscache_stat(&fscache_n_allocs_nobufs);
59504+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
59505 else
59506- fscache_stat(&fscache_n_allocs_ok);
59507+ fscache_stat_unchecked(&fscache_n_allocs_ok);
59508
59509 fscache_put_retrieval(op);
59510 _leave(" = %d", ret);
59511@@ -715,7 +715,7 @@ nobufs_unlock:
59512 if (wake_cookie)
59513 __fscache_wake_unused_cookie(cookie);
59514 nobufs:
59515- fscache_stat(&fscache_n_allocs_nobufs);
59516+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
59517 _leave(" = -ENOBUFS");
59518 return -ENOBUFS;
59519 }
59520@@ -791,7 +791,7 @@ static void fscache_write_op(struct fscache_operation *_op)
59521
59522 spin_lock(&cookie->stores_lock);
59523
59524- fscache_stat(&fscache_n_store_calls);
59525+ fscache_stat_unchecked(&fscache_n_store_calls);
59526
59527 /* find a page to store */
59528 page = NULL;
59529@@ -802,7 +802,7 @@ static void fscache_write_op(struct fscache_operation *_op)
59530 page = results[0];
59531 _debug("gang %d [%lx]", n, page->index);
59532 if (page->index > op->store_limit) {
59533- fscache_stat(&fscache_n_store_pages_over_limit);
59534+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
59535 goto superseded;
59536 }
59537
59538@@ -814,7 +814,7 @@ static void fscache_write_op(struct fscache_operation *_op)
59539 spin_unlock(&cookie->stores_lock);
59540 spin_unlock(&object->lock);
59541
59542- fscache_stat(&fscache_n_store_pages);
59543+ fscache_stat_unchecked(&fscache_n_store_pages);
59544 fscache_stat(&fscache_n_cop_write_page);
59545 ret = object->cache->ops->write_page(op, page);
59546 fscache_stat_d(&fscache_n_cop_write_page);
59547@@ -918,7 +918,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
59548 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
59549 ASSERT(PageFsCache(page));
59550
59551- fscache_stat(&fscache_n_stores);
59552+ fscache_stat_unchecked(&fscache_n_stores);
59553
59554 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
59555 _leave(" = -ENOBUFS [invalidating]");
59556@@ -977,7 +977,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
59557 spin_unlock(&cookie->stores_lock);
59558 spin_unlock(&object->lock);
59559
59560- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
59561+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
59562 op->store_limit = object->store_limit;
59563
59564 __fscache_use_cookie(cookie);
59565@@ -986,8 +986,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
59566
59567 spin_unlock(&cookie->lock);
59568 radix_tree_preload_end();
59569- fscache_stat(&fscache_n_store_ops);
59570- fscache_stat(&fscache_n_stores_ok);
59571+ fscache_stat_unchecked(&fscache_n_store_ops);
59572+ fscache_stat_unchecked(&fscache_n_stores_ok);
59573
59574 /* the work queue now carries its own ref on the object */
59575 fscache_put_operation(&op->op);
59576@@ -995,14 +995,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
59577 return 0;
59578
59579 already_queued:
59580- fscache_stat(&fscache_n_stores_again);
59581+ fscache_stat_unchecked(&fscache_n_stores_again);
59582 already_pending:
59583 spin_unlock(&cookie->stores_lock);
59584 spin_unlock(&object->lock);
59585 spin_unlock(&cookie->lock);
59586 radix_tree_preload_end();
59587 kfree(op);
59588- fscache_stat(&fscache_n_stores_ok);
59589+ fscache_stat_unchecked(&fscache_n_stores_ok);
59590 _leave(" = 0");
59591 return 0;
59592
59593@@ -1024,14 +1024,14 @@ nobufs:
59594 kfree(op);
59595 if (wake_cookie)
59596 __fscache_wake_unused_cookie(cookie);
59597- fscache_stat(&fscache_n_stores_nobufs);
59598+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
59599 _leave(" = -ENOBUFS");
59600 return -ENOBUFS;
59601
59602 nomem_free:
59603 kfree(op);
59604 nomem:
59605- fscache_stat(&fscache_n_stores_oom);
59606+ fscache_stat_unchecked(&fscache_n_stores_oom);
59607 _leave(" = -ENOMEM");
59608 return -ENOMEM;
59609 }
59610@@ -1049,7 +1049,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
59611 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
59612 ASSERTCMP(page, !=, NULL);
59613
59614- fscache_stat(&fscache_n_uncaches);
59615+ fscache_stat_unchecked(&fscache_n_uncaches);
59616
59617 /* cache withdrawal may beat us to it */
59618 if (!PageFsCache(page))
59619@@ -1100,7 +1100,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
59620 struct fscache_cookie *cookie = op->op.object->cookie;
59621
59622 #ifdef CONFIG_FSCACHE_STATS
59623- atomic_inc(&fscache_n_marks);
59624+ atomic_inc_unchecked(&fscache_n_marks);
59625 #endif
59626
59627 _debug("- mark %p{%lx}", page, page->index);
59628diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
59629index 40d13c7..ddf52b9 100644
59630--- a/fs/fscache/stats.c
59631+++ b/fs/fscache/stats.c
59632@@ -18,99 +18,99 @@
59633 /*
59634 * operation counters
59635 */
59636-atomic_t fscache_n_op_pend;
59637-atomic_t fscache_n_op_run;
59638-atomic_t fscache_n_op_enqueue;
59639-atomic_t fscache_n_op_requeue;
59640-atomic_t fscache_n_op_deferred_release;
59641-atomic_t fscache_n_op_release;
59642-atomic_t fscache_n_op_gc;
59643-atomic_t fscache_n_op_cancelled;
59644-atomic_t fscache_n_op_rejected;
59645+atomic_unchecked_t fscache_n_op_pend;
59646+atomic_unchecked_t fscache_n_op_run;
59647+atomic_unchecked_t fscache_n_op_enqueue;
59648+atomic_unchecked_t fscache_n_op_requeue;
59649+atomic_unchecked_t fscache_n_op_deferred_release;
59650+atomic_unchecked_t fscache_n_op_release;
59651+atomic_unchecked_t fscache_n_op_gc;
59652+atomic_unchecked_t fscache_n_op_cancelled;
59653+atomic_unchecked_t fscache_n_op_rejected;
59654
59655-atomic_t fscache_n_attr_changed;
59656-atomic_t fscache_n_attr_changed_ok;
59657-atomic_t fscache_n_attr_changed_nobufs;
59658-atomic_t fscache_n_attr_changed_nomem;
59659-atomic_t fscache_n_attr_changed_calls;
59660+atomic_unchecked_t fscache_n_attr_changed;
59661+atomic_unchecked_t fscache_n_attr_changed_ok;
59662+atomic_unchecked_t fscache_n_attr_changed_nobufs;
59663+atomic_unchecked_t fscache_n_attr_changed_nomem;
59664+atomic_unchecked_t fscache_n_attr_changed_calls;
59665
59666-atomic_t fscache_n_allocs;
59667-atomic_t fscache_n_allocs_ok;
59668-atomic_t fscache_n_allocs_wait;
59669-atomic_t fscache_n_allocs_nobufs;
59670-atomic_t fscache_n_allocs_intr;
59671-atomic_t fscache_n_allocs_object_dead;
59672-atomic_t fscache_n_alloc_ops;
59673-atomic_t fscache_n_alloc_op_waits;
59674+atomic_unchecked_t fscache_n_allocs;
59675+atomic_unchecked_t fscache_n_allocs_ok;
59676+atomic_unchecked_t fscache_n_allocs_wait;
59677+atomic_unchecked_t fscache_n_allocs_nobufs;
59678+atomic_unchecked_t fscache_n_allocs_intr;
59679+atomic_unchecked_t fscache_n_allocs_object_dead;
59680+atomic_unchecked_t fscache_n_alloc_ops;
59681+atomic_unchecked_t fscache_n_alloc_op_waits;
59682
59683-atomic_t fscache_n_retrievals;
59684-atomic_t fscache_n_retrievals_ok;
59685-atomic_t fscache_n_retrievals_wait;
59686-atomic_t fscache_n_retrievals_nodata;
59687-atomic_t fscache_n_retrievals_nobufs;
59688-atomic_t fscache_n_retrievals_intr;
59689-atomic_t fscache_n_retrievals_nomem;
59690-atomic_t fscache_n_retrievals_object_dead;
59691-atomic_t fscache_n_retrieval_ops;
59692-atomic_t fscache_n_retrieval_op_waits;
59693+atomic_unchecked_t fscache_n_retrievals;
59694+atomic_unchecked_t fscache_n_retrievals_ok;
59695+atomic_unchecked_t fscache_n_retrievals_wait;
59696+atomic_unchecked_t fscache_n_retrievals_nodata;
59697+atomic_unchecked_t fscache_n_retrievals_nobufs;
59698+atomic_unchecked_t fscache_n_retrievals_intr;
59699+atomic_unchecked_t fscache_n_retrievals_nomem;
59700+atomic_unchecked_t fscache_n_retrievals_object_dead;
59701+atomic_unchecked_t fscache_n_retrieval_ops;
59702+atomic_unchecked_t fscache_n_retrieval_op_waits;
59703
59704-atomic_t fscache_n_stores;
59705-atomic_t fscache_n_stores_ok;
59706-atomic_t fscache_n_stores_again;
59707-atomic_t fscache_n_stores_nobufs;
59708-atomic_t fscache_n_stores_oom;
59709-atomic_t fscache_n_store_ops;
59710-atomic_t fscache_n_store_calls;
59711-atomic_t fscache_n_store_pages;
59712-atomic_t fscache_n_store_radix_deletes;
59713-atomic_t fscache_n_store_pages_over_limit;
59714+atomic_unchecked_t fscache_n_stores;
59715+atomic_unchecked_t fscache_n_stores_ok;
59716+atomic_unchecked_t fscache_n_stores_again;
59717+atomic_unchecked_t fscache_n_stores_nobufs;
59718+atomic_unchecked_t fscache_n_stores_oom;
59719+atomic_unchecked_t fscache_n_store_ops;
59720+atomic_unchecked_t fscache_n_store_calls;
59721+atomic_unchecked_t fscache_n_store_pages;
59722+atomic_unchecked_t fscache_n_store_radix_deletes;
59723+atomic_unchecked_t fscache_n_store_pages_over_limit;
59724
59725-atomic_t fscache_n_store_vmscan_not_storing;
59726-atomic_t fscache_n_store_vmscan_gone;
59727-atomic_t fscache_n_store_vmscan_busy;
59728-atomic_t fscache_n_store_vmscan_cancelled;
59729-atomic_t fscache_n_store_vmscan_wait;
59730+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
59731+atomic_unchecked_t fscache_n_store_vmscan_gone;
59732+atomic_unchecked_t fscache_n_store_vmscan_busy;
59733+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
59734+atomic_unchecked_t fscache_n_store_vmscan_wait;
59735
59736-atomic_t fscache_n_marks;
59737-atomic_t fscache_n_uncaches;
59738+atomic_unchecked_t fscache_n_marks;
59739+atomic_unchecked_t fscache_n_uncaches;
59740
59741-atomic_t fscache_n_acquires;
59742-atomic_t fscache_n_acquires_null;
59743-atomic_t fscache_n_acquires_no_cache;
59744-atomic_t fscache_n_acquires_ok;
59745-atomic_t fscache_n_acquires_nobufs;
59746-atomic_t fscache_n_acquires_oom;
59747+atomic_unchecked_t fscache_n_acquires;
59748+atomic_unchecked_t fscache_n_acquires_null;
59749+atomic_unchecked_t fscache_n_acquires_no_cache;
59750+atomic_unchecked_t fscache_n_acquires_ok;
59751+atomic_unchecked_t fscache_n_acquires_nobufs;
59752+atomic_unchecked_t fscache_n_acquires_oom;
59753
59754-atomic_t fscache_n_invalidates;
59755-atomic_t fscache_n_invalidates_run;
59756+atomic_unchecked_t fscache_n_invalidates;
59757+atomic_unchecked_t fscache_n_invalidates_run;
59758
59759-atomic_t fscache_n_updates;
59760-atomic_t fscache_n_updates_null;
59761-atomic_t fscache_n_updates_run;
59762+atomic_unchecked_t fscache_n_updates;
59763+atomic_unchecked_t fscache_n_updates_null;
59764+atomic_unchecked_t fscache_n_updates_run;
59765
59766-atomic_t fscache_n_relinquishes;
59767-atomic_t fscache_n_relinquishes_null;
59768-atomic_t fscache_n_relinquishes_waitcrt;
59769-atomic_t fscache_n_relinquishes_retire;
59770+atomic_unchecked_t fscache_n_relinquishes;
59771+atomic_unchecked_t fscache_n_relinquishes_null;
59772+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
59773+atomic_unchecked_t fscache_n_relinquishes_retire;
59774
59775-atomic_t fscache_n_cookie_index;
59776-atomic_t fscache_n_cookie_data;
59777-atomic_t fscache_n_cookie_special;
59778+atomic_unchecked_t fscache_n_cookie_index;
59779+atomic_unchecked_t fscache_n_cookie_data;
59780+atomic_unchecked_t fscache_n_cookie_special;
59781
59782-atomic_t fscache_n_object_alloc;
59783-atomic_t fscache_n_object_no_alloc;
59784-atomic_t fscache_n_object_lookups;
59785-atomic_t fscache_n_object_lookups_negative;
59786-atomic_t fscache_n_object_lookups_positive;
59787-atomic_t fscache_n_object_lookups_timed_out;
59788-atomic_t fscache_n_object_created;
59789-atomic_t fscache_n_object_avail;
59790-atomic_t fscache_n_object_dead;
59791+atomic_unchecked_t fscache_n_object_alloc;
59792+atomic_unchecked_t fscache_n_object_no_alloc;
59793+atomic_unchecked_t fscache_n_object_lookups;
59794+atomic_unchecked_t fscache_n_object_lookups_negative;
59795+atomic_unchecked_t fscache_n_object_lookups_positive;
59796+atomic_unchecked_t fscache_n_object_lookups_timed_out;
59797+atomic_unchecked_t fscache_n_object_created;
59798+atomic_unchecked_t fscache_n_object_avail;
59799+atomic_unchecked_t fscache_n_object_dead;
59800
59801-atomic_t fscache_n_checkaux_none;
59802-atomic_t fscache_n_checkaux_okay;
59803-atomic_t fscache_n_checkaux_update;
59804-atomic_t fscache_n_checkaux_obsolete;
59805+atomic_unchecked_t fscache_n_checkaux_none;
59806+atomic_unchecked_t fscache_n_checkaux_okay;
59807+atomic_unchecked_t fscache_n_checkaux_update;
59808+atomic_unchecked_t fscache_n_checkaux_obsolete;
59809
59810 atomic_t fscache_n_cop_alloc_object;
59811 atomic_t fscache_n_cop_lookup_object;
59812@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
59813 seq_puts(m, "FS-Cache statistics\n");
59814
 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
- atomic_read(&fscache_n_cookie_index),
- atomic_read(&fscache_n_cookie_data),
- atomic_read(&fscache_n_cookie_special));
+ atomic_read_unchecked(&fscache_n_cookie_index),
+ atomic_read_unchecked(&fscache_n_cookie_data),
+ atomic_read_unchecked(&fscache_n_cookie_special));

 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
- atomic_read(&fscache_n_object_alloc),
- atomic_read(&fscache_n_object_no_alloc),
- atomic_read(&fscache_n_object_avail),
- atomic_read(&fscache_n_object_dead));
+ atomic_read_unchecked(&fscache_n_object_alloc),
+ atomic_read_unchecked(&fscache_n_object_no_alloc),
+ atomic_read_unchecked(&fscache_n_object_avail),
+ atomic_read_unchecked(&fscache_n_object_dead));
 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
- atomic_read(&fscache_n_checkaux_none),
- atomic_read(&fscache_n_checkaux_okay),
- atomic_read(&fscache_n_checkaux_update),
- atomic_read(&fscache_n_checkaux_obsolete));
+ atomic_read_unchecked(&fscache_n_checkaux_none),
+ atomic_read_unchecked(&fscache_n_checkaux_okay),
+ atomic_read_unchecked(&fscache_n_checkaux_update),
+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));

 seq_printf(m, "Pages : mrk=%u unc=%u\n",
- atomic_read(&fscache_n_marks),
- atomic_read(&fscache_n_uncaches));
+ atomic_read_unchecked(&fscache_n_marks),
+ atomic_read_unchecked(&fscache_n_uncaches));

 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
 " oom=%u\n",
- atomic_read(&fscache_n_acquires),
- atomic_read(&fscache_n_acquires_null),
- atomic_read(&fscache_n_acquires_no_cache),
- atomic_read(&fscache_n_acquires_ok),
- atomic_read(&fscache_n_acquires_nobufs),
- atomic_read(&fscache_n_acquires_oom));
+ atomic_read_unchecked(&fscache_n_acquires),
+ atomic_read_unchecked(&fscache_n_acquires_null),
+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
+ atomic_read_unchecked(&fscache_n_acquires_ok),
+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
+ atomic_read_unchecked(&fscache_n_acquires_oom));

 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
- atomic_read(&fscache_n_object_lookups),
- atomic_read(&fscache_n_object_lookups_negative),
- atomic_read(&fscache_n_object_lookups_positive),
- atomic_read(&fscache_n_object_created),
- atomic_read(&fscache_n_object_lookups_timed_out));
+ atomic_read_unchecked(&fscache_n_object_lookups),
+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
+ atomic_read_unchecked(&fscache_n_object_created),
+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));

 seq_printf(m, "Invals : n=%u run=%u\n",
- atomic_read(&fscache_n_invalidates),
- atomic_read(&fscache_n_invalidates_run));
+ atomic_read_unchecked(&fscache_n_invalidates),
+ atomic_read_unchecked(&fscache_n_invalidates_run));

 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
- atomic_read(&fscache_n_updates),
- atomic_read(&fscache_n_updates_null),
- atomic_read(&fscache_n_updates_run));
+ atomic_read_unchecked(&fscache_n_updates),
+ atomic_read_unchecked(&fscache_n_updates_null),
+ atomic_read_unchecked(&fscache_n_updates_run));

 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
- atomic_read(&fscache_n_relinquishes),
- atomic_read(&fscache_n_relinquishes_null),
- atomic_read(&fscache_n_relinquishes_waitcrt),
- atomic_read(&fscache_n_relinquishes_retire));
+ atomic_read_unchecked(&fscache_n_relinquishes),
+ atomic_read_unchecked(&fscache_n_relinquishes_null),
+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
+ atomic_read_unchecked(&fscache_n_relinquishes_retire));

 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
- atomic_read(&fscache_n_attr_changed),
- atomic_read(&fscache_n_attr_changed_ok),
- atomic_read(&fscache_n_attr_changed_nobufs),
- atomic_read(&fscache_n_attr_changed_nomem),
- atomic_read(&fscache_n_attr_changed_calls));
+ atomic_read_unchecked(&fscache_n_attr_changed),
+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
+ atomic_read_unchecked(&fscache_n_attr_changed_calls));

 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
- atomic_read(&fscache_n_allocs),
- atomic_read(&fscache_n_allocs_ok),
- atomic_read(&fscache_n_allocs_wait),
- atomic_read(&fscache_n_allocs_nobufs),
- atomic_read(&fscache_n_allocs_intr));
+ atomic_read_unchecked(&fscache_n_allocs),
+ atomic_read_unchecked(&fscache_n_allocs_ok),
+ atomic_read_unchecked(&fscache_n_allocs_wait),
+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
+ atomic_read_unchecked(&fscache_n_allocs_intr));
 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
- atomic_read(&fscache_n_alloc_ops),
- atomic_read(&fscache_n_alloc_op_waits),
- atomic_read(&fscache_n_allocs_object_dead));
+ atomic_read_unchecked(&fscache_n_alloc_ops),
+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
+ atomic_read_unchecked(&fscache_n_allocs_object_dead));

 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
 " int=%u oom=%u\n",
- atomic_read(&fscache_n_retrievals),
- atomic_read(&fscache_n_retrievals_ok),
- atomic_read(&fscache_n_retrievals_wait),
- atomic_read(&fscache_n_retrievals_nodata),
- atomic_read(&fscache_n_retrievals_nobufs),
- atomic_read(&fscache_n_retrievals_intr),
- atomic_read(&fscache_n_retrievals_nomem));
+ atomic_read_unchecked(&fscache_n_retrievals),
+ atomic_read_unchecked(&fscache_n_retrievals_ok),
+ atomic_read_unchecked(&fscache_n_retrievals_wait),
+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
+ atomic_read_unchecked(&fscache_n_retrievals_intr),
+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
- atomic_read(&fscache_n_retrieval_ops),
- atomic_read(&fscache_n_retrieval_op_waits),
- atomic_read(&fscache_n_retrievals_object_dead));
+ atomic_read_unchecked(&fscache_n_retrieval_ops),
+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));

 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
- atomic_read(&fscache_n_stores),
- atomic_read(&fscache_n_stores_ok),
- atomic_read(&fscache_n_stores_again),
- atomic_read(&fscache_n_stores_nobufs),
- atomic_read(&fscache_n_stores_oom));
+ atomic_read_unchecked(&fscache_n_stores),
+ atomic_read_unchecked(&fscache_n_stores_ok),
+ atomic_read_unchecked(&fscache_n_stores_again),
+ atomic_read_unchecked(&fscache_n_stores_nobufs),
+ atomic_read_unchecked(&fscache_n_stores_oom));
 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
- atomic_read(&fscache_n_store_ops),
- atomic_read(&fscache_n_store_calls),
- atomic_read(&fscache_n_store_pages),
- atomic_read(&fscache_n_store_radix_deletes),
- atomic_read(&fscache_n_store_pages_over_limit));
+ atomic_read_unchecked(&fscache_n_store_ops),
+ atomic_read_unchecked(&fscache_n_store_calls),
+ atomic_read_unchecked(&fscache_n_store_pages),
+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));

 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
- atomic_read(&fscache_n_store_vmscan_not_storing),
- atomic_read(&fscache_n_store_vmscan_gone),
- atomic_read(&fscache_n_store_vmscan_busy),
- atomic_read(&fscache_n_store_vmscan_cancelled),
- atomic_read(&fscache_n_store_vmscan_wait));
+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));

 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
- atomic_read(&fscache_n_op_pend),
- atomic_read(&fscache_n_op_run),
- atomic_read(&fscache_n_op_enqueue),
- atomic_read(&fscache_n_op_cancelled),
- atomic_read(&fscache_n_op_rejected));
+ atomic_read_unchecked(&fscache_n_op_pend),
+ atomic_read_unchecked(&fscache_n_op_run),
+ atomic_read_unchecked(&fscache_n_op_enqueue),
+ atomic_read_unchecked(&fscache_n_op_cancelled),
+ atomic_read_unchecked(&fscache_n_op_rejected));
 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
- atomic_read(&fscache_n_op_deferred_release),
- atomic_read(&fscache_n_op_release),
- atomic_read(&fscache_n_op_gc));
+ atomic_read_unchecked(&fscache_n_op_deferred_release),
+ atomic_read_unchecked(&fscache_n_op_release),
+ atomic_read_unchecked(&fscache_n_op_gc));

 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
 atomic_read(&fscache_n_cop_alloc_object),
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index b96a49b..9bfdc47 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -606,10 +606,12 @@ static int __init cuse_init(void)
 INIT_LIST_HEAD(&cuse_conntbl[i]);

 /* inherit and extend fuse_dev_operations */
- cuse_channel_fops = fuse_dev_operations;
- cuse_channel_fops.owner = THIS_MODULE;
- cuse_channel_fops.open = cuse_channel_open;
- cuse_channel_fops.release = cuse_channel_release;
+ pax_open_kernel();
+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
+ pax_close_kernel();

 cuse_class = class_create(THIS_MODULE, "cuse");
 if (IS_ERR(cuse_class))
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index ef74ad5..c9ac759e 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1339,7 +1339,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
 ret = 0;
 pipe_lock(pipe);

- if (!pipe->readers) {
+ if (!atomic_read(&pipe->readers)) {
 send_sig(SIGPIPE, current, 0);
 if (!ret)
 ret = -EPIPE;
@@ -1364,7 +1364,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
 page_nr++;
 ret += buf->len;

- if (pipe->files)
+ if (atomic_read(&pipe->files))
 do_wakeup = 1;
 }

diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index c3eb2c4..98007d4 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1408,7 +1408,7 @@ static char *read_link(struct dentry *dentry)
 return link;
 }

-static void free_link(char *link)
+static void free_link(const char *link)
 {
 if (!IS_ERR(link))
 free_page((unsigned long) link);
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index db23ce1..9e6cd9d 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -895,7 +895,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)

 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
 {
- char *s = nd_get_link(nd);
+ const char *s = nd_get_link(nd);
 if (!IS_ERR(s))
 __putname(s);
 }
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index d19b30a..ef89c36 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -152,6 +152,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 struct mm_struct *mm = current->mm;
 struct vm_area_struct *vma;
 struct hstate *h = hstate_file(file);
+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
 struct vm_unmapped_area_info info;

 if (len & ~huge_page_mask(h))
@@ -165,17 +166,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 return addr;
 }

+#ifdef CONFIG_PAX_RANDMMAP
+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 if (addr) {
 addr = ALIGN(addr, huge_page_size(h));
 vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
 return addr;
 }

 info.flags = 0;
 info.length = len;
 info.low_limit = TASK_UNMAPPED_BASE;
+
+#ifdef CONFIG_PAX_RANDMMAP
+ if (mm->pax_flags & MF_PAX_RANDMMAP)
+ info.low_limit += mm->delta_mmap;
+#endif
+
 info.high_limit = TASK_SIZE;
 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
 info.align_offset = 0;
@@ -908,7 +918,7 @@ static struct file_system_type hugetlbfs_fs_type = {
 };
 MODULE_ALIAS_FS("hugetlbfs");

-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

 static int can_do_hugetlb_shm(void)
 {
diff --git a/fs/inode.c b/fs/inode.c
index 4bcdad3..1883822 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -841,8 +841,8 @@ unsigned int get_next_ino(void)

 #ifdef CONFIG_SMP
 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
- static atomic_t shared_last_ino;
- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
+ static atomic_unchecked_t shared_last_ino;
+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);

 res = next - LAST_INO_BATCH;
 }
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index 4a6cf28..d3a29d3 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
 struct jffs2_unknown_node marker = {
 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
- .totlen = cpu_to_je32(c->cleanmarker_size)
+ .totlen = cpu_to_je32(c->cleanmarker_size),
+ .hdr_crc = cpu_to_je32(0)
 };

 jffs2_prealloc_raw_node_refs(c, jeb, 1);
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index a6597d6..41b30ec 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
 {
 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
- .totlen = constant_cpu_to_je32(8)
+ .totlen = constant_cpu_to_je32(8),
+ .hdr_crc = constant_cpu_to_je32(0)
 };

 /*
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 6669aa2..36b033d 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -882,7 +882,7 @@ static int __init init_jfs_fs(void)

 jfs_inode_cachep =
 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
 init_once);
 if (jfs_inode_cachep == NULL)
 return -ENOMEM;
diff --git a/fs/libfs.c b/fs/libfs.c
index a184424..944ddce 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -159,6 +159,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)

 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
 struct dentry *next = list_entry(p, struct dentry, d_u.d_child);
+ char d_name[sizeof(next->d_iname)];
+ const unsigned char *name;
+
 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
 if (!simple_positive(next)) {
 spin_unlock(&next->d_lock);
@@ -167,7 +170,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)

 spin_unlock(&next->d_lock);
 spin_unlock(&dentry->d_lock);
- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
+ name = next->d_name.name;
+ if (name == next->d_iname) {
+ memcpy(d_name, name, next->d_name.len);
+ name = d_name;
+ }
+ if (!dir_emit(ctx, name, next->d_name.len,
 next->d_inode->i_ino, dt_type(next->d_inode)))
 return 0;
 spin_lock(&dentry->d_lock);
@@ -999,7 +1007,7 @@ EXPORT_SYMBOL(noop_fsync);
 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
 void *cookie)
 {
- char *s = nd_get_link(nd);
+ const char *s = nd_get_link(nd);
 if (!IS_ERR(s))
 kfree(s);
 }
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index acd3947..1f896e2 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
 /*
 * Cookie counter for NLM requests
 */
-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);

 void nlmclnt_next_cookie(struct nlm_cookie *c)
 {
- u32 cookie = atomic_inc_return(&nlm_cookie);
+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);

 memcpy(c->data, &cookie, 4);
 c->len=4;
diff --git a/fs/locks.c b/fs/locks.c
index 92a0f0a..45a48f0 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -2219,16 +2219,16 @@ void locks_remove_flock(struct file *filp)
 return;

 if (filp->f_op->flock) {
- struct file_lock fl = {
+ struct file_lock flock = {
 .fl_pid = current->tgid,
 .fl_file = filp,
 .fl_flags = FL_FLOCK,
 .fl_type = F_UNLCK,
 .fl_end = OFFSET_MAX,
 };
- filp->f_op->flock(filp, F_SETLKW, &fl);
- if (fl.fl_ops && fl.fl_ops->fl_release_private)
- fl.fl_ops->fl_release_private(&fl);
+ filp->f_op->flock(filp, F_SETLKW, &flock);
+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
+ flock.fl_ops->fl_release_private(&flock);
 }

 spin_lock(&inode->i_lock);
diff --git a/fs/mount.h b/fs/mount.h
index a17458c..e69fb5b 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -11,7 +11,7 @@ struct mnt_namespace {
 u64 seq; /* Sequence number to prevent loops */
 wait_queue_head_t poll;
 int event;
-};
+} __randomize_layout;

 struct mnt_pcp {
 int mnt_count;
@@ -57,7 +57,7 @@ struct mount {
 int mnt_expiry_mark; /* true if marked for expiry */
 int mnt_pinned;
 struct path mnt_ex_mountpoint;
-};
+} __randomize_layout;

 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */

diff --git a/fs/namei.c b/fs/namei.c
index 3531dee..3177227 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
 if (ret != -EACCES)
 return ret;

+#ifdef CONFIG_GRKERNSEC
+ /* we'll block if we have to log due to a denied capability use */
+ if (mask & MAY_NOT_BLOCK)
+ return -ECHILD;
+#endif
+
 if (S_ISDIR(inode->i_mode)) {
 /* DACs are overridable for directories */
- if (inode_capable(inode, CAP_DAC_OVERRIDE))
- return 0;
 if (!(mask & MAY_WRITE))
- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
+ inode_capable(inode, CAP_DAC_READ_SEARCH))
 return 0;
+ if (inode_capable(inode, CAP_DAC_OVERRIDE))
+ return 0;
 return -EACCES;
 }
 /*
+ * Searching includes executable on directories, else just read.
+ */
+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
+ if (mask == MAY_READ)
+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
+ inode_capable(inode, CAP_DAC_READ_SEARCH))
+ return 0;
+
+ /*
 * Read/write DACs are always overridable.
 * Executable DACs are overridable when there is
 * at least one exec bit set.
@@ -337,14 +353,6 @@ int generic_permission(struct inode *inode, int mask)
 if (inode_capable(inode, CAP_DAC_OVERRIDE))
 return 0;

- /*
- * Searching includes executable on directories, else just read.
- */
- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
- if (mask == MAY_READ)
- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
- return 0;
-
 return -EACCES;
 }

@@ -810,7 +818,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
 {
 struct dentry *dentry = link->dentry;
 int error;
- char *s;
+ const char *s;

 BUG_ON(nd->flags & LOOKUP_RCU);

@@ -831,6 +839,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
 if (error)
 goto out_put_nd_path;

+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
+ dentry->d_inode, dentry, nd->path.mnt)) {
+ error = -EACCES;
+ goto out_put_nd_path;
+ }
+
 nd->last_type = LAST_BIND;
 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
 error = PTR_ERR(*p);
@@ -1582,6 +1596,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
 if (res)
 break;
 res = walk_component(nd, path, LOOKUP_FOLLOW);
+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
+ res = -EACCES;
 put_link(nd, &link, cookie);
 } while (res > 0);

@@ -1655,7 +1671,7 @@ EXPORT_SYMBOL(full_name_hash);
 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
 {
 unsigned long a, b, adata, bdata, mask, hash, len;
- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;

 hash = a = 0;
 len = -sizeof(unsigned long);
@@ -1939,6 +1955,8 @@ static int path_lookupat(int dfd, const char *name,
 if (err)
 break;
 err = lookup_last(nd, &path);
+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
+ err = -EACCES;
 put_link(nd, &link, cookie);
 }
 }
@@ -1946,6 +1964,13 @@ static int path_lookupat(int dfd, const char *name,
 if (!err)
 err = complete_walk(nd);

+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
+ path_put(&nd->path);
+ err = -ENOENT;
+ }
+ }
+
 if (!err && nd->flags & LOOKUP_DIRECTORY) {
 if (!d_is_directory(nd->path.dentry)) {
 path_put(&nd->path);
@@ -1973,8 +1998,15 @@ static int filename_lookup(int dfd, struct filename *name,
 retval = path_lookupat(dfd, name->name,
 flags | LOOKUP_REVAL, nd);

- if (likely(!retval))
+ if (likely(!retval)) {
 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
+ path_put(&nd->path);
+ return -ENOENT;
+ }
+ }
+ }
 return retval;
 }

@@ -2548,6 +2580,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
 return -EPERM;

+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
+ return -EPERM;
+ if (gr_handle_rawio(inode))
+ return -EPERM;
+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
+ return -EACCES;
+
 return 0;
 }

@@ -2779,7 +2818,7 @@ looked_up:
 * cleared otherwise prior to returning.
 */
 static int lookup_open(struct nameidata *nd, struct path *path,
- struct file *file,
+ struct path *link, struct file *file,
 const struct open_flags *op,
 bool got_write, int *opened)
 {
@@ -2814,6 +2853,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
 /* Negative dentry, just create the file */
 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
 umode_t mode = op->mode;
+
+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
+ error = -EACCES;
+ goto out_dput;
+ }
+
+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
+ error = -EACCES;
+ goto out_dput;
+ }
+
 if (!IS_POSIXACL(dir->d_inode))
 mode &= ~current_umask();
 /*
@@ -2835,6 +2885,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
 nd->flags & LOOKUP_EXCL);
 if (error)
 goto out_dput;
+ else
+ gr_handle_create(dentry, nd->path.mnt);
 }
 out_no_open:
 path->dentry = dentry;
@@ -2849,7 +2901,7 @@ out_dput:
 /*
 * Handle the last step of open()
 */
-static int do_last(struct nameidata *nd, struct path *path,
+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
 struct file *file, const struct open_flags *op,
 int *opened, struct filename *name)
 {
@@ -2899,6 +2951,15 @@ static int do_last(struct nameidata *nd, struct path *path,
 if (error)
 return error;

+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
+ error = -ENOENT;
+ goto out;
+ }
+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
+ error = -EACCES;
+ goto out;
+ }
+
 audit_inode(name, dir, LOOKUP_PARENT);
 error = -EISDIR;
 /* trailing slashes? */
@@ -2918,7 +2979,7 @@ retry_lookup:
 */
 }
 mutex_lock(&dir->d_inode->i_mutex);
- error = lookup_open(nd, path, file, op, got_write, opened);
+ error = lookup_open(nd, path, link, file, op, got_write, opened);
 mutex_unlock(&dir->d_inode->i_mutex);

 if (error <= 0) {
@@ -2942,11 +3003,28 @@ retry_lookup:
 goto finish_open_created;
 }

+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
+ error = -ENOENT;
+ goto exit_dput;
+ }
+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
+ error = -EACCES;
+ goto exit_dput;
+ }
+
 /*
 * create/update audit record if it already exists.
 */
- if (d_is_positive(path->dentry))
+ if (d_is_positive(path->dentry)) {
+ /* only check if O_CREAT is specified, all other checks need to go
+ into may_open */
+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
+ error = -EACCES;
+ goto exit_dput;
+ }
+
 audit_inode(name, path->dentry, 0);
+ }

 /*
 * If atomic_open() acquired write access it is dropped now due to
@@ -2987,6 +3065,11 @@ finish_lookup:
 }
 }
 BUG_ON(inode != path->dentry->d_inode);
+ /* if we're resolving a symlink to another symlink */
+ if (link && gr_handle_symlink_owner(link, inode)) {
+ error = -EACCES;
+ goto out;
+ }
 return 1;
 }

@@ -2996,7 +3079,6 @@ finish_lookup:
 save_parent.dentry = nd->path.dentry;
 save_parent.mnt = mntget(path->mnt);
 nd->path.dentry = path->dentry;
-
 }
 nd->inode = inode;
 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
@@ -3006,7 +3088,18 @@ finish_open:
 path_put(&save_parent);
 return error;
 }
+
+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
+ error = -ENOENT;
+ goto out;
+ }
+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
+ error = -EACCES;
+ goto out;
+ }
+
 audit_inode(name, nd->path.dentry, 0);
+
 error = -EISDIR;
 if ((open_flag & O_CREAT) &&
 (d_is_directory(nd->path.dentry) || d_is_autodir(nd->path.dentry)))
@@ -3170,7 +3263,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
 if (unlikely(error))
 goto out;

- error = do_last(nd, &path, file, op, &opened, pathname);
+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
 while (unlikely(error > 0)) { /* trailing symlink */
 struct path link = path;
 void *cookie;
@@ -3188,7 +3281,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
 error = follow_link(&link, nd, &cookie);
 if (unlikely(error))
 break;
- error = do_last(nd, &path, file, op, &opened, pathname);
+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
 put_link(nd, &link, cookie);
 }
 out:
@@ -3288,9 +3381,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
 goto unlock;

 error = -EEXIST;
- if (d_is_positive(dentry))
+ if (d_is_positive(dentry)) {
+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
+ error = -ENOENT;
 goto fail;
-
+ }
 /*
 * Special case - lookup gave negative, but... we had foo/bar/
 * From the vfs_mknod() POV we just have a negative dentry -
@@ -3342,6 +3437,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
 }
 EXPORT_SYMBOL(user_path_create);

+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
+{
+ struct filename *tmp = getname(pathname);
+ struct dentry *res;
+ if (IS_ERR(tmp))
+ return ERR_CAST(tmp);
+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
+ if (IS_ERR(res))
+ putname(tmp);
+ else
+ *to = tmp;
+ return res;
+}
+
 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
 {
 int error = may_create(dir, dentry);
@@ -3404,6 +3513,17 @@ retry:

 if (!IS_POSIXACL(path.dentry->d_inode))
 mode &= ~current_umask();
+
+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
+ error = -EPERM;
+ goto out;
+ }
+
+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
+ error = -EACCES;
+ goto out;
+ }
+
 error = security_path_mknod(&path, dentry, mode, dev);
 if (error)
 goto out;
@@ -3420,6 +3540,8 @@ retry:
 break;
 }
 out:
+ if (!error)
+ gr_handle_create(dentry, path.mnt);
 done_path_create(&path, dentry);
 if (retry_estale(error, lookup_flags)) {
 lookup_flags |= LOOKUP_REVAL;
@@ -3472,9 +3594,16 @@ retry:

 if (!IS_POSIXACL(path.dentry->d_inode))
 mode &= ~current_umask();
+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
+ error = -EACCES;
+ goto out;
+ }
 error = security_path_mkdir(&path, dentry, mode);
 if (!error)
 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
+ if (!error)
+ gr_handle_create(dentry, path.mnt);
+out:
 done_path_create(&path, dentry);
 if (retry_estale(error, lookup_flags)) {
 lookup_flags |= LOOKUP_REVAL;
@@ -3555,6 +3684,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
 struct filename *name;
 struct dentry *dentry;
 struct nameidata nd;
+ ino_t saved_ino = 0;
+ dev_t saved_dev = 0;
 unsigned int lookup_flags = 0;
 retry:
 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
@@ -3587,10 +3718,21 @@ retry:
 error = -ENOENT;
 goto exit3;
 }
+
+ saved_ino = dentry->d_inode->i_ino;
+ saved_dev = gr_get_dev_from_dentry(dentry);
+
+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
+ error = -EACCES;
+ goto exit3;
+ }
+
 error = security_path_rmdir(&nd.path, dentry);
 if (error)
 goto exit3;
 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
+ if (!error && (saved_dev || saved_ino))
+ gr_handle_delete(saved_ino, saved_dev);
 exit3:
 dput(dentry);
 exit2:
@@ -3680,6 +3822,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
 struct nameidata nd;
 struct inode *inode = NULL;
 struct inode *delegated_inode = NULL;
+ ino_t saved_ino = 0;
+ dev_t saved_dev = 0;
 unsigned int lookup_flags = 0;
 retry:
 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
@@ -3706,10 +3850,22 @@ retry_deleg:
 if (d_is_negative(dentry))
 goto slashes;
 ihold(inode);
+
+ if (inode->i_nlink <= 1) {
+ saved_ino = inode->i_ino;
+ saved_dev = gr_get_dev_from_dentry(dentry);
+ }
+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
+ error = -EACCES;
+ goto exit2;
+ }
+
 error = security_path_unlink(&nd.path, dentry);
 if (error)
 goto exit2;
 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
+ if (!error && (saved_ino || saved_dev))
+ gr_handle_delete(saved_ino, saved_dev);
 exit2:
 dput(dentry);
 }
@@ -3797,9 +3953,17 @@ retry:
 if (IS_ERR(dentry))
 goto out_putname;

+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
+ error = -EACCES;
+ goto out;
+ }
+
 error = security_path_symlink(&path, dentry, from->name);
 if (!error)
 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
+ if (!error)
+ gr_handle_create(dentry, path.mnt);
+out:
 done_path_create(&path, dentry);
 if (retry_estale(error, lookup_flags)) {
 lookup_flags |= LOOKUP_REVAL;
@@ -3902,6 +4066,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
 struct dentry *new_dentry;
 struct path old_path, new_path;
 struct inode *delegated_inode = NULL;
+ struct filename *to = NULL;
 int how = 0;
 int error;

@@ -3925,7 +4090,7 @@ retry:
 if (error)
 return error;

- new_dentry = user_path_create(newdfd, newname, &new_path,
+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
 (how & LOOKUP_REVAL));
 error = PTR_ERR(new_dentry);
 if (IS_ERR(new_dentry))
@@ -3937,11 +4102,28 @@ retry:
 error = may_linkat(&old_path);
 if (unlikely(error))
 goto out_dput;
+
+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
+ old_path.dentry->d_inode,
+ old_path.dentry->d_inode->i_mode, to)) {
+ error = -EACCES;
+ goto out_dput;
+ }
+
+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
+ old_path.dentry, old_path.mnt, to)) {
+ error = -EACCES;
+ goto out_dput;
+ }
+
 error = security_path_link(old_path.dentry, &new_path, new_dentry);
 if (error)
 goto out_dput;
 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
+ if (!error)
+ gr_handle_create(new_dentry, new_path.mnt);
 out_dput:
+ putname(to);
 done_path_create(&new_path, new_dentry);
 if (delegated_inode) {
 error = break_deleg_wait(&delegated_inode);
@@ -4225,6 +4407,12 @@ retry_deleg:
 if (new_dentry == trap)
 goto exit5;

+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
+ to);
+ if (error)
+ goto exit5;
+
 error = security_path_rename(&oldnd.path, old_dentry,
 &newnd.path, new_dentry);
 if (error)
@@ -4232,6 +4420,9 @@ retry_deleg:
 error = vfs_rename(old_dir->d_inode, old_dentry,
 new_dir->d_inode, new_dentry,
 &delegated_inode);
+ if (!error)
+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
 exit5:
 dput(new_dentry);
 exit4:
@@ -4268,6 +4459,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna

 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
 {
+ char tmpbuf[64];
+ const char *newlink;
 int len;

 len = PTR_ERR(link);
@@ -4277,7 +4470,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
 len = strlen(link);
 if (len > (unsigned) buflen)
 len = buflen;
- if (copy_to_user(buffer, link, len))
+
+ if (len < sizeof(tmpbuf)) {
+ memcpy(tmpbuf, link, len);
+ newlink = tmpbuf;
+ } else
+ newlink = link;
+
+ if (copy_to_user(buffer, newlink, len))
 len = -EFAULT;
 out:
 return len;
diff --git a/fs/namespace.c b/fs/namespace.c
index be32ebc..c595734 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1293,6 +1293,9 @@ static int do_umount(struct mount *mnt, int flags)
 if (!(sb->s_flags & MS_RDONLY))
 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
 up_write(&sb->s_umount);
+
+ gr_log_remount(mnt->mnt_devname, retval);
+
 return retval;
 }

@@ -1315,6 +1318,9 @@ static int do_umount(struct mount *mnt, int flags)
 }
 unlock_mount_hash();
 namespace_unlock();
+
+ gr_log_unmount(mnt->mnt_devname, retval);
+
 return retval;
 }

@@ -1334,7 +1340,7 @@ static inline bool may_mount(void)
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */

-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
 {
 struct path path;
 struct mount *mnt;
@@ -1376,7 +1382,7 @@ out:
 /*
 * The 2.0 compatible umount. No flags.
 */
-SYSCALL_DEFINE1(oldumount, char __user *, name)
+SYSCALL_DEFINE1(oldumount, const char __user *, name)
 {
 return sys_umount(name, 0);
 }
@@ -2379,6 +2385,16 @@ long do_mount(const char *dev_name, const char *dir_name,
 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
 MS_STRICTATIME);

+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
+ retval = -EPERM;
+ goto dput_out;
+ }
+
+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
+ retval = -EPERM;
+ goto dput_out;
+ }
+
 if (flags & MS_REMOUNT)
 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
 data_page);
@@ -2393,6 +2409,9 @@ long do_mount(const char *dev_name, const char *dir_name,
 dev_name, data_page);
 dput_out:
 path_put(&path);
+
+ gr_log_mount(dev_name, dir_name, retval);
+
 return retval;
 }

@@ -2410,7 +2429,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
 * number incrementing at 10Ghz will take 12,427 years to wrap which
 * is effectively never, so we can ignore the possibility.
 */
-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);

 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
 {
@@ -2425,7 +2444,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
 kfree(new_ns);
 return ERR_PTR(ret);
 }
- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
+ new_ns->seq = atomic64_inc_return_unchecked(&mnt_ns_seq);
 atomic_set(&new_ns->count, 1);
 new_ns->root = NULL;
 INIT_LIST_HEAD(&new_ns->list);
@@ -2435,7 +2454,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
 return new_ns;
 }

-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
 struct user_namespace *user_ns, struct fs_struct *new_fs)
 {
 struct mnt_namespace *new_ns;
@@ -2556,8 +2575,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
 }
 EXPORT_SYMBOL(mount_subtree);

-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
- char __user *, type, unsigned long, flags, void __user *, data)
+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
+ const char __user *, type, unsigned long, flags, void __user *, data)
 {
 int ret;
 char *kernel_type;
@@ -2670,6 +2689,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
 if (error)
 goto out2;

+ if (gr_handle_chroot_pivot()) {
+ error = -EPERM;
+ goto out2;
+ }
+
 get_fs_root(current->fs, &root);
 old_mp = lock_mount(&old);
 error = PTR_ERR(old_mp);
@@ -2930,7 +2954,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
 return -EPERM;

- if (fs->users != 1)
+ if (atomic_read(&fs->users) != 1)
 return -EINVAL;

 get_mnt_ns(mnt_ns);
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index f4ccfe6..a5cf064 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -51,7 +51,7 @@ struct callback_op {
 callback_decode_arg_t decode_args;
 callback_encode_res_t encode_res;
 long res_maxsize;
-};
+} __do_const;

 static struct callback_op callback_ops[];

diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 00ad1c2..2fde15e 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1146,16 +1146,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
 }

-static atomic_long_t nfs_attr_generation_counter;
+static atomic_long_unchecked_t nfs_attr_generation_counter;

 static unsigned long nfs_read_attr_generation_counter(void)
 {
- return atomic_long_read(&nfs_attr_generation_counter);
+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
 }

 unsigned long nfs_inc_attr_generation_counter(void)
 {
- return atomic_long_inc_return(&nfs_attr_generation_counter);
+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
 }

 void nfs_fattr_init(struct nfs_fattr *fattr)
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index b4a160a..2b9bfba 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -409,13 +409,11 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
 error = nfs4_discover_server_trunking(clp, &old);
 if (error < 0)
 goto error;
- nfs_put_client(clp);
- if (clp != old) {
+
+ if (clp != old)
 clp->cl_preserve_clid = true;
- clp = old;
- }
-
- return clp;
+ nfs_put_client(clp);
+ return old;

 error:
 nfs_mark_client_ready(clp, error);
@@ -493,9 +491,10 @@ int nfs40_walk_client_list(struct nfs_client *new,
 prev = pos;

 status = nfs_wait_client_init_complete(pos);
- spin_lock(&nn->nfs_client_lock);
 if (status < 0)
- continue;
+ goto out;
+ status = -NFS4ERR_STALE_CLIENTID;
+ spin_lock(&nn->nfs_client_lock);
 }
 if (pos->cl_cons_state != NFS_CS_READY)
 continue;
@@ -633,7 +632,8 @@ int nfs41_walk_client_list(struct nfs_client *new,
 }
 spin_lock(&nn->nfs_client_lock);
 if (status < 0)
- continue;
+ break;
+ status = -NFS4ERR_STALE_CLIENTID;
 }
 if (pos->cl_cons_state != NFS_CS_READY)
 continue;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index c1d5482..6a85038 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -922,19 +922,20 @@ out:
 * extend the write to cover the entire page in order to avoid fragmentation
 * inefficiencies.
 *
- * If the file is opened for synchronous writes or if we have a write delegation
- * from the server then we can just skip the rest of the checks.
+ * If the file is opened for synchronous writes then we can just skip the rest
+ * of the checks.
 */
 static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
 {
 if (file->f_flags & O_DSYNC)
 return 0;
+ if (!nfs_write_pageuptodate(page, inode))
+ return 0;
 if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
 return 1;
- if (nfs_write_pageuptodate(page, inode) && (inode->i_flock == NULL ||
- (inode->i_flock->fl_start == 0 &&
+ if (inode->i_flock == NULL || (inode->i_flock->fl_start == 0 &&
 inode->i_flock->fl_end == OFFSET_MAX &&
- inode->i_flock->fl_type != F_RDLCK)))
+ inode->i_flock->fl_type != F_RDLCK))
 return 1;
 return 0;
 }
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 419572f..5414a23 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1168,7 +1168,7 @@ struct nfsd4_operation {
 nfsd4op_rsize op_rsize_bop;
 stateid_getter op_get_currentstateid;
 stateid_setter op_set_currentstateid;
-};
+} __do_const;

 static struct nfsd4_operation nfsd4_ops[];

diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index ee7237f..e3ae60a 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1523,7 +1523,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)

 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);

-static nfsd4_dec nfsd4_dec_ops[] = {
+static const nfsd4_dec nfsd4_dec_ops[] = {
 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index b6af150..f6ec5e3 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -547,14 +547,17 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
 {
 struct svc_cacherep *rp = rqstp->rq_cacherep;
 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
- int len;
+ long len;
 size_t bufsize = 0;

 if (!rp)
 return;

- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
- len >>= 2;
+ if (statp) {
+ len = (char*)statp - (char*)resv->iov_base;
+ len = resv->iov_len - len;
+ len >>= 2;
+ }

 /* Don't cache excessive amounts of data and XDR failures */
 if (!statp || len > (256 >> 2)) {
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 7eea63c..a35f4fb 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -993,7 +993,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
 } else {
 oldfs = get_fs();
 set_fs(KERNEL_DS);
- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
 set_fs(oldfs);
 }

@@ -1084,7 +1084,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,

 /* Write the data. */
 oldfs = get_fs(); set_fs(KERNEL_DS);
- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
 set_fs(oldfs);
 if (host_err < 0)
 goto out_nfserr;
@@ -1629,7 +1629,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
 */

 oldfs = get_fs(); set_fs(KERNEL_DS);
- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
 set_fs(oldfs);

 if (host_err < 0)
diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
index fea6bd5..8ee9d81 100644
--- a/fs/nls/nls_base.c
+++ b/fs/nls/nls_base.c
@@ -234,20 +234,22 @@ EXPORT_SYMBOL(utf16s_to_utf8s);

 int register_nls(struct nls_table * nls)
 {
- struct nls_table ** tmp = &tables;
+ struct nls_table *tmp = tables;

 if (nls->next)
 return -EBUSY;

 spin_lock(&nls_lock);
- while (*tmp) {
- if (nls == *tmp) {
+ while (tmp) {
+ if (nls == tmp) {
 spin_unlock(&nls_lock);
 return -EBUSY;
 }
- tmp = &(*tmp)->next;
+ tmp = tmp->next;
 }
- nls->next = tables;
+ pax_open_kernel();
+ *(struct nls_table **)&nls->next = tables;
+ pax_close_kernel();
 tables = nls;
 spin_unlock(&nls_lock);
 return 0;
@@ -255,12 +257,14 @@ int register_nls(struct nls_table * nls)

 int unregister_nls(struct nls_table * nls)
 {
- struct nls_table ** tmp = &tables;
+ struct nls_table * const * tmp = &tables;

 spin_lock(&nls_lock);
 while (*tmp) {
 if (nls == *tmp) {
- *tmp = nls->next;
+ pax_open_kernel();
+ *(struct nls_table **)tmp = nls->next;
+ pax_close_kernel();
 spin_unlock(&nls_lock);
 return 0;
 }
diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
index 7424929..35f6be5 100644
--- a/fs/nls/nls_euc-jp.c
+++ b/fs/nls/nls_euc-jp.c
@@ -561,8 +561,10 @@ static int __init init_nls_euc_jp(void)
 p_nls = load_nls("cp932");

 if (p_nls) {
- table.charset2upper = p_nls->charset2upper;
- table.charset2lower = p_nls->charset2lower;
+ pax_open_kernel();
+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
+ pax_close_kernel();
 return register_nls(&table);
 }

diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
index e7bc1d7..06bd4bb 100644
--- a/fs/nls/nls_koi8-ru.c
+++ b/fs/nls/nls_koi8-ru.c
@@ -63,8 +63,10 @@ static int __init init_nls_koi8_ru(void)
 p_nls = load_nls("koi8-u");

 if (p_nls) {
- table.charset2upper = p_nls->charset2upper;
- table.charset2lower = p_nls->charset2lower;
+ pax_open_kernel();
+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
+ pax_close_kernel();
 return register_nls(&table);
 }

diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index e44cb64..7668ca4 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -253,8 +253,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,

 fd = fanotify_event_metadata.fd;
 ret = -EFAULT;
- if (copy_to_user(buf, &fanotify_event_metadata,
- fanotify_event_metadata.event_len))
+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
 goto out_close_fd;

 ret = prepare_for_access_response(group, event, fd);
@@ -888,9 +888,9 @@ COMPAT_SYSCALL_DEFINE6(fanotify_mark,
 {
 return sys_fanotify_mark(fanotify_fd, flags,
 #ifdef __BIG_ENDIAN
- ((__u64)mask1 << 32) | mask0,
-#else
 ((__u64)mask0 << 32) | mask1,
+#else
+ ((__u64)mask1 << 32) | mask0,
 #endif
 dfd, pathname);
 }
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
index 7b51b05..5ea5ef6 100644
--- a/fs/notify/notification.c
+++ b/fs/notify/notification.c
@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
 * get set to 0 so it will never get 'freed'
 */
 static struct fsnotify_event *q_overflow_event;
-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);

 /**
 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
 */
 u32 fsnotify_get_cookie(void)
 {
- return atomic_inc_return(&fsnotify_sync_cookie);
+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
 }
 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);

diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
index 9e38daf..5727cae 100644
--- a/fs/ntfs/dir.c
+++ b/fs/ntfs/dir.c
@@ -1310,7 +1310,7 @@ find_next_index_buffer:
 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
 ~(s64)(ndir->itype.index.block_size - 1)));
 /* Bounds checks. */
- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
 "inode 0x%lx or driver bug.", vdir->i_ino);
 goto err_out;
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index ea4ba9d..1e13d34 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -1282,7 +1282,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
 char *addr;
 size_t total = 0;
 unsigned len;
- int left;
+ unsigned left;

 do {
 len = PAGE_CACHE_SIZE - ofs;
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 82650d5..db37dcf 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -685,7 +685,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
 if (!silent)
 ntfs_error(sb, "Primary boot sector is invalid.");
 } else if (!silent)
- ntfs_error(sb, read_err_str, "primary");
+ ntfs_error(sb, read_err_str, "%s", "primary");
 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
 if (bh_primary)
 brelse(bh_primary);
@@ -701,7 +701,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
 goto hotfix_primary_boot_sector;
 brelse(bh_backup);
 } else if (!silent)
- ntfs_error(sb, read_err_str, "backup");
+ ntfs_error(sb, read_err_str, "%s", "backup");
 /* Try to read NT3.51- backup boot sector. */
 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
@@ -712,7 +712,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
 "sector.");
 brelse(bh_backup);
 } else if (!silent)
- ntfs_error(sb, read_err_str, "backup");
+ ntfs_error(sb, read_err_str, "%s", "backup");
 /* We failed. Cleanup and return. */
 if (bh_primary)
 brelse(bh_primary);
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index cd5496b..26a1055 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -1278,7 +1278,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
 goto bail;
 }

- atomic_inc(&osb->alloc_stats.moves);
+ atomic_inc_unchecked(&osb->alloc_stats.moves);

 bail:
 if (handle)
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 3a90347..c40bef8 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -235,11 +235,11 @@ enum ocfs2_vol_state

 struct ocfs2_alloc_stats
 {
- atomic_t moves;
- atomic_t local_data;
- atomic_t bitmap_data;
- atomic_t bg_allocs;
- atomic_t bg_extends;
+ atomic_unchecked_t moves;
+ atomic_unchecked_t local_data;
+ atomic_unchecked_t bitmap_data;
+ atomic_unchecked_t bg_allocs;
+ atomic_unchecked_t bg_extends;
 };

 enum ocfs2_local_alloc_state
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 2c91452..77a3cd2 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
 mlog_errno(status);
 goto bail;
 }
- atomic_inc(&osb->alloc_stats.bg_extends);
+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);

 /* You should never ask for this much metadata */
 BUG_ON(bits_wanted >
@@ -2000,7 +2000,7 @@ int ocfs2_claim_metadata(handle_t *handle,
 mlog_errno(status);
 goto bail;
 }
- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);

 *suballoc_loc = res.sr_bg_blkno;
 *suballoc_bit_start = res.sr_bit_offset;
@@ -2164,7 +2164,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
 res->sr_bits);

- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);

 BUG_ON(res->sr_bits != 1);

@@ -2206,7 +2206,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
 mlog_errno(status);
 goto bail;
 }
- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);

 BUG_ON(res.sr_bits != 1);

@@ -2310,7 +2310,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
 cluster_start,
 num_clusters);
 if (!status)
- atomic_inc(&osb->alloc_stats.local_data);
+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
 } else {
 if (min_clusters > (osb->bitmap_cpg - 1)) {
 /* The only paths asking for contiguousness
@@ -2336,7 +2336,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
 res.sr_bg_blkno,
 res.sr_bit_offset);
- atomic_inc(&osb->alloc_stats.bitmap_data);
+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
 *num_clusters = res.sr_bits;
 }
 }
61450diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
61451index c414929..5c9ee542 100644
61452--- a/fs/ocfs2/super.c
61453+++ b/fs/ocfs2/super.c
61454@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
61455 "%10s => GlobalAllocs: %d LocalAllocs: %d "
61456 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
61457 "Stats",
61458- atomic_read(&osb->alloc_stats.bitmap_data),
61459- atomic_read(&osb->alloc_stats.local_data),
61460- atomic_read(&osb->alloc_stats.bg_allocs),
61461- atomic_read(&osb->alloc_stats.moves),
61462- atomic_read(&osb->alloc_stats.bg_extends));
61463+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
61464+ atomic_read_unchecked(&osb->alloc_stats.local_data),
61465+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
61466+ atomic_read_unchecked(&osb->alloc_stats.moves),
61467+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
61468
61469 out += snprintf(buf + out, len - out,
61470 "%10s => State: %u Descriptor: %llu Size: %u bits "
61471@@ -2121,11 +2121,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
61472 spin_lock_init(&osb->osb_xattr_lock);
61473 ocfs2_init_steal_slots(osb);
61474
61475- atomic_set(&osb->alloc_stats.moves, 0);
61476- atomic_set(&osb->alloc_stats.local_data, 0);
61477- atomic_set(&osb->alloc_stats.bitmap_data, 0);
61478- atomic_set(&osb->alloc_stats.bg_allocs, 0);
61479- atomic_set(&osb->alloc_stats.bg_extends, 0);
61480+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
61481+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
61482+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
61483+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
61484+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
61485
61486 /* Copy the blockcheck stats from the superblock probe */
61487 osb->osb_ecc_stats = *stats;
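
The atomic_*_unchecked conversions above reflect the PAX_REFCOUNT split: with that option enabled, the ordinary atomic_t operations are instrumented to detect and stop reference-counter overflows, while counters that are mere statistics — such as the ocfs2 alloc_stats fields, where wrap-around is harmless — are moved to an atomic_unchecked_t that keeps plain wrapping semantics. A minimal userspace model of the distinction (not the kernel implementation, which is arch-specific):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int counter; } atomic_checked_t;	/* models atomic_t */
typedef struct { atomic_int counter; } atomic_unchecked_t;

static void inc_checked(atomic_checked_t *v)
{
	if (atomic_fetch_add(&v->counter, 1) == INT_MAX)
		fprintf(stderr, "refcount overflow\n");	/* kernel: report and contain */
}

static void inc_unchecked(atomic_unchecked_t *v)
{
	atomic_fetch_add(&v->counter, 1);	/* wrap-around is acceptable for stats */
}

The conversion itself is mechanical: every set/inc/read on the opted-out counters switches to the _unchecked variant, as the suballoc.c and super.c hunks show.
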
61488diff --git a/fs/open.c b/fs/open.c
61489index 4b3e1ed..1c84599 100644
61490--- a/fs/open.c
61491+++ b/fs/open.c
61492@@ -32,6 +32,8 @@
61493 #include <linux/dnotify.h>
61494 #include <linux/compat.h>
61495
61496+#define CREATE_TRACE_POINTS
61497+#include <trace/events/fs.h>
61498 #include "internal.h"
61499
61500 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
61501@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
61502 error = locks_verify_truncate(inode, NULL, length);
61503 if (!error)
61504 error = security_path_truncate(path);
61505+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
61506+ error = -EACCES;
61507 if (!error)
61508 error = do_truncate(path->dentry, length, 0, NULL);
61509
61510@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
61511 error = locks_verify_truncate(inode, f.file, length);
61512 if (!error)
61513 error = security_path_truncate(&f.file->f_path);
61514+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
61515+ error = -EACCES;
61516 if (!error)
61517 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
61518 sb_end_write(inode->i_sb);
61519@@ -361,6 +367,9 @@ retry:
61520 if (__mnt_is_readonly(path.mnt))
61521 res = -EROFS;
61522
61523+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
61524+ res = -EACCES;
61525+
61526 out_path_release:
61527 path_put(&path);
61528 if (retry_estale(res, lookup_flags)) {
61529@@ -392,6 +401,8 @@ retry:
61530 if (error)
61531 goto dput_and_out;
61532
61533+ gr_log_chdir(path.dentry, path.mnt);
61534+
61535 set_fs_pwd(current->fs, &path);
61536
61537 dput_and_out:
61538@@ -421,6 +432,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
61539 goto out_putf;
61540
61541 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
61542+
61543+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
61544+ error = -EPERM;
61545+
61546+ if (!error)
61547+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
61548+
61549 if (!error)
61550 set_fs_pwd(current->fs, &f.file->f_path);
61551 out_putf:
61552@@ -450,7 +468,13 @@ retry:
61553 if (error)
61554 goto dput_and_out;
61555
61556+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
61557+ goto dput_and_out;
61558+
61559 set_fs_root(current->fs, &path);
61560+
61561+ gr_handle_chroot_chdir(&path);
61562+
61563 error = 0;
61564 dput_and_out:
61565 path_put(&path);
61566@@ -474,6 +498,16 @@ static int chmod_common(struct path *path, umode_t mode)
61567 return error;
61568 retry_deleg:
61569 mutex_lock(&inode->i_mutex);
61570+
61571+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
61572+ error = -EACCES;
61573+ goto out_unlock;
61574+ }
61575+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
61576+ error = -EACCES;
61577+ goto out_unlock;
61578+ }
61579+
61580 error = security_path_chmod(path, mode);
61581 if (error)
61582 goto out_unlock;
61583@@ -539,6 +573,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
61584 uid = make_kuid(current_user_ns(), user);
61585 gid = make_kgid(current_user_ns(), group);
61586
61587+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
61588+ return -EACCES;
61589+
61590 newattrs.ia_valid = ATTR_CTIME;
61591 if (user != (uid_t) -1) {
61592 if (!uid_valid(uid))
61593@@ -990,6 +1027,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
61594 } else {
61595 fsnotify_open(f);
61596 fd_install(fd, f);
61597+ trace_do_sys_open(tmp->name, flags, mode);
61598 }
61599 }
61600 putname(tmp);
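
Each fs/open.c hunk follows the same shape: the grsecurity RBAC hook runs only after the DAC and LSM checks have passed, and a denial is folded in as -EACCES without clobbering an earlier error code. A hypothetical helper (not in the patch) distilling that shape:

#include <errno.h>

/* gr_acl_handle_*() hooks return nonzero to allow, 0 to deny */
static inline int fold_acl_check(int error, int allowed)
{
	if (!error && !allowed)
		return -EACCES;
	return error;
}

/* e.g. error = fold_acl_check(error,
 *                             gr_acl_handle_truncate(path->dentry, path->mnt)); */
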
61601diff --git a/fs/pipe.c b/fs/pipe.c
61602index 0e0752e..7cfdd50 100644
61603--- a/fs/pipe.c
61604+++ b/fs/pipe.c
61605@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
61606
61607 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
61608 {
61609- if (pipe->files)
61610+ if (atomic_read(&pipe->files))
61611 mutex_lock_nested(&pipe->mutex, subclass);
61612 }
61613
61614@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
61615
61616 void pipe_unlock(struct pipe_inode_info *pipe)
61617 {
61618- if (pipe->files)
61619+ if (atomic_read(&pipe->files))
61620 mutex_unlock(&pipe->mutex);
61621 }
61622 EXPORT_SYMBOL(pipe_unlock);
61623@@ -449,9 +449,9 @@ redo:
61624 }
61625 if (bufs) /* More to do? */
61626 continue;
61627- if (!pipe->writers)
61628+ if (!atomic_read(&pipe->writers))
61629 break;
61630- if (!pipe->waiting_writers) {
61631+ if (!atomic_read(&pipe->waiting_writers)) {
61632 /* syscall merging: Usually we must not sleep
61633 * if O_NONBLOCK is set, or if we got some data.
61634 * But if a writer sleeps in kernel space, then
61635@@ -513,7 +513,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
61636 ret = 0;
61637 __pipe_lock(pipe);
61638
61639- if (!pipe->readers) {
61640+ if (!atomic_read(&pipe->readers)) {
61641 send_sig(SIGPIPE, current, 0);
61642 ret = -EPIPE;
61643 goto out;
61644@@ -562,7 +562,7 @@ redo1:
61645 for (;;) {
61646 int bufs;
61647
61648- if (!pipe->readers) {
61649+ if (!atomic_read(&pipe->readers)) {
61650 send_sig(SIGPIPE, current, 0);
61651 if (!ret)
61652 ret = -EPIPE;
61653@@ -653,9 +653,9 @@ redo2:
61654 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
61655 do_wakeup = 0;
61656 }
61657- pipe->waiting_writers++;
61658+ atomic_inc(&pipe->waiting_writers);
61659 pipe_wait(pipe);
61660- pipe->waiting_writers--;
61661+ atomic_dec(&pipe->waiting_writers);
61662 }
61663 out:
61664 __pipe_unlock(pipe);
61665@@ -709,7 +709,7 @@ pipe_poll(struct file *filp, poll_table *wait)
61666 mask = 0;
61667 if (filp->f_mode & FMODE_READ) {
61668 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
61669- if (!pipe->writers && filp->f_version != pipe->w_counter)
61670+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
61671 mask |= POLLHUP;
61672 }
61673
61674@@ -719,7 +719,7 @@ pipe_poll(struct file *filp, poll_table *wait)
61675 * Most Unices do not set POLLERR for FIFOs but on Linux they
61676 * behave exactly like pipes for poll().
61677 */
61678- if (!pipe->readers)
61679+ if (!atomic_read(&pipe->readers))
61680 mask |= POLLERR;
61681 }
61682
61683@@ -731,7 +731,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
61684 int kill = 0;
61685
61686 spin_lock(&inode->i_lock);
61687- if (!--pipe->files) {
61688+ if (atomic_dec_and_test(&pipe->files)) {
61689 inode->i_pipe = NULL;
61690 kill = 1;
61691 }
61692@@ -748,11 +748,11 @@ pipe_release(struct inode *inode, struct file *file)
61693
61694 __pipe_lock(pipe);
61695 if (file->f_mode & FMODE_READ)
61696- pipe->readers--;
61697+ atomic_dec(&pipe->readers);
61698 if (file->f_mode & FMODE_WRITE)
61699- pipe->writers--;
61700+ atomic_dec(&pipe->writers);
61701
61702- if (pipe->readers || pipe->writers) {
61703+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
61704 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
61705 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
61706 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
61707@@ -817,7 +817,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
61708 kfree(pipe);
61709 }
61710
61711-static struct vfsmount *pipe_mnt __read_mostly;
61712+struct vfsmount *pipe_mnt __read_mostly;
61713
61714 /*
61715 * pipefs_dname() is called from d_path().
61716@@ -847,8 +847,9 @@ static struct inode * get_pipe_inode(void)
61717 goto fail_iput;
61718
61719 inode->i_pipe = pipe;
61720- pipe->files = 2;
61721- pipe->readers = pipe->writers = 1;
61722+ atomic_set(&pipe->files, 2);
61723+ atomic_set(&pipe->readers, 1);
61724+ atomic_set(&pipe->writers, 1);
61725 inode->i_fop = &pipefifo_fops;
61726
61727 /*
61728@@ -1027,17 +1028,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
61729 spin_lock(&inode->i_lock);
61730 if (inode->i_pipe) {
61731 pipe = inode->i_pipe;
61732- pipe->files++;
61733+ atomic_inc(&pipe->files);
61734 spin_unlock(&inode->i_lock);
61735 } else {
61736 spin_unlock(&inode->i_lock);
61737 pipe = alloc_pipe_info();
61738 if (!pipe)
61739 return -ENOMEM;
61740- pipe->files = 1;
61741+ atomic_set(&pipe->files, 1);
61742 spin_lock(&inode->i_lock);
61743 if (unlikely(inode->i_pipe)) {
61744- inode->i_pipe->files++;
61745+ atomic_inc(&inode->i_pipe->files);
61746 spin_unlock(&inode->i_lock);
61747 free_pipe_info(pipe);
61748 pipe = inode->i_pipe;
61749@@ -1062,10 +1063,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
61750 * opened, even when there is no process writing the FIFO.
61751 */
61752 pipe->r_counter++;
61753- if (pipe->readers++ == 0)
61754+ if (atomic_inc_return(&pipe->readers) == 1)
61755 wake_up_partner(pipe);
61756
61757- if (!is_pipe && !pipe->writers) {
61758+ if (!is_pipe && !atomic_read(&pipe->writers)) {
61759 if ((filp->f_flags & O_NONBLOCK)) {
61760 /* suppress POLLHUP until we have
61761 * seen a writer */
61762@@ -1084,14 +1085,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
61763 * errno=ENXIO when there is no process reading the FIFO.
61764 */
61765 ret = -ENXIO;
61766- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
61767+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
61768 goto err;
61769
61770 pipe->w_counter++;
61771- if (!pipe->writers++)
61772+ if (atomic_inc_return(&pipe->writers) == 1)
61773 wake_up_partner(pipe);
61774
61775- if (!is_pipe && !pipe->readers) {
61776+ if (!is_pipe && !atomic_read(&pipe->readers)) {
61777 if (wait_for_partner(pipe, &pipe->r_counter))
61778 goto err_wr;
61779 }
61780@@ -1105,11 +1106,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
61781 * the process can at least talk to itself.
61782 */
61783
61784- pipe->readers++;
61785- pipe->writers++;
61786+ atomic_inc(&pipe->readers);
61787+ atomic_inc(&pipe->writers);
61788 pipe->r_counter++;
61789 pipe->w_counter++;
61790- if (pipe->readers == 1 || pipe->writers == 1)
61791+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
61792 wake_up_partner(pipe);
61793 break;
61794
61795@@ -1123,13 +1124,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
61796 return 0;
61797
61798 err_rd:
61799- if (!--pipe->readers)
61800+ if (atomic_dec_and_test(&pipe->readers))
61801 wake_up_interruptible(&pipe->wait);
61802 ret = -ERESTARTSYS;
61803 goto err;
61804
61805 err_wr:
61806- if (!--pipe->writers)
61807+ if (atomic_dec_and_test(&pipe->writers))
61808 wake_up_interruptible(&pipe->wait);
61809 ret = -ERESTARTSYS;
61810 goto err;
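
The fs/pipe.c changes convert the readers/writers/files/waiting_writers counters from plain ints to atomic_t, which (together with PAX_REFCOUNT) guards them against overflow-driven use-after-free. Most rewrites are mechanical; the only subtle ones are the post-increment tests, where pipe->readers++ == 0 becomes atomic_inc_return(&pipe->readers) == 1. A runnable userspace check of that equivalence:

#include <assert.h>
#include <stdatomic.h>

/* old style: post-increment, test the previous value */
static int first_opener_plain(int *r)
{
	return (*r)++ == 0;
}

/* new style: atomic_inc_return() == 1 tests the same condition */
static int first_opener_atomic(atomic_int *r)
{
	return atomic_fetch_add(r, 1) + 1 == 1;
}

int main(void)
{
	int plain = 0;
	atomic_int at;

	atomic_init(&at, 0);
	assert(first_opener_plain(&plain) == first_opener_atomic(&at));	/* both: first opener */
	assert(first_opener_plain(&plain) == first_opener_atomic(&at));	/* both: not first */
	return 0;
}
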
61811diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
61812index 2183fcf..3c32a98 100644
61813--- a/fs/proc/Kconfig
61814+++ b/fs/proc/Kconfig
61815@@ -30,7 +30,7 @@ config PROC_FS
61816
61817 config PROC_KCORE
61818 bool "/proc/kcore support" if !ARM
61819- depends on PROC_FS && MMU
61820+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
61821 help
61822 Provides a virtual ELF core file of the live kernel. This can
61823 be read with gdb and other ELF tools. No modifications can be
61824@@ -38,8 +38,8 @@ config PROC_KCORE
61825
61826 config PROC_VMCORE
61827 bool "/proc/vmcore support"
61828- depends on PROC_FS && CRASH_DUMP
61829- default y
61830+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
61831+ default n
61832 help
61833 Exports the dump image of crashed kernel in ELF format.
61834
61835@@ -63,8 +63,8 @@ config PROC_SYSCTL
61836 limited in memory.
61837
61838 config PROC_PAGE_MONITOR
61839- default y
61840- depends on PROC_FS && MMU
61841+ default n
61842+ depends on PROC_FS && MMU && !GRKERNSEC
61843 bool "Enable /proc page monitoring" if EXPERT
61844 help
61845 Various /proc files exist to monitor process memory utilization:
61846diff --git a/fs/proc/array.c b/fs/proc/array.c
61847index 1bd2077..2f7cfd5 100644
61848--- a/fs/proc/array.c
61849+++ b/fs/proc/array.c
61850@@ -60,6 +60,7 @@
61851 #include <linux/tty.h>
61852 #include <linux/string.h>
61853 #include <linux/mman.h>
61854+#include <linux/grsecurity.h>
61855 #include <linux/proc_fs.h>
61856 #include <linux/ioport.h>
61857 #include <linux/uaccess.h>
61858@@ -365,6 +366,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
61859 seq_putc(m, '\n');
61860 }
61861
61862+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
61863+static inline void task_pax(struct seq_file *m, struct task_struct *p)
61864+{
61865+ if (p->mm)
61866+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
61867+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
61868+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
61869+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
61870+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
61871+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
61872+ else
61873+ seq_printf(m, "PaX:\t-----\n");
61874+}
61875+#endif
61876+
61877 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
61878 struct pid *pid, struct task_struct *task)
61879 {
61880@@ -383,9 +399,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
61881 task_cpus_allowed(m, task);
61882 cpuset_task_status_allowed(m, task);
61883 task_context_switch_counts(m, task);
61884+
61885+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
61886+ task_pax(m, task);
61887+#endif
61888+
61889+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
61890+ task_grsec_rbac(m, task);
61891+#endif
61892+
61893 return 0;
61894 }
61895
61896+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61897+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
61898+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
61899+ _mm->pax_flags & MF_PAX_SEGMEXEC))
61900+#endif
61901+
61902 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
61903 struct pid *pid, struct task_struct *task, int whole)
61904 {
61905@@ -407,6 +438,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
61906 char tcomm[sizeof(task->comm)];
61907 unsigned long flags;
61908
61909+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61910+ if (current->exec_id != m->exec_id) {
61911+ gr_log_badprocpid("stat");
61912+ return 0;
61913+ }
61914+#endif
61915+
61916 state = *get_task_state(task);
61917 vsize = eip = esp = 0;
61918 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
61919@@ -478,6 +516,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
61920 gtime = task_gtime(task);
61921 }
61922
61923+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61924+ if (PAX_RAND_FLAGS(mm)) {
61925+ eip = 0;
61926+ esp = 0;
61927+ wchan = 0;
61928+ }
61929+#endif
61930+#ifdef CONFIG_GRKERNSEC_HIDESYM
61931+ wchan = 0;
61932+ eip = 0;
61933+ esp = 0;
61934+#endif
61935+
61936 /* scale priority and nice values from timeslices to -20..20 */
61937 /* to make it look like a "normal" Unix priority/nice value */
61938 priority = task_prio(task);
61939@@ -514,9 +565,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
61940 seq_put_decimal_ull(m, ' ', vsize);
61941 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
61942 seq_put_decimal_ull(m, ' ', rsslim);
61943+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61944+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
61945+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
61946+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
61947+#else
61948 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
61949 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
61950 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
61951+#endif
61952 seq_put_decimal_ull(m, ' ', esp);
61953 seq_put_decimal_ull(m, ' ', eip);
61954 /* The signal information here is obsolete.
61955@@ -538,7 +595,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
61956 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
61957 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
61958
61959- if (mm && permitted) {
61960+ if (mm && permitted
61961+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61962+ && !PAX_RAND_FLAGS(mm)
61963+#endif
61964+ ) {
61965 seq_put_decimal_ull(m, ' ', mm->start_data);
61966 seq_put_decimal_ull(m, ' ', mm->end_data);
61967 seq_put_decimal_ull(m, ' ', mm->start_brk);
61968@@ -576,8 +637,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
61969 struct pid *pid, struct task_struct *task)
61970 {
61971 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
61972- struct mm_struct *mm = get_task_mm(task);
61973+ struct mm_struct *mm;
61974
61975+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61976+ if (current->exec_id != m->exec_id) {
61977+ gr_log_badprocpid("statm");
61978+ return 0;
61979+ }
61980+#endif
61981+ mm = get_task_mm(task);
61982 if (mm) {
61983 size = task_statm(mm, &shared, &text, &data, &resident);
61984 mmput(mm);
61985@@ -600,6 +668,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
61986 return 0;
61987 }
61988
61989+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
61990+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
61991+{
61992+ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
61993+}
61994+#endif
61995+
61996 #ifdef CONFIG_CHECKPOINT_RESTORE
61997 static struct pid *
61998 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
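
The task_pax() helper added above encodes each PaX flag as one letter, uppercase when active. A standalone model of the same formatting (the MF_PAX_* values here are illustrative; the real constants come from the PaX mm headers):

#include <stdio.h>

#define MF_PAX_PAGEEXEC 0x01UL
#define MF_PAX_EMUTRAMP 0x02UL
#define MF_PAX_MPROTECT 0x04UL
#define MF_PAX_RANDMMAP 0x08UL
#define MF_PAX_SEGMEXEC 0x10UL

static void print_pax(unsigned long flags)
{
	printf("PaX:\t%c%c%c%c%c\n",
	       flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
	       flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
	       flags & MF_PAX_MPROTECT ? 'M' : 'm',
	       flags & MF_PAX_RANDMMAP ? 'R' : 'r',
	       flags & MF_PAX_SEGMEXEC ? 'S' : 's');	/* e.g. "PaX:\tPeMRs" */
}
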
61999diff --git a/fs/proc/base.c b/fs/proc/base.c
62000index 03c8d74..4efb575 100644
62001--- a/fs/proc/base.c
62002+++ b/fs/proc/base.c
62003@@ -113,6 +113,14 @@ struct pid_entry {
62004 union proc_op op;
62005 };
62006
62007+struct getdents_callback {
62008+ struct linux_dirent __user * current_dir;
62009+ struct linux_dirent __user * previous;
62010+ struct file * file;
62011+ int count;
62012+ int error;
62013+};
62014+
62015 #define NOD(NAME, MODE, IOP, FOP, OP) { \
62016 .name = (NAME), \
62017 .len = sizeof(NAME) - 1, \
62018@@ -210,6 +218,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
62019 if (!mm->arg_end)
62020 goto out_mm; /* Shh! No looking before we're done */
62021
62022+ if (gr_acl_handle_procpidmem(task))
62023+ goto out_mm;
62024+
62025 len = mm->arg_end - mm->arg_start;
62026
62027 if (len > PAGE_SIZE)
62028@@ -237,12 +248,28 @@ out:
62029 return res;
62030 }
62031
62032+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62033+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
62034+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
62035+ _mm->pax_flags & MF_PAX_SEGMEXEC))
62036+#endif
62037+
62038 static int proc_pid_auxv(struct task_struct *task, char *buffer)
62039 {
62040 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
62041 int res = PTR_ERR(mm);
62042 if (mm && !IS_ERR(mm)) {
62043 unsigned int nwords = 0;
62044+
62045+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62046+ /* allow if we're currently ptracing this task */
62047+ if (PAX_RAND_FLAGS(mm) &&
62048+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
62049+ mmput(mm);
62050+ return 0;
62051+ }
62052+#endif
62053+
62054 do {
62055 nwords += 2;
62056 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
62057@@ -256,7 +283,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
62058 }
62059
62060
62061-#ifdef CONFIG_KALLSYMS
62062+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
62063 /*
62064 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
62065 * Returns the resolved symbol. If that fails, simply return the address.
62066@@ -295,7 +322,7 @@ static void unlock_trace(struct task_struct *task)
62067 mutex_unlock(&task->signal->cred_guard_mutex);
62068 }
62069
62070-#ifdef CONFIG_STACKTRACE
62071+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
62072
62073 #define MAX_STACK_TRACE_DEPTH 64
62074
62075@@ -518,7 +545,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
62076 return count;
62077 }
62078
62079-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
62080+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
62081 static int proc_pid_syscall(struct task_struct *task, char *buffer)
62082 {
62083 long nr;
62084@@ -547,7 +574,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
62085 /************************************************************************/
62086
62087 /* permission checks */
62088-static int proc_fd_access_allowed(struct inode *inode)
62089+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
62090 {
62091 struct task_struct *task;
62092 int allowed = 0;
62093@@ -557,7 +584,10 @@ static int proc_fd_access_allowed(struct inode *inode)
62094 */
62095 task = get_proc_task(inode);
62096 if (task) {
62097- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
62098+ if (log)
62099+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
62100+ else
62101+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
62102 put_task_struct(task);
62103 }
62104 return allowed;
62105@@ -588,10 +618,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
62106 struct task_struct *task,
62107 int hide_pid_min)
62108 {
62109+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
62110+ return false;
62111+
62112+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62113+ rcu_read_lock();
62114+ {
62115+ const struct cred *tmpcred = current_cred();
62116+ const struct cred *cred = __task_cred(task);
62117+
62118+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
62119+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
62120+ || in_group_p(grsec_proc_gid)
62121+#endif
62122+ ) {
62123+ rcu_read_unlock();
62124+ return true;
62125+ }
62126+ }
62127+ rcu_read_unlock();
62128+
62129+ if (!pid->hide_pid)
62130+ return false;
62131+#endif
62132+
62133 if (pid->hide_pid < hide_pid_min)
62134 return true;
62135 if (in_group_p(pid->pid_gid))
62136 return true;
62137+
62138 return ptrace_may_access(task, PTRACE_MODE_READ);
62139 }
62140
62141@@ -609,7 +664,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
62142 put_task_struct(task);
62143
62144 if (!has_perms) {
62145+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62146+ {
62147+#else
62148 if (pid->hide_pid == 2) {
62149+#endif
62150 /*
62151 * Let's make getdents(), stat(), and open()
62152 * consistent with each other. If a process
62153@@ -707,6 +766,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
62154 if (!task)
62155 return -ESRCH;
62156
62157+ if (gr_acl_handle_procpidmem(task)) {
62158+ put_task_struct(task);
62159+ return -EPERM;
62160+ }
62161+
62162 mm = mm_access(task, mode);
62163 put_task_struct(task);
62164
62165@@ -722,6 +786,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
62166
62167 file->private_data = mm;
62168
62169+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62170+ file->f_version = current->exec_id;
62171+#endif
62172+
62173 return 0;
62174 }
62175
62176@@ -743,6 +811,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
62177 ssize_t copied;
62178 char *page;
62179
62180+#ifdef CONFIG_GRKERNSEC
62181+ if (write)
62182+ return -EPERM;
62183+#endif
62184+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62185+ if (file->f_version != current->exec_id) {
62186+ gr_log_badprocpid("mem");
62187+ return 0;
62188+ }
62189+#endif
62190+
62191 if (!mm)
62192 return 0;
62193
62194@@ -755,7 +834,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
62195 goto free;
62196
62197 while (count > 0) {
62198- int this_len = min_t(int, count, PAGE_SIZE);
62199+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
62200
62201 if (write && copy_from_user(page, buf, this_len)) {
62202 copied = -EFAULT;
62203@@ -847,6 +926,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
62204 if (!mm)
62205 return 0;
62206
62207+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62208+ if (file->f_version != current->exec_id) {
62209+ gr_log_badprocpid("environ");
62210+ return 0;
62211+ }
62212+#endif
62213+
62214 page = (char *)__get_free_page(GFP_TEMPORARY);
62215 if (!page)
62216 return -ENOMEM;
62217@@ -856,7 +942,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
62218 goto free;
62219 while (count > 0) {
62220 size_t this_len, max_len;
62221- int retval;
62222+ ssize_t retval;
62223
62224 if (src >= (mm->env_end - mm->env_start))
62225 break;
62226@@ -1467,7 +1553,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
62227 int error = -EACCES;
62228
62229 /* Are we allowed to snoop on the tasks file descriptors? */
62230- if (!proc_fd_access_allowed(inode))
62231+ if (!proc_fd_access_allowed(inode, 0))
62232 goto out;
62233
62234 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
62235@@ -1511,8 +1597,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
62236 struct path path;
62237
62238 /* Are we allowed to snoop on the tasks file descriptors? */
62239- if (!proc_fd_access_allowed(inode))
62240- goto out;
62241+ /* logging here is needed for learning to work properly on chromium,
62242+ but we don't want to flood the logs from 'ps', which does a readlink
62243+ on /proc/<pid>/fd/2 of every task in the listing, nor do we want 'ps'
62244+ to learn CAP_SYS_PTRACE, as it's not necessary for its basic functionality
62245+ */
62246+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
62247+ if (!proc_fd_access_allowed(inode, 0))
62248+ goto out;
62249+ } else {
62250+ if (!proc_fd_access_allowed(inode, 1))
62251+ goto out;
62252+ }
62253
62254 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
62255 if (error)
62256@@ -1562,7 +1658,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
62257 rcu_read_lock();
62258 cred = __task_cred(task);
62259 inode->i_uid = cred->euid;
62260+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
62261+ inode->i_gid = grsec_proc_gid;
62262+#else
62263 inode->i_gid = cred->egid;
62264+#endif
62265 rcu_read_unlock();
62266 }
62267 security_task_to_inode(task, inode);
62268@@ -1598,10 +1698,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
62269 return -ENOENT;
62270 }
62271 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
62272+#ifdef CONFIG_GRKERNSEC_PROC_USER
62273+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
62274+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62275+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
62276+#endif
62277 task_dumpable(task)) {
62278 cred = __task_cred(task);
62279 stat->uid = cred->euid;
62280+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
62281+ stat->gid = grsec_proc_gid;
62282+#else
62283 stat->gid = cred->egid;
62284+#endif
62285 }
62286 }
62287 rcu_read_unlock();
62288@@ -1639,11 +1748,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
62289
62290 if (task) {
62291 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
62292+#ifdef CONFIG_GRKERNSEC_PROC_USER
62293+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
62294+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62295+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
62296+#endif
62297 task_dumpable(task)) {
62298 rcu_read_lock();
62299 cred = __task_cred(task);
62300 inode->i_uid = cred->euid;
62301+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
62302+ inode->i_gid = grsec_proc_gid;
62303+#else
62304 inode->i_gid = cred->egid;
62305+#endif
62306 rcu_read_unlock();
62307 } else {
62308 inode->i_uid = GLOBAL_ROOT_UID;
62309@@ -2172,6 +2290,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
62310 if (!task)
62311 goto out_no_task;
62312
62313+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
62314+ goto out;
62315+
62316 /*
62317 * Yes, it does not scale. And it should not. Don't add
62318 * new entries into /proc/<tgid>/ without very good reasons.
62319@@ -2202,6 +2323,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
62320 if (!task)
62321 return -ENOENT;
62322
62323+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
62324+ goto out;
62325+
62326 if (!dir_emit_dots(file, ctx))
62327 goto out;
62328
62329@@ -2591,7 +2715,7 @@ static const struct pid_entry tgid_base_stuff[] = {
62330 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
62331 #endif
62332 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
62333-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
62334+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
62335 INF("syscall", S_IRUGO, proc_pid_syscall),
62336 #endif
62337 INF("cmdline", S_IRUGO, proc_pid_cmdline),
62338@@ -2616,10 +2740,10 @@ static const struct pid_entry tgid_base_stuff[] = {
62339 #ifdef CONFIG_SECURITY
62340 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
62341 #endif
62342-#ifdef CONFIG_KALLSYMS
62343+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
62344 INF("wchan", S_IRUGO, proc_pid_wchan),
62345 #endif
62346-#ifdef CONFIG_STACKTRACE
62347+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
62348 ONE("stack", S_IRUGO, proc_pid_stack),
62349 #endif
62350 #ifdef CONFIG_SCHEDSTATS
62351@@ -2653,6 +2777,9 @@ static const struct pid_entry tgid_base_stuff[] = {
62352 #ifdef CONFIG_HARDWALL
62353 INF("hardwall", S_IRUGO, proc_pid_hardwall),
62354 #endif
62355+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
62356+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
62357+#endif
62358 #ifdef CONFIG_USER_NS
62359 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
62360 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
62361@@ -2783,7 +2910,14 @@ static int proc_pid_instantiate(struct inode *dir,
62362 if (!inode)
62363 goto out;
62364
62365+#ifdef CONFIG_GRKERNSEC_PROC_USER
62366+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
62367+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62368+ inode->i_gid = grsec_proc_gid;
62369+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
62370+#else
62371 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
62372+#endif
62373 inode->i_op = &proc_tgid_base_inode_operations;
62374 inode->i_fop = &proc_tgid_base_operations;
62375 inode->i_flags|=S_IMMUTABLE;
62376@@ -2821,7 +2955,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
62377 if (!task)
62378 goto out;
62379
62380+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
62381+ goto out_put_task;
62382+
62383 result = proc_pid_instantiate(dir, dentry, task, NULL);
62384+out_put_task:
62385 put_task_struct(task);
62386 out:
62387 return ERR_PTR(result);
62388@@ -2927,7 +3065,7 @@ static const struct pid_entry tid_base_stuff[] = {
62389 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
62390 #endif
62391 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
62392-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
62393+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
62394 INF("syscall", S_IRUGO, proc_pid_syscall),
62395 #endif
62396 INF("cmdline", S_IRUGO, proc_pid_cmdline),
62397@@ -2954,10 +3092,10 @@ static const struct pid_entry tid_base_stuff[] = {
62398 #ifdef CONFIG_SECURITY
62399 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
62400 #endif
62401-#ifdef CONFIG_KALLSYMS
62402+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
62403 INF("wchan", S_IRUGO, proc_pid_wchan),
62404 #endif
62405-#ifdef CONFIG_STACKTRACE
62406+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
62407 ONE("stack", S_IRUGO, proc_pid_stack),
62408 #endif
62409 #ifdef CONFIG_SCHEDSTATS
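
A recurring GRKERNSEC_PROC_MEMMAP device in this file is the exec_id comparison: the opener's generation counter is stashed in the file (file->f_version above), and every read checks it against current->exec_id, so a /proc file descriptor inherited across an execve reads as empty and is logged via gr_log_badprocpid(). A userspace model, assuming exec_id is a per-task counter bumped on every execve:

#include <stdio.h>

struct file_model { unsigned long opener_exec_id; };
struct task_model { unsigned long exec_id; };

static long guarded_proc_read(const struct task_model *reader,
			      const struct file_model *file)
{
	if (reader->exec_id != file->opener_exec_id) {
		fprintf(stderr, "badprocpid: fd held across execve\n");
		return 0;	/* as in the patch: empty read, not an error */
	}
	/* ... normal show/read path would run here ... */
	return 1;
}
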
62410diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
62411index 82676e3..5f8518a 100644
62412--- a/fs/proc/cmdline.c
62413+++ b/fs/proc/cmdline.c
62414@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
62415
62416 static int __init proc_cmdline_init(void)
62417 {
62418+#ifdef CONFIG_GRKERNSEC_PROC_ADD
62419+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
62420+#else
62421 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
62422+#endif
62423 return 0;
62424 }
62425 module_init(proc_cmdline_init);
62426diff --git a/fs/proc/devices.c b/fs/proc/devices.c
62427index b143471..bb105e5 100644
62428--- a/fs/proc/devices.c
62429+++ b/fs/proc/devices.c
62430@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
62431
62432 static int __init proc_devices_init(void)
62433 {
62434+#ifdef CONFIG_GRKERNSEC_PROC_ADD
62435+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
62436+#else
62437 proc_create("devices", 0, NULL, &proc_devinfo_operations);
62438+#endif
62439 return 0;
62440 }
62441 module_init(proc_devices_init);
62442diff --git a/fs/proc/fd.c b/fs/proc/fd.c
62443index 985ea88..d118a0a 100644
62444--- a/fs/proc/fd.c
62445+++ b/fs/proc/fd.c
62446@@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
62447 if (!task)
62448 return -ENOENT;
62449
62450- files = get_files_struct(task);
62451+ if (!gr_acl_handle_procpidmem(task))
62452+ files = get_files_struct(task);
62453 put_task_struct(task);
62454
62455 if (files) {
62456@@ -283,11 +284,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
62457 */
62458 int proc_fd_permission(struct inode *inode, int mask)
62459 {
62460+ struct task_struct *task;
62461 int rv = generic_permission(inode, mask);
62462- if (rv == 0)
62463- return 0;
62464+
62465 if (task_tgid(current) == proc_pid(inode))
62466 rv = 0;
62467+
62468+ task = get_proc_task(inode);
62469+ if (task == NULL)
62470+ return rv;
62471+
62472+ if (gr_acl_handle_procpidmem(task))
62473+ rv = -EACCES;
62474+
62475+ put_task_struct(task);
62476+
62477 return rv;
62478 }
62479
62480diff --git a/fs/proc/inode.c b/fs/proc/inode.c
62481index 124fc43..8afbb02 100644
62482--- a/fs/proc/inode.c
62483+++ b/fs/proc/inode.c
62484@@ -23,11 +23,17 @@
62485 #include <linux/slab.h>
62486 #include <linux/mount.h>
62487 #include <linux/magic.h>
62488+#include <linux/grsecurity.h>
62489
62490 #include <asm/uaccess.h>
62491
62492 #include "internal.h"
62493
62494+#ifdef CONFIG_PROC_SYSCTL
62495+extern const struct inode_operations proc_sys_inode_operations;
62496+extern const struct inode_operations proc_sys_dir_operations;
62497+#endif
62498+
62499 static void proc_evict_inode(struct inode *inode)
62500 {
62501 struct proc_dir_entry *de;
62502@@ -55,6 +61,13 @@ static void proc_evict_inode(struct inode *inode)
62503 ns = PROC_I(inode)->ns.ns;
62504 if (ns_ops && ns)
62505 ns_ops->put(ns);
62506+
62507+#ifdef CONFIG_PROC_SYSCTL
62508+ if (inode->i_op == &proc_sys_inode_operations ||
62509+ inode->i_op == &proc_sys_dir_operations)
62510+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
62511+#endif
62512+
62513 }
62514
62515 static struct kmem_cache * proc_inode_cachep;
62516@@ -413,7 +426,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
62517 if (de->mode) {
62518 inode->i_mode = de->mode;
62519 inode->i_uid = de->uid;
62520+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
62521+ inode->i_gid = grsec_proc_gid;
62522+#else
62523 inode->i_gid = de->gid;
62524+#endif
62525 }
62526 if (de->size)
62527 inode->i_size = de->size;
62528diff --git a/fs/proc/internal.h b/fs/proc/internal.h
62529index 651d09a..3d7f0bf 100644
62530--- a/fs/proc/internal.h
62531+++ b/fs/proc/internal.h
62532@@ -48,7 +48,7 @@ struct proc_dir_entry {
62533 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
62534 u8 namelen;
62535 char name[];
62536-};
62537+} __randomize_layout;
62538
62539 union proc_op {
62540 int (*proc_get_link)(struct dentry *, struct path *);
62541@@ -67,7 +67,7 @@ struct proc_inode {
62542 struct ctl_table *sysctl_entry;
62543 struct proc_ns ns;
62544 struct inode vfs_inode;
62545-};
62546+} __randomize_layout;
62547
62548 /*
62549 * General functions
62550@@ -155,6 +155,9 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
62551 struct pid *, struct task_struct *);
62552 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
62553 struct pid *, struct task_struct *);
62554+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
62555+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
62556+#endif
62557
62558 /*
62559 * base.c
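
__randomize_layout on struct proc_dir_entry and struct proc_inode comes from grsecurity's structure-layout randomization (the RANDSTRUCT GCC plugin): tagged structs have their member order shuffled at build time from a per-build seed, so field offsets are not predictable across kernels. Conceptually (the macro is stubbed out here so the sketch compiles outside a plugin-enabled tree):

/* empty stub; in-tree this expands to a plugin-recognized attribute */
#define __randomize_layout

struct example {
	void *first;
	long second;
} __randomize_layout;	/* member order may be permuted per build */
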
62560diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
62561index 05029c0..7ea1987 100644
62562--- a/fs/proc/interrupts.c
62563+++ b/fs/proc/interrupts.c
62564@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
62565
62566 static int __init proc_interrupts_init(void)
62567 {
62568+#ifdef CONFIG_GRKERNSEC_PROC_ADD
62569+ proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
62570+#else
62571 proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
62572+#endif
62573 return 0;
62574 }
62575 module_init(proc_interrupts_init);
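
The same #ifdef CONFIG_GRKERNSEC_PROC_ADD block appears in cmdline.c, devices.c and interrupts.c; proc_create_grsec() is the grsecurity-supplied variant that registers the entry with restricted visibility. A hypothetical wrapper (not in the patch) that would consolidate the three call sites:

#ifdef CONFIG_GRKERNSEC_PROC_ADD
#define proc_create_restricted(name, mode, parent, fops) \
	proc_create_grsec(name, mode, parent, fops)
#else
#define proc_create_restricted(name, mode, parent, fops) \
	proc_create(name, mode, parent, fops)
#endif

/* e.g. proc_create_restricted("interrupts", 0, NULL, &proc_interrupts_operations); */
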
62576diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
62577index 5ed0e52..a1c1f2e 100644
62578--- a/fs/proc/kcore.c
62579+++ b/fs/proc/kcore.c
62580@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
62581 * the addresses in the elf_phdr on our list.
62582 */
62583 start = kc_offset_to_vaddr(*fpos - elf_buflen);
62584- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
62585+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
62586+ if (tsz > buflen)
62587 tsz = buflen;
62588-
62589+
62590 while (buflen) {
62591 struct kcore_list *m;
62592
62593@@ -514,20 +515,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
62594 kfree(elf_buf);
62595 } else {
62596 if (kern_addr_valid(start)) {
62597- unsigned long n;
62598+ char *elf_buf;
62599+ mm_segment_t oldfs;
62600
62601- n = copy_to_user(buffer, (char *)start, tsz);
62602- /*
62603- * We cannot distinguish between fault on source
62604- * and fault on destination. When this happens
62605- * we clear too and hope it will trigger the
62606- * EFAULT again.
62607- */
62608- if (n) {
62609- if (clear_user(buffer + tsz - n,
62610- n))
62611+ elf_buf = kmalloc(tsz, GFP_KERNEL);
62612+ if (!elf_buf)
62613+ return -ENOMEM;
62614+ oldfs = get_fs();
62615+ set_fs(KERNEL_DS);
62616+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
62617+ set_fs(oldfs);
62618+ if (copy_to_user(buffer, elf_buf, tsz)) {
62619+ kfree(elf_buf);
62620 return -EFAULT;
62621+ }
62622 }
62623+ set_fs(oldfs);
62624+ kfree(elf_buf);
62625 } else {
62626 if (clear_user(buffer, tsz))
62627 return -EFAULT;
62628@@ -547,6 +551,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
62629
62630 static int open_kcore(struct inode *inode, struct file *filp)
62631 {
62632+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62633+ return -EPERM;
62634+#endif
62635 if (!capable(CAP_SYS_RAWIO))
62636 return -EPERM;
62637 if (kcore_need_update)
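
The read_kcore() rewrite above replaces a direct copy_to_user() from a kernel address — where a fault on the source cannot be told apart from a fault on the destination — with a bounce buffer: the source is read into kmalloc'd memory under set_fs(KERNEL_DS), and only then copied to userspace, so each step can fail for exactly one reason. A userspace sketch of the idea with stand-in copy callbacks:

#include <stdlib.h>

/* read_src()/write_dst() stand in for __copy_from_user() under
 * KERNEL_DS and copy_to_user(); both return 0 on success */
static int copy_via_bounce(void *dst, const void *src, size_t len,
			   int (*read_src)(void *, const void *, size_t),
			   int (*write_dst)(void *, const void *, size_t))
{
	void *bounce = malloc(len);
	int ret = -1;

	if (!bounce)
		return -1;
	if (read_src(bounce, src, len) == 0 &&	/* a fault here blames the source */
	    write_dst(dst, bounce, len) == 0)	/* a fault here blames the caller */
		ret = 0;
	free(bounce);
	return ret;
}
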
62638diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
62639index a77d2b2..a9153f0 100644
62640--- a/fs/proc/meminfo.c
62641+++ b/fs/proc/meminfo.c
62642@@ -150,7 +150,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
62643 vmi.used >> 10,
62644 vmi.largest_chunk >> 10
62645 #ifdef CONFIG_MEMORY_FAILURE
62646- ,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
62647+ ,atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
62648 #endif
62649 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
62650 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
62651diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
62652index 5f9bc8a..5c35f08 100644
62653--- a/fs/proc/nommu.c
62654+++ b/fs/proc/nommu.c
62655@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
62656
62657 if (file) {
62658 seq_pad(m, ' ');
62659- seq_path(m, &file->f_path, "");
62660+ seq_path(m, &file->f_path, "\n\\");
62661 }
62662
62663 seq_putc(m, '\n');
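
The third argument of seq_path() is the set of characters to escape (emitted as \ooo octal) in the printed path. Widening it from "" (or "\n") to "\n\\" means a crafted file name can forge neither a fake record separator nor a fake escape sequence in /proc output that tools parse line by line. A userspace model of that escaping:

#include <stdio.h>
#include <string.h>

static void emit_escaped(const char *path, const char *esc)
{
	const char *p;

	for (p = path; *p; p++) {
		if (strchr(esc, *p))
			printf("\\%03o", (unsigned char)*p);
		else
			putchar(*p);
	}
}

/* emit_escaped("evil\nname", "\n\\") prints evil\012name on a single line */
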
62664diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
62665index 4677bb7..408e936 100644
62666--- a/fs/proc/proc_net.c
62667+++ b/fs/proc/proc_net.c
62668@@ -23,6 +23,7 @@
62669 #include <linux/nsproxy.h>
62670 #include <net/net_namespace.h>
62671 #include <linux/seq_file.h>
62672+#include <linux/grsecurity.h>
62673
62674 #include "internal.h"
62675
62676@@ -109,6 +110,17 @@ static struct net *get_proc_task_net(struct inode *dir)
62677 struct task_struct *task;
62678 struct nsproxy *ns;
62679 struct net *net = NULL;
62680+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62681+ const struct cred *cred = current_cred();
62682+#endif
62683+
62684+#ifdef CONFIG_GRKERNSEC_PROC_USER
62685+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
62686+ return net;
62687+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62688+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
62689+ return net;
62690+#endif
62691
62692 rcu_read_lock();
62693 task = pid_task(proc_pid(dir), PIDTYPE_PID);
62694diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
62695index 7129046..6914844 100644
62696--- a/fs/proc/proc_sysctl.c
62697+++ b/fs/proc/proc_sysctl.c
62698@@ -11,13 +11,21 @@
62699 #include <linux/namei.h>
62700 #include <linux/mm.h>
62701 #include <linux/module.h>
62702+#include <linux/nsproxy.h>
62703+#ifdef CONFIG_GRKERNSEC
62704+#include <net/net_namespace.h>
62705+#endif
62706 #include "internal.h"
62707
62708+extern int gr_handle_chroot_sysctl(const int op);
62709+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
62710+ const int op);
62711+
62712 static const struct dentry_operations proc_sys_dentry_operations;
62713 static const struct file_operations proc_sys_file_operations;
62714-static const struct inode_operations proc_sys_inode_operations;
62715+const struct inode_operations proc_sys_inode_operations;
62716 static const struct file_operations proc_sys_dir_file_operations;
62717-static const struct inode_operations proc_sys_dir_operations;
62718+const struct inode_operations proc_sys_dir_operations;
62719
62720 void proc_sys_poll_notify(struct ctl_table_poll *poll)
62721 {
62722@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
62723
62724 err = NULL;
62725 d_set_d_op(dentry, &proc_sys_dentry_operations);
62726+
62727+ gr_handle_proc_create(dentry, inode);
62728+
62729 d_add(dentry, inode);
62730
62731 out:
62732@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
62733 struct inode *inode = file_inode(filp);
62734 struct ctl_table_header *head = grab_header(inode);
62735 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
62736+ int op = write ? MAY_WRITE : MAY_READ;
62737 ssize_t error;
62738 size_t res;
62739
62740@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
62741 * and won't be until we finish.
62742 */
62743 error = -EPERM;
62744- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
62745+ if (sysctl_perm(head, table, op))
62746 goto out;
62747
62748 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
62749@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
62750 if (!table->proc_handler)
62751 goto out;
62752
62753+#ifdef CONFIG_GRKERNSEC
62754+ error = -EPERM;
62755+ if (gr_handle_chroot_sysctl(op))
62756+ goto out;
62757+ dget(filp->f_path.dentry);
62758+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
62759+ dput(filp->f_path.dentry);
62760+ goto out;
62761+ }
62762+ dput(filp->f_path.dentry);
62763+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
62764+ goto out;
62765+ if (write) {
62766+ if (current->nsproxy->net_ns != table->extra2) {
62767+ if (!capable(CAP_SYS_ADMIN))
62768+ goto out;
62769+ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
62770+ goto out;
62771+ }
62772+#endif
62773+
62774 /* careful: calling conventions are nasty here */
62775 res = count;
62776 error = table->proc_handler(table, write, buf, &res, ppos);
62777@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
62778 return false;
62779 } else {
62780 d_set_d_op(child, &proc_sys_dentry_operations);
62781+
62782+ gr_handle_proc_create(child, inode);
62783+
62784 d_add(child, inode);
62785 }
62786 } else {
62787@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
62788 if ((*pos)++ < ctx->pos)
62789 return true;
62790
62791+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
62792+ return false;
62793+
62794 if (unlikely(S_ISLNK(table->mode)))
62795 res = proc_sys_link_fill_cache(file, ctx, head, table);
62796 else
62797@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
62798 if (IS_ERR(head))
62799 return PTR_ERR(head);
62800
62801+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
62802+ return -ENOENT;
62803+
62804 generic_fillattr(inode, stat);
62805 if (table)
62806 stat->mode = (stat->mode & S_IFMT) | table->mode;
62807@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
62808 .llseek = generic_file_llseek,
62809 };
62810
62811-static const struct inode_operations proc_sys_inode_operations = {
62812+const struct inode_operations proc_sys_inode_operations = {
62813 .permission = proc_sys_permission,
62814 .setattr = proc_sys_setattr,
62815 .getattr = proc_sys_getattr,
62816 };
62817
62818-static const struct inode_operations proc_sys_dir_operations = {
62819+const struct inode_operations proc_sys_dir_operations = {
62820 .lookup = proc_sys_lookup,
62821 .permission = proc_sys_permission,
62822 .setattr = proc_sys_setattr,
62823@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
62824 static struct ctl_dir *new_dir(struct ctl_table_set *set,
62825 const char *name, int namelen)
62826 {
62827- struct ctl_table *table;
62828+ ctl_table_no_const *table;
62829 struct ctl_dir *new;
62830 struct ctl_node *node;
62831 char *new_name;
62832@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
62833 return NULL;
62834
62835 node = (struct ctl_node *)(new + 1);
62836- table = (struct ctl_table *)(node + 1);
62837+ table = (ctl_table_no_const *)(node + 1);
62838 new_name = (char *)(table + 2);
62839 memcpy(new_name, name, namelen);
62840 new_name[namelen] = '\0';
62841@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
62842 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
62843 struct ctl_table_root *link_root)
62844 {
62845- struct ctl_table *link_table, *entry, *link;
62846+ ctl_table_no_const *link_table, *link;
62847+ struct ctl_table *entry;
62848 struct ctl_table_header *links;
62849 struct ctl_node *node;
62850 char *link_name;
62851@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
62852 return NULL;
62853
62854 node = (struct ctl_node *)(links + 1);
62855- link_table = (struct ctl_table *)(node + nr_entries);
62856+ link_table = (ctl_table_no_const *)(node + nr_entries);
62857 link_name = (char *)&link_table[nr_entries + 1];
62858
62859 for (link = link_table, entry = table; entry->procname; link++, entry++) {
62860@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
62861 struct ctl_table_header ***subheader, struct ctl_table_set *set,
62862 struct ctl_table *table)
62863 {
62864- struct ctl_table *ctl_table_arg = NULL;
62865- struct ctl_table *entry, *files;
62866+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
62867+ struct ctl_table *entry;
62868 int nr_files = 0;
62869 int nr_dirs = 0;
62870 int err = -ENOMEM;
62871@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
62872 nr_files++;
62873 }
62874
62875- files = table;
62876 /* If there are mixed files and directories we need a new table */
62877 if (nr_dirs && nr_files) {
62878- struct ctl_table *new;
62879+ ctl_table_no_const *new;
62880 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
62881 GFP_KERNEL);
62882 if (!files)
62883@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
62884 /* Register everything except a directory full of subdirectories */
62885 if (nr_files || !nr_dirs) {
62886 struct ctl_table_header *header;
62887- header = __register_sysctl_table(set, path, files);
62888+ header = __register_sysctl_table(set, path, files ? files : table);
62889 if (!header) {
62890 kfree(ctl_table_arg);
62891 goto out;
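
The ctl_table_no_const rewrites above fit grsecurity's constify plugin (an assumption based on the naming convention): structures consisting largely of function pointers are force-qualified const at compile time, and a *_no_const twin type is used at the few places, like new_dir() and new_links() here, that legitimately assemble such tables at runtime. The pattern in miniature, with `ops` standing in for ctl_table:

struct ops {
	int (*handler)(void);	/* function-pointer structs get constified */
};
typedef struct ops ops_no_const;	/* writable twin for runtime builders */

static int noop_handler(void) { return 0; }

static void build_table(ops_no_const *t)	/* runtime construction stays legal */
{
	t->handler = noop_handler;
}
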
62892diff --git a/fs/proc/root.c b/fs/proc/root.c
62893index 87dbcbe..55e1b4d 100644
62894--- a/fs/proc/root.c
62895+++ b/fs/proc/root.c
62896@@ -186,7 +186,15 @@ void __init proc_root_init(void)
62897 #ifdef CONFIG_PROC_DEVICETREE
62898 proc_device_tree_init();
62899 #endif
62900+#ifdef CONFIG_GRKERNSEC_PROC_ADD
62901+#ifdef CONFIG_GRKERNSEC_PROC_USER
62902+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
62903+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62904+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
62905+#endif
62906+#else
62907 proc_mkdir("bus", NULL);
62908+#endif
62909 proc_sys_init();
62910 }
62911
62912diff --git a/fs/proc/stat.c b/fs/proc/stat.c
62913index 1cf86c0..0ee1ca5 100644
62914--- a/fs/proc/stat.c
62915+++ b/fs/proc/stat.c
62916@@ -11,6 +11,7 @@
62917 #include <linux/irqnr.h>
62918 #include <asm/cputime.h>
62919 #include <linux/tick.h>
62920+#include <linux/grsecurity.h>
62921
62922 #ifndef arch_irq_stat_cpu
62923 #define arch_irq_stat_cpu(cpu) 0
62924@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
62925 u64 sum_softirq = 0;
62926 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
62927 struct timespec boottime;
62928+ int unrestricted = 1;
62929+
62930+#ifdef CONFIG_GRKERNSEC_PROC_ADD
62931+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62932+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
62933+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
62934+ && !in_group_p(grsec_proc_gid)
62935+#endif
62936+ )
62937+ unrestricted = 0;
62938+#endif
62939+#endif
62940
62941 user = nice = system = idle = iowait =
62942 irq = softirq = steal = 0;
62943@@ -94,6 +107,7 @@ static int show_stat(struct seq_file *p, void *v)
62944 getboottime(&boottime);
62945 jif = boottime.tv_sec;
62946
62947+ if (unrestricted) {
62948 for_each_possible_cpu(i) {
62949 user += kcpustat_cpu(i).cpustat[CPUTIME_USER];
62950 nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
62951@@ -116,6 +130,7 @@ static int show_stat(struct seq_file *p, void *v)
62952 }
62953 }
62954 sum += arch_irq_stat();
62955+ }
62956
62957 seq_puts(p, "cpu ");
62958 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
62959@@ -131,6 +146,7 @@ static int show_stat(struct seq_file *p, void *v)
62960 seq_putc(p, '\n');
62961
62962 for_each_online_cpu(i) {
62963+ if (unrestricted) {
62964 /* Copy values here to work around gcc-2.95.3, gcc-2.96 */
62965 user = kcpustat_cpu(i).cpustat[CPUTIME_USER];
62966 nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
62967@@ -142,6 +158,7 @@ static int show_stat(struct seq_file *p, void *v)
62968 steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
62969 guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
62970 guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
62971+ }
62972 seq_printf(p, "cpu%d", i);
62973 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
62974 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
62975@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
62976
62977 /* sum again ? it could be updated? */
62978 for_each_irq_nr(j)
62979- seq_put_decimal_ull(p, ' ', kstat_irqs(j));
62980+ seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs(j) : 0ULL);
62981
62982 seq_printf(p,
62983 "\nctxt %llu\n"
62984@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
62985 "processes %lu\n"
62986 "procs_running %lu\n"
62987 "procs_blocked %lu\n",
62988- nr_context_switches(),
62989+ unrestricted ? nr_context_switches() : 0ULL,
62990 (unsigned long)jif,
62991- total_forks,
62992- nr_running(),
62993- nr_iowait());
62994+ unrestricted ? total_forks : 0UL,
62995+ unrestricted ? nr_running() : 0UL,
62996+ unrestricted ? nr_iowait() : 0UL);
62997
62998 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
62999
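
show_stat() now computes a single `unrestricted` verdict up front and, for unprivileged readers under GRKERNSEC_PROC_ADD, reports zeros instead of real values, keeping the file format intact for parsers while leaking no scheduling or interrupt activity. A small helper expressing the gate (hypothetical; the patch inlines the equivalent ternaries):

static unsigned long long gated(int unrestricted, unsigned long long val)
{
	return unrestricted ? val : 0ULL;
}

/* e.g. seq_put_decimal_ull(p, ' ', gated(unrestricted, kstat_irqs(j))); */
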
63000diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
63001index fb52b54..5fc7c14 100644
63002--- a/fs/proc/task_mmu.c
63003+++ b/fs/proc/task_mmu.c
63004@@ -12,12 +12,19 @@
63005 #include <linux/swap.h>
63006 #include <linux/swapops.h>
63007 #include <linux/mmu_notifier.h>
63008+#include <linux/grsecurity.h>
63009
63010 #include <asm/elf.h>
63011 #include <asm/uaccess.h>
63012 #include <asm/tlbflush.h>
63013 #include "internal.h"
63014
63015+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63016+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
63017+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
63018+ _mm->pax_flags & MF_PAX_SEGMEXEC))
63019+#endif
63020+
63021 void task_mem(struct seq_file *m, struct mm_struct *mm)
63022 {
63023 unsigned long data, text, lib, swap;
63024@@ -53,8 +60,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
63025 "VmExe:\t%8lu kB\n"
63026 "VmLib:\t%8lu kB\n"
63027 "VmPTE:\t%8lu kB\n"
63028- "VmSwap:\t%8lu kB\n",
63029- hiwater_vm << (PAGE_SHIFT-10),
63030+ "VmSwap:\t%8lu kB\n"
63031+
63032+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63033+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
63034+#endif
63035+
63036+ ,hiwater_vm << (PAGE_SHIFT-10),
63037 total_vm << (PAGE_SHIFT-10),
63038 mm->locked_vm << (PAGE_SHIFT-10),
63039 mm->pinned_vm << (PAGE_SHIFT-10),
63040@@ -64,7 +76,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
63041 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
63042 (PTRS_PER_PTE * sizeof(pte_t) *
63043 atomic_long_read(&mm->nr_ptes)) >> 10,
63044- swap << (PAGE_SHIFT-10));
63045+ swap << (PAGE_SHIFT-10)
63046+
63047+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63048+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63049+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
63050+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
63051+#else
63052+ , mm->context.user_cs_base
63053+ , mm->context.user_cs_limit
63054+#endif
63055+#endif
63056+
63057+ );
63058 }
63059
63060 unsigned long task_vsize(struct mm_struct *mm)
63061@@ -270,13 +294,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
63062 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
63063 }
63064
63065- /* We don't show the stack guard page in /proc/maps */
63066+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63067+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
63068+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
63069+#else
63070 start = vma->vm_start;
63071- if (stack_guard_page_start(vma, start))
63072- start += PAGE_SIZE;
63073 end = vma->vm_end;
63074- if (stack_guard_page_end(vma, end))
63075- end -= PAGE_SIZE;
63076+#endif
63077
63078 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
63079 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
63080@@ -286,7 +310,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
63081 flags & VM_WRITE ? 'w' : '-',
63082 flags & VM_EXEC ? 'x' : '-',
63083 flags & VM_MAYSHARE ? 's' : 'p',
63084+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63085+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
63086+#else
63087 pgoff,
63088+#endif
63089 MAJOR(dev), MINOR(dev), ino);
63090
63091 /*
63092@@ -295,7 +323,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
63093 */
63094 if (file) {
63095 seq_pad(m, ' ');
63096- seq_path(m, &file->f_path, "\n");
63097+ seq_path(m, &file->f_path, "\n\\");
63098 goto done;
63099 }
63100
63101@@ -321,8 +349,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
63102 * Thread stack in /proc/PID/task/TID/maps or
63103 * the main process stack.
63104 */
63105- if (!is_pid || (vma->vm_start <= mm->start_stack &&
63106- vma->vm_end >= mm->start_stack)) {
63107+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
63108+ (vma->vm_start <= mm->start_stack &&
63109+ vma->vm_end >= mm->start_stack)) {
63110 name = "[stack]";
63111 } else {
63112 /* Thread stack in /proc/PID/maps */
63113@@ -346,6 +375,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
63114 struct proc_maps_private *priv = m->private;
63115 struct task_struct *task = priv->task;
63116
63117+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63118+ if (current->exec_id != m->exec_id) {
63119+ gr_log_badprocpid("maps");
63120+ return 0;
63121+ }
63122+#endif
63123+
63124 show_map_vma(m, vma, is_pid);
63125
63126 if (m->count < m->size) /* vma is copied successfully */
63127@@ -586,12 +622,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
63128 .private = &mss,
63129 };
63130
63131+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63132+ if (current->exec_id != m->exec_id) {
63133+ gr_log_badprocpid("smaps");
63134+ return 0;
63135+ }
63136+#endif
63137 memset(&mss, 0, sizeof mss);
63138- mss.vma = vma;
63139- /* mmap_sem is held in m_start */
63140- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
63141- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
63142-
63143+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63144+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
63145+#endif
63146+ mss.vma = vma;
63147+ /* mmap_sem is held in m_start */
63148+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
63149+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
63150+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63151+ }
63152+#endif
63153 show_map_vma(m, vma, is_pid);
63154
63155 seq_printf(m,
63156@@ -609,7 +656,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
63157 "KernelPageSize: %8lu kB\n"
63158 "MMUPageSize: %8lu kB\n"
63159 "Locked: %8lu kB\n",
63160+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63161+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
63162+#else
63163 (vma->vm_end - vma->vm_start) >> 10,
63164+#endif
63165 mss.resident >> 10,
63166 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
63167 mss.shared_clean >> 10,
63168@@ -1387,6 +1438,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
63169 char buffer[64];
63170 int nid;
63171
63172+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63173+ if (current->exec_id != m->exec_id) {
63174+ gr_log_badprocpid("numa_maps");
63175+ return 0;
63176+ }
63177+#endif
63178+
63179 if (!mm)
63180 return 0;
63181
63182@@ -1404,11 +1462,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
63183 mpol_to_str(buffer, sizeof(buffer), pol);
63184 mpol_cond_put(pol);
63185
63186+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63187+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
63188+#else
63189 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
63190+#endif
63191
63192 if (file) {
63193 seq_printf(m, " file=");
63194- seq_path(m, &file->f_path, "\n\t= ");
63195+ seq_path(m, &file->f_path, "\n\t\\= ");
63196 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
63197 seq_printf(m, " heap");
63198 } else {
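
The task_mmu.c hunks above sanitize /proc/<pid>/maps, smaps, and numa_maps output (zeroed start/end/pgoff) whenever PAX_RAND_FLAGS() holds for a foreign mm, and reject reads whose file descriptor came from another task. A minimal userspace sketch of the file being sanitized — just a plain reader, with no grsecurity assumptions:

#include <stdio.h>

int main(void)
{
    char line[256];
    FILE *f = fopen("/proc/self/maps", "r");
    int n = 0;

    if (!f) {
        perror("fopen");
        return 1;
    }
    while (fgets(line, sizeof(line), f) && n++ < 5)
        fputs(line, stdout);   /* start-end perms offset dev inode path */
    fclose(f);
    return 0;
}

On a GRKERNSEC_PROC_MEMMAP kernel, the address, offset, and size fields shown here would read as zeros when a PaX-randomized task is inspected from outside.
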
63199diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
63200index 678455d..ebd3245 100644
63201--- a/fs/proc/task_nommu.c
63202+++ b/fs/proc/task_nommu.c
63203@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
63204 else
63205 bytes += kobjsize(mm);
63206
63207- if (current->fs && current->fs->users > 1)
63208+ if (current->fs && atomic_read(&current->fs->users) > 1)
63209 sbytes += kobjsize(current->fs);
63210 else
63211 bytes += kobjsize(current->fs);
63212@@ -161,7 +161,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
63213
63214 if (file) {
63215 seq_pad(m, ' ');
63216- seq_path(m, &file->f_path, "");
63217+ seq_path(m, &file->f_path, "\n\\");
63218 } else if (mm) {
63219 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
63220
63221diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
63222index 9100d69..51cd925 100644
63223--- a/fs/proc/vmcore.c
63224+++ b/fs/proc/vmcore.c
63225@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
63226 nr_bytes = count;
63227
63228 /* If pfn is not ram, return zeros for sparse dump files */
63229- if (pfn_is_ram(pfn) == 0)
63230- memset(buf, 0, nr_bytes);
63231- else {
63232+ if (pfn_is_ram(pfn) == 0) {
63233+ if (userbuf) {
63234+ if (clear_user((char __force_user *)buf, nr_bytes))
63235+ return -EFAULT;
63236+ } else
63237+ memset(buf, 0, nr_bytes);
63238+ } else {
63239 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
63240 offset, userbuf);
63241 if (tmp < 0)
63242@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
63243 static int copy_to(void *target, void *src, size_t size, int userbuf)
63244 {
63245 if (userbuf) {
63246- if (copy_to_user((char __user *) target, src, size))
63247+ if (copy_to_user((char __force_user *) target, src, size))
63248 return -EFAULT;
63249 } else {
63250 memcpy(target, src, size);
63251@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
63252 if (*fpos < m->offset + m->size) {
63253 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
63254 start = m->paddr + *fpos - m->offset;
63255- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
63256+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
63257 if (tmp < 0)
63258 return tmp;
63259 buflen -= tsz;
63260@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
63261 static ssize_t read_vmcore(struct file *file, char __user *buffer,
63262 size_t buflen, loff_t *fpos)
63263 {
63264- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
63265+ return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
63266 }
63267
63268 /*
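
The read_from_oldmem() fix above distinguishes the two destinations a zero-fill can have: clear_user() for user memory, which can fault, versus memset() for kernel memory. A self-contained userspace sketch of that dispatch shape; fake_clear_user() is a hypothetical stand-in for the kernel primitive:

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int fake_clear_user(char *ubuf, size_t n)  /* stand-in helper */
{
    memset(ubuf, 0, n);  /* the real clear_user() also validates access */
    return 0;            /* nonzero would mean "bytes left unwritten" */
}

static int zero_fill(char *buf, size_t n, int userbuf)
{
    if (userbuf) {
        if (fake_clear_user(buf, n))
            return -EFAULT;
    } else {
        memset(buf, 0, n);
    }
    return 0;
}

int main(void)
{
    char buf[8] = "junkjun";

    printf("rc=%d first=%d\n", zero_fill(buf, sizeof(buf), 1), buf[0]);
    return 0;
}
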
63269diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
63270index b00fcc9..e0c6381 100644
63271--- a/fs/qnx6/qnx6.h
63272+++ b/fs/qnx6/qnx6.h
63273@@ -74,7 +74,7 @@ enum {
63274 BYTESEX_BE,
63275 };
63276
63277-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
63278+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
63279 {
63280 if (sbi->s_bytesex == BYTESEX_LE)
63281 return le64_to_cpu((__force __le64)n);
63282@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
63283 return (__force __fs64)cpu_to_be64(n);
63284 }
63285
63286-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
63287+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
63288 {
63289 if (sbi->s_bytesex == BYTESEX_LE)
63290 return le32_to_cpu((__force __le32)n);
63291diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
63292index 72d2917..c917c12 100644
63293--- a/fs/quota/netlink.c
63294+++ b/fs/quota/netlink.c
63295@@ -45,7 +45,7 @@ static struct genl_family quota_genl_family = {
63296 void quota_send_warning(struct kqid qid, dev_t dev,
63297 const char warntype)
63298 {
63299- static atomic_t seq;
63300+ static atomic_unchecked_t seq;
63301 struct sk_buff *skb;
63302 void *msg_head;
63303 int ret;
63304@@ -61,7 +61,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
63305 "VFS: Not enough memory to send quota warning.\n");
63306 return;
63307 }
63308- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
63309+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
63310 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
63311 if (!msg_head) {
63312 printk(KERN_ERR
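
atomic_unchecked_t is the PaX REFCOUNT opt-out: plain atomic_t operations are overflow-checked to stop reference-count exploits, while counters that may legitimately wrap — like the netlink sequence number here — are switched to the unchecked variant. A rough userspace analog of the two behaviors (the real feature lives in the atomic ops themselves):

#include <limits.h>
#include <stdio.h>

/* analog of a checked atomic_inc(): refuse to wrap */
static int checked_inc(int *v)
{
    if (*v == INT_MAX) {
        fprintf(stderr, "refcount overflow caught\n");
        return -1;       /* the kernel logs the event instead */
    }
    return ++*v;
}

/* analog of atomic_inc_unchecked(): wrapping is expected and harmless */
static unsigned int unchecked_inc(unsigned int *v)
{
    return ++*v;         /* sequence numbers may roll over by design */
}

int main(void)
{
    int r = INT_MAX;
    unsigned int s = UINT_MAX;

    checked_inc(&r);
    printf("seq wraps to %u\n", unchecked_inc(&s));  /* prints 0 */
    return 0;
}
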
63313diff --git a/fs/read_write.c b/fs/read_write.c
63314index 58e440d..8ec2838 100644
63315--- a/fs/read_write.c
63316+++ b/fs/read_write.c
63317@@ -438,7 +438,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
63318
63319 old_fs = get_fs();
63320 set_fs(get_ds());
63321- p = (__force const char __user *)buf;
63322+ p = (const char __force_user *)buf;
63323 if (count > MAX_RW_COUNT)
63324 count = MAX_RW_COUNT;
63325 if (file->f_op->write)
63326diff --git a/fs/readdir.c b/fs/readdir.c
63327index 5b53d99..a6c3049 100644
63328--- a/fs/readdir.c
63329+++ b/fs/readdir.c
63330@@ -17,6 +17,7 @@
63331 #include <linux/security.h>
63332 #include <linux/syscalls.h>
63333 #include <linux/unistd.h>
63334+#include <linux/namei.h>
63335
63336 #include <asm/uaccess.h>
63337
63338@@ -69,6 +70,7 @@ struct old_linux_dirent {
63339 struct readdir_callback {
63340 struct dir_context ctx;
63341 struct old_linux_dirent __user * dirent;
63342+ struct file * file;
63343 int result;
63344 };
63345
63346@@ -86,6 +88,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
63347 buf->result = -EOVERFLOW;
63348 return -EOVERFLOW;
63349 }
63350+
63351+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
63352+ return 0;
63353+
63354 buf->result++;
63355 dirent = buf->dirent;
63356 if (!access_ok(VERIFY_WRITE, dirent,
63357@@ -117,6 +123,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
63358 if (!f.file)
63359 return -EBADF;
63360
63361+ buf.file = f.file;
63362 error = iterate_dir(f.file, &buf.ctx);
63363 if (buf.result)
63364 error = buf.result;
63365@@ -142,6 +149,7 @@ struct getdents_callback {
63366 struct dir_context ctx;
63367 struct linux_dirent __user * current_dir;
63368 struct linux_dirent __user * previous;
63369+ struct file * file;
63370 int count;
63371 int error;
63372 };
63373@@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
63374 buf->error = -EOVERFLOW;
63375 return -EOVERFLOW;
63376 }
63377+
63378+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
63379+ return 0;
63380+
63381 dirent = buf->previous;
63382 if (dirent) {
63383 if (__put_user(offset, &dirent->d_off))
63384@@ -208,6 +220,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
63385 if (!f.file)
63386 return -EBADF;
63387
63388+ buf.file = f.file;
63389 error = iterate_dir(f.file, &buf.ctx);
63390 if (error >= 0)
63391 error = buf.error;
63392@@ -226,6 +239,7 @@ struct getdents_callback64 {
63393 struct dir_context ctx;
63394 struct linux_dirent64 __user * current_dir;
63395 struct linux_dirent64 __user * previous;
63396+ struct file *file;
63397 int count;
63398 int error;
63399 };
63400@@ -241,6 +255,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
63401 buf->error = -EINVAL; /* only used if we fail.. */
63402 if (reclen > buf->count)
63403 return -EINVAL;
63404+
63405+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
63406+ return 0;
63407+
63408 dirent = buf->previous;
63409 if (dirent) {
63410 if (__put_user(offset, &dirent->d_off))
63411@@ -288,6 +306,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
63412 if (!f.file)
63413 return -EBADF;
63414
63415+ buf.file = f.file;
63416 error = iterate_dir(f.file, &buf.ctx);
63417 if (error >= 0)
63418 error = buf.error;
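
Each filldir callback above gains a gr_acl_handle_filldir() hook that returns 0 to silently skip entries the RBAC policy hides, rather than failing the whole listing. The same shape in a userspace directory walk; policy_allows() is a hypothetical stand-in for the policy check:

#include <dirent.h>
#include <stdio.h>
#include <string.h>

static int policy_allows(const char *name)       /* hypothetical rule */
{
    return strncmp(name, ".hidden", 7) != 0;     /* hide ".hidden*" */
}

int main(void)
{
    DIR *d = opendir(".");
    struct dirent *de;

    if (!d)
        return 1;
    while ((de = readdir(d)))
        if (policy_allows(de->d_name))           /* skip, don't error */
            puts(de->d_name);
    closedir(d);
    return 0;
}
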
63419diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
63420index 2b7882b..1c5ef48 100644
63421--- a/fs/reiserfs/do_balan.c
63422+++ b/fs/reiserfs/do_balan.c
63423@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
63424 return;
63425 }
63426
63427- atomic_inc(&(fs_generation(tb->tb_sb)));
63428+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
63429 do_balance_starts(tb);
63430
63431 /* balance leaf returns 0 except if combining L R and S into
63432diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
63433index ee382ef..f4eb6eb5 100644
63434--- a/fs/reiserfs/item_ops.c
63435+++ b/fs/reiserfs/item_ops.c
63436@@ -725,18 +725,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
63437 }
63438
63439 static struct item_operations errcatch_ops = {
63440- errcatch_bytes_number,
63441- errcatch_decrement_key,
63442- errcatch_is_left_mergeable,
63443- errcatch_print_item,
63444- errcatch_check_item,
63445+ .bytes_number = errcatch_bytes_number,
63446+ .decrement_key = errcatch_decrement_key,
63447+ .is_left_mergeable = errcatch_is_left_mergeable,
63448+ .print_item = errcatch_print_item,
63449+ .check_item = errcatch_check_item,
63450
63451- errcatch_create_vi,
63452- errcatch_check_left,
63453- errcatch_check_right,
63454- errcatch_part_size,
63455- errcatch_unit_num,
63456- errcatch_print_vi
63457+ .create_vi = errcatch_create_vi,
63458+ .check_left = errcatch_check_left,
63459+ .check_right = errcatch_check_right,
63460+ .part_size = errcatch_part_size,
63461+ .unit_num = errcatch_unit_num,
63462+ .print_vi = errcatch_print_vi
63463 };
63464
63465 //////////////////////////////////////////////////////////////////////////////
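
The errcatch_ops conversion is not cosmetic: positional initializers bind values by field order, so a compile-time randomized layout (see GRKERNSEC_RANDSTRUCT later in this patch) would wire handlers to the wrong slots. A minimal illustration with an invented two-member ops struct:

#include <stdio.h>

struct ops {
    int  (*open)(void);
    void (*close)(void);
};

static int  my_open(void)  { return 42; }
static void my_close(void) { }

/* positional: silently wrong if 'open' and 'close' ever swap places */
static struct ops pos_ops = { my_open, my_close };
/* designated: correct under any field ordering */
static struct ops des_ops = { .open = my_open, .close = my_close };

int main(void)
{
    printf("%d %d\n", pos_ops.open(), des_ops.open());
    return 0;
}
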
63466diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
63467index a958444..42b2323 100644
63468--- a/fs/reiserfs/procfs.c
63469+++ b/fs/reiserfs/procfs.c
63470@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
63471 "SMALL_TAILS " : "NO_TAILS ",
63472 replay_only(sb) ? "REPLAY_ONLY " : "",
63473 convert_reiserfs(sb) ? "CONV " : "",
63474- atomic_read(&r->s_generation_counter),
63475+ atomic_read_unchecked(&r->s_generation_counter),
63476 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
63477 SF(s_do_balance), SF(s_unneeded_left_neighbor),
63478 SF(s_good_search_by_key_reada), SF(s_bmaps),
63479diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
63480index f8adaee..0eeeeca 100644
63481--- a/fs/reiserfs/reiserfs.h
63482+++ b/fs/reiserfs/reiserfs.h
63483@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
63484 /* Comment? -Hans */
63485 wait_queue_head_t s_wait;
63486 /* To be obsoleted soon by per buffer seals.. -Hans */
63487- atomic_t s_generation_counter; // increased by one every time the
63488+ atomic_unchecked_t s_generation_counter; // increased by one every time the
63489 // tree gets re-balanced
63490 unsigned long s_properties; /* File system properties. Currently holds
63491 on-disk FS format */
63492@@ -1982,7 +1982,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
63493 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
63494
63495 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
63496-#define get_generation(s) atomic_read (&fs_generation(s))
63497+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
63498 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
63499 #define __fs_changed(gen,s) (gen != get_generation (s))
63500 #define fs_changed(gen,s) \
63501diff --git a/fs/select.c b/fs/select.c
63502index 467bb1c..cf9d65a 100644
63503--- a/fs/select.c
63504+++ b/fs/select.c
63505@@ -20,6 +20,7 @@
63506 #include <linux/export.h>
63507 #include <linux/slab.h>
63508 #include <linux/poll.h>
63509+#include <linux/security.h>
63510 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
63511 #include <linux/file.h>
63512 #include <linux/fdtable.h>
63513@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
63514 struct poll_list *walk = head;
63515 unsigned long todo = nfds;
63516
63517+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
63518 if (nfds > rlimit(RLIMIT_NOFILE))
63519 return -EINVAL;
63520
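
The added gr_learn_resource() call reports the requested nfds to grsecurity's RBAC learning mode before the existing bound is enforced. The bound itself is the ordinary per-process open-file rlimit, visible from userspace:

#include <sys/resource.h>
#include <stdio.h>

int main(void)
{
    struct rlimit rl;

    if (getrlimit(RLIMIT_NOFILE, &rl) == 0)
        printf("poll() rejects nfds > %llu\n",
               (unsigned long long)rl.rlim_cur);
    return 0;
}
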
63521diff --git a/fs/seq_file.c b/fs/seq_file.c
63522index 1d641bb..e600623 100644
63523--- a/fs/seq_file.c
63524+++ b/fs/seq_file.c
63525@@ -10,6 +10,7 @@
63526 #include <linux/seq_file.h>
63527 #include <linux/slab.h>
63528 #include <linux/cred.h>
63529+#include <linux/sched.h>
63530
63531 #include <asm/uaccess.h>
63532 #include <asm/page.h>
63533@@ -60,6 +61,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
63534 #ifdef CONFIG_USER_NS
63535 p->user_ns = file->f_cred->user_ns;
63536 #endif
63537+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63538+ p->exec_id = current->exec_id;
63539+#endif
63540
63541 /*
63542 * Wrappers around seq_open(e.g. swaps_open) need to be
63543@@ -96,7 +100,7 @@ static int traverse(struct seq_file *m, loff_t offset)
63544 return 0;
63545 }
63546 if (!m->buf) {
63547- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
63548+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
63549 if (!m->buf)
63550 return -ENOMEM;
63551 }
63552@@ -137,7 +141,7 @@ Eoverflow:
63553 m->op->stop(m, p);
63554 kfree(m->buf);
63555 m->count = 0;
63556- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
63557+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
63558 return !m->buf ? -ENOMEM : -EAGAIN;
63559 }
63560
63561@@ -153,7 +157,7 @@ Eoverflow:
63562 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
63563 {
63564 struct seq_file *m = file->private_data;
63565- size_t copied = 0;
63566+ ssize_t copied = 0;
63567 loff_t pos;
63568 size_t n;
63569 void *p;
63570@@ -192,7 +196,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
63571
63572 /* grab buffer if we didn't have one */
63573 if (!m->buf) {
63574- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
63575+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
63576 if (!m->buf)
63577 goto Enomem;
63578 }
63579@@ -234,7 +238,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
63580 m->op->stop(m, p);
63581 kfree(m->buf);
63582 m->count = 0;
63583- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
63584+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
63585 if (!m->buf)
63586 goto Enomem;
63587 m->version = 0;
63588@@ -584,7 +588,7 @@ static void single_stop(struct seq_file *p, void *v)
63589 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
63590 void *data)
63591 {
63592- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
63593+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
63594 int res = -ENOMEM;
63595
63596 if (op) {
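
Both traverse() and seq_read() use the same grow-and-retry loop these hunks touch: if a record overflows the buffer, free it, double m->size, and try again. A self-contained sketch of that loop using snprintf() truncation as the overflow signal (names here are illustrative, not kernel API):

#include <stdio.h>
#include <stdlib.h>

static char *format_record(int value)
{
    size_t size = 16;                /* analog of the PAGE_SIZE start */
    char *buf = NULL;

    for (;;) {
        int n;

        free(buf);
        buf = malloc(size);
        if (!buf)
            return NULL;
        n = snprintf(buf, size, "value=%d padding.........", value);
        if (n >= 0 && (size_t)n < size)
            return buf;              /* it fit */
        size <<= 1;                  /* analog of m->size <<= 1 */
    }
}

int main(void)
{
    char *s = format_record(7);

    if (s) {
        puts(s);
        free(s);
    }
    return 0;
}
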
63597diff --git a/fs/splice.c b/fs/splice.c
63598index 46a08f7..bb163cc 100644
63599--- a/fs/splice.c
63600+++ b/fs/splice.c
63601@@ -196,7 +196,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
63602 pipe_lock(pipe);
63603
63604 for (;;) {
63605- if (!pipe->readers) {
63606+ if (!atomic_read(&pipe->readers)) {
63607 send_sig(SIGPIPE, current, 0);
63608 if (!ret)
63609 ret = -EPIPE;
63610@@ -219,7 +219,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
63611 page_nr++;
63612 ret += buf->len;
63613
63614- if (pipe->files)
63615+ if (atomic_read(&pipe->files))
63616 do_wakeup = 1;
63617
63618 if (!--spd->nr_pages)
63619@@ -250,9 +250,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
63620 do_wakeup = 0;
63621 }
63622
63623- pipe->waiting_writers++;
63624+ atomic_inc(&pipe->waiting_writers);
63625 pipe_wait(pipe);
63626- pipe->waiting_writers--;
63627+ atomic_dec(&pipe->waiting_writers);
63628 }
63629
63630 pipe_unlock(pipe);
63631@@ -565,7 +565,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
63632 old_fs = get_fs();
63633 set_fs(get_ds());
63634 /* The cast to a user pointer is valid due to the set_fs() */
63635- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
63636+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
63637 set_fs(old_fs);
63638
63639 return res;
63640@@ -580,7 +580,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
63641 old_fs = get_fs();
63642 set_fs(get_ds());
63643 /* The cast to a user pointer is valid due to the set_fs() */
63644- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
63645+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
63646 set_fs(old_fs);
63647
63648 return res;
63649@@ -633,7 +633,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
63650 goto err;
63651
63652 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
63653- vec[i].iov_base = (void __user *) page_address(page);
63654+ vec[i].iov_base = (void __force_user *) page_address(page);
63655 vec[i].iov_len = this_len;
63656 spd.pages[i] = page;
63657 spd.nr_pages++;
63658@@ -829,7 +829,7 @@ int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
63659 ops->release(pipe, buf);
63660 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
63661 pipe->nrbufs--;
63662- if (pipe->files)
63663+ if (atomic_read(&pipe->files))
63664 sd->need_wakeup = true;
63665 }
63666
63667@@ -854,10 +854,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
63668 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
63669 {
63670 while (!pipe->nrbufs) {
63671- if (!pipe->writers)
63672+ if (!atomic_read(&pipe->writers))
63673 return 0;
63674
63675- if (!pipe->waiting_writers && sd->num_spliced)
63676+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
63677 return 0;
63678
63679 if (sd->flags & SPLICE_F_NONBLOCK)
63680@@ -1179,7 +1179,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
63681 * out of the pipe right after the splice_to_pipe(). So set
63682 * PIPE_READERS appropriately.
63683 */
63684- pipe->readers = 1;
63685+ atomic_set(&pipe->readers, 1);
63686
63687 current->splice_pipe = pipe;
63688 }
63689@@ -1475,6 +1475,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
63690
63691 partial[buffers].offset = off;
63692 partial[buffers].len = plen;
63693+ partial[buffers].private = 0;
63694
63695 off = 0;
63696 len -= plen;
63697@@ -1777,9 +1778,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
63698 ret = -ERESTARTSYS;
63699 break;
63700 }
63701- if (!pipe->writers)
63702+ if (!atomic_read(&pipe->writers))
63703 break;
63704- if (!pipe->waiting_writers) {
63705+ if (!atomic_read(&pipe->waiting_writers)) {
63706 if (flags & SPLICE_F_NONBLOCK) {
63707 ret = -EAGAIN;
63708 break;
63709@@ -1811,7 +1812,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
63710 pipe_lock(pipe);
63711
63712 while (pipe->nrbufs >= pipe->buffers) {
63713- if (!pipe->readers) {
63714+ if (!atomic_read(&pipe->readers)) {
63715 send_sig(SIGPIPE, current, 0);
63716 ret = -EPIPE;
63717 break;
63718@@ -1824,9 +1825,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
63719 ret = -ERESTARTSYS;
63720 break;
63721 }
63722- pipe->waiting_writers++;
63723+ atomic_inc(&pipe->waiting_writers);
63724 pipe_wait(pipe);
63725- pipe->waiting_writers--;
63726+ atomic_dec(&pipe->waiting_writers);
63727 }
63728
63729 pipe_unlock(pipe);
63730@@ -1862,14 +1863,14 @@ retry:
63731 pipe_double_lock(ipipe, opipe);
63732
63733 do {
63734- if (!opipe->readers) {
63735+ if (!atomic_read(&opipe->readers)) {
63736 send_sig(SIGPIPE, current, 0);
63737 if (!ret)
63738 ret = -EPIPE;
63739 break;
63740 }
63741
63742- if (!ipipe->nrbufs && !ipipe->writers)
63743+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
63744 break;
63745
63746 /*
63747@@ -1966,7 +1967,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
63748 pipe_double_lock(ipipe, opipe);
63749
63750 do {
63751- if (!opipe->readers) {
63752+ if (!atomic_read(&opipe->readers)) {
63753 send_sig(SIGPIPE, current, 0);
63754 if (!ret)
63755 ret = -EPIPE;
63756@@ -2011,7 +2012,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
63757 * return EAGAIN if we have the potential of some data in the
63758 * future, otherwise just return 0
63759 */
63760- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
63761+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
63762 ret = -EAGAIN;
63763
63764 pipe_unlock(ipipe);
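
Several of the converted checks (!atomic_read(&pipe->readers)) decide when a writer gets SIGPIPE/-EPIPE. The same contract is observable from userspace with an ordinary pipe:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    int fds[2];

    signal(SIGPIPE, SIG_IGN);        /* get -EPIPE instead of a signal */
    if (pipe(fds))
        return 1;
    close(fds[0]);                   /* drop the only reader */
    if (write(fds[1], "x", 1) < 0)
        printf("write: %s\n", strerror(errno));  /* Broken pipe */
    return 0;
}
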
63765diff --git a/fs/stat.c b/fs/stat.c
63766index ae0c3ce..9ee641c 100644
63767--- a/fs/stat.c
63768+++ b/fs/stat.c
63769@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
63770 stat->gid = inode->i_gid;
63771 stat->rdev = inode->i_rdev;
63772 stat->size = i_size_read(inode);
63773- stat->atime = inode->i_atime;
63774- stat->mtime = inode->i_mtime;
63775+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
63776+ stat->atime = inode->i_ctime;
63777+ stat->mtime = inode->i_ctime;
63778+ } else {
63779+ stat->atime = inode->i_atime;
63780+ stat->mtime = inode->i_mtime;
63781+ }
63782 stat->ctime = inode->i_ctime;
63783 stat->blksize = (1 << inode->i_blkbits);
63784 stat->blocks = inode->i_blocks;
63785@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
63786 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
63787 {
63788 struct inode *inode = path->dentry->d_inode;
63789+ int retval;
63790
63791- if (inode->i_op->getattr)
63792- return inode->i_op->getattr(path->mnt, path->dentry, stat);
63793+ if (inode->i_op->getattr) {
63794+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
63795+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
63796+ stat->atime = stat->ctime;
63797+ stat->mtime = stat->ctime;
63798+ }
63799+ return retval;
63800+ }
63801
63802 generic_fillattr(inode, stat);
63803 return 0;
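
The is_sidechannel_device() clamp means unprivileged stat() on such devices reports ctime in place of atime and mtime, closing a classic terminal-activity side channel. A plain stat reader to observe the three fields, with no grsecurity assumptions:

#include <sys/stat.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    struct stat st;
    const char *path = argc > 1 ? argv[1] : "/dev/null";

    if (stat(path, &st))
        return 1;
    printf("atime=%ld mtime=%ld ctime=%ld\n",
           (long)st.st_atime, (long)st.st_mtime, (long)st.st_ctime);
    return 0;
}
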
63804diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
63805index 5e73d66..4f165fd 100644
63806--- a/fs/sysfs/dir.c
63807+++ b/fs/sysfs/dir.c
63808@@ -40,7 +40,7 @@ static DEFINE_IDA(sysfs_ino_ida);
63809 *
63810 * Returns 31 bit hash of ns + name (so it fits in an off_t )
63811 */
63812-static unsigned int sysfs_name_hash(const char *name, const void *ns)
63813+static unsigned int sysfs_name_hash(const unsigned char *name, const void *ns)
63814 {
63815 unsigned long hash = init_name_hash();
63816 unsigned int len = strlen(name);
63817@@ -676,6 +676,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
63818 struct sysfs_dirent *sd;
63819 int rc;
63820
63821+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
63822+ const char *parent_name = parent_sd->s_name;
63823+
63824+ mode = S_IFDIR | S_IRWXU;
63825+
63826+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
63827+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
63828+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
63829+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
63830+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
63831+#endif
63832+
63833 /* allocate */
63834 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
63835 if (!sd)
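
The sysfs_name_hash() signature change avoids feeding sign-extended bytes into the name hash: whether plain char is signed is implementation-defined, and on the common signed-char ABIs any byte >= 0x80 changes value when widened. A two-line demonstration:

#include <stdio.h>

int main(void)
{
    char          sc = (char)0xe9;   /* e.g. a byte from a UTF-8 name */
    unsigned char uc = 0xe9;

    /* on signed-char platforms the first line prints ff...e9 */
    printf("possibly sign-extended: %lx\n", (unsigned long)sc);
    printf("zero-extended:          %lx\n", (unsigned long)uc);
    return 0;
}
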
63836diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
63837index 35e7d08..4d6e676 100644
63838--- a/fs/sysfs/file.c
63839+++ b/fs/sysfs/file.c
63840@@ -42,7 +42,7 @@ static DEFINE_MUTEX(sysfs_open_file_mutex);
63841
63842 struct sysfs_open_dirent {
63843 atomic_t refcnt;
63844- atomic_t event;
63845+ atomic_unchecked_t event;
63846 wait_queue_head_t poll;
63847 struct list_head files; /* goes through sysfs_open_file.list */
63848 };
63849@@ -112,7 +112,7 @@ static int sysfs_seq_show(struct seq_file *sf, void *v)
63850 return -ENODEV;
63851 }
63852
63853- of->event = atomic_read(&of->sd->s_attr.open->event);
63854+ of->event = atomic_read_unchecked(&of->sd->s_attr.open->event);
63855
63856 /*
63857 * Lookup @ops and invoke show(). Control may reach here via seq
63858@@ -365,12 +365,12 @@ static int sysfs_bin_page_mkwrite(struct vm_area_struct *vma,
63859 return ret;
63860 }
63861
63862-static int sysfs_bin_access(struct vm_area_struct *vma, unsigned long addr,
63863- void *buf, int len, int write)
63864+static ssize_t sysfs_bin_access(struct vm_area_struct *vma, unsigned long addr,
63865+ void *buf, size_t len, int write)
63866 {
63867 struct file *file = vma->vm_file;
63868 struct sysfs_open_file *of = sysfs_of(file);
63869- int ret;
63870+ ssize_t ret;
63871
63872 if (!of->vm_ops)
63873 return -EINVAL;
63874@@ -564,7 +564,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
63875 return -ENOMEM;
63876
63877 atomic_set(&new_od->refcnt, 0);
63878- atomic_set(&new_od->event, 1);
63879+ atomic_set_unchecked(&new_od->event, 1);
63880 init_waitqueue_head(&new_od->poll);
63881 INIT_LIST_HEAD(&new_od->files);
63882 goto retry;
63883@@ -768,7 +768,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
63884
63885 sysfs_put_active(attr_sd);
63886
63887- if (of->event != atomic_read(&od->event))
63888+ if (of->event != atomic_read_unchecked(&od->event))
63889 goto trigger;
63890
63891 return DEFAULT_POLLMASK;
63892@@ -787,7 +787,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
63893 if (!WARN_ON(sysfs_type(sd) != SYSFS_KOBJ_ATTR)) {
63894 od = sd->s_attr.open;
63895 if (od) {
63896- atomic_inc(&od->event);
63897+ atomic_inc_unchecked(&od->event);
63898 wake_up_interruptible(&od->poll);
63899 }
63900 }
63901diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
63902index 3ae3f1b..081a26c 100644
63903--- a/fs/sysfs/symlink.c
63904+++ b/fs/sysfs/symlink.c
63905@@ -314,7 +314,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
63906 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd,
63907 void *cookie)
63908 {
63909- char *page = nd_get_link(nd);
63910+ const char *page = nd_get_link(nd);
63911 if (!IS_ERR(page))
63912 free_page((unsigned long)page);
63913 }
63914diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
63915index 69d4889..a810bd4 100644
63916--- a/fs/sysv/sysv.h
63917+++ b/fs/sysv/sysv.h
63918@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
63919 #endif
63920 }
63921
63922-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
63923+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
63924 {
63925 if (sbi->s_bytesex == BYTESEX_PDP)
63926 return PDP_swab((__force __u32)n);
63927diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
63928index e18b988..f1d4ad0f 100644
63929--- a/fs/ubifs/io.c
63930+++ b/fs/ubifs/io.c
63931@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
63932 return err;
63933 }
63934
63935-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
63936+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
63937 {
63938 int err;
63939
63940diff --git a/fs/udf/misc.c b/fs/udf/misc.c
63941index c175b4d..8f36a16 100644
63942--- a/fs/udf/misc.c
63943+++ b/fs/udf/misc.c
63944@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
63945
63946 u8 udf_tag_checksum(const struct tag *t)
63947 {
63948- u8 *data = (u8 *)t;
63949+ const u8 *data = (const u8 *)t;
63950 u8 checksum = 0;
63951 int i;
63952 for (i = 0; i < sizeof(struct tag); ++i)
63953diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
63954index 8d974c4..b82f6ec 100644
63955--- a/fs/ufs/swab.h
63956+++ b/fs/ufs/swab.h
63957@@ -22,7 +22,7 @@ enum {
63958 BYTESEX_BE
63959 };
63960
63961-static inline u64
63962+static inline u64 __intentional_overflow(-1)
63963 fs64_to_cpu(struct super_block *sbp, __fs64 n)
63964 {
63965 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
63966@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
63967 return (__force __fs64)cpu_to_be64(n);
63968 }
63969
63970-static inline u32
63971+static inline u32 __intentional_overflow(-1)
63972 fs32_to_cpu(struct super_block *sbp, __fs32 n)
63973 {
63974 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
63975diff --git a/fs/utimes.c b/fs/utimes.c
63976index aa138d6..5f3a811 100644
63977--- a/fs/utimes.c
63978+++ b/fs/utimes.c
63979@@ -1,6 +1,7 @@
63980 #include <linux/compiler.h>
63981 #include <linux/file.h>
63982 #include <linux/fs.h>
63983+#include <linux/security.h>
63984 #include <linux/linkage.h>
63985 #include <linux/mount.h>
63986 #include <linux/namei.h>
63987@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
63988 }
63989 }
63990 retry_deleg:
63991+
63992+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
63993+ error = -EACCES;
63994+ goto mnt_drop_write_and_out;
63995+ }
63996+
63997 mutex_lock(&inode->i_mutex);
63998 error = notify_change(path->dentry, &newattrs, &delegated_inode);
63999 mutex_unlock(&inode->i_mutex);
64000diff --git a/fs/xattr.c b/fs/xattr.c
64001index 3377dff..f394815 100644
64002--- a/fs/xattr.c
64003+++ b/fs/xattr.c
64004@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
64005 return rc;
64006 }
64007
64008+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
64009+ssize_t
64010+pax_getxattr(struct dentry *dentry, void *value, size_t size)
64011+{
64012+ struct inode *inode = dentry->d_inode;
64013+ ssize_t error;
64014+
64015+ error = inode_permission(inode, MAY_EXEC);
64016+ if (error)
64017+ return error;
64018+
64019+ if (inode->i_op->getxattr)
64020+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
64021+ else
64022+ error = -EOPNOTSUPP;
64023+
64024+ return error;
64025+}
64026+EXPORT_SYMBOL(pax_getxattr);
64027+#endif
64028+
64029 ssize_t
64030 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
64031 {
64032@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
64033 * Extended attribute SET operations
64034 */
64035 static long
64036-setxattr(struct dentry *d, const char __user *name, const void __user *value,
64037+setxattr(struct path *path, const char __user *name, const void __user *value,
64038 size_t size, int flags)
64039 {
64040 int error;
64041@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
64042 posix_acl_fix_xattr_from_user(kvalue, size);
64043 }
64044
64045- error = vfs_setxattr(d, kname, kvalue, size, flags);
64046+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
64047+ error = -EACCES;
64048+ goto out;
64049+ }
64050+
64051+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
64052 out:
64053 if (vvalue)
64054 vfree(vvalue);
64055@@ -377,7 +403,7 @@ retry:
64056 return error;
64057 error = mnt_want_write(path.mnt);
64058 if (!error) {
64059- error = setxattr(path.dentry, name, value, size, flags);
64060+ error = setxattr(&path, name, value, size, flags);
64061 mnt_drop_write(path.mnt);
64062 }
64063 path_put(&path);
64064@@ -401,7 +427,7 @@ retry:
64065 return error;
64066 error = mnt_want_write(path.mnt);
64067 if (!error) {
64068- error = setxattr(path.dentry, name, value, size, flags);
64069+ error = setxattr(&path, name, value, size, flags);
64070 mnt_drop_write(path.mnt);
64071 }
64072 path_put(&path);
64073@@ -416,16 +442,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
64074 const void __user *,value, size_t, size, int, flags)
64075 {
64076 struct fd f = fdget(fd);
64077- struct dentry *dentry;
64078 int error = -EBADF;
64079
64080 if (!f.file)
64081 return error;
64082- dentry = f.file->f_path.dentry;
64083- audit_inode(NULL, dentry, 0);
64084+ audit_inode(NULL, f.file->f_path.dentry, 0);
64085 error = mnt_want_write_file(f.file);
64086 if (!error) {
64087- error = setxattr(dentry, name, value, size, flags);
64088+ error = setxattr(&f.file->f_path, name, value, size, flags);
64089 mnt_drop_write_file(f.file);
64090 }
64091 fdput(f);
64092@@ -626,7 +650,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
64093 * Extended attribute REMOVE operations
64094 */
64095 static long
64096-removexattr(struct dentry *d, const char __user *name)
64097+removexattr(struct path *path, const char __user *name)
64098 {
64099 int error;
64100 char kname[XATTR_NAME_MAX + 1];
64101@@ -637,7 +661,10 @@ removexattr(struct dentry *d, const char __user *name)
64102 if (error < 0)
64103 return error;
64104
64105- return vfs_removexattr(d, kname);
64106+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
64107+ return -EACCES;
64108+
64109+ return vfs_removexattr(path->dentry, kname);
64110 }
64111
64112 SYSCALL_DEFINE2(removexattr, const char __user *, pathname,
64113@@ -652,7 +679,7 @@ retry:
64114 return error;
64115 error = mnt_want_write(path.mnt);
64116 if (!error) {
64117- error = removexattr(path.dentry, name);
64118+ error = removexattr(&path, name);
64119 mnt_drop_write(path.mnt);
64120 }
64121 path_put(&path);
64122@@ -675,7 +702,7 @@ retry:
64123 return error;
64124 error = mnt_want_write(path.mnt);
64125 if (!error) {
64126- error = removexattr(path.dentry, name);
64127+ error = removexattr(&path, name);
64128 mnt_drop_write(path.mnt);
64129 }
64130 path_put(&path);
64131@@ -689,16 +716,16 @@ retry:
64132 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
64133 {
64134 struct fd f = fdget(fd);
64135- struct dentry *dentry;
64136+ struct path *path;
64137 int error = -EBADF;
64138
64139 if (!f.file)
64140 return error;
64141- dentry = f.file->f_path.dentry;
64142- audit_inode(NULL, dentry, 0);
64143+ path = &f.file->f_path;
64144+ audit_inode(NULL, path->dentry, 0);
64145 error = mnt_want_write_file(f.file);
64146 if (!error) {
64147- error = removexattr(dentry, name);
64148+ error = removexattr(path, name);
64149 mnt_drop_write_file(f.file);
64150 }
64151 fdput(f);
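
setxattr() and removexattr() now take a struct path so the gr_acl_handle_*xattr() hooks can evaluate the mount as well as the dentry; policy denials surface as -EACCES. The userspace syscall surface is unchanged — an ordinary call, with an illustrative path:

#include <sys/xattr.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    /* user.* xattrs are writable by the file owner on most filesystems;
     * /tmp/demo is a placeholder path for illustration */
    if (setxattr("/tmp/demo", "user.note", "hi", 2, 0))
        printf("setxattr: %s\n", strerror(errno)); /* EACCES if denied */
    return 0;
}
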
64152diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
64153index 9fbea87..6b19972 100644
64154--- a/fs/xattr_acl.c
64155+++ b/fs/xattr_acl.c
64156@@ -76,8 +76,8 @@ struct posix_acl *
64157 posix_acl_from_xattr(struct user_namespace *user_ns,
64158 const void *value, size_t size)
64159 {
64160- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
64161- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
64162+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
64163+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
64164 int count;
64165 struct posix_acl *acl;
64166 struct posix_acl_entry *acl_e;
64167diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
64168index 3b2c14b..de031fe 100644
64169--- a/fs/xfs/xfs_bmap.c
64170+++ b/fs/xfs/xfs_bmap.c
64171@@ -584,7 +584,7 @@ xfs_bmap_validate_ret(
64172
64173 #else
64174 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
64175-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
64176+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
64177 #endif /* DEBUG */
64178
64179 /*
64180diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
64181index c4e50c6..8ba93e3 100644
64182--- a/fs/xfs/xfs_dir2_readdir.c
64183+++ b/fs/xfs/xfs_dir2_readdir.c
64184@@ -160,7 +160,12 @@ xfs_dir2_sf_getdents(
64185 ino = dp->d_ops->sf_get_ino(sfp, sfep);
64186 filetype = dp->d_ops->sf_get_ftype(sfep);
64187 ctx->pos = off & 0x7fffffff;
64188- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
64189+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
64190+ char name[sfep->namelen];
64191+ memcpy(name, sfep->name, sfep->namelen);
64192+ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(mp, filetype)))
64193+ return 0;
64194+ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
64195 xfs_dir3_get_dtype(mp, filetype)))
64196 return 0;
64197 sfep = dp->d_ops->sf_nextentry(sfp, sfep);
64198diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
64199index 33ad9a7..82c18ba 100644
64200--- a/fs/xfs/xfs_ioctl.c
64201+++ b/fs/xfs/xfs_ioctl.c
64202@@ -126,7 +126,7 @@ xfs_find_handle(
64203 }
64204
64205 error = -EFAULT;
64206- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
64207+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
64208 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
64209 goto out_put;
64210
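
The xfs_find_handle() fix adds the canonical guard for a fixed-size source object: reject any user-supplied length larger than sizeof(handle) before copying. The generic shape of the pattern, with an invented handle type and memcpy() standing in for copy_to_user():

#include <errno.h>
#include <stddef.h>
#include <string.h>

struct handle { unsigned char bytes[56]; };  /* invented, size arbitrary */

static int copy_handle(void *dst, const struct handle *h, size_t hsize)
{
    if (hsize > sizeof(*h))      /* never read past the source object */
        return -EFAULT;
    memcpy(dst, h, hsize);       /* kernel code uses copy_to_user() */
    return 0;
}
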
64211diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
64212index 104455b..764c512 100644
64213--- a/fs/xfs/xfs_iops.c
64214+++ b/fs/xfs/xfs_iops.c
64215@@ -397,7 +397,7 @@ xfs_vn_put_link(
64216 struct nameidata *nd,
64217 void *p)
64218 {
64219- char *s = nd_get_link(nd);
64220+ const char *s = nd_get_link(nd);
64221
64222 if (!IS_ERR(s))
64223 kfree(s);
64224diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
64225new file mode 100644
64226index 0000000..e98584b
64227--- /dev/null
64228+++ b/grsecurity/Kconfig
64229@@ -0,0 +1,1147 @@
64230+#
64231+# grsecurity configuration
64232+#
64233+menu "Memory Protections"
64234+depends on GRKERNSEC
64235+
64236+config GRKERNSEC_KMEM
64237+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
64238+ default y if GRKERNSEC_CONFIG_AUTO
64239+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
64240+ help
64241+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to be
64242+ written to or read from in order to modify or leak the contents of the
64243+ running kernel. /dev/port will also not be allowed to be opened, and support
64244+ for /dev/cpu/*/msr and kexec will be removed. If you have module
64245+ support disabled, enabling this will close up six ways that are
64246+ currently used to insert malicious code into the running kernel.
64247+
64248+ Even with this feature enabled, we still highly recommend that
64249+ you use the RBAC system, as it is still possible for an attacker to
64250+ modify the running kernel through other more obscure methods.
64251+
64252+ Enabling this feature will prevent the "cpupower" and "powertop" tools
64253+ from working.
64254+
64255+ It is highly recommended that you say Y here if you meet all the
64256+ conditions above.
64257+
64258+config GRKERNSEC_VM86
64259+ bool "Restrict VM86 mode"
64260+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
64261+ depends on X86_32
64262+
64263+ help
64264+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
64265+ make use of a special execution mode on 32bit x86 processors called
64266+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
64267+ video cards and will still work with this option enabled. The purpose
64268+ of the option is to prevent exploitation of emulation errors in
64269+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
64270+ Nearly all users should be able to enable this option.
64271+
64272+config GRKERNSEC_IO
64273+ bool "Disable privileged I/O"
64274+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
64275+ depends on X86
64276+ select RTC_CLASS
64277+ select RTC_INTF_DEV
64278+ select RTC_DRV_CMOS
64279+
64280+ help
64281+ If you say Y here, all ioperm and iopl calls will return an error.
64282+ Ioperm and iopl can be used to modify the running kernel.
64283+ Unfortunately, some programs need this access to operate properly,
64284+ the most notable of which are XFree86 and hwclock. The hwclock case can be
64285+ remedied by building RTC support into the kernel, so real-time
64286+ clock support is selected when this option is enabled, ensuring
64287+ that hwclock operates correctly.
64288+
64289+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
64290+ you may not be able to boot into a graphical environment with this
64291+ option enabled. In this case, you should use the RBAC system instead.
64292+
64293+config GRKERNSEC_JIT_HARDEN
64294+ bool "Harden BPF JIT against spray attacks"
64295+ default y if GRKERNSEC_CONFIG_AUTO
64296+ depends on BPF_JIT && X86
64297+ help
64298+ If you say Y here, the native code generated by the kernel's Berkeley
64299+ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
64300+ attacks that attempt to fit attacker-beneficial instructions in
64301+ 32bit immediate fields of JIT-generated native instructions. The
64302+ attacker will generally aim to cause an unintended instruction sequence
64303+ of JIT-generated native code to execute by jumping into the middle of
64304+ a generated instruction. This feature effectively randomizes the 32bit
64305+ immediate constants present in the generated code to thwart such attacks.
64306+
64307+ If you're using KERNEXEC, it's recommended that you enable this option
64308+ to supplement the hardening of the kernel.
64309+
64310+config GRKERNSEC_PERF_HARDEN
64311+ bool "Disable unprivileged PERF_EVENTS usage by default"
64312+ default y if GRKERNSEC_CONFIG_AUTO
64313+ depends on PERF_EVENTS
64314+ help
64315+ If you say Y here, the range of acceptable values for the
64316+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
64317+ default to a new value: 3. When the sysctl is set to this value, no
64318+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
64319+
64320+ Though PERF_EVENTS can be used legitimately for performance monitoring
64321+ and low-level application profiling, it is forced on regardless of
64322+ configuration, has been at fault for several vulnerabilities, and
64323+ creates new opportunities for side channels and other information leaks.
64324+
64325+ This feature puts PERF_EVENTS into a secure default state and permits
64326+ the administrator to change out of it temporarily if unprivileged
64327+ application profiling is needed.
64328+
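
GRKERNSEC_PERF_HARDEN extends the perf_event_paranoid sysctl with level 3 and defaults to it. The knob is an ordinary proc file; a minimal reader to check the current level:

#include <stdio.h>

int main(void)
{
    FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
    int level;

    if (f && fscanf(f, "%d", &level) == 1)
        printf("perf_event_paranoid = %d\n", level);  /* 3 = no unpriv use */
    if (f)
        fclose(f);
    return 0;
}
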
64329+config GRKERNSEC_RAND_THREADSTACK
64330+ bool "Insert random gaps between thread stacks"
64331+ default y if GRKERNSEC_CONFIG_AUTO
64332+ depends on PAX_RANDMMAP && !PPC
64333+ help
64334+ If you say Y here, a random-sized gap will be enforced between allocated
64335+ thread stacks. Glibc's NPTL and other threading libraries that
64336+ pass MAP_STACK to the kernel for thread stack allocation are supported.
64337+ The implementation currently provides 8 bits of entropy for the gap.
64338+
64339+ Many distributions do not compile threaded remote services with the
64340+ -fstack-check argument to GCC, causing the variable-sized stack-based
64341+ allocator, alloca(), to not probe the stack on allocation. This
64342+ permits an unbounded alloca() to skip over any guard page and potentially
64343+ modify another thread's stack reliably. An enforced random gap
64344+ reduces the reliability of such an attack and increases the chance
64345+ that such a read/write to another thread's stack instead lands in
64346+ an unmapped area, causing a crash and triggering grsecurity's
64347+ anti-bruteforcing logic.
64348+
64349+config GRKERNSEC_PROC_MEMMAP
64350+ bool "Harden ASLR against information leaks and entropy reduction"
64351+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
64352+ depends on PAX_NOEXEC || PAX_ASLR
64353+ help
64354+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
64355+ give no information about the addresses of a task's mappings if
64356+ PaX features that rely on random addresses are enabled on the task.
64357+ In addition to sanitizing this information and disabling other
64358+ dangerous sources of information, this option causes reads of sensitive
64359+ /proc/<pid> entries to return no data when the file descriptor was opened
64360+ in a different task than the one performing the read. Such attempts are logged.
64361+ This option also limits argv/env strings for suid/sgid binaries
64362+ to 512KB to prevent a complete exhaustion of the stack entropy provided
64363+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
64364+ binaries to prevent alternative mmap layouts from being abused.
64365+
64366+ If you use PaX it is essential that you say Y here as it closes up
64367+ several holes that make full ASLR useless locally.
64368+
64369+config GRKERNSEC_BRUTE
64370+ bool "Deter exploit bruteforcing"
64371+ default y if GRKERNSEC_CONFIG_AUTO
64372+ help
64373+ If you say Y here, attempts to bruteforce exploits against forking
64374+ daemons such as apache or sshd, as well as against suid/sgid binaries
64375+ will be deterred. When a child of a forking daemon is killed by PaX
64376+ or crashes due to an illegal instruction or other suspicious signal,
64377+ the parent process will be delayed 30 seconds upon every subsequent
64378+ fork until the administrator is able to assess the situation and
64379+ restart the daemon.
64380+ In the suid/sgid case, the attempt is logged, the user has all their
64381+ existing instances of the suid/sgid binary terminated and will
64382+ be unable to execute any suid/sgid binaries for 15 minutes.
64383+
64384+ It is recommended that you also enable signal logging in the auditing
64385+ section so that logs are generated when a process triggers a suspicious
64386+ signal.
64387+ If the sysctl option is enabled, a sysctl option with name
64388+ "deter_bruteforce" is created.
64389+
64390+config GRKERNSEC_MODHARDEN
64391+ bool "Harden module auto-loading"
64392+ default y if GRKERNSEC_CONFIG_AUTO
64393+ depends on MODULES
64394+ help
64395+ If you say Y here, module auto-loading in response to use of some
64396+ feature implemented by an unloaded module will be restricted to
64397+ root users. Enabling this option helps defend against attacks
64398+ by unprivileged users who abuse the auto-loading behavior to
64399+ cause a vulnerable module to load that is then exploited.
64400+
64401+ If this option prevents a legitimate use of auto-loading for a
64402+ non-root user, the administrator can execute modprobe manually
64403+ with the exact name of the module mentioned in the alert log.
64404+ Alternatively, the administrator can add the module to the list
64405+ of modules loaded at boot by modifying init scripts.
64406+
64407+ Modification of init scripts will most likely be needed on
64408+ Ubuntu servers with encrypted home directory support enabled,
64409+ as the first non-root user logging in will cause the ecb(aes),
64410+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
64411+
64412+config GRKERNSEC_HIDESYM
64413+ bool "Hide kernel symbols"
64414+ default y if GRKERNSEC_CONFIG_AUTO
64415+ select PAX_USERCOPY_SLABS
64416+ help
64417+ If you say Y here, getting information on loaded modules and
64418+ displaying all kernel symbols through a syscall will be restricted
64419+ to users with CAP_SYS_MODULE. For software compatibility reasons,
64420+ /proc/kallsyms will be restricted to the root user. The RBAC
64421+ system can hide that entry even from root.
64422+
64423+ This option also prevents leaking of kernel addresses through
64424+ several /proc entries.
64425+
64426+ Note that this option is only effective provided the following
64427+ conditions are met:
64428+ 1) The kernel using grsecurity is not precompiled by some distribution
64429+ 2) You have also enabled GRKERNSEC_DMESG
64430+ 3) You are using the RBAC system and hiding other files such as your
64431+ kernel image and System.map. Alternatively, enabling this option
64432+ causes the permissions on /boot, /lib/modules, and the kernel
64433+ source directory to change at compile time to prevent
64434+ reading by non-root users.
64435+ If the above conditions are met, this option will aid in providing a
64436+ useful protection against local kernel exploitation of overflows
64437+ and arbitrary read/write vulnerabilities.
64438+
64439+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
64440+ in addition to this feature.
64441+
64442+config GRKERNSEC_RANDSTRUCT
64443+ bool "Randomize layout of sensitive kernel structures"
64444+ default y if GRKERNSEC_CONFIG_AUTO
64445+ select GRKERNSEC_HIDESYM
64446+ select MODVERSIONS if MODULES
64447+ help
64448+ If you say Y here, the layouts of a number of sensitive kernel
64449+ structures (task, fs, cred, etc) and all structures composed entirely
64450+ of function pointers (aka "ops" structs) will be randomized at compile-time.
64451+ This can introduce the requirement of an additional infoleak
64452+ vulnerability for exploits targeting these structure types.
64453+
64454+ Enabling this feature will introduce some performance impact, slightly
64455+ increase memory usage, and prevent the use of forensic tools like
64456+ Volatility against the system (unless the kernel source tree is left
64457+ uncleaned after kernel installation).
64458+
64459+ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
64460+ It remains after a make clean to allow for external modules to be compiled
64461+ with the existing seed and will be removed by a make mrproper or
64462+ make distclean.
64463+
64464+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
64465+ bool "Use cacheline-aware structure randomization"
64466+ depends on GRKERNSEC_RANDSTRUCT
64467+ default y if GRKERNSEC_CONFIG_PRIORITY_PERF
64468+ help
64469+ If you say Y here, the RANDSTRUCT randomization will make a best effort
64470+ at restricting randomization to cacheline-sized groups of elements. It
64471+ will further not randomize bitfields in structures. This reduces the
64472+ performance hit of RANDSTRUCT at the cost of weakened randomization.
64473+
64474+config GRKERNSEC_KERN_LOCKOUT
64475+ bool "Active kernel exploit response"
64476+ default y if GRKERNSEC_CONFIG_AUTO
64477+ depends on X86 || ARM || PPC || SPARC
64478+ help
64479+ If you say Y here, when a PaX alert is triggered due to suspicious
64480+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
64481+ or an OOPS occurs due to bad memory accesses, instead of just
64482+ terminating the offending process (and potentially allowing
64483+ a subsequent exploit from the same user), we will take one of two
64484+ actions:
64485+ If the user was root, we will panic the system
64486+ If the user was non-root, we will log the attempt, terminate
64487+ all processes owned by the user, then prevent them from creating
64488+ any new processes until the system is restarted
64489+ This deters repeated kernel exploitation/bruteforcing attempts
64490+ and is useful for later forensics.
64491+
64492+config GRKERNSEC_OLD_ARM_USERLAND
64493+ bool "Old ARM userland compatibility"
64494+ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
64495+ help
64496+ If you say Y here, stubs of executable code to perform such operations
64497+ as "compare-exchange" will be placed at fixed locations in the ARM vector
64498+ table. This is unfortunately needed for old ARM userland meant to run
64499+ across a wide range of processors. Without this option enabled,
64500+ the get_tls and data memory barrier stubs will be emulated by the kernel,
64501+ which is enough for Linaro userlands or other userlands designed for v6
64502+ and newer ARM CPUs. It's recommended that you try without this option enabled
64503+ first, and only enable it if your userland does not boot (it will likely fail
64504+ at init time).
64505+
64506+endmenu
64507+menu "Role Based Access Control Options"
64508+depends on GRKERNSEC
64509+
64510+config GRKERNSEC_RBAC_DEBUG
64511+ bool
64512+
64513+config GRKERNSEC_NO_RBAC
64514+ bool "Disable RBAC system"
64515+ help
64516+ If you say Y here, the /dev/grsec device will be removed from the kernel,
64517+ preventing the RBAC system from being enabled. You should only say Y
64518+ here if you have no intention of using the RBAC system, so as to prevent
64519+ an attacker with root access from misusing the RBAC system to hide files
64520+ and processes when loadable module support and /dev/[k]mem have been
64521+ locked down.
64522+
64523+config GRKERNSEC_ACL_HIDEKERN
64524+ bool "Hide kernel processes"
64525+ help
64526+ If you say Y here, all kernel threads will be hidden from all
64527+ processes except those whose subject has the "view hidden processes"
64528+ flag.
64529+
64530+config GRKERNSEC_ACL_MAXTRIES
64531+ int "Maximum tries before password lockout"
64532+ default 3
64533+ help
64534+ This option enforces the maximum number of times a user can attempt
64535+ to authorize themselves with the grsecurity RBAC system before being
64536+ denied the ability to attempt authorization again for a specified time.
64537+ The lower the number, the harder it will be to brute-force a password.
64538+
64539+config GRKERNSEC_ACL_TIMEOUT
64540+ int "Time to wait after max password tries, in seconds"
64541+ default 30
64542+ help
64543+ This option specifies the time the user must wait after attempting to
64544+ authorize to the RBAC system with the maximum number of invalid
64545+ passwords. The higher the number, the harder it will be to brute-force
64546+ a password.
64547+
64548+endmenu
64549+menu "Filesystem Protections"
64550+depends on GRKERNSEC
64551+
64552+config GRKERNSEC_PROC
64553+ bool "Proc restrictions"
64554+ default y if GRKERNSEC_CONFIG_AUTO
64555+ help
64556+ If you say Y here, the permissions of the /proc filesystem
64557+ will be altered to enhance system security and privacy. You MUST
64558+ choose either a user only restriction or a user and group restriction.
64559+ Depending upon the option you choose, you can either restrict users to
64560+ see only the processes they themselves run, or choose a group that can
64561+ view all processes and files normally restricted to root if you choose
64562+ the "restrict to user only" option. NOTE: If you're running identd or
64563+ ntpd as a non-root user, you will have to run it as the group you
64564+ specify here.
64565+
64566+config GRKERNSEC_PROC_USER
64567+ bool "Restrict /proc to user only"
64568+ depends on GRKERNSEC_PROC
64569+ help
64570+ If you say Y here, non-root users will only be able to view their own
64571+ processes, and will be restricted from viewing network-related information
64572+ and kernel symbol and module information.
64573+
64574+config GRKERNSEC_PROC_USERGROUP
64575+ bool "Allow special group"
64576+ default y if GRKERNSEC_CONFIG_AUTO
64577+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
64578+ help
64579+ If you say Y here, you will be able to select a group that will be
64580+ able to view all processes and network-related information. If you've
64581+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
64582+ remain hidden. This option is useful if you want to run identd as
64583+ a non-root user. The group you select may also be chosen at boot time
64584+ via "grsec_proc_gid=" on the kernel commandline.
64585+
64586+config GRKERNSEC_PROC_GID
64587+ int "GID for special group"
64588+ depends on GRKERNSEC_PROC_USERGROUP
64589+ default 1001
64590+
64591+config GRKERNSEC_PROC_ADD
64592+ bool "Additional restrictions"
64593+ default y if GRKERNSEC_CONFIG_AUTO
64594+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
64595+ help
64596+ If you say Y here, additional restrictions will be placed on
64597+ /proc that keep normal users from viewing device information and
64598+ slabinfo information that could be useful for exploits.
64599+
64600+config GRKERNSEC_LINK
64601+ bool "Linking restrictions"
64602+ default y if GRKERNSEC_CONFIG_AUTO
64603+ help
64604+ If you say Y here, /tmp race exploits will be prevented, since users
64605+ will no longer be able to follow symlinks owned by other users in
64606+ world-writable +t directories (e.g. /tmp), unless the owner of the
64607+	  symlink is the owner of the directory. Users will also not be
64608+ able to hardlink to files they do not own. If the sysctl option is
64609+ enabled, a sysctl option with name "linking_restrictions" is created.
64610+
64611+config GRKERNSEC_SYMLINKOWN
64612+ bool "Kernel-enforced SymlinksIfOwnerMatch"
64613+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
64614+ help
64615+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
64616+ that prevents it from being used as a security feature. As Apache
64617+ verifies the symlink by performing a stat() against the target of
64618+	  the symlink before it is followed, an attacker can set up a symlink
64619+ to point to a same-owned file, then replace the symlink with one
64620+ that targets another user's file just after Apache "validates" the
64621+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
64622+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
64623+ will be in place for the group you specify. If the sysctl option
64624+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
64625+ created.
64626+
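To make the race concrete, here is a minimal userspace sketch of the
check-then-follow pattern described above; the function is hypothetical,
standing in for Apache's internal validation:

    #include <sys/stat.h>
    #include <fcntl.h>
    #include <unistd.h>

    /* Hypothetical stand-in for Apache's SymlinksIfOwnerMatch check:
     * stat() follows the symlink and "validates" the target's owner,
     * then open() follows the symlink a second time. An attacker who
     * retargets the symlink between the two calls defeats the check. */
    static int open_if_owner_matches(const char *path, uid_t expected_uid)
    {
        struct stat st;

        if (stat(path, &st) != 0 || st.st_uid != expected_uid)
            return -1;
        /* race window: the symlink can be swapped here */
        return open(path, O_RDONLY);
    }

The kernel-enforced replacement performs the ownership comparison at the
moment the symlink is actually followed, leaving no such window.
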
64627+config GRKERNSEC_SYMLINKOWN_GID
64628+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
64629+ depends on GRKERNSEC_SYMLINKOWN
64630+ default 1006
64631+ help
64632+ Setting this GID determines what group kernel-enforced
64633+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
64634+ is enabled, a sysctl option with name "symlinkown_gid" is created.
64635+
64636+config GRKERNSEC_FIFO
64637+ bool "FIFO restrictions"
64638+ default y if GRKERNSEC_CONFIG_AUTO
64639+ help
64640+ If you say Y here, users will not be able to write to FIFOs they don't
64641+ own in world-writable +t directories (e.g. /tmp), unless the owner of
64642+	  the FIFO is also the owner of the directory it resides in. If the sysctl
64643+ option is enabled, a sysctl option with name "fifo_restrictions" is
64644+ created.
64645+
64646+config GRKERNSEC_SYSFS_RESTRICT
64647+ bool "Sysfs/debugfs restriction"
64648+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
64649+ depends on SYSFS
64650+ help
64651+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
64652+ any filesystem normally mounted under it (e.g. debugfs) will be
64653+ mostly accessible only by root. These filesystems generally provide access
64654+ to hardware and debug information that isn't appropriate for unprivileged
64655+ users of the system. Sysfs and debugfs have also become a large source
64656+ of new vulnerabilities, ranging from infoleaks to local compromise.
64657+	  Very little security-minded oversight has been involved
64658+	  in adding new exporters of information to these filesystems, so their
64659+	  use is discouraged.
64660+ For reasons of compatibility, a few directories have been whitelisted
64661+ for access by non-root users:
64662+ /sys/fs/selinux
64663+ /sys/fs/fuse
64664+ /sys/devices/system/cpu
64665+
64666+config GRKERNSEC_ROFS
64667+ bool "Runtime read-only mount protection"
64668+ depends on SYSCTL
64669+ help
64670+ If you say Y here, a sysctl option with name "romount_protect" will
64671+ be created. By setting this option to 1 at runtime, filesystems
64672+ will be protected in the following ways:
64673+ * No new writable mounts will be allowed
64674+	  * Existing read-only mounts cannot be remounted read/write
64675+ * Write operations will be denied on all block devices
64676+ This option acts independently of grsec_lock: once it is set to 1,
64677+ it cannot be turned off. Therefore, please be mindful of the resulting
64678+ behavior if this option is enabled in an init script on a read-only
64679+ filesystem.
64680+ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
64681+ and GRKERNSEC_IO should be enabled and module loading disabled via
64682+ config or at runtime.
64683+ This feature is mainly intended for secure embedded systems.
64684+
64685+
64686+config GRKERNSEC_DEVICE_SIDECHANNEL
64687+ bool "Eliminate stat/notify-based device sidechannels"
64688+ default y if GRKERNSEC_CONFIG_AUTO
64689+ help
64690+ If you say Y here, timing analyses on block or character
64691+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
64692+ will be thwarted for unprivileged users. If a process without
64693+ CAP_MKNOD stats such a device, the last access and last modify times
64694+ will match the device's create time. No access or modify events
64695+ will be triggered through inotify/dnotify/fanotify for such devices.
64696+ This feature will prevent attacks that may at a minimum
64697+ allow an attacker to determine the administrator's password length.
64698+
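For illustration, a sketch of the kind of unprivileged probe this option
neutralizes, using the /dev/ptmx example from the text above:

    #include <sys/stat.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Stat-based sidechannel probe: polling /dev/ptmx's access and modify
     * times reveals when terminals see activity, which can leak e.g. the
     * number of keystrokes at a password prompt. With this option, a
     * process lacking CAP_MKNOD sees both times pinned to the device's
     * create time, so successive polls never change. */
    int main(void)
    {
        struct stat st;

        for (;;) {
            if (stat("/dev/ptmx", &st) != 0)
                return 1;
            printf("atime=%ld mtime=%ld\n",
                   (long)st.st_atime, (long)st.st_mtime);
            sleep(1);
        }
    }
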
64699+config GRKERNSEC_CHROOT
64700+ bool "Chroot jail restrictions"
64701+ default y if GRKERNSEC_CONFIG_AUTO
64702+ help
64703+ If you say Y here, you will be able to choose several options that will
64704+ make breaking out of a chrooted jail much more difficult. If you
64705+ encounter no software incompatibilities with the following options, it
64706+ is recommended that you enable each one.
64707+
64708+config GRKERNSEC_CHROOT_MOUNT
64709+ bool "Deny mounts"
64710+ default y if GRKERNSEC_CONFIG_AUTO
64711+ depends on GRKERNSEC_CHROOT
64712+ help
64713+ If you say Y here, processes inside a chroot will not be able to
64714+ mount or remount filesystems. If the sysctl option is enabled, a
64715+ sysctl option with name "chroot_deny_mount" is created.
64716+
64717+config GRKERNSEC_CHROOT_DOUBLE
64718+ bool "Deny double-chroots"
64719+ default y if GRKERNSEC_CONFIG_AUTO
64720+ depends on GRKERNSEC_CHROOT
64721+ help
64722+ If you say Y here, processes inside a chroot will not be able to chroot
64723+ again outside the chroot. This is a widely used method of breaking
64724+ out of a chroot jail and should not be allowed. If the sysctl
64725+ option is enabled, a sysctl option with name
64726+ "chroot_deny_chroot" is created.
64727+
64728+config GRKERNSEC_CHROOT_PIVOT
64729+ bool "Deny pivot_root in chroot"
64730+ default y if GRKERNSEC_CONFIG_AUTO
64731+ depends on GRKERNSEC_CHROOT
64732+ help
64733+ If you say Y here, processes inside a chroot will not be able to use
64734+ a function called pivot_root() that was introduced in Linux 2.3.41. It
64735+	  works similarly to chroot in that it changes the root filesystem. This
64736+ function could be misused in a chrooted process to attempt to break out
64737+ of the chroot, and therefore should not be allowed. If the sysctl
64738+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
64739+ created.
64740+
64741+config GRKERNSEC_CHROOT_CHDIR
64742+ bool "Enforce chdir(\"/\") on all chroots"
64743+ default y if GRKERNSEC_CONFIG_AUTO
64744+ depends on GRKERNSEC_CHROOT
64745+ help
64746+ If you say Y here, the current working directory of all newly-chrooted
64747+	  applications will be set to the root directory of the chroot.
64748+ The man page on chroot(2) states:
64749+ Note that this call does not change the current working
64750+ directory, so that `.' can be outside the tree rooted at
64751+ `/'. In particular, the super-user can escape from a
64752+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
64753+
64754+ It is recommended that you say Y here, since it's not known to break
64755+ any software. If the sysctl option is enabled, a sysctl option with
64756+ name "chroot_enforce_chdir" is created.
64757+
64758+config GRKERNSEC_CHROOT_CHMOD
64759+ bool "Deny (f)chmod +s"
64760+ default y if GRKERNSEC_CONFIG_AUTO
64761+ depends on GRKERNSEC_CHROOT
64762+ help
64763+ If you say Y here, processes inside a chroot will not be able to chmod
64764+ or fchmod files to make them have suid or sgid bits. This protects
64765+ against another published method of breaking a chroot. If the sysctl
64766+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
64767+ created.
64768+
64769+config GRKERNSEC_CHROOT_FCHDIR
64770+ bool "Deny fchdir out of chroot"
64771+ default y if GRKERNSEC_CONFIG_AUTO
64772+ depends on GRKERNSEC_CHROOT
64773+ help
64774+	  If you say Y here, a well-known method of breaking chroots by fchdir'ing
64775+	  to a file descriptor the chrooting process kept open to a directory
64776+	  outside the chroot will be stopped. If the sysctl option
64777+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
64778+
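For reference, a sketch of the classic break-out this option stops; the
"jail" directory name is hypothetical and error handling is omitted:

    #include <fcntl.h>
    #include <unistd.h>

    /* Classic fchdir() escape: keep a directory fd from outside the jail,
     * chroot, fchdir() back through the saved fd, climb to the real root,
     * and re-chroot there. Requires the ability to chroot inside the jail. */
    static void fchdir_escape(void)
    {
        int fd = open(".", O_RDONLY);   /* cwd is still outside the jail */
        int i;

        chroot("jail");                 /* enter the jail, fd retained */
        fchdir(fd);                     /* cwd now points outside the jail */
        for (i = 0; i < 64; i++)        /* walk up to the real root */
            chdir("..");
        chroot(".");                    /* re-root at the true / */
        close(fd);
    }
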
64779+config GRKERNSEC_CHROOT_MKNOD
64780+ bool "Deny mknod"
64781+ default y if GRKERNSEC_CONFIG_AUTO
64782+ depends on GRKERNSEC_CHROOT
64783+ help
64784+ If you say Y here, processes inside a chroot will not be allowed to
64785+ mknod. The problem with using mknod inside a chroot is that it
64786+ would allow an attacker to create a device entry that is the same
64787+	  as one on the physical root of your system, which could be
64788+	  anything from the console device to a device for your hard drive (which
64789+ they could then use to wipe the drive or steal data). It is recommended
64790+ that you say Y here, unless you run into software incompatibilities.
64791+ If the sysctl option is enabled, a sysctl option with name
64792+ "chroot_deny_mknod" is created.
64793+
64794+config GRKERNSEC_CHROOT_SHMAT
64795+ bool "Deny shmat() out of chroot"
64796+ default y if GRKERNSEC_CONFIG_AUTO
64797+ depends on GRKERNSEC_CHROOT
64798+ help
64799+ If you say Y here, processes inside a chroot will not be able to attach
64800+ to shared memory segments that were created outside of the chroot jail.
64801+ It is recommended that you say Y here. If the sysctl option is enabled,
64802+ a sysctl option with name "chroot_deny_shmat" is created.
64803+
64804+config GRKERNSEC_CHROOT_UNIX
64805+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
64806+ default y if GRKERNSEC_CONFIG_AUTO
64807+ depends on GRKERNSEC_CHROOT
64808+ help
64809+ If you say Y here, processes inside a chroot will not be able to
64810+ connect to abstract (meaning not belonging to a filesystem) Unix
64811+ domain sockets that were bound outside of a chroot. It is recommended
64812+ that you say Y here. If the sysctl option is enabled, a sysctl option
64813+ with name "chroot_deny_unix" is created.
64814+
64815+config GRKERNSEC_CHROOT_FINDTASK
64816+ bool "Protect outside processes"
64817+ default y if GRKERNSEC_CONFIG_AUTO
64818+ depends on GRKERNSEC_CHROOT
64819+ help
64820+ If you say Y here, processes inside a chroot will not be able to
64821+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
64822+ getsid, or view any process outside of the chroot. If the sysctl
64823+ option is enabled, a sysctl option with name "chroot_findtask" is
64824+ created.
64825+
64826+config GRKERNSEC_CHROOT_NICE
64827+ bool "Restrict priority changes"
64828+ default y if GRKERNSEC_CONFIG_AUTO
64829+ depends on GRKERNSEC_CHROOT
64830+ help
64831+ If you say Y here, processes inside a chroot will not be able to raise
64832+ the priority of processes in the chroot, or alter the priority of
64833+ processes outside the chroot. This provides more security than simply
64834+ removing CAP_SYS_NICE from the process' capability set. If the
64835+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
64836+ is created.
64837+
64838+config GRKERNSEC_CHROOT_SYSCTL
64839+ bool "Deny sysctl writes"
64840+ default y if GRKERNSEC_CONFIG_AUTO
64841+ depends on GRKERNSEC_CHROOT
64842+ help
64843+ If you say Y here, an attacker in a chroot will not be able to
64844+ write to sysctl entries, either by sysctl(2) or through a /proc
64845+ interface. It is strongly recommended that you say Y here. If the
64846+ sysctl option is enabled, a sysctl option with name
64847+ "chroot_deny_sysctl" is created.
64848+
64849+config GRKERNSEC_CHROOT_CAPS
64850+ bool "Capability restrictions"
64851+ default y if GRKERNSEC_CONFIG_AUTO
64852+ depends on GRKERNSEC_CHROOT
64853+ help
64854+ If you say Y here, the capabilities on all processes within a
64855+ chroot jail will be lowered to stop module insertion, raw i/o,
64856+ system and net admin tasks, rebooting the system, modifying immutable
64857+ files, modifying IPC owned by another, and changing the system time.
64858+	  This is left as an option because it can break some apps. Disable this
64859+ if your chrooted apps are having problems performing those kinds of
64860+ tasks. If the sysctl option is enabled, a sysctl option with
64861+ name "chroot_caps" is created.
64862+
64863+config GRKERNSEC_CHROOT_INITRD
64864+ bool "Exempt initrd tasks from restrictions"
64865+ default y if GRKERNSEC_CONFIG_AUTO
64866+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
64867+ help
64868+ If you say Y here, tasks started prior to init will be exempted from
64869+	  grsecurity's chroot restrictions. This option is mainly meant to
64870+	  accommodate Plymouth, which unnecessarily performs privileged
64871+	  operations in a chroot.
64872+
64873+endmenu
64874+menu "Kernel Auditing"
64875+depends on GRKERNSEC
64876+
64877+config GRKERNSEC_AUDIT_GROUP
64878+ bool "Single group for auditing"
64879+ help
64880+ If you say Y here, the exec and chdir logging features will only operate
64881+ on a group you specify. This option is recommended if you only want to
64882+	  watch certain users instead of having a large volume of logs from the
64883+ entire system. If the sysctl option is enabled, a sysctl option with
64884+ name "audit_group" is created.
64885+
64886+config GRKERNSEC_AUDIT_GID
64887+ int "GID for auditing"
64888+ depends on GRKERNSEC_AUDIT_GROUP
64889+ default 1007
64890+
64891+config GRKERNSEC_EXECLOG
64892+ bool "Exec logging"
64893+ help
64894+ If you say Y here, all execve() calls will be logged (since the
64895+ other exec*() calls are frontends to execve(), all execution
64896+ will be logged). Useful for shell-servers that like to keep track
64897+ of their users. If the sysctl option is enabled, a sysctl option with
64898+ name "exec_logging" is created.
64899+	  WARNING: When enabled, this option will produce a LOT of logs,
64900+	  especially on an active system.
64901+
64902+config GRKERNSEC_RESLOG
64903+ bool "Resource logging"
64904+ default y if GRKERNSEC_CONFIG_AUTO
64905+ help
64906+ If you say Y here, all attempts to overstep resource limits will
64907+ be logged with the resource name, the requested size, and the current
64908+ limit. It is highly recommended that you say Y here. If the sysctl
64909+ option is enabled, a sysctl option with name "resource_logging" is
64910+ created. If the RBAC system is enabled, the sysctl value is ignored.
64911+
64912+config GRKERNSEC_CHROOT_EXECLOG
64913+ bool "Log execs within chroot"
64914+ help
64915+ If you say Y here, all executions inside a chroot jail will be logged
64916+	  to syslog. This can cause a large volume of logs if certain
64917+	  applications (e.g. djb's daemontools) are installed on the system, and
64918+ is therefore left as an option. If the sysctl option is enabled, a
64919+ sysctl option with name "chroot_execlog" is created.
64920+
64921+config GRKERNSEC_AUDIT_PTRACE
64922+ bool "Ptrace logging"
64923+ help
64924+ If you say Y here, all attempts to attach to a process via ptrace
64925+ will be logged. If the sysctl option is enabled, a sysctl option
64926+ with name "audit_ptrace" is created.
64927+
64928+config GRKERNSEC_AUDIT_CHDIR
64929+ bool "Chdir logging"
64930+ help
64931+ If you say Y here, all chdir() calls will be logged. If the sysctl
64932+ option is enabled, a sysctl option with name "audit_chdir" is created.
64933+
64934+config GRKERNSEC_AUDIT_MOUNT
64935+ bool "(Un)Mount logging"
64936+ help
64937+ If you say Y here, all mounts and unmounts will be logged. If the
64938+ sysctl option is enabled, a sysctl option with name "audit_mount" is
64939+ created.
64940+
64941+config GRKERNSEC_SIGNAL
64942+ bool "Signal logging"
64943+ default y if GRKERNSEC_CONFIG_AUTO
64944+ help
64945+	  If you say Y here, certain important signals will be logged, such as
64946+	  SIGSEGV, which will inform you when an error occurred in a program,
64947+	  which in some cases could indicate a possible exploit attempt.
64948+ If the sysctl option is enabled, a sysctl option with name
64949+ "signal_logging" is created.
64950+
64951+config GRKERNSEC_FORKFAIL
64952+ bool "Fork failure logging"
64953+ help
64954+ If you say Y here, all failed fork() attempts will be logged.
64955+ This could suggest a fork bomb, or someone attempting to overstep
64956+ their process limit. If the sysctl option is enabled, a sysctl option
64957+ with name "forkfail_logging" is created.
64958+
64959+config GRKERNSEC_TIME
64960+ bool "Time change logging"
64961+ default y if GRKERNSEC_CONFIG_AUTO
64962+ help
64963+	  If you say Y here, any changes to the system clock will be logged.
64964+ If the sysctl option is enabled, a sysctl option with name
64965+ "timechange_logging" is created.
64966+
64967+config GRKERNSEC_PROC_IPADDR
64968+ bool "/proc/<pid>/ipaddr support"
64969+ default y if GRKERNSEC_CONFIG_AUTO
64970+ help
64971+ If you say Y here, a new entry will be added to each /proc/<pid>
64972+	  directory that contains the IP address of the user associated with the task.
64973+ The IP is carried across local TCP and AF_UNIX stream sockets.
64974+ This information can be useful for IDS/IPSes to perform remote response
64975+	  to a local attack. The entry is readable only by the owner of the
64976+	  process (and by root if it retains CAP_DAC_OVERRIDE, which can be
64977+	  removed via the RBAC system), and thus does not create privacy concerns.
64978+
64979+config GRKERNSEC_RWXMAP_LOG
64980+	bool "Denied RWX mmap/mprotect logging"
64981+ default y if GRKERNSEC_CONFIG_AUTO
64982+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
64983+ help
64984+ If you say Y here, calls to mmap() and mprotect() with explicit
64985+ usage of PROT_WRITE and PROT_EXEC together will be logged when
64986+ denied by the PAX_MPROTECT feature. This feature will also
64987+ log other problematic scenarios that can occur when PAX_MPROTECT
64988+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
64989+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
64990+ is created.
64991+
64992+endmenu
64993+
64994+menu "Executable Protections"
64995+depends on GRKERNSEC
64996+
64997+config GRKERNSEC_DMESG
64998+ bool "Dmesg(8) restriction"
64999+ default y if GRKERNSEC_CONFIG_AUTO
65000+ help
65001+ If you say Y here, non-root users will not be able to use dmesg(8)
65002+ to view the contents of the kernel's circular log buffer.
65003+ The kernel's log buffer often contains kernel addresses and other
65004+ identifying information useful to an attacker in fingerprinting a
65005+ system for a targeted exploit.
65006+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
65007+ created.
65008+
65009+config GRKERNSEC_HARDEN_PTRACE
65010+ bool "Deter ptrace-based process snooping"
65011+ default y if GRKERNSEC_CONFIG_AUTO
65012+ help
65013+ If you say Y here, TTY sniffers and other malicious monitoring
65014+ programs implemented through ptrace will be defeated. If you
65015+ have been using the RBAC system, this option has already been
65016+ enabled for several years for all users, with the ability to make
65017+ fine-grained exceptions.
65018+
65019+ This option only affects the ability of non-root users to ptrace
65020+	  processes that are not a descendant of the ptracing process.
65021+ This means that strace ./binary and gdb ./binary will still work,
65022+ but attaching to arbitrary processes will not. If the sysctl
65023+ option is enabled, a sysctl option with name "harden_ptrace" is
65024+ created.
65025+
65026+config GRKERNSEC_PTRACE_READEXEC
65027+ bool "Require read access to ptrace sensitive binaries"
65028+ default y if GRKERNSEC_CONFIG_AUTO
65029+ help
65030+ If you say Y here, unprivileged users will not be able to ptrace unreadable
65031+ binaries. This option is useful in environments that
65032+ remove the read bits (e.g. file mode 4711) from suid binaries to
65033+ prevent infoleaking of their contents. This option adds
65034+	  consistency to the use of that file mode, as the binary could
65035+	  otherwise be read out via ptrace while being run without privileges.
65036+
65037+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
65038+ is created.
65039+
65040+config GRKERNSEC_SETXID
65041+ bool "Enforce consistent multithreaded privileges"
65042+ default y if GRKERNSEC_CONFIG_AUTO
65043+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
65044+ help
65045+ If you say Y here, a change from a root uid to a non-root uid
65046+ in a multithreaded application will cause the resulting uids,
65047+ gids, supplementary groups, and capabilities in that thread
65048+ to be propagated to the other threads of the process. In most
65049+ cases this is unnecessary, as glibc will emulate this behavior
65050+ on behalf of the application. Other libcs do not act in the
65051+ same way, allowing the other threads of the process to continue
65052+ running with root privileges. If the sysctl option is enabled,
65053+ a sysctl option with name "consistent_setxid" is created.
65054+
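A sketch of the hazard being closed; uid 1000 is an arbitrary unprivileged
uid, and the raw syscall stands in for a libc that, unlike glibc, does not
broadcast the credential change to every thread:

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* The kernel's setuid() only changes the calling thread's credentials.
     * glibc papers over this by signalling every thread; with a libc that
     * does not, the worker below keeps running as root after main() has
     * "dropped" privileges. GRKERNSEC_SETXID makes the kernel itself
     * propagate the drop to all threads. */
    static void *worker(void *arg)
    {
        (void)arg;
        for (;;) {
            printf("worker euid=%d\n", (int)geteuid());
            sleep(1);
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);
        sleep(2);
        syscall(SYS_setuid, 1000);  /* raw syscall: this thread only */
        pthread_join(t, NULL);
        return 0;
    }
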
65055+config GRKERNSEC_HARDEN_IPC
65056+ bool "Disallow access to overly-permissive IPC objects"
65057+ default y if GRKERNSEC_CONFIG_AUTO
65058+ depends on SYSVIPC
65059+ help
65060+ If you say Y here, access to overly-permissive IPC objects (shared
65061+	  memory, message queues, and semaphores) will be denied to processes
65062+	  meeting either of the following criteria (sketched after this entry),
	  beyond normal permission checks:
65063+ 1) If the IPC object is world-accessible and the euid doesn't match
65064+ that of the creator or current uid for the IPC object
65065+ 2) If the IPC object is group-accessible and the egid doesn't
65066+ match that of the creator or current gid for the IPC object
65067+ It's a common error to grant too much permission to these objects,
65068+ with impact ranging from denial of service and information leaking to
65069+ privilege escalation. This feature was developed in response to
65070+ research by Tim Brown:
65071+ http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
65072+ who found hundreds of such insecure usages. Processes with
65073+ CAP_IPC_OWNER are still permitted to access these IPC objects.
65074+ If the sysctl option is enabled, a sysctl option with name
65075+ "harden_ipc" is created.
65076+
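A minimal sketch of the two criteria above, not the kernel's actual code;
the function name is illustrative, cuid/cgid denote the IPC object's
creator and uid/gid its current owner:

    #include <sys/stat.h>
    #include <sys/types.h>

    /* Returns nonzero if access should be denied under the two criteria
     * described above; callers would still apply the normal permission
     * checks and the CAP_IPC_OWNER exemption. */
    static int harden_ipc_denied(mode_t mode, uid_t cuid, uid_t uid, uid_t euid,
                                 gid_t cgid, gid_t gid, gid_t egid)
    {
        /* 1) world-accessible object, requester unrelated by uid */
        if ((mode & S_IRWXO) && euid != cuid && euid != uid)
            return 1;
        /* 2) group-accessible object, requester unrelated by gid */
        if ((mode & S_IRWXG) && egid != cgid && egid != gid)
            return 1;
        return 0;
    }
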
65077+config GRKERNSEC_TPE
65078+ bool "Trusted Path Execution (TPE)"
65079+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
65080+ help
65081+	  If you say Y here, you will be able to choose a GID to add to the
65082+ supplementary groups of users you want to mark as "untrusted."
65083+ These users will not be able to execute any files that are not in
65084+ root-owned directories writable only by root. If the sysctl option
65085+ is enabled, a sysctl option with name "tpe" is created.
65086+
65087+config GRKERNSEC_TPE_ALL
65088+ bool "Partially restrict all non-root users"
65089+ depends on GRKERNSEC_TPE
65090+ help
65091+ If you say Y here, all non-root users will be covered under
65092+ a weaker TPE restriction. This is separate from, and in addition to,
65093+ the main TPE options that you have selected elsewhere. Thus, if a
65094+ "trusted" GID is chosen, this restriction applies to even that GID.
65095+ Under this restriction, all non-root users will only be allowed to
65096+ execute files in directories they own that are not group or
65097+ world-writable, or in directories owned by root and writable only by
65098+ root. If the sysctl option is enabled, a sysctl option with name
65099+ "tpe_restrict_all" is created.
65100+
65101+config GRKERNSEC_TPE_INVERT
65102+ bool "Invert GID option"
65103+ depends on GRKERNSEC_TPE
65104+ help
65105+ If you say Y here, the group you specify in the TPE configuration will
65106+ decide what group TPE restrictions will be *disabled* for. This
65107+ option is useful if you want TPE restrictions to be applied to most
65108+ users on the system. If the sysctl option is enabled, a sysctl option
65109+ with name "tpe_invert" is created. Unlike other sysctl options, this
65110+	  entry will default to on for backward compatibility.
65111+
65112+config GRKERNSEC_TPE_GID
65113+ int
65114+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
65115+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
65116+
65117+config GRKERNSEC_TPE_UNTRUSTED_GID
65118+ int "GID for TPE-untrusted users"
65119+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
65120+ default 1005
65121+ help
65122+ Setting this GID determines what group TPE restrictions will be
65123+ *enabled* for. If the sysctl option is enabled, a sysctl option
65124+ with name "tpe_gid" is created.
65125+
65126+config GRKERNSEC_TPE_TRUSTED_GID
65127+ int "GID for TPE-trusted users"
65128+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
65129+ default 1005
65130+ help
65131+ Setting this GID determines what group TPE restrictions will be
65132+ *disabled* for. If the sysctl option is enabled, a sysctl option
65133+ with name "tpe_gid" is created.
65134+
65135+endmenu
65136+menu "Network Protections"
65137+depends on GRKERNSEC
65138+
65139+config GRKERNSEC_RANDNET
65140+ bool "Larger entropy pools"
65141+ default y if GRKERNSEC_CONFIG_AUTO
65142+ help
65143+ If you say Y here, the entropy pools used for many features of Linux
65144+ and grsecurity will be doubled in size. Since several grsecurity
65145+ features use additional randomness, it is recommended that you say Y
65146+	  here. Saying Y here has a similar effect to modifying
65147+ /proc/sys/kernel/random/poolsize.
65148+
65149+config GRKERNSEC_BLACKHOLE
65150+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
65151+ default y if GRKERNSEC_CONFIG_AUTO
65152+ depends on NET
65153+ help
65154+ If you say Y here, neither TCP resets nor ICMP
65155+ destination-unreachable packets will be sent in response to packets
65156+ sent to ports for which no associated listening process exists.
65157+	  This feature supports both IPv4 and IPv6 and exempts the
65158+ loopback interface from blackholing. Enabling this feature
65159+ makes a host more resilient to DoS attacks and reduces network
65160+ visibility against scanners.
65161+
65162+ The blackhole feature as-implemented is equivalent to the FreeBSD
65163+ blackhole feature, as it prevents RST responses to all packets, not
65164+ just SYNs. Under most application behavior this causes no
65165+ problems, but applications (like haproxy) may not close certain
65166+ connections in a way that cleanly terminates them on the remote
65167+ end, leaving the remote host in LAST_ACK state. Because of this
65168+ side-effect and to prevent intentional LAST_ACK DoSes, this
65169+ feature also adds automatic mitigation against such attacks.
65170+ The mitigation drastically reduces the amount of time a socket
65171+ can spend in LAST_ACK state. If you're using haproxy and not
65172+ all servers it connects to have this option enabled, consider
65173+ disabling this feature on the haproxy host.
65174+
65175+ If the sysctl option is enabled, two sysctl options with names
65176+ "ip_blackhole" and "lastack_retries" will be created.
65177+ While "ip_blackhole" takes the standard zero/non-zero on/off
65178+ toggle, "lastack_retries" uses the same kinds of values as
65179+ "tcp_retries1" and "tcp_retries2". The default value of 4
65180+ prevents a socket from lasting more than 45 seconds in LAST_ACK
65181+ state.
65182+
65183+config GRKERNSEC_NO_SIMULT_CONNECT
65184+ bool "Disable TCP Simultaneous Connect"
65185+ default y if GRKERNSEC_CONFIG_AUTO
65186+ depends on NET
65187+ help
65188+ If you say Y here, a feature by Willy Tarreau will be enabled that
65189+ removes a weakness in Linux's strict implementation of TCP that
65190+ allows two clients to connect to each other without either entering
65191+ a listening state. The weakness allows an attacker to easily prevent
65192+ a client from connecting to a known server provided the source port
65193+ for the connection is guessed correctly.
65194+
65195+ As the weakness could be used to prevent an antivirus or IPS from
65196+ fetching updates, or prevent an SSL gateway from fetching a CRL,
65197+ it should be eliminated by enabling this option. Though Linux is
65198+	  one of the few operating systems supporting simultaneous connect, the
65199+	  feature has no legitimate use in practice and is rarely supported by firewalls.
65200+
65201+config GRKERNSEC_SOCKET
65202+ bool "Socket restrictions"
65203+ depends on NET
65204+ help
65205+ If you say Y here, you will be able to choose from several options.
65206+ If you assign a GID on your system and add it to the supplementary
65207+ groups of users you want to restrict socket access to, this patch
65208+ will perform up to three things, based on the option(s) you choose.
65209+
65210+config GRKERNSEC_SOCKET_ALL
65211+ bool "Deny any sockets to group"
65212+ depends on GRKERNSEC_SOCKET
65213+ help
65214+	  If you say Y here, you will be able to choose a GID whose users will
65215+ be unable to connect to other hosts from your machine or run server
65216+ applications from your machine. If the sysctl option is enabled, a
65217+ sysctl option with name "socket_all" is created.
65218+
65219+config GRKERNSEC_SOCKET_ALL_GID
65220+ int "GID to deny all sockets for"
65221+ depends on GRKERNSEC_SOCKET_ALL
65222+ default 1004
65223+ help
65224+ Here you can choose the GID to disable socket access for. Remember to
65225+ add the users you want socket access disabled for to the GID
65226+ specified here. If the sysctl option is enabled, a sysctl option
65227+ with name "socket_all_gid" is created.
65228+
65229+config GRKERNSEC_SOCKET_CLIENT
65230+ bool "Deny client sockets to group"
65231+ depends on GRKERNSEC_SOCKET
65232+ help
65233+	  If you say Y here, you will be able to choose a GID whose users will
65234+ be unable to connect to other hosts from your machine, but will be
65235+ able to run servers. If this option is enabled, all users in the group
65236+ you specify will have to use passive mode when initiating ftp transfers
65237+ from the shell on your machine. If the sysctl option is enabled, a
65238+ sysctl option with name "socket_client" is created.
65239+
65240+config GRKERNSEC_SOCKET_CLIENT_GID
65241+ int "GID to deny client sockets for"
65242+ depends on GRKERNSEC_SOCKET_CLIENT
65243+ default 1003
65244+ help
65245+ Here you can choose the GID to disable client socket access for.
65246+ Remember to add the users you want client socket access disabled for to
65247+ the GID specified here. If the sysctl option is enabled, a sysctl
65248+ option with name "socket_client_gid" is created.
65249+
65250+config GRKERNSEC_SOCKET_SERVER
65251+ bool "Deny server sockets to group"
65252+ depends on GRKERNSEC_SOCKET
65253+ help
65254+	  If you say Y here, you will be able to choose a GID whose users will
65255+ be unable to run server applications from your machine. If the sysctl
65256+ option is enabled, a sysctl option with name "socket_server" is created.
65257+
65258+config GRKERNSEC_SOCKET_SERVER_GID
65259+ int "GID to deny server sockets for"
65260+ depends on GRKERNSEC_SOCKET_SERVER
65261+ default 1002
65262+ help
65263+ Here you can choose the GID to disable server socket access for.
65264+ Remember to add the users you want server socket access disabled for to
65265+ the GID specified here. If the sysctl option is enabled, a sysctl
65266+ option with name "socket_server_gid" is created.
65267+
65268+endmenu
65269+
65270+menu "Physical Protections"
65271+depends on GRKERNSEC
65272+
65273+config GRKERNSEC_DENYUSB
65274+ bool "Deny new USB connections after toggle"
65275+ default y if GRKERNSEC_CONFIG_AUTO
65276+ depends on SYSCTL && USB_SUPPORT
65277+ help
65278+ If you say Y here, a new sysctl option with name "deny_new_usb"
65279+ will be created. Setting its value to 1 will prevent any new
65280+ USB devices from being recognized by the OS. Any attempted USB
65281+ device insertion will be logged. This option is intended to be
65282+ used against custom USB devices designed to exploit vulnerabilities
65283+ in various USB device drivers.
65284+
65285+ For greatest effectiveness, this sysctl should be set after any
65286+ relevant init scripts. This option is safe to enable in distros
65287+ as each user can choose whether or not to toggle the sysctl.
65288+
65289+config GRKERNSEC_DENYUSB_FORCE
65290+ bool "Reject all USB devices not connected at boot"
65291+ select USB
65292+ depends on GRKERNSEC_DENYUSB
65293+ help
65294+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
65295+ that doesn't involve a sysctl entry. This option should only be
65296+ enabled if you're sure you want to deny all new USB connections
65297+ at runtime and don't want to modify init scripts. This should not
65298+ be enabled by distros. It forces the core USB code to be built
65299+ into the kernel image so that all devices connected at boot time
65300+ can be recognized and new USB device connections can be prevented
65301+ prior to init running.
65302+
65303+endmenu
65304+
65305+menu "Sysctl Support"
65306+depends on GRKERNSEC && SYSCTL
65307+
65308+config GRKERNSEC_SYSCTL
65309+ bool "Sysctl support"
65310+ default y if GRKERNSEC_CONFIG_AUTO
65311+ help
65312+ If you say Y here, you will be able to change the options that
65313+ grsecurity runs with at bootup, without having to recompile your
65314+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
65315+ to enable (1) or disable (0) various features. All the sysctl entries
65316+ are mutable until the "grsec_lock" entry is set to a non-zero value.
65317+ All features enabled in the kernel configuration are disabled at boot
65318+ if you do not say Y to the "Turn on features by default" option.
65319+ All options should be set at startup, and the grsec_lock entry should
65320+ be set to a non-zero value after all the options are set.
65321+ *THIS IS EXTREMELY IMPORTANT*
65322+
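As a concrete illustration of that last step, a minimal sketch of what an
init script could run once all entries are set, assuming the
/proc/sys/kernel/grsecurity path described above:

    #include <fcntl.h>
    #include <unistd.h>

    /* Lock the grsecurity sysctl entries after all of them have been set:
     * once grsec_lock is nonzero, the entries become immutable. */
    int main(void)
    {
        int fd = open("/proc/sys/kernel/grsecurity/grsec_lock", O_WRONLY);

        if (fd < 0)
            return 1;
        if (write(fd, "1", 1) != 1) {
            close(fd);
            return 1;
        }
        return close(fd) != 0;
    }

This should run last in the boot sequence, after every other grsecurity
sysctl entry has been written.
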
65323+config GRKERNSEC_SYSCTL_DISTRO
65324+ bool "Extra sysctl support for distro makers (READ HELP)"
65325+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
65326+ help
65327+ If you say Y here, additional sysctl options will be created
65328+ for features that affect processes running as root. Therefore,
65329+ it is critical when using this option that the grsec_lock entry be
65330+	  enabled after boot. Only distros shipping prebuilt kernel packages
65331+	  with this option enabled that can also ensure grsec_lock is enabled
65332+	  after boot should use this option.
65333+ *Failure to set grsec_lock after boot makes all grsec features
65334+ this option covers useless*
65335+
65336+ Currently this option creates the following sysctl entries:
65337+ "Disable Privileged I/O": "disable_priv_io"
65338+
65339+config GRKERNSEC_SYSCTL_ON
65340+ bool "Turn on features by default"
65341+ default y if GRKERNSEC_CONFIG_AUTO
65342+ depends on GRKERNSEC_SYSCTL
65343+ help
65344+	  If you say Y here, the features enabled in your kernel configuration
65345+	  will start out enabled at boot time rather than disabled.
65346+	  It is recommended you say Y here unless
65347+ there is some reason you would want all sysctl-tunable features to
65348+ be disabled by default. As mentioned elsewhere, it is important
65349+ to enable the grsec_lock entry once you have finished modifying
65350+ the sysctl entries.
65351+
65352+endmenu
65353+menu "Logging Options"
65354+depends on GRKERNSEC
65355+
65356+config GRKERNSEC_FLOODTIME
65357+ int "Seconds in between log messages (minimum)"
65358+ default 10
65359+ help
65360+	  This option allows you to enforce the minimum number of seconds between
65361+	  grsecurity log messages. The default should be suitable for most
65362+	  people; however, if you choose to change it, choose a value small enough
65363+ to allow informative logs to be produced, but large enough to
65364+ prevent flooding.
65365+
65366+config GRKERNSEC_FLOODBURST
65367+ int "Number of messages in a burst (maximum)"
65368+ default 6
65369+ help
65370+ This option allows you to choose the maximum number of messages allowed
65371+ within the flood time interval you chose in a separate option. The
65372+	  default should be suitable for most people; however, if you find that
65373+ many of your logs are being interpreted as flooding, you may want to
65374+ raise this value.
65375+
65376+endmenu
65377diff --git a/grsecurity/Makefile b/grsecurity/Makefile
65378new file mode 100644
65379index 0000000..5307c8a
65380--- /dev/null
65381+++ b/grsecurity/Makefile
65382@@ -0,0 +1,54 @@
65383+# grsecurity - access control and security hardening for Linux
65384+# All code in this directory and various hooks located throughout the Linux kernel are
65385+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
65386+# http://www.grsecurity.net spender@grsecurity.net
65387+#
65388+# This program is free software; you can redistribute it and/or
65389+# modify it under the terms of the GNU General Public License version 2
65390+# as published by the Free Software Foundation.
65391+#
65392+# This program is distributed in the hope that it will be useful,
65393+# but WITHOUT ANY WARRANTY; without even the implied warranty of
65394+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
65395+# GNU General Public License for more details.
65396+#
65397+# You should have received a copy of the GNU General Public License
65398+# along with this program; if not, write to the Free Software
65399+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
65400+
65401+KBUILD_CFLAGS += -Werror
65402+
65403+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
65404+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
65405+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
65406+ grsec_usb.o grsec_ipc.o
65407+
65408+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
65409+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
65410+ gracl_learn.o grsec_log.o gracl_policy.o
65411+ifdef CONFIG_COMPAT
65412+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
65413+endif
65414+
65415+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
65416+
65417+ifdef CONFIG_NET
65418+obj-y += grsec_sock.o
65419+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
65420+endif
65421+
65422+ifndef CONFIG_GRKERNSEC
65423+obj-y += grsec_disabled.o
65424+endif
65425+
65426+ifdef CONFIG_GRKERNSEC_HIDESYM
65427+extra-y := grsec_hidesym.o
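+# '@' silences echoing of each command below; the leading '-' tells make
+# to ignore chmod failures on paths that do not exist on a given system.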
65428+$(obj)/grsec_hidesym.o:
65429+ @-chmod -f 500 /boot
65430+ @-chmod -f 500 /lib/modules
65431+ @-chmod -f 500 /lib64/modules
65432+ @-chmod -f 500 /lib32/modules
65433+ @-chmod -f 700 .
65434+ @-chmod -f 700 $(objtree)
65435+ @echo ' grsec: protected kernel image paths'
65436+endif
65437diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
65438new file mode 100644
65439index 0000000..19a5b7c
65440--- /dev/null
65441+++ b/grsecurity/gracl.c
65442@@ -0,0 +1,2678 @@
65443+#include <linux/kernel.h>
65444+#include <linux/module.h>
65445+#include <linux/sched.h>
65446+#include <linux/mm.h>
65447+#include <linux/file.h>
65448+#include <linux/fs.h>
65449+#include <linux/namei.h>
65450+#include <linux/mount.h>
65451+#include <linux/tty.h>
65452+#include <linux/proc_fs.h>
65453+#include <linux/lglock.h>
65454+#include <linux/slab.h>
65455+#include <linux/vmalloc.h>
65456+#include <linux/types.h>
65457+#include <linux/sysctl.h>
65458+#include <linux/netdevice.h>
65459+#include <linux/ptrace.h>
65460+#include <linux/gracl.h>
65461+#include <linux/gralloc.h>
65462+#include <linux/security.h>
65463+#include <linux/grinternal.h>
65464+#include <linux/pid_namespace.h>
65465+#include <linux/stop_machine.h>
65466+#include <linux/fdtable.h>
65467+#include <linux/percpu.h>
65468+#include <linux/lglock.h>
65469+#include <linux/hugetlb.h>
65470+#include <linux/posix-timers.h>
65471+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
65472+#include <linux/magic.h>
65473+#include <linux/pagemap.h>
65474+#include "../fs/btrfs/async-thread.h"
65475+#include "../fs/btrfs/ctree.h"
65476+#include "../fs/btrfs/btrfs_inode.h"
65477+#endif
65478+#include "../fs/mount.h"
65479+
65480+#include <asm/uaccess.h>
65481+#include <asm/errno.h>
65482+#include <asm/mman.h>
65483+
65484+#define FOR_EACH_ROLE_START(role) \
65485+ role = running_polstate.role_list; \
65486+ while (role) {
65487+
65488+#define FOR_EACH_ROLE_END(role) \
65489+ role = role->prev; \
65490+ }
65491+
65492+extern struct path gr_real_root;
65493+
65494+static struct gr_policy_state running_polstate;
65495+struct gr_policy_state *polstate = &running_polstate;
65496+extern struct gr_alloc_state *current_alloc_state;
65497+
65498+extern char *gr_shared_page[4];
65499+DEFINE_RWLOCK(gr_inode_lock);
65500+
65501+static unsigned int gr_status __read_only = GR_STATUS_INIT;
65502+
65503+#ifdef CONFIG_NET
65504+extern struct vfsmount *sock_mnt;
65505+#endif
65506+
65507+extern struct vfsmount *pipe_mnt;
65508+extern struct vfsmount *shm_mnt;
65509+
65510+#ifdef CONFIG_HUGETLBFS
65511+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
65512+#endif
65513+
65514+extern u16 acl_sp_role_value;
65515+extern struct acl_object_label *fakefs_obj_rw;
65516+extern struct acl_object_label *fakefs_obj_rwx;
65517+
65518+int gr_acl_is_enabled(void)
65519+{
65520+ return (gr_status & GR_READY);
65521+}
65522+
65523+void gr_enable_rbac_system(void)
65524+{
65525+ pax_open_kernel();
65526+ gr_status |= GR_READY;
65527+ pax_close_kernel();
65528+}
65529+
65530+int gr_rbac_disable(void *unused)
65531+{
65532+ pax_open_kernel();
65533+ gr_status &= ~GR_READY;
65534+ pax_close_kernel();
65535+
65536+ return 0;
65537+}
65538+
65539+static inline dev_t __get_dev(const struct dentry *dentry)
65540+{
65541+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
65542+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
65543+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
65544+ else
65545+#endif
65546+ return dentry->d_sb->s_dev;
65547+}
65548+
65549+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
65550+{
65551+ return __get_dev(dentry);
65552+}
65553+
65554+static char gr_task_roletype_to_char(struct task_struct *task)
65555+{
65556+ switch (task->role->roletype &
65557+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
65558+ GR_ROLE_SPECIAL)) {
65559+ case GR_ROLE_DEFAULT:
65560+ return 'D';
65561+ case GR_ROLE_USER:
65562+ return 'U';
65563+ case GR_ROLE_GROUP:
65564+ return 'G';
65565+ case GR_ROLE_SPECIAL:
65566+ return 'S';
65567+ }
65568+
65569+ return 'X';
65570+}
65571+
65572+char gr_roletype_to_char(void)
65573+{
65574+ return gr_task_roletype_to_char(current);
65575+}
65576+
65577+__inline__ int
65578+gr_acl_tpe_check(void)
65579+{
65580+ if (unlikely(!(gr_status & GR_READY)))
65581+ return 0;
65582+ if (current->role->roletype & GR_ROLE_TPE)
65583+ return 1;
65584+ else
65585+ return 0;
65586+}
65587+
65588+int
65589+gr_handle_rawio(const struct inode *inode)
65590+{
65591+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
65592+ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
65593+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
65594+ !capable(CAP_SYS_RAWIO))
65595+ return 1;
65596+#endif
65597+ return 0;
65598+}
65599+
65600+int
65601+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
65602+{
65603+ if (likely(lena != lenb))
65604+ return 0;
65605+
65606+ return !memcmp(a, b, lena);
65607+}
65608+
65609+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
65610+{
65611+ *buflen -= namelen;
65612+ if (*buflen < 0)
65613+ return -ENAMETOOLONG;
65614+ *buffer -= namelen;
65615+ memcpy(*buffer, str, namelen);
65616+ return 0;
65617+}
65618+
65619+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
65620+{
65621+ return prepend(buffer, buflen, name->name, name->len);
65622+}
65623+
65624+static int prepend_path(const struct path *path, struct path *root,
65625+ char **buffer, int *buflen)
65626+{
65627+ struct dentry *dentry = path->dentry;
65628+ struct vfsmount *vfsmnt = path->mnt;
65629+ struct mount *mnt = real_mount(vfsmnt);
65630+ bool slash = false;
65631+ int error = 0;
65632+
65633+ while (dentry != root->dentry || vfsmnt != root->mnt) {
65634+ struct dentry * parent;
65635+
65636+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
65637+ /* Global root? */
65638+ if (!mnt_has_parent(mnt)) {
65639+ goto out;
65640+ }
65641+ dentry = mnt->mnt_mountpoint;
65642+ mnt = mnt->mnt_parent;
65643+ vfsmnt = &mnt->mnt;
65644+ continue;
65645+ }
65646+ parent = dentry->d_parent;
65647+ prefetch(parent);
65648+ spin_lock(&dentry->d_lock);
65649+ error = prepend_name(buffer, buflen, &dentry->d_name);
65650+ spin_unlock(&dentry->d_lock);
65651+ if (!error)
65652+ error = prepend(buffer, buflen, "/", 1);
65653+ if (error)
65654+ break;
65655+
65656+ slash = true;
65657+ dentry = parent;
65658+ }
65659+
65660+out:
65661+ if (!error && !slash)
65662+ error = prepend(buffer, buflen, "/", 1);
65663+
65664+ return error;
65665+}
65666+
65667+/* this must be called with mount_lock and rename_lock held */
65668+
65669+static char *__our_d_path(const struct path *path, struct path *root,
65670+ char *buf, int buflen)
65671+{
65672+ char *res = buf + buflen;
65673+ int error;
65674+
65675+ prepend(&res, &buflen, "\0", 1);
65676+ error = prepend_path(path, root, &res, &buflen);
65677+ if (error)
65678+ return ERR_PTR(error);
65679+
65680+ return res;
65681+}
65682+
65683+static char *
65684+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
65685+{
65686+ char *retval;
65687+
65688+ retval = __our_d_path(path, root, buf, buflen);
65689+ if (unlikely(IS_ERR(retval)))
65690+ retval = strcpy(buf, "<path too long>");
65691+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
65692+ retval[1] = '\0';
65693+
65694+ return retval;
65695+}
65696+
65697+static char *
65698+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
65699+ char *buf, int buflen)
65700+{
65701+ struct path path;
65702+ char *res;
65703+
65704+ path.dentry = (struct dentry *)dentry;
65705+ path.mnt = (struct vfsmount *)vfsmnt;
65706+
65707+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
65708+ by the RBAC system */
65709+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
65710+
65711+ return res;
65712+}
65713+
65714+static char *
65715+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
65716+ char *buf, int buflen)
65717+{
65718+ char *res;
65719+ struct path path;
65720+ struct path root;
65721+ struct task_struct *reaper = init_pid_ns.child_reaper;
65722+
65723+ path.dentry = (struct dentry *)dentry;
65724+ path.mnt = (struct vfsmount *)vfsmnt;
65725+
65726+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
65727+ get_fs_root(reaper->fs, &root);
65728+
65729+ read_seqlock_excl(&mount_lock);
65730+ write_seqlock(&rename_lock);
65731+ res = gen_full_path(&path, &root, buf, buflen);
65732+ write_sequnlock(&rename_lock);
65733+ read_sequnlock_excl(&mount_lock);
65734+
65735+ path_put(&root);
65736+ return res;
65737+}
65738+
65739+char *
65740+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
65741+{
65742+ char *ret;
65743+ read_seqlock_excl(&mount_lock);
65744+ write_seqlock(&rename_lock);
65745+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
65746+ PAGE_SIZE);
65747+ write_sequnlock(&rename_lock);
65748+ read_sequnlock_excl(&mount_lock);
65749+ return ret;
65750+}
65751+
65752+static char *
65753+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
65754+{
65755+ char *ret;
65756+ char *buf;
65757+ int buflen;
65758+
65759+ read_seqlock_excl(&mount_lock);
65760+ write_seqlock(&rename_lock);
65761+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
65762+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
65763+ buflen = (int)(ret - buf);
65764+ if (buflen >= 5)
65765+ prepend(&ret, &buflen, "/proc", 5);
65766+ else
65767+ ret = strcpy(buf, "<path too long>");
65768+ write_sequnlock(&rename_lock);
65769+ read_sequnlock_excl(&mount_lock);
65770+ return ret;
65771+}
65772+
65773+char *
65774+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
65775+{
65776+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
65777+ PAGE_SIZE);
65778+}
65779+
65780+char *
65781+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
65782+{
65783+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
65784+ PAGE_SIZE);
65785+}
65786+
65787+char *
65788+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
65789+{
65790+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
65791+ PAGE_SIZE);
65792+}
65793+
65794+char *
65795+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
65796+{
65797+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
65798+ PAGE_SIZE);
65799+}
65800+
65801+char *
65802+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
65803+{
65804+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
65805+ PAGE_SIZE);
65806+}
65807+
65808+__inline__ __u32
65809+to_gr_audit(const __u32 reqmode)
65810+{
65811+ /* masks off auditable permission flags, then shifts them to create
65812+ auditing flags, and adds the special case of append auditing if
65813+ we're requesting write */
65814+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
65815+}
65816+
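+/* Lookup order, summarized: hash on the uid for a USER role (scanning
+ * domain children for DOMAIN|USER roles); if that fails, or the matched
+ * role's allowed_ips list does not cover the task's saved source IP,
+ * retry the hash on the gid for a GROUP role; if nothing matches there
+ * either, fall back to the default role. */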
65817+struct acl_role_label *
65818+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
65819+ const gid_t gid)
65820+{
65821+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
65822+ struct acl_role_label *match;
65823+ struct role_allowed_ip *ipp;
65824+ unsigned int x;
65825+ u32 curr_ip = task->signal->saved_ip;
65826+
65827+ match = state->acl_role_set.r_hash[index];
65828+
65829+ while (match) {
65830+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
65831+ for (x = 0; x < match->domain_child_num; x++) {
65832+ if (match->domain_children[x] == uid)
65833+ goto found;
65834+ }
65835+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
65836+ break;
65837+ match = match->next;
65838+ }
65839+found:
65840+ if (match == NULL) {
65841+ try_group:
65842+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
65843+ match = state->acl_role_set.r_hash[index];
65844+
65845+ while (match) {
65846+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
65847+ for (x = 0; x < match->domain_child_num; x++) {
65848+ if (match->domain_children[x] == gid)
65849+ goto found2;
65850+ }
65851+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
65852+ break;
65853+ match = match->next;
65854+ }
65855+found2:
65856+ if (match == NULL)
65857+ match = state->default_role;
65858+ if (match->allowed_ips == NULL)
65859+ return match;
65860+ else {
65861+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
65862+ if (likely
65863+ ((ntohl(curr_ip) & ipp->netmask) ==
65864+ (ntohl(ipp->addr) & ipp->netmask)))
65865+ return match;
65866+ }
65867+ match = state->default_role;
65868+ }
65869+ } else if (match->allowed_ips == NULL) {
65870+ return match;
65871+ } else {
65872+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
65873+ if (likely
65874+ ((ntohl(curr_ip) & ipp->netmask) ==
65875+ (ntohl(ipp->addr) & ipp->netmask)))
65876+ return match;
65877+ }
65878+ goto try_group;
65879+ }
65880+
65881+ return match;
65882+}
65883+
65884+static struct acl_role_label *
65885+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
65886+ const gid_t gid)
65887+{
65888+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
65889+}
65890+
65891+struct acl_subject_label *
65892+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
65893+ const struct acl_role_label *role)
65894+{
65895+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
65896+ struct acl_subject_label *match;
65897+
65898+ match = role->subj_hash[index];
65899+
65900+ while (match && (match->inode != ino || match->device != dev ||
65901+ (match->mode & GR_DELETED))) {
65902+ match = match->next;
65903+ }
65904+
65905+ if (match && !(match->mode & GR_DELETED))
65906+ return match;
65907+ else
65908+ return NULL;
65909+}
65910+
65911+struct acl_subject_label *
65912+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
65913+ const struct acl_role_label *role)
65914+{
65915+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
65916+ struct acl_subject_label *match;
65917+
65918+ match = role->subj_hash[index];
65919+
65920+ while (match && (match->inode != ino || match->device != dev ||
65921+ !(match->mode & GR_DELETED))) {
65922+ match = match->next;
65923+ }
65924+
65925+ if (match && (match->mode & GR_DELETED))
65926+ return match;
65927+ else
65928+ return NULL;
65929+}
65930+
65931+static struct acl_object_label *
65932+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
65933+ const struct acl_subject_label *subj)
65934+{
65935+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
65936+ struct acl_object_label *match;
65937+
65938+ match = subj->obj_hash[index];
65939+
65940+ while (match && (match->inode != ino || match->device != dev ||
65941+ (match->mode & GR_DELETED))) {
65942+ match = match->next;
65943+ }
65944+
65945+ if (match && !(match->mode & GR_DELETED))
65946+ return match;
65947+ else
65948+ return NULL;
65949+}
65950+
65951+static struct acl_object_label *
65952+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
65953+ const struct acl_subject_label *subj)
65954+{
65955+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
65956+ struct acl_object_label *match;
65957+
65958+ match = subj->obj_hash[index];
65959+
65960+ while (match && (match->inode != ino || match->device != dev ||
65961+ !(match->mode & GR_DELETED))) {
65962+ match = match->next;
65963+ }
65964+
65965+ if (match && (match->mode & GR_DELETED))
65966+ return match;
65967+
65968+ match = subj->obj_hash[index];
65969+
65970+ while (match && (match->inode != ino || match->device != dev ||
65971+ (match->mode & GR_DELETED))) {
65972+ match = match->next;
65973+ }
65974+
65975+ if (match && !(match->mode & GR_DELETED))
65976+ return match;
65977+ else
65978+ return NULL;
65979+}
65980+
65981+struct name_entry *
65982+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
65983+{
65984+ unsigned int len = strlen(name);
65985+ unsigned int key = full_name_hash(name, len);
65986+ unsigned int index = key % state->name_set.n_size;
65987+ struct name_entry *match;
65988+
65989+ match = state->name_set.n_hash[index];
65990+
65991+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
65992+ match = match->next;
65993+
65994+ return match;
65995+}
65996+
65997+static struct name_entry *
65998+lookup_name_entry(const char *name)
65999+{
66000+ return __lookup_name_entry(&running_polstate, name);
66001+}
66002+
66003+static struct name_entry *
66004+lookup_name_entry_create(const char *name)
66005+{
66006+ unsigned int len = strlen(name);
66007+ unsigned int key = full_name_hash(name, len);
66008+ unsigned int index = key % running_polstate.name_set.n_size;
66009+ struct name_entry *match;
66010+
66011+ match = running_polstate.name_set.n_hash[index];
66012+
66013+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
66014+ !match->deleted))
66015+ match = match->next;
66016+
66017+ if (match && match->deleted)
66018+ return match;
66019+
66020+ match = running_polstate.name_set.n_hash[index];
66021+
66022+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
66023+ match->deleted))
66024+ match = match->next;
66025+
66026+ if (match && !match->deleted)
66027+ return match;
66028+ else
66029+ return NULL;
66030+}
66031+
66032+static struct inodev_entry *
66033+lookup_inodev_entry(const ino_t ino, const dev_t dev)
66034+{
66035+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
66036+ struct inodev_entry *match;
66037+
66038+ match = running_polstate.inodev_set.i_hash[index];
66039+
66040+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
66041+ match = match->next;
66042+
66043+ return match;
66044+}
66045+
66046+void
66047+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
66048+{
66049+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
66050+ state->inodev_set.i_size);
66051+ struct inodev_entry **curr;
66052+
66053+ entry->prev = NULL;
66054+
66055+ curr = &state->inodev_set.i_hash[index];
66056+ if (*curr != NULL)
66057+ (*curr)->prev = entry;
66058+
66059+ entry->next = *curr;
66060+ *curr = entry;
66061+
66062+ return;
66063+}
66064+
66065+static void
66066+insert_inodev_entry(struct inodev_entry *entry)
66067+{
66068+ __insert_inodev_entry(&running_polstate, entry);
66069+}
66070+
66071+void
66072+insert_acl_obj_label(struct acl_object_label *obj,
66073+ struct acl_subject_label *subj)
66074+{
66075+ unsigned int index =
66076+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
66077+ struct acl_object_label **curr;
66078+
66079+ obj->prev = NULL;
66080+
66081+ curr = &subj->obj_hash[index];
66082+ if (*curr != NULL)
66083+ (*curr)->prev = obj;
66084+
66085+ obj->next = *curr;
66086+ *curr = obj;
66087+
66088+ return;
66089+}
66090+
66091+void
66092+insert_acl_subj_label(struct acl_subject_label *obj,
66093+ struct acl_role_label *role)
66094+{
66095+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
66096+ struct acl_subject_label **curr;
66097+
66098+ obj->prev = NULL;
66099+
66100+ curr = &role->subj_hash[index];
66101+ if (*curr != NULL)
66102+ (*curr)->prev = obj;
66103+
66104+ obj->next = *curr;
66105+ *curr = obj;
66106+
66107+ return;
66108+}
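+/*
+ * Illustrative sketch, not part of the original patch: both insert helpers
+ * above are the same O(1) push-front onto a doubly-linked hash chain; the
+ * prev pointer is what lets the update_* helpers further below unlink an
+ * entry without walking its chain:
+ *
+ *	entry->prev = NULL;
+ *	if (*head != NULL)
+ *		(*head)->prev = entry;
+ *	entry->next = *head;
+ *	*head = entry;
+ */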
66109+
66110+/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
66111+
66112+static int
66113+glob_match(const char *p, const char *n)
66114+{
66115+ char c;
66116+
66117+ while ((c = *p++) != '\0') {
66118+ switch (c) {
66119+ case '?':
66120+ if (*n == '\0')
66121+ return 1;
66122+ else if (*n == '/')
66123+ return 1;
66124+ break;
66125+ case '\\':
66126+ if (*n != c)
66127+ return 1;
66128+ break;
66129+ case '*':
66130+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
66131+ if (*n == '/')
66132+ return 1;
66133+ else if (c == '?') {
66134+ if (*n == '\0')
66135+ return 1;
66136+ else
66137+ ++n;
66138+ }
66139+ }
66140+ if (c == '\0') {
66141+ return 0;
66142+ } else {
66143+ const char *endp;
66144+
66145+ if ((endp = strchr(n, '/')) == NULL)
66146+ endp = n + strlen(n);
66147+
66148+ if (c == '[') {
66149+ for (--p; n < endp; ++n)
66150+ if (!glob_match(p, n))
66151+ return 0;
66152+ } else if (c == '/') {
66153+ while (*n != '\0' && *n != '/')
66154+ ++n;
66155+ if (*n == '/' && !glob_match(p, n + 1))
66156+ return 0;
66157+ } else {
66158+ for (--p; n < endp; ++n)
66159+ if (*n == c && !glob_match(p, n))
66160+ return 0;
66161+ }
66162+
66163+ return 1;
66164+ }
66165+ case '[':
66166+ {
66167+ int not;
66168+ char cold;
66169+
66170+ if (*n == '\0' || *n == '/')
66171+ return 1;
66172+
66173+ not = (*p == '!' || *p == '^');
66174+ if (not)
66175+ ++p;
66176+
66177+ c = *p++;
66178+ for (;;) {
66179+ unsigned char fn = (unsigned char)*n;
66180+
66181+ if (c == '\0')
66182+ return 1;
66183+ else {
66184+ if (c == fn)
66185+ goto matched;
66186+ cold = c;
66187+ c = *p++;
66188+
66189+ if (c == '-' && *p != ']') {
66190+ unsigned char cend = *p++;
66191+
66192+ if (cend == '\0')
66193+ return 1;
66194+
66195+ if (cold <= fn && fn <= cend)
66196+ goto matched;
66197+
66198+ c = *p++;
66199+ }
66200+ }
66201+
66202+ if (c == ']')
66203+ break;
66204+ }
66205+ if (!not)
66206+ return 1;
66207+ break;
66208+ matched:
66209+ while (c != ']') {
66210+ if (c == '\0')
66211+ return 1;
66212+
66213+ c = *p++;
66214+ }
66215+ if (not)
66216+ return 1;
66217+ }
66218+ break;
66219+ default:
66220+ if (c != *n)
66221+ return 1;
66222+ }
66223+
66224+ ++n;
66225+ }
66226+
66227+ if (*n == '\0')
66228+ return 0;
66229+
66230+ if (*n == '/')
66231+ return 0;
66232+
66233+ return 1;
66234+}
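+/*
+ * Illustrative calls, not part of the original patch (paths hypothetical).
+ * Derived from glibc fnmatch(): '?' never matches '/', a '*' followed by
+ * more pattern stops at a component boundary, and a trailing '*' matches
+ * the rest of the path:
+ *
+ *	glob_match("/home/*", "/home/user")        returns 0 (match)
+ *	glob_match("/home/*", "/home/user/x")      returns 0 (trailing '*')
+ *	glob_match("/home/?/bin", "/home/a/bin")   returns 0 (match)
+ *	glob_match("/home/?/bin", "/home/ab/bin")  returns 1 (no match)
+ *	glob_match("/tmp/file?", "/tmp/file/")     returns 1 ('?' skips '/')
+ */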
66235+
66236+static struct acl_object_label *
66237+chk_glob_label(struct acl_object_label *globbed,
66238+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
66239+{
66240+ struct acl_object_label *tmp;
66241+
66242+ if (*path == NULL)
66243+ *path = gr_to_filename_nolock(dentry, mnt);
66244+
66245+ tmp = globbed;
66246+
66247+ while (tmp) {
66248+ if (!glob_match(tmp->filename, *path))
66249+ return tmp;
66250+ tmp = tmp->next;
66251+ }
66252+
66253+ return NULL;
66254+}
66255+
66256+static struct acl_object_label *
66257+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
66258+ const ino_t curr_ino, const dev_t curr_dev,
66259+ const struct acl_subject_label *subj, char **path, const int checkglob)
66260+{
66261+ struct acl_subject_label *tmpsubj;
66262+ struct acl_object_label *retval;
66263+ struct acl_object_label *retval2;
66264+
66265+ tmpsubj = (struct acl_subject_label *) subj;
66266+ read_lock(&gr_inode_lock);
66267+ do {
66268+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
66269+ if (retval) {
66270+ if (checkglob && retval->globbed) {
66271+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
66272+ if (retval2)
66273+ retval = retval2;
66274+ }
66275+ break;
66276+ }
66277+ } while ((tmpsubj = tmpsubj->parent_subject));
66278+ read_unlock(&gr_inode_lock);
66279+
66280+ return retval;
66281+}
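+/*
+ * Illustrative sketch, not part of the original patch: __full_lookup() above
+ * climbs the subject inheritance chain, so an object label attached to any
+ * ancestor subject applies when the current subject has no entry of its own:
+ *
+ *	for (s = subj; s != NULL; s = s->parent_subject)
+ *		if ((obj = lookup_acl_obj_label(ino, dev, s)) != NULL)
+ *			break;
+ */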
66282+
66283+static __inline__ struct acl_object_label *
66284+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
66285+ struct dentry *curr_dentry,
66286+ const struct acl_subject_label *subj, char **path, const int checkglob)
66287+{
66288+ int newglob = checkglob;
66289+ ino_t inode;
66290+ dev_t device;
66291+
66292+	/* if we aren't checking a subdirectory of the original path yet, don't do glob checking,
66293+	   as we don't want a / * rule to match instead of the / object.
66294+	   don't do this for create lookups that call this function, though, since they look up
66295+	   on the parent and thus need globbing checks on all paths.
66296+	*/
66297+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
66298+ newglob = GR_NO_GLOB;
66299+
66300+ spin_lock(&curr_dentry->d_lock);
66301+ inode = curr_dentry->d_inode->i_ino;
66302+ device = __get_dev(curr_dentry);
66303+ spin_unlock(&curr_dentry->d_lock);
66304+
66305+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
66306+}
66307+
66308+#ifdef CONFIG_HUGETLBFS
66309+static inline bool
66310+is_hugetlbfs_mnt(const struct vfsmount *mnt)
66311+{
66312+ int i;
66313+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
66314+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
66315+ return true;
66316+ }
66317+
66318+ return false;
66319+}
66320+#endif
66321+
66322+static struct acl_object_label *
66323+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
66324+ const struct acl_subject_label *subj, char *path, const int checkglob)
66325+{
66326+ struct dentry *dentry = (struct dentry *) l_dentry;
66327+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
66328+ struct mount *real_mnt = real_mount(mnt);
66329+ struct acl_object_label *retval;
66330+ struct dentry *parent;
66331+
66332+ read_seqlock_excl(&mount_lock);
66333+ write_seqlock(&rename_lock);
66334+
66335+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
66336+#ifdef CONFIG_NET
66337+ mnt == sock_mnt ||
66338+#endif
66339+#ifdef CONFIG_HUGETLBFS
66340+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
66341+#endif
66342+ /* ignore Eric Biederman */
66343+ IS_PRIVATE(l_dentry->d_inode))) {
66344+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
66345+ goto out;
66346+ }
66347+
66348+ for (;;) {
66349+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
66350+ break;
66351+
66352+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
66353+ if (!mnt_has_parent(real_mnt))
66354+ break;
66355+
66356+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
66357+ if (retval != NULL)
66358+ goto out;
66359+
66360+ dentry = real_mnt->mnt_mountpoint;
66361+ real_mnt = real_mnt->mnt_parent;
66362+ mnt = &real_mnt->mnt;
66363+ continue;
66364+ }
66365+
66366+ parent = dentry->d_parent;
66367+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
66368+ if (retval != NULL)
66369+ goto out;
66370+
66371+ dentry = parent;
66372+ }
66373+
66374+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
66375+
66376+ /* gr_real_root is pinned so we don't have to hold a reference */
66377+ if (retval == NULL)
66378+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
66379+out:
66380+ write_sequnlock(&rename_lock);
66381+ read_sequnlock_excl(&mount_lock);
66382+
66383+ BUG_ON(retval == NULL);
66384+
66385+ return retval;
66386+}
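+/*
+ * Illustrative walk, not part of the original patch (paths hypothetical):
+ * for a lookup of /var/log/messages under a subject that defines objects
+ * only for /var/log and /, __chk_obj_label() above tries, in order,
+ *
+ *	/var/log/messages   no entry
+ *	/var/log            entry found, returned
+ *
+ * so the most specific ancestor object wins.  Hitting a mount root jumps
+ * to the mountpoint in the parent mount, and the pinned gr_real_root is
+ * the final fallback, which is why BUG_ON(retval == NULL) is safe.
+ */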
66387+
66388+static __inline__ struct acl_object_label *
66389+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
66390+ const struct acl_subject_label *subj)
66391+{
66392+ char *path = NULL;
66393+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
66394+}
66395+
66396+static __inline__ struct acl_object_label *
66397+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
66398+ const struct acl_subject_label *subj)
66399+{
66400+ char *path = NULL;
66401+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
66402+}
66403+
66404+static __inline__ struct acl_object_label *
66405+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
66406+ const struct acl_subject_label *subj, char *path)
66407+{
66408+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
66409+}
66410+
66411+struct acl_subject_label *
66412+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
66413+ const struct acl_role_label *role)
66414+{
66415+ struct dentry *dentry = (struct dentry *) l_dentry;
66416+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
66417+ struct mount *real_mnt = real_mount(mnt);
66418+ struct acl_subject_label *retval;
66419+ struct dentry *parent;
66420+
66421+ read_seqlock_excl(&mount_lock);
66422+ write_seqlock(&rename_lock);
66423+
66424+ for (;;) {
66425+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
66426+ break;
66427+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
66428+ if (!mnt_has_parent(real_mnt))
66429+ break;
66430+
66431+ spin_lock(&dentry->d_lock);
66432+ read_lock(&gr_inode_lock);
66433+ retval =
66434+ lookup_acl_subj_label(dentry->d_inode->i_ino,
66435+ __get_dev(dentry), role);
66436+ read_unlock(&gr_inode_lock);
66437+ spin_unlock(&dentry->d_lock);
66438+ if (retval != NULL)
66439+ goto out;
66440+
66441+ dentry = real_mnt->mnt_mountpoint;
66442+ real_mnt = real_mnt->mnt_parent;
66443+ mnt = &real_mnt->mnt;
66444+ continue;
66445+ }
66446+
66447+ spin_lock(&dentry->d_lock);
66448+ read_lock(&gr_inode_lock);
66449+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
66450+ __get_dev(dentry), role);
66451+ read_unlock(&gr_inode_lock);
66452+ parent = dentry->d_parent;
66453+ spin_unlock(&dentry->d_lock);
66454+
66455+ if (retval != NULL)
66456+ goto out;
66457+
66458+ dentry = parent;
66459+ }
66460+
66461+ spin_lock(&dentry->d_lock);
66462+ read_lock(&gr_inode_lock);
66463+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
66464+ __get_dev(dentry), role);
66465+ read_unlock(&gr_inode_lock);
66466+ spin_unlock(&dentry->d_lock);
66467+
66468+ if (unlikely(retval == NULL)) {
66469+ /* gr_real_root is pinned, we don't need to hold a reference */
66470+ read_lock(&gr_inode_lock);
66471+ retval = lookup_acl_subj_label(gr_real_root.dentry->d_inode->i_ino,
66472+ __get_dev(gr_real_root.dentry), role);
66473+ read_unlock(&gr_inode_lock);
66474+ }
66475+out:
66476+ write_sequnlock(&rename_lock);
66477+ read_sequnlock_excl(&mount_lock);
66478+
66479+ BUG_ON(retval == NULL);
66480+
66481+ return retval;
66482+}
66483+
66484+void
66485+assign_special_role(const char *rolename)
66486+{
66487+ struct acl_object_label *obj;
66488+ struct acl_role_label *r;
66489+ struct acl_role_label *assigned = NULL;
66490+ struct task_struct *tsk;
66491+ struct file *filp;
66492+
66493+ FOR_EACH_ROLE_START(r)
66494+ if (!strcmp(rolename, r->rolename) &&
66495+ (r->roletype & GR_ROLE_SPECIAL)) {
66496+ assigned = r;
66497+ break;
66498+ }
66499+ FOR_EACH_ROLE_END(r)
66500+
66501+ if (!assigned)
66502+ return;
66503+
66504+ read_lock(&tasklist_lock);
66505+ read_lock(&grsec_exec_file_lock);
66506+
66507+ tsk = current->real_parent;
66508+ if (tsk == NULL)
66509+ goto out_unlock;
66510+
66511+ filp = tsk->exec_file;
66512+ if (filp == NULL)
66513+ goto out_unlock;
66514+
66515+ tsk->is_writable = 0;
66516+ tsk->inherited = 0;
66517+
66518+ tsk->acl_sp_role = 1;
66519+ tsk->acl_role_id = ++acl_sp_role_value;
66520+ tsk->role = assigned;
66521+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
66522+
66523+ /* ignore additional mmap checks for processes that are writable
66524+ by the default ACL */
66525+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
66526+ if (unlikely(obj->mode & GR_WRITE))
66527+ tsk->is_writable = 1;
66528+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
66529+ if (unlikely(obj->mode & GR_WRITE))
66530+ tsk->is_writable = 1;
66531+
66532+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
66533+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
66534+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
66535+#endif
66536+
66537+out_unlock:
66538+ read_unlock(&grsec_exec_file_lock);
66539+ read_unlock(&tasklist_lock);
66540+ return;
66541+}
66542+
66543+
66544+static void
66545+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
66546+{
66547+ struct task_struct *task = current;
66548+ const struct cred *cred = current_cred();
66549+
66550+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
66551+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
66552+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
66553+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
66554+
66555+ return;
66556+}
66557+
66558+static void
66559+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
66560+{
66561+ struct task_struct *task = current;
66562+ const struct cred *cred = current_cred();
66563+
66564+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
66565+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
66566+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
66567+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
66568+
66569+ return;
66570+}
66571+
66572+static void
66573+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
66574+{
66575+ struct task_struct *task = current;
66576+ const struct cred *cred = current_cred();
66577+
66578+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
66579+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
66580+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
66581+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
66582+
66583+ return;
66584+}
66585+
66586+static void
66587+gr_set_proc_res(struct task_struct *task)
66588+{
66589+ struct acl_subject_label *proc;
66590+ unsigned short i;
66591+
66592+ proc = task->acl;
66593+
66594+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
66595+ return;
66596+
66597+ for (i = 0; i < RLIM_NLIMITS; i++) {
66598+ if (!(proc->resmask & (1U << i)))
66599+ continue;
66600+
66601+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
66602+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
66603+
66604+ if (i == RLIMIT_CPU)
66605+ update_rlimit_cpu(task, proc->res[i].rlim_cur);
66606+ }
66607+
66608+ return;
66609+}
66610+
66611+/* both of the functions below must be called with
66612+ rcu_read_lock();
66613+ read_lock(&tasklist_lock);
66614+ read_lock(&grsec_exec_file_lock);
66615+*/
66616+
66617+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename)
66618+{
66619+ char *tmpname;
66620+ struct acl_subject_label *tmpsubj;
66621+ struct file *filp;
66622+ struct name_entry *nmatch;
66623+
66624+ filp = task->exec_file;
66625+ if (filp == NULL)
66626+ return NULL;
66627+
66628+	/* the following is to apply the correct subject
66629+	   to binaries already running when the RBAC system
66630+	   is enabled, when those binaries have been
66631+	   replaced or deleted since their execution
66632+	   -----
66633+	   when the RBAC system starts, the inode/dev
66634+	   from exec_file will be one that the RBAC system
66635+	   is unaware of.  It only knows the inode/dev
66636+	   of the present file on disk, or the absence
66637+	   of it.
66638+	*/
66639+
66640+ if (filename)
66641+ nmatch = __lookup_name_entry(state, filename);
66642+ else {
66643+ preempt_disable();
66644+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
66645+
66646+ nmatch = __lookup_name_entry(state, tmpname);
66647+ preempt_enable();
66648+ }
66649+ tmpsubj = NULL;
66650+ if (nmatch) {
66651+ if (nmatch->deleted)
66652+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
66653+ else
66654+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
66655+ }
66656+ /* this also works for the reload case -- if we don't match a potentially inherited subject
66657+ then we fall back to a normal lookup based on the binary's ino/dev
66658+ */
66659+ if (tmpsubj == NULL)
66660+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
66661+
66662+ return tmpsubj;
66663+}
66664+
66665+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename)
66666+{
66667+ return __gr_get_subject_for_task(&running_polstate, task, filename);
66668+}
66669+
66670+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
66671+{
66672+ struct acl_object_label *obj;
66673+ struct file *filp;
66674+
66675+ filp = task->exec_file;
66676+
66677+ task->acl = subj;
66678+ task->is_writable = 0;
66679+ /* ignore additional mmap checks for processes that are writable
66680+ by the default ACL */
66681+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
66682+ if (unlikely(obj->mode & GR_WRITE))
66683+ task->is_writable = 1;
66684+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
66685+ if (unlikely(obj->mode & GR_WRITE))
66686+ task->is_writable = 1;
66687+
66688+ gr_set_proc_res(task);
66689+
66690+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
66691+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
66692+#endif
66693+}
66694+
66695+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
66696+{
66697+ __gr_apply_subject_to_task(&running_polstate, task, subj);
66698+}
66699+
66700+__u32
66701+gr_search_file(const struct dentry * dentry, const __u32 mode,
66702+ const struct vfsmount * mnt)
66703+{
66704+ __u32 retval = mode;
66705+ struct acl_subject_label *curracl;
66706+ struct acl_object_label *currobj;
66707+
66708+ if (unlikely(!(gr_status & GR_READY)))
66709+ return (mode & ~GR_AUDITS);
66710+
66711+ curracl = current->acl;
66712+
66713+ currobj = chk_obj_label(dentry, mnt, curracl);
66714+ retval = currobj->mode & mode;
66715+
66716+ /* if we're opening a specified transfer file for writing
66717+ (e.g. /dev/initctl), then transfer our role to init
66718+ */
66719+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
66720+ current->role->roletype & GR_ROLE_PERSIST)) {
66721+ struct task_struct *task = init_pid_ns.child_reaper;
66722+
66723+ if (task->role != current->role) {
66724+ struct acl_subject_label *subj;
66725+
66726+ task->acl_sp_role = 0;
66727+ task->acl_role_id = current->acl_role_id;
66728+ task->role = current->role;
66729+ rcu_read_lock();
66730+ read_lock(&grsec_exec_file_lock);
66731+ subj = gr_get_subject_for_task(task, NULL);
66732+ gr_apply_subject_to_task(task, subj);
66733+ read_unlock(&grsec_exec_file_lock);
66734+ rcu_read_unlock();
66735+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
66736+ }
66737+ }
66738+
66739+ if (unlikely
66740+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
66741+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
66742+ __u32 new_mode = mode;
66743+
66744+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
66745+
66746+ retval = new_mode;
66747+
66748+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
66749+ new_mode |= GR_INHERIT;
66750+
66751+ if (!(mode & GR_NOLEARN))
66752+ gr_log_learn(dentry, mnt, new_mode);
66753+ }
66754+
66755+ return retval;
66756+}
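+/*
+ * Illustrative arithmetic, not part of the original patch: the core grant
+ * decision above is a mask intersection.  With hypothetical values
+ *
+ *	mode          = GR_READ | GR_WRITE         (requested access)
+ *	currobj->mode = GR_READ | GR_AUDIT_READ    (object policy)
+ *	retval        = currobj->mode & mode  ->  GR_READ
+ *
+ * the write bit is absent from retval, so the caller's comparison against
+ * the request fails; in learning mode the request is instead granted in
+ * full and logged via gr_log_learn().
+ */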
66757+
66758+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
66759+ const struct dentry *parent,
66760+ const struct vfsmount *mnt)
66761+{
66762+ struct name_entry *match;
66763+ struct acl_object_label *matchpo;
66764+ struct acl_subject_label *curracl;
66765+ char *path;
66766+
66767+ if (unlikely(!(gr_status & GR_READY)))
66768+ return NULL;
66769+
66770+ preempt_disable();
66771+ path = gr_to_filename_rbac(new_dentry, mnt);
66772+ match = lookup_name_entry_create(path);
66773+
66774+ curracl = current->acl;
66775+
66776+ if (match) {
66777+ read_lock(&gr_inode_lock);
66778+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
66779+ read_unlock(&gr_inode_lock);
66780+
66781+ if (matchpo) {
66782+ preempt_enable();
66783+ return matchpo;
66784+ }
66785+ }
66786+
66787+ // lookup parent
66788+
66789+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
66790+
66791+ preempt_enable();
66792+ return matchpo;
66793+}
66794+
66795+__u32
66796+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
66797+ const struct vfsmount * mnt, const __u32 mode)
66798+{
66799+ struct acl_object_label *matchpo;
66800+ __u32 retval;
66801+
66802+ if (unlikely(!(gr_status & GR_READY)))
66803+ return (mode & ~GR_AUDITS);
66804+
66805+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
66806+
66807+ retval = matchpo->mode & mode;
66808+
66809+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
66810+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
66811+ __u32 new_mode = mode;
66812+
66813+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
66814+
66815+ gr_log_learn(new_dentry, mnt, new_mode);
66816+ return new_mode;
66817+ }
66818+
66819+ return retval;
66820+}
66821+
66822+__u32
66823+gr_check_link(const struct dentry * new_dentry,
66824+ const struct dentry * parent_dentry,
66825+ const struct vfsmount * parent_mnt,
66826+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
66827+{
66828+ struct acl_object_label *obj;
66829+ __u32 oldmode, newmode;
66830+ __u32 needmode;
66831+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
66832+ GR_DELETE | GR_INHERIT;
66833+
66834+ if (unlikely(!(gr_status & GR_READY)))
66835+ return (GR_CREATE | GR_LINK);
66836+
66837+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
66838+ oldmode = obj->mode;
66839+
66840+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
66841+ newmode = obj->mode;
66842+
66843+ needmode = newmode & checkmodes;
66844+
66845+ // old name for hardlink must have at least the permissions of the new name
66846+ if ((oldmode & needmode) != needmode)
66847+ goto bad;
66848+
66849+ // if old name had restrictions/auditing, make sure the new name does as well
66850+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
66851+
66852+ // don't allow hardlinking of suid/sgid/fcapped files without permission
66853+ if (is_privileged_binary(old_dentry))
66854+ needmode |= GR_SETID;
66855+
66856+ if ((newmode & needmode) != needmode)
66857+ goto bad;
66858+
66859+ // enforce minimum permissions
66860+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
66861+ return newmode;
66862+bad:
66863+ needmode = oldmode;
66864+ if (is_privileged_binary(old_dentry))
66865+ needmode |= GR_SETID;
66866+
66867+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
66868+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
66869+ return (GR_CREATE | GR_LINK);
66870+ } else if (newmode & GR_SUPPRESS)
66871+ return GR_SUPPRESS;
66872+ else
66873+ return 0;
66874+}
66875+
66876+int
66877+gr_check_hidden_task(const struct task_struct *task)
66878+{
66879+ if (unlikely(!(gr_status & GR_READY)))
66880+ return 0;
66881+
66882+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
66883+ return 1;
66884+
66885+ return 0;
66886+}
66887+
66888+int
66889+gr_check_protected_task(const struct task_struct *task)
66890+{
66891+ if (unlikely(!(gr_status & GR_READY) || !task))
66892+ return 0;
66893+
66894+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
66895+ task->acl != current->acl)
66896+ return 1;
66897+
66898+ return 0;
66899+}
66900+
66901+int
66902+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
66903+{
66904+ struct task_struct *p;
66905+ int ret = 0;
66906+
66907+ if (unlikely(!(gr_status & GR_READY) || !pid))
66908+ return ret;
66909+
66910+ read_lock(&tasklist_lock);
66911+ do_each_pid_task(pid, type, p) {
66912+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
66913+ p->acl != current->acl) {
66914+ ret = 1;
66915+ goto out;
66916+ }
66917+ } while_each_pid_task(pid, type, p);
66918+out:
66919+ read_unlock(&tasklist_lock);
66920+
66921+ return ret;
66922+}
66923+
66924+void
66925+gr_copy_label(struct task_struct *tsk)
66926+{
66927+ struct task_struct *p = current;
66928+
66929+ tsk->inherited = p->inherited;
66930+ tsk->acl_sp_role = 0;
66931+ tsk->acl_role_id = p->acl_role_id;
66932+ tsk->acl = p->acl;
66933+ tsk->role = p->role;
66934+ tsk->signal->used_accept = 0;
66935+ tsk->signal->curr_ip = p->signal->curr_ip;
66936+ tsk->signal->saved_ip = p->signal->saved_ip;
66937+ if (p->exec_file)
66938+ get_file(p->exec_file);
66939+ tsk->exec_file = p->exec_file;
66940+ tsk->is_writable = p->is_writable;
66941+ if (unlikely(p->signal->used_accept)) {
66942+ p->signal->curr_ip = 0;
66943+ p->signal->saved_ip = 0;
66944+ }
66945+
66946+ return;
66947+}
66948+
66949+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
66950+
66951+int
66952+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
66953+{
66954+ unsigned int i;
66955+ __u16 num;
66956+ uid_t *uidlist;
66957+ uid_t curuid;
66958+ int realok = 0;
66959+ int effectiveok = 0;
66960+ int fsok = 0;
66961+ uid_t globalreal, globaleffective, globalfs;
66962+
66963+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
66964+ struct user_struct *user;
66965+
66966+ if (!uid_valid(real))
66967+ goto skipit;
66968+
66969+ /* find user based on global namespace */
66970+
66971+ globalreal = GR_GLOBAL_UID(real);
66972+
66973+ user = find_user(make_kuid(&init_user_ns, globalreal));
66974+ if (user == NULL)
66975+ goto skipit;
66976+
66977+ if (gr_process_kernel_setuid_ban(user)) {
66978+ /* for find_user */
66979+ free_uid(user);
66980+ return 1;
66981+ }
66982+
66983+ /* for find_user */
66984+ free_uid(user);
66985+
66986+skipit:
66987+#endif
66988+
66989+ if (unlikely(!(gr_status & GR_READY)))
66990+ return 0;
66991+
66992+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
66993+ gr_log_learn_uid_change(real, effective, fs);
66994+
66995+ num = current->acl->user_trans_num;
66996+ uidlist = current->acl->user_transitions;
66997+
66998+ if (uidlist == NULL)
66999+ return 0;
67000+
67001+ if (!uid_valid(real)) {
67002+ realok = 1;
67003+ globalreal = (uid_t)-1;
67004+ } else {
67005+ globalreal = GR_GLOBAL_UID(real);
67006+ }
67007+ if (!uid_valid(effective)) {
67008+ effectiveok = 1;
67009+ globaleffective = (uid_t)-1;
67010+ } else {
67011+ globaleffective = GR_GLOBAL_UID(effective);
67012+ }
67013+ if (!uid_valid(fs)) {
67014+ fsok = 1;
67015+ globalfs = (uid_t)-1;
67016+ } else {
67017+ globalfs = GR_GLOBAL_UID(fs);
67018+ }
67019+
67020+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
67021+ for (i = 0; i < num; i++) {
67022+ curuid = uidlist[i];
67023+ if (globalreal == curuid)
67024+ realok = 1;
67025+ if (globaleffective == curuid)
67026+ effectiveok = 1;
67027+ if (globalfs == curuid)
67028+ fsok = 1;
67029+ }
67030+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
67031+ for (i = 0; i < num; i++) {
67032+ curuid = uidlist[i];
67033+ if (globalreal == curuid)
67034+ break;
67035+ if (globaleffective == curuid)
67036+ break;
67037+ if (globalfs == curuid)
67038+ break;
67039+ }
67040+ /* not in deny list */
67041+ if (i == num) {
67042+ realok = 1;
67043+ effectiveok = 1;
67044+ fsok = 1;
67045+ }
67046+ }
67047+
67048+ if (realok && effectiveok && fsok)
67049+ return 0;
67050+ else {
67051+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
67052+ return 1;
67053+ }
67054+}
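+/*
+ * Illustrative sketch, not part of the original patch: user_transitions is
+ * an allow- or deny-list selected by user_trans_type.  With a hypothetical
+ * allow-list uidlist = {33, 1000}, GR_ID_ALLOW requires every valid uid of
+ * the (real, effective, fs) triple to appear in the list:
+ *
+ *	setuid(1000)   real/effective/fs all listed  ->  permitted
+ *	setuid(0)      0 not in the allow-list       ->  denied and logged
+ *
+ * under GR_ID_DENY the test inverts: the change is refused as soon as any
+ * one of the three uids appears in the list.
+ */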
67055+
67056+int
67057+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
67058+{
67059+ unsigned int i;
67060+ __u16 num;
67061+ gid_t *gidlist;
67062+ gid_t curgid;
67063+ int realok = 0;
67064+ int effectiveok = 0;
67065+ int fsok = 0;
67066+ gid_t globalreal, globaleffective, globalfs;
67067+
67068+ if (unlikely(!(gr_status & GR_READY)))
67069+ return 0;
67070+
67071+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
67072+ gr_log_learn_gid_change(real, effective, fs);
67073+
67074+ num = current->acl->group_trans_num;
67075+ gidlist = current->acl->group_transitions;
67076+
67077+ if (gidlist == NULL)
67078+ return 0;
67079+
67080+ if (!gid_valid(real)) {
67081+ realok = 1;
67082+ globalreal = (gid_t)-1;
67083+ } else {
67084+ globalreal = GR_GLOBAL_GID(real);
67085+ }
67086+ if (!gid_valid(effective)) {
67087+ effectiveok = 1;
67088+ globaleffective = (gid_t)-1;
67089+ } else {
67090+ globaleffective = GR_GLOBAL_GID(effective);
67091+ }
67092+ if (!gid_valid(fs)) {
67093+ fsok = 1;
67094+ globalfs = (gid_t)-1;
67095+ } else {
67096+ globalfs = GR_GLOBAL_GID(fs);
67097+ }
67098+
67099+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
67100+ for (i = 0; i < num; i++) {
67101+ curgid = gidlist[i];
67102+ if (globalreal == curgid)
67103+ realok = 1;
67104+ if (globaleffective == curgid)
67105+ effectiveok = 1;
67106+ if (globalfs == curgid)
67107+ fsok = 1;
67108+ }
67109+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
67110+ for (i = 0; i < num; i++) {
67111+ curgid = gidlist[i];
67112+ if (globalreal == curgid)
67113+ break;
67114+ if (globaleffective == curgid)
67115+ break;
67116+ if (globalfs == curgid)
67117+ break;
67118+ }
67119+ /* not in deny list */
67120+ if (i == num) {
67121+ realok = 1;
67122+ effectiveok = 1;
67123+ fsok = 1;
67124+ }
67125+ }
67126+
67127+ if (realok && effectiveok && fsok)
67128+ return 0;
67129+ else {
67130+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
67131+ return 1;
67132+ }
67133+}
67134+
67135+extern int gr_acl_is_capable(const int cap);
67136+
67137+void
67138+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
67139+{
67140+ struct acl_role_label *role = task->role;
67141+ struct acl_subject_label *subj = NULL;
67142+ struct acl_object_label *obj;
67143+ struct file *filp;
67144+ uid_t uid;
67145+ gid_t gid;
67146+
67147+ if (unlikely(!(gr_status & GR_READY)))
67148+ return;
67149+
67150+ uid = GR_GLOBAL_UID(kuid);
67151+ gid = GR_GLOBAL_GID(kgid);
67152+
67153+ filp = task->exec_file;
67154+
67155+ /* kernel process, we'll give them the kernel role */
67156+ if (unlikely(!filp)) {
67157+ task->role = running_polstate.kernel_role;
67158+ task->acl = running_polstate.kernel_role->root_label;
67159+ return;
67160+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
67161+		/* save the current IP at the time of role lookup so that the proper
67162+		   IP will be learned for role_allowed_ip */
67163+ task->signal->saved_ip = task->signal->curr_ip;
67164+ role = lookup_acl_role_label(task, uid, gid);
67165+ }
67166+
67167+ /* don't change the role if we're not a privileged process */
67168+ if (role && task->role != role &&
67169+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
67170+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
67171+ return;
67172+
67173+	/* perform subject lookup in the possibly new role;
67174+	   we can use this result below in the case where role == task->role
67175+	   */
67176+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
67177+
67178+	/* if we changed uid/gid but resolved to the same role
67179+	   and are using inheritance, don't lose the inherited subject.
67180+	   if the current subject is other than what a normal lookup
67181+	   would result in, we arrived via inheritance; don't
67182+	   lose that subject
67183+	*/
67184+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
67185+ (subj == task->acl)))
67186+ task->acl = subj;
67187+
67188+ /* leave task->inherited unaffected */
67189+
67190+ task->role = role;
67191+
67192+ task->is_writable = 0;
67193+
67194+ /* ignore additional mmap checks for processes that are writable
67195+ by the default ACL */
67196+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
67197+ if (unlikely(obj->mode & GR_WRITE))
67198+ task->is_writable = 1;
67199+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
67200+ if (unlikely(obj->mode & GR_WRITE))
67201+ task->is_writable = 1;
67202+
67203+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
67204+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
67205+#endif
67206+
67207+ gr_set_proc_res(task);
67208+
67209+ return;
67210+}
67211+
67212+int
67213+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
67214+ const int unsafe_flags)
67215+{
67216+ struct task_struct *task = current;
67217+ struct acl_subject_label *newacl;
67218+ struct acl_object_label *obj;
67219+ __u32 retmode;
67220+
67221+ if (unlikely(!(gr_status & GR_READY)))
67222+ return 0;
67223+
67224+ newacl = chk_subj_label(dentry, mnt, task->role);
67225+
67226+	/* special handling for the case where we did an strace -f -p <pid> from an admin role,
67227+	   and the pid then did an exec
67228+	*/
67229+ rcu_read_lock();
67230+ read_lock(&tasklist_lock);
67231+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
67232+ (task->parent->acl->mode & GR_POVERRIDE))) {
67233+ read_unlock(&tasklist_lock);
67234+ rcu_read_unlock();
67235+ goto skip_check;
67236+ }
67237+ read_unlock(&tasklist_lock);
67238+ rcu_read_unlock();
67239+
67240+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
67241+ !(task->role->roletype & GR_ROLE_GOD) &&
67242+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
67243+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
67244+ if (unsafe_flags & LSM_UNSAFE_SHARE)
67245+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
67246+ else
67247+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
67248+ return -EACCES;
67249+ }
67250+
67251+skip_check:
67252+
67253+ obj = chk_obj_label(dentry, mnt, task->acl);
67254+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
67255+
67256+ if (!(task->acl->mode & GR_INHERITLEARN) &&
67257+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
67258+ if (obj->nested)
67259+ task->acl = obj->nested;
67260+ else
67261+ task->acl = newacl;
67262+ task->inherited = 0;
67263+ } else {
67264+ task->inherited = 1;
67265+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
67266+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
67267+ }
67268+
67269+ task->is_writable = 0;
67270+
67271+ /* ignore additional mmap checks for processes that are writable
67272+ by the default ACL */
67273+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
67274+ if (unlikely(obj->mode & GR_WRITE))
67275+ task->is_writable = 1;
67276+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
67277+ if (unlikely(obj->mode & GR_WRITE))
67278+ task->is_writable = 1;
67279+
67280+ gr_set_proc_res(task);
67281+
67282+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
67283+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
67284+#endif
67285+ return 0;
67286+}
67287+
67288+/* always called with valid inodev ptr */
67289+static void
67290+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
67291+{
67292+ struct acl_object_label *matchpo;
67293+ struct acl_subject_label *matchps;
67294+ struct acl_subject_label *subj;
67295+ struct acl_role_label *role;
67296+ unsigned int x;
67297+
67298+ FOR_EACH_ROLE_START(role)
67299+ FOR_EACH_SUBJECT_START(role, subj, x)
67300+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
67301+ matchpo->mode |= GR_DELETED;
67302+ FOR_EACH_SUBJECT_END(subj,x)
67303+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
67304+ /* nested subjects aren't in the role's subj_hash table */
67305+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
67306+ matchpo->mode |= GR_DELETED;
67307+ FOR_EACH_NESTED_SUBJECT_END(subj)
67308+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
67309+ matchps->mode |= GR_DELETED;
67310+ FOR_EACH_ROLE_END(role)
67311+
67312+ inodev->nentry->deleted = 1;
67313+
67314+ return;
67315+}
67316+
67317+void
67318+gr_handle_delete(const ino_t ino, const dev_t dev)
67319+{
67320+ struct inodev_entry *inodev;
67321+
67322+ if (unlikely(!(gr_status & GR_READY)))
67323+ return;
67324+
67325+ write_lock(&gr_inode_lock);
67326+ inodev = lookup_inodev_entry(ino, dev);
67327+ if (inodev != NULL)
67328+ do_handle_delete(inodev, ino, dev);
67329+ write_unlock(&gr_inode_lock);
67330+
67331+ return;
67332+}
67333+
67334+static void
67335+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
67336+ const ino_t newinode, const dev_t newdevice,
67337+ struct acl_subject_label *subj)
67338+{
67339+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
67340+ struct acl_object_label *match;
67341+
67342+ match = subj->obj_hash[index];
67343+
67344+ while (match && (match->inode != oldinode ||
67345+ match->device != olddevice ||
67346+ !(match->mode & GR_DELETED)))
67347+ match = match->next;
67348+
67349+ if (match && (match->inode == oldinode)
67350+ && (match->device == olddevice)
67351+ && (match->mode & GR_DELETED)) {
67352+ if (match->prev == NULL) {
67353+ subj->obj_hash[index] = match->next;
67354+ if (match->next != NULL)
67355+ match->next->prev = NULL;
67356+ } else {
67357+ match->prev->next = match->next;
67358+ if (match->next != NULL)
67359+ match->next->prev = match->prev;
67360+ }
67361+ match->prev = NULL;
67362+ match->next = NULL;
67363+ match->inode = newinode;
67364+ match->device = newdevice;
67365+ match->mode &= ~GR_DELETED;
67366+
67367+ insert_acl_obj_label(match, subj);
67368+ }
67369+
67370+ return;
67371+}
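+/*
+ * Illustrative sketch, not part of the original patch: update_acl_obj_label()
+ * above and the two helpers following it all rekey with the same pattern,
+ * needed because the hash index is derived from the (inode, device) key:
+ *
+ *	1. unlink match from its chain (O(1) via the prev/next pointers);
+ *	2. write the new inode/device into the entry;
+ *	3. clear its deleted marker;
+ *	4. re-insert, which rehashes the entry under the new key.
+ */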
67372+
67373+static void
67374+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
67375+ const ino_t newinode, const dev_t newdevice,
67376+ struct acl_role_label *role)
67377+{
67378+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
67379+ struct acl_subject_label *match;
67380+
67381+ match = role->subj_hash[index];
67382+
67383+ while (match && (match->inode != oldinode ||
67384+ match->device != olddevice ||
67385+ !(match->mode & GR_DELETED)))
67386+ match = match->next;
67387+
67388+ if (match && (match->inode == oldinode)
67389+ && (match->device == olddevice)
67390+ && (match->mode & GR_DELETED)) {
67391+ if (match->prev == NULL) {
67392+ role->subj_hash[index] = match->next;
67393+ if (match->next != NULL)
67394+ match->next->prev = NULL;
67395+ } else {
67396+ match->prev->next = match->next;
67397+ if (match->next != NULL)
67398+ match->next->prev = match->prev;
67399+ }
67400+ match->prev = NULL;
67401+ match->next = NULL;
67402+ match->inode = newinode;
67403+ match->device = newdevice;
67404+ match->mode &= ~GR_DELETED;
67405+
67406+ insert_acl_subj_label(match, role);
67407+ }
67408+
67409+ return;
67410+}
67411+
67412+static void
67413+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
67414+ const ino_t newinode, const dev_t newdevice)
67415+{
67416+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
67417+ struct inodev_entry *match;
67418+
67419+ match = running_polstate.inodev_set.i_hash[index];
67420+
67421+ while (match && (match->nentry->inode != oldinode ||
67422+ match->nentry->device != olddevice || !match->nentry->deleted))
67423+ match = match->next;
67424+
67425+ if (match && (match->nentry->inode == oldinode)
67426+ && (match->nentry->device == olddevice) &&
67427+ match->nentry->deleted) {
67428+ if (match->prev == NULL) {
67429+ running_polstate.inodev_set.i_hash[index] = match->next;
67430+ if (match->next != NULL)
67431+ match->next->prev = NULL;
67432+ } else {
67433+ match->prev->next = match->next;
67434+ if (match->next != NULL)
67435+ match->next->prev = match->prev;
67436+ }
67437+ match->prev = NULL;
67438+ match->next = NULL;
67439+ match->nentry->inode = newinode;
67440+ match->nentry->device = newdevice;
67441+ match->nentry->deleted = 0;
67442+
67443+ insert_inodev_entry(match);
67444+ }
67445+
67446+ return;
67447+}
67448+
67449+static void
67450+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
67451+{
67452+ struct acl_subject_label *subj;
67453+ struct acl_role_label *role;
67454+ unsigned int x;
67455+
67456+ FOR_EACH_ROLE_START(role)
67457+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
67458+
67459+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
67460+			if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
67461+				subj->inode = ino;
67462+				subj->device = dev;
67463+			}
67464+ /* nested subjects aren't in the role's subj_hash table */
67465+ update_acl_obj_label(matchn->inode, matchn->device,
67466+ ino, dev, subj);
67467+ FOR_EACH_NESTED_SUBJECT_END(subj)
67468+ FOR_EACH_SUBJECT_START(role, subj, x)
67469+ update_acl_obj_label(matchn->inode, matchn->device,
67470+ ino, dev, subj);
67471+ FOR_EACH_SUBJECT_END(subj,x)
67472+ FOR_EACH_ROLE_END(role)
67473+
67474+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
67475+
67476+ return;
67477+}
67478+
67479+static void
67480+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
67481+ const struct vfsmount *mnt)
67482+{
67483+ ino_t ino = dentry->d_inode->i_ino;
67484+ dev_t dev = __get_dev(dentry);
67485+
67486+ __do_handle_create(matchn, ino, dev);
67487+
67488+ return;
67489+}
67490+
67491+void
67492+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
67493+{
67494+ struct name_entry *matchn;
67495+
67496+ if (unlikely(!(gr_status & GR_READY)))
67497+ return;
67498+
67499+ preempt_disable();
67500+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
67501+
67502+ if (unlikely((unsigned long)matchn)) {
67503+ write_lock(&gr_inode_lock);
67504+ do_handle_create(matchn, dentry, mnt);
67505+ write_unlock(&gr_inode_lock);
67506+ }
67507+ preempt_enable();
67508+
67509+ return;
67510+}
67511+
67512+void
67513+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
67514+{
67515+ struct name_entry *matchn;
67516+
67517+ if (unlikely(!(gr_status & GR_READY)))
67518+ return;
67519+
67520+ preempt_disable();
67521+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
67522+
67523+ if (unlikely((unsigned long)matchn)) {
67524+ write_lock(&gr_inode_lock);
67525+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
67526+ write_unlock(&gr_inode_lock);
67527+ }
67528+ preempt_enable();
67529+
67530+ return;
67531+}
67532+
67533+void
67534+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
67535+ struct dentry *old_dentry,
67536+ struct dentry *new_dentry,
67537+ struct vfsmount *mnt, const __u8 replace)
67538+{
67539+ struct name_entry *matchn;
67540+ struct inodev_entry *inodev;
67541+ struct inode *inode = new_dentry->d_inode;
67542+ ino_t old_ino = old_dentry->d_inode->i_ino;
67543+ dev_t old_dev = __get_dev(old_dentry);
67544+
67545+	/* vfs_rename swaps the name and parent link for old_dentry and
67546+	   new_dentry.
67547+	   at this point, old_dentry has the new name, parent link, and inode
67548+	   for the renamed file.
67549+	   if a file is being replaced by a rename, new_dentry has the inode
67550+	   and name for the replaced file
67551+	*/
67552+
67553+ if (unlikely(!(gr_status & GR_READY)))
67554+ return;
67555+
67556+ preempt_disable();
67557+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
67558+
67559+ /* we wouldn't have to check d_inode if it weren't for
67560+ NFS silly-renaming
67561+ */
67562+
67563+ write_lock(&gr_inode_lock);
67564+ if (unlikely(replace && inode)) {
67565+ ino_t new_ino = inode->i_ino;
67566+ dev_t new_dev = __get_dev(new_dentry);
67567+
67568+ inodev = lookup_inodev_entry(new_ino, new_dev);
67569+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
67570+ do_handle_delete(inodev, new_ino, new_dev);
67571+ }
67572+
67573+ inodev = lookup_inodev_entry(old_ino, old_dev);
67574+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
67575+ do_handle_delete(inodev, old_ino, old_dev);
67576+
67577+ if (unlikely((unsigned long)matchn))
67578+ do_handle_create(matchn, old_dentry, mnt);
67579+
67580+ write_unlock(&gr_inode_lock);
67581+ preempt_enable();
67582+
67583+ return;
67584+}
67585+
67586+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
67587+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
67588+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
67589+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
67590+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
67591+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
67592+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
67593+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
67594+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
67595+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
67596+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
67597+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
67598+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
67599+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
67600+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
67601+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
67602+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
67603+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
67604+};
67605+
67606+void
67607+gr_learn_resource(const struct task_struct *task,
67608+ const int res, const unsigned long wanted, const int gt)
67609+{
67610+ struct acl_subject_label *acl;
67611+ const struct cred *cred;
67612+
67613+ if (unlikely((gr_status & GR_READY) &&
67614+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
67615+ goto skip_reslog;
67616+
67617+ gr_log_resource(task, res, wanted, gt);
67618+skip_reslog:
67619+
67620+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
67621+ return;
67622+
67623+ acl = task->acl;
67624+
67625+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
67626+ !(acl->resmask & (1U << (unsigned short) res))))
67627+ return;
67628+
67629+ if (wanted >= acl->res[res].rlim_cur) {
67630+ unsigned long res_add;
67631+
67632+ res_add = wanted + res_learn_bumps[res];
67633+
67634+ acl->res[res].rlim_cur = res_add;
67635+
67636+ if (wanted > acl->res[res].rlim_max)
67637+ acl->res[res].rlim_max = res_add;
67638+
67639+ /* only log the subject filename, since resource logging is supported for
67640+ single-subject learning only */
67641+ rcu_read_lock();
67642+ cred = __task_cred(task);
67643+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
67644+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
67645+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
67646+ "", (unsigned long) res, &task->signal->saved_ip);
67647+ rcu_read_unlock();
67648+ }
67649+
67650+ return;
67651+}
67652+EXPORT_SYMBOL(gr_learn_resource);
67653+#endif
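+/*
+ * Illustrative arithmetic, not part of the original patch: in learning mode
+ * a watched rlimit is raised to the observed demand plus the per-resource
+ * headroom from res_learn_bumps[].  With a hypothetical NOFILE bump of 5:
+ *
+ *	wanted = 70, rlim_cur = 64   ->   rlim_cur = 70 + 5 = 75
+ *
+ * rlim_max is raised to the same bumped value once wanted exceeds it, so
+ * the learned policy ends up with a tight but sufficient limit.
+ */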
67654+
67655+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
67656+void
67657+pax_set_initial_flags(struct linux_binprm *bprm)
67658+{
67659+ struct task_struct *task = current;
67660+ struct acl_subject_label *proc;
67661+ unsigned long flags;
67662+
67663+ if (unlikely(!(gr_status & GR_READY)))
67664+ return;
67665+
67666+ flags = pax_get_flags(task);
67667+
67668+ proc = task->acl;
67669+
67670+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
67671+ flags &= ~MF_PAX_PAGEEXEC;
67672+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
67673+ flags &= ~MF_PAX_SEGMEXEC;
67674+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
67675+ flags &= ~MF_PAX_RANDMMAP;
67676+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
67677+ flags &= ~MF_PAX_EMUTRAMP;
67678+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
67679+ flags &= ~MF_PAX_MPROTECT;
67680+
67681+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
67682+ flags |= MF_PAX_PAGEEXEC;
67683+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
67684+ flags |= MF_PAX_SEGMEXEC;
67685+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
67686+ flags |= MF_PAX_RANDMMAP;
67687+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
67688+ flags |= MF_PAX_EMUTRAMP;
67689+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
67690+ flags |= MF_PAX_MPROTECT;
67691+
67692+ pax_set_flags(task, flags);
67693+
67694+ return;
67695+}
67696+#endif
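+/*
+ * Illustrative sketch, not part of the original patch: subject pax_flags
+ * carry separate enable and disable bits that are applied clear-then-set
+ * over the task's current flags, so a subject specifying neither bit for
+ * a feature leaves the kernel default untouched.  Hypothetical case:
+ *
+ *	task flags: MF_PAX_PAGEEXEC | MF_PAX_MPROTECT
+ *	subject:    GR_PAX_DISABLE_MPROTECT only
+ *	result:     MF_PAX_PAGEEXEC   (MPROTECT cleared, PAGEEXEC kept)
+ */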
67697+
67698+int
67699+gr_handle_proc_ptrace(struct task_struct *task)
67700+{
67701+ struct file *filp;
67702+ struct task_struct *tmp = task;
67703+ struct task_struct *curtemp = current;
67704+ __u32 retmode;
67705+
67706+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
67707+ if (unlikely(!(gr_status & GR_READY)))
67708+ return 0;
67709+#endif
67710+
67711+ read_lock(&tasklist_lock);
67712+ read_lock(&grsec_exec_file_lock);
67713+ filp = task->exec_file;
67714+
67715+ while (task_pid_nr(tmp) > 0) {
67716+ if (tmp == curtemp)
67717+ break;
67718+ tmp = tmp->real_parent;
67719+ }
67720+
67721+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
67722+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
67723+ read_unlock(&grsec_exec_file_lock);
67724+ read_unlock(&tasklist_lock);
67725+ return 1;
67726+ }
67727+
67728+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
67729+ if (!(gr_status & GR_READY)) {
67730+ read_unlock(&grsec_exec_file_lock);
67731+ read_unlock(&tasklist_lock);
67732+ return 0;
67733+ }
67734+#endif
67735+
67736+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
67737+ read_unlock(&grsec_exec_file_lock);
67738+ read_unlock(&tasklist_lock);
67739+
67740+ if (retmode & GR_NOPTRACE)
67741+ return 1;
67742+
67743+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
67744+ && (current->acl != task->acl || (current->acl != current->role->root_label
67745+ && task_pid_nr(current) != task_pid_nr(task))))
67746+ return 1;
67747+
67748+ return 0;
67749+}
67750+
67751+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
67752+{
67753+ if (unlikely(!(gr_status & GR_READY)))
67754+ return;
67755+
67756+ if (!(current->role->roletype & GR_ROLE_GOD))
67757+ return;
67758+
67759+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
67760+ p->role->rolename, gr_task_roletype_to_char(p),
67761+ p->acl->filename);
67762+}
67763+
67764+int
67765+gr_handle_ptrace(struct task_struct *task, const long request)
67766+{
67767+ struct task_struct *tmp = task;
67768+ struct task_struct *curtemp = current;
67769+ __u32 retmode;
67770+
67771+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
67772+ if (unlikely(!(gr_status & GR_READY)))
67773+ return 0;
67774+#endif
67775+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
67776+ read_lock(&tasklist_lock);
67777+ while (task_pid_nr(tmp) > 0) {
67778+ if (tmp == curtemp)
67779+ break;
67780+ tmp = tmp->real_parent;
67781+ }
67782+
67783+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
67784+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
67785+ read_unlock(&tasklist_lock);
67786+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
67787+ return 1;
67788+ }
67789+ read_unlock(&tasklist_lock);
67790+ }
67791+
67792+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
67793+ if (!(gr_status & GR_READY))
67794+ return 0;
67795+#endif
67796+
67797+ read_lock(&grsec_exec_file_lock);
67798+ if (unlikely(!task->exec_file)) {
67799+ read_unlock(&grsec_exec_file_lock);
67800+ return 0;
67801+ }
67802+
67803+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
67804+ read_unlock(&grsec_exec_file_lock);
67805+
67806+ if (retmode & GR_NOPTRACE) {
67807+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
67808+ return 1;
67809+ }
67810+
67811+ if (retmode & GR_PTRACERD) {
67812+ switch (request) {
67813+ case PTRACE_SEIZE:
67814+ case PTRACE_POKETEXT:
67815+ case PTRACE_POKEDATA:
67816+ case PTRACE_POKEUSR:
67817+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
67818+ case PTRACE_SETREGS:
67819+ case PTRACE_SETFPREGS:
67820+#endif
67821+#ifdef CONFIG_X86
67822+ case PTRACE_SETFPXREGS:
67823+#endif
67824+#ifdef CONFIG_ALTIVEC
67825+ case PTRACE_SETVRREGS:
67826+#endif
67827+ return 1;
67828+ default:
67829+ return 0;
67830+ }
67831+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
67832+ !(current->role->roletype & GR_ROLE_GOD) &&
67833+ (current->acl != task->acl)) {
67834+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
67835+ return 1;
67836+ }
67837+
67838+ return 0;
67839+}
67840+
67841+static int is_writable_mmap(const struct file *filp)
67842+{
67843+ struct task_struct *task = current;
67844+ struct acl_object_label *obj, *obj2;
67845+
67846+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
67847+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
67848+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
67849+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
67850+ task->role->root_label);
67851+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
67852+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
67853+ return 1;
67854+ }
67855+ }
67856+ return 0;
67857+}
67858+
67859+int
67860+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
67861+{
67862+ __u32 mode;
67863+
67864+ if (unlikely(!file || !(prot & PROT_EXEC)))
67865+ return 1;
67866+
67867+ if (is_writable_mmap(file))
67868+ return 0;
67869+
67870+ mode =
67871+ gr_search_file(file->f_path.dentry,
67872+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
67873+ file->f_path.mnt);
67874+
67875+ if (!gr_tpe_allow(file))
67876+ return 0;
67877+
67878+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
67879+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
67880+ return 0;
67881+ } else if (unlikely(!(mode & GR_EXEC))) {
67882+ return 0;
67883+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
67884+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
67885+ return 1;
67886+ }
67887+
67888+ return 1;
67889+}
67890+
67891+int
67892+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
67893+{
67894+ __u32 mode;
67895+
67896+ if (unlikely(!file || !(prot & PROT_EXEC)))
67897+ return 1;
67898+
67899+ if (is_writable_mmap(file))
67900+ return 0;
67901+
67902+ mode =
67903+ gr_search_file(file->f_path.dentry,
67904+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
67905+ file->f_path.mnt);
67906+
67907+ if (!gr_tpe_allow(file))
67908+ return 0;
67909+
67910+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
67911+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
67912+ return 0;
67913+ } else if (unlikely(!(mode & GR_EXEC))) {
67914+ return 0;
67915+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
67916+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
67917+ return 1;
67918+ }
67919+
67920+ return 1;
67921+}
67922+
67923+void
67924+gr_acl_handle_psacct(struct task_struct *task, const long code)
67925+{
67926+ unsigned long runtime, cputime;
67927+ cputime_t utime, stime;
67928+ unsigned int wday, cday;
67929+ __u8 whr, chr;
67930+ __u8 wmin, cmin;
67931+ __u8 wsec, csec;
67932+ struct timespec timeval;
67933+
67934+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
67935+ !(task->acl->mode & GR_PROCACCT)))
67936+ return;
67937+
67938+ do_posix_clock_monotonic_gettime(&timeval);
67939+ runtime = timeval.tv_sec - task->start_time.tv_sec;
67940+ wday = runtime / (60 * 60 * 24);
67941+ runtime -= wday * (60 * 60 * 24);
67942+ whr = runtime / (60 * 60);
67943+ runtime -= whr * (60 * 60);
67944+ wmin = runtime / 60;
67945+ runtime -= wmin * 60;
67946+ wsec = runtime;
67947+
67948+ task_cputime(task, &utime, &stime);
67949+ cputime = cputime_to_secs(utime + stime);
67950+ cday = cputime / (60 * 60 * 24);
67951+ cputime -= cday * (60 * 60 * 24);
67952+ chr = cputime / (60 * 60);
67953+ cputime -= chr * (60 * 60);
67954+ cmin = cputime / 60;
67955+ cputime -= cmin * 60;
67956+ csec = cputime;
67957+
67958+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
67959+
67960+ return;
67961+}
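+/* worked example (expository comment, not part of the original patch):
+ * the decomposition above splits a plain second count into d/h/m/s, e.g.
+ * runtime = 93784s -> wday = 1, whr = 2, wmin = 3, wsec = 4 (1d 2h 3m 4s),
+ * which gr_log_procacct() then formats into the accounting audit line.
+ */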
67962+
67963+#ifdef CONFIG_TASKSTATS
67964+int gr_is_taskstats_denied(int pid)
67965+{
67966+ struct task_struct *task;
67967+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67968+ const struct cred *cred;
67969+#endif
67970+ int ret = 0;
67971+
67972+ /* restrict taskstats viewing to un-chrooted root users
67973+ who have the 'view' subject flag if the RBAC system is enabled
67974+ */
67975+
67976+ rcu_read_lock();
67977+ read_lock(&tasklist_lock);
67978+ task = find_task_by_vpid(pid);
67979+ if (task) {
67980+#ifdef CONFIG_GRKERNSEC_CHROOT
67981+ if (proc_is_chrooted(task))
67982+ ret = -EACCES;
67983+#endif
67984+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67985+ cred = __task_cred(task);
67986+#ifdef CONFIG_GRKERNSEC_PROC_USER
67987+ if (gr_is_global_nonroot(cred->uid))
67988+ ret = -EACCES;
67989+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67990+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
67991+ ret = -EACCES;
67992+#endif
67993+#endif
67994+ if (gr_status & GR_READY) {
67995+ if (!(task->acl->mode & GR_VIEW))
67996+ ret = -EACCES;
67997+ }
67998+ } else
67999+ ret = -ENOENT;
68000+
68001+ read_unlock(&tasklist_lock);
68002+ rcu_read_unlock();
68003+
68004+ return ret;
68005+}
68006+#endif
68007+
68008+/* AUXV entries are filled via a descendant of search_binary_handler
68009+ after we've already applied the subject for the target
68010+*/
68011+int gr_acl_enable_at_secure(void)
68012+{
68013+ if (unlikely(!(gr_status & GR_READY)))
68014+ return 0;
68015+
68016+ if (current->acl->mode & GR_ATSECURE)
68017+ return 1;
68018+
68019+ return 0;
68020+}
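+/* context note (expository comment, not part of the original patch): a
+ * nonzero return here causes AT_SECURE to be set in the new image's auxv;
+ * the dynamic linker then enters secure-execution mode and ignores
+ * attacker-controllable environment such as LD_PRELOAD and
+ * LD_LIBRARY_PATH, as it already does for setuid binaries.
+ */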
68021+
68022+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
68023+{
68024+ struct task_struct *task = current;
68025+ struct dentry *dentry = file->f_path.dentry;
68026+ struct vfsmount *mnt = file->f_path.mnt;
68027+ struct acl_object_label *obj, *tmp;
68028+ struct acl_subject_label *subj;
68029+ unsigned int bufsize;
68030+ int is_not_root;
68031+ char *path;
68032+ dev_t dev = __get_dev(dentry);
68033+
68034+ if (unlikely(!(gr_status & GR_READY)))
68035+ return 1;
68036+
68037+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
68038+ return 1;
68039+
68040+ /* ignore Eric Biederman */
68041+ if (IS_PRIVATE(dentry->d_inode))
68042+ return 1;
68043+
68044+ subj = task->acl;
68045+ read_lock(&gr_inode_lock);
68046+ do {
68047+ obj = lookup_acl_obj_label(ino, dev, subj);
68048+ if (obj != NULL) {
68049+ read_unlock(&gr_inode_lock);
68050+ return (obj->mode & GR_FIND) ? 1 : 0;
68051+ }
68052+ } while ((subj = subj->parent_subject));
68053+ read_unlock(&gr_inode_lock);
68054+
68055+ /* this is purely an optimization since we're looking for an object
68056+ for the directory we're doing a readdir on
68057+ if it's possible for any globbed object to match the entry we're
68058+ filling into the directory, then the object we find here will be
68059+ an anchor point with attached globbed objects
68060+ */
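+	/* e.g. (illustrative, not part of the original patch): a policy
+	   object "/tmp" carrying the globbed child "/tmp/sess_*" is the
+	   anchor here; for a readdir of /tmp, each candidate path such as
+	   "/tmp/sess_abc123" is matched against the attached globs below
+	   before falling back to the anchor object's own mode */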
68061+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
68062+ if (obj->globbed == NULL)
68063+ return (obj->mode & GR_FIND) ? 1 : 0;
68064+
68065+ is_not_root = ((obj->filename[0] == '/') &&
68066+ (obj->filename[1] == '\0')) ? 0 : 1;
68067+ bufsize = PAGE_SIZE - namelen - is_not_root;
68068+
68069+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
68070+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
68071+ return 1;
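+	/* expository note (not part of the original patch): bufsize is
+	   unsigned, so the single comparison above catches both boundary
+	   cases via wraparound:
+	     bufsize == 0        -> bufsize - 1 == UINT_MAX  > PAGE_SIZE - 1
+	     bufsize > PAGE_SIZE -> bufsize - 1 >= PAGE_SIZE > PAGE_SIZE - 1
+	*/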
68072+
68073+ preempt_disable();
68074+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
68075+ bufsize);
68076+
68077+ bufsize = strlen(path);
68078+
68079+ /* if base is "/", don't append an additional slash */
68080+ if (is_not_root)
68081+ *(path + bufsize) = '/';
68082+ memcpy(path + bufsize + is_not_root, name, namelen);
68083+ *(path + bufsize + namelen + is_not_root) = '\0';
68084+
68085+ tmp = obj->globbed;
68086+ while (tmp) {
68087+ if (!glob_match(tmp->filename, path)) {
68088+ preempt_enable();
68089+ return (tmp->mode & GR_FIND) ? 1 : 0;
68090+ }
68091+ tmp = tmp->next;
68092+ }
68093+ preempt_enable();
68094+ return (obj->mode & GR_FIND) ? 1 : 0;
68095+}
68096+
68097+void gr_put_exec_file(struct task_struct *task)
68098+{
68099+ struct file *filp;
68100+
68101+ write_lock(&grsec_exec_file_lock);
68102+ filp = task->exec_file;
68103+ task->exec_file = NULL;
68104+ write_unlock(&grsec_exec_file_lock);
68105+
68106+ if (filp)
68107+ fput(filp);
68108+
68109+ return;
68110+}
68111+
68112+
68113+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
68114+EXPORT_SYMBOL(gr_acl_is_enabled);
68115+#endif
68116+#ifdef CONFIG_SECURITY
68117+EXPORT_SYMBOL(gr_check_user_change);
68118+EXPORT_SYMBOL(gr_check_group_change);
68119+#endif
68120+
68121diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
68122new file mode 100644
68123index 0000000..18ffbbd
68124--- /dev/null
68125+++ b/grsecurity/gracl_alloc.c
68126@@ -0,0 +1,105 @@
68127+#include <linux/kernel.h>
68128+#include <linux/mm.h>
68129+#include <linux/slab.h>
68130+#include <linux/vmalloc.h>
68131+#include <linux/gracl.h>
68132+#include <linux/grsecurity.h>
68133+
68134+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
68135+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
68136+
68137+static __inline__ int
68138+alloc_pop(void)
68139+{
68140+ if (current_alloc_state->alloc_stack_next == 1)
68141+ return 0;
68142+
68143+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
68144+
68145+ current_alloc_state->alloc_stack_next--;
68146+
68147+ return 1;
68148+}
68149+
68150+static __inline__ int
68151+alloc_push(void *buf)
68152+{
68153+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
68154+ return 1;
68155+
68156+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
68157+
68158+ current_alloc_state->alloc_stack_next++;
68159+
68160+ return 0;
68161+}
68162+
68163+void *
68164+acl_alloc(unsigned long len)
68165+{
68166+ void *ret = NULL;
68167+
68168+ if (!len || len > PAGE_SIZE)
68169+ goto out;
68170+
68171+ ret = kmalloc(len, GFP_KERNEL);
68172+
68173+ if (ret) {
68174+ if (alloc_push(ret)) {
68175+ kfree(ret);
68176+ ret = NULL;
68177+ }
68178+ }
68179+
68180+out:
68181+ return ret;
68182+}
68183+
68184+void *
68185+acl_alloc_num(unsigned long num, unsigned long len)
68186+{
68187+ if (!len || (num > (PAGE_SIZE / len)))
68188+ return NULL;
68189+
68190+ return acl_alloc(num * len);
68191+}
68192+
68193+void
68194+acl_free_all(void)
68195+{
68196+ if (!current_alloc_state->alloc_stack)
68197+ return;
68198+
68199+ while (alloc_pop()) ;
68200+
68201+ if (current_alloc_state->alloc_stack) {
68202+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
68203+ kfree(current_alloc_state->alloc_stack);
68204+ else
68205+ vfree(current_alloc_state->alloc_stack);
68206+ }
68207+
68208+ current_alloc_state->alloc_stack = NULL;
68209+ current_alloc_state->alloc_stack_size = 1;
68210+ current_alloc_state->alloc_stack_next = 1;
68211+
68212+ return;
68213+}
68214+
68215+int
68216+acl_alloc_stack_init(unsigned long size)
68217+{
68218+ if ((size * sizeof (void *)) <= PAGE_SIZE)
68219+ current_alloc_state->alloc_stack =
68220+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
68221+ else
68222+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
68223+
68224+ current_alloc_state->alloc_stack_size = size;
68225+ current_alloc_state->alloc_stack_next = 1;
68226+
68227+ if (!current_alloc_state->alloc_stack)
68228+ return 0;
68229+ else
68230+ return 1;
68231+}
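+/* usage sketch (illustrative, not part of the original patch; "nelems" is
+ * a made-up size): the policy loader sizes the stack once and releases
+ * everything in one shot on unload or error, so individual allocations
+ * never need matching frees:
+ *
+ *	if (!acl_alloc_stack_init(nelems))	// returns 0 on failure
+ *		return -ENOMEM;
+ *	obj = acl_alloc(sizeof(*obj));		// kmalloc tracked on the stack
+ *	...
+ *	acl_free_all();				// frees every tracked buffer
+ */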
68232diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
68233new file mode 100644
68234index 0000000..bdd51ea
68235--- /dev/null
68236+++ b/grsecurity/gracl_cap.c
68237@@ -0,0 +1,110 @@
68238+#include <linux/kernel.h>
68239+#include <linux/module.h>
68240+#include <linux/sched.h>
68241+#include <linux/gracl.h>
68242+#include <linux/grsecurity.h>
68243+#include <linux/grinternal.h>
68244+
68245+extern const char *captab_log[];
68246+extern int captab_log_entries;
68247+
68248+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
68249+{
68250+ struct acl_subject_label *curracl;
68251+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
68252+ kernel_cap_t cap_audit = __cap_empty_set;
68253+
68254+ if (!gr_acl_is_enabled())
68255+ return 1;
68256+
68257+ curracl = task->acl;
68258+
68259+ cap_drop = curracl->cap_lower;
68260+ cap_mask = curracl->cap_mask;
68261+ cap_audit = curracl->cap_invert_audit;
68262+
68263+ while ((curracl = curracl->parent_subject)) {
68264+		/* if the cap isn't specified in the current computed mask but is
68265+		   specified in the current level subject, then add it to the computed
68266+		   mask; if it is also lowered at that level, add it to the set of
68267+		   dropped capabilities, and inherit that level's audit inversion too
68268+		*/
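+		/* worked example (expository, not part of the original patch):
+		   if the leaf subject says nothing about CAP_NET_ADMIN (bit
+		   clear in cap_mask) while its parent raises it in both
+		   cap_mask and cap_lower, the walk copies the parent's
+		   verdict: the cap lands in cap_drop and the request is
+		   refused, and logged, below */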
68269+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
68270+ cap_raise(cap_mask, cap);
68271+ if (cap_raised(curracl->cap_lower, cap))
68272+ cap_raise(cap_drop, cap);
68273+ if (cap_raised(curracl->cap_invert_audit, cap))
68274+ cap_raise(cap_audit, cap);
68275+ }
68276+ }
68277+
68278+ if (!cap_raised(cap_drop, cap)) {
68279+ if (cap_raised(cap_audit, cap))
68280+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
68281+ return 1;
68282+ }
68283+
68284+ curracl = task->acl;
68285+
68286+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
68287+ && cap_raised(cred->cap_effective, cap)) {
68288+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
68289+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
68290+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
68291+ gr_to_filename(task->exec_file->f_path.dentry,
68292+ task->exec_file->f_path.mnt) : curracl->filename,
68293+ curracl->filename, 0UL,
68294+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
68295+ return 1;
68296+ }
68297+
68298+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
68299+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
68300+
68301+ return 0;
68302+}
68303+
68304+int
68305+gr_acl_is_capable(const int cap)
68306+{
68307+ return gr_task_acl_is_capable(current, current_cred(), cap);
68308+}
68309+
68310+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
68311+{
68312+ struct acl_subject_label *curracl;
68313+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
68314+
68315+ if (!gr_acl_is_enabled())
68316+ return 1;
68317+
68318+ curracl = task->acl;
68319+
68320+ cap_drop = curracl->cap_lower;
68321+ cap_mask = curracl->cap_mask;
68322+
68323+ while ((curracl = curracl->parent_subject)) {
68324+		/* if the cap isn't specified in the current computed mask but is
68325+		   specified in the current level subject, then add it to the computed
68326+		   mask; if it is also lowered at that level, add it to the set of
68327+		   dropped capabilities
68328+		*/
68329+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
68330+ cap_raise(cap_mask, cap);
68331+ if (cap_raised(curracl->cap_lower, cap))
68332+ cap_raise(cap_drop, cap);
68333+ }
68334+ }
68335+
68336+ if (!cap_raised(cap_drop, cap))
68337+ return 1;
68338+
68339+ return 0;
68340+}
68341+
68342+int
68343+gr_acl_is_capable_nolog(const int cap)
68344+{
68345+ return gr_task_acl_is_capable_nolog(current, cap);
68346+}
68347+
68348diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
68349new file mode 100644
68350index 0000000..ca25605
68351--- /dev/null
68352+++ b/grsecurity/gracl_compat.c
68353@@ -0,0 +1,270 @@
68354+#include <linux/kernel.h>
68355+#include <linux/gracl.h>
68356+#include <linux/compat.h>
68357+#include <linux/gracl_compat.h>
68358+
68359+#include <asm/uaccess.h>
68360+
68361+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
68362+{
68363+ struct gr_arg_wrapper_compat uwrapcompat;
68364+
68365+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
68366+ return -EFAULT;
68367+
68368+ if (((uwrapcompat.version != GRSECURITY_VERSION) &&
68369+ (uwrapcompat.version != 0x2901)) ||
68370+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
68371+ return -EINVAL;
68372+
68373+ uwrap->arg = compat_ptr(uwrapcompat.arg);
68374+ uwrap->version = uwrapcompat.version;
68375+ uwrap->size = sizeof(struct gr_arg);
68376+
68377+ return 0;
68378+}
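+/* expository note (not part of the original patch): this is the usual
+ * compat thunk pattern -- the 32-bit userspace layout is copied in whole,
+ * each compat_uptr_t member is widened to a native kernel pointer with
+ * compat_ptr(), and the size field is rewritten to the 64-bit structure
+ * size so the rest of the RBAC loader stays word-size agnostic.
+ */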
68379+
68380+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
68381+{
68382+ struct gr_arg_compat argcompat;
68383+
68384+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
68385+ return -EFAULT;
68386+
68387+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
68388+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
68389+ arg->role_db.num_roles = argcompat.role_db.num_roles;
68390+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
68391+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
68392+ arg->role_db.num_objects = argcompat.role_db.num_objects;
68393+
68394+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
68395+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
68396+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
68397+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
68398+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
68399+ arg->segv_device = argcompat.segv_device;
68400+ arg->segv_inode = argcompat.segv_inode;
68401+ arg->segv_uid = argcompat.segv_uid;
68402+ arg->num_sprole_pws = argcompat.num_sprole_pws;
68403+ arg->mode = argcompat.mode;
68404+
68405+ return 0;
68406+}
68407+
68408+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
68409+{
68410+ struct acl_object_label_compat objcompat;
68411+
68412+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
68413+ return -EFAULT;
68414+
68415+ obj->filename = compat_ptr(objcompat.filename);
68416+ obj->inode = objcompat.inode;
68417+ obj->device = objcompat.device;
68418+ obj->mode = objcompat.mode;
68419+
68420+ obj->nested = compat_ptr(objcompat.nested);
68421+ obj->globbed = compat_ptr(objcompat.globbed);
68422+
68423+ obj->prev = compat_ptr(objcompat.prev);
68424+ obj->next = compat_ptr(objcompat.next);
68425+
68426+ return 0;
68427+}
68428+
68429+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
68430+{
68431+ unsigned int i;
68432+ struct acl_subject_label_compat subjcompat;
68433+
68434+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
68435+ return -EFAULT;
68436+
68437+ subj->filename = compat_ptr(subjcompat.filename);
68438+ subj->inode = subjcompat.inode;
68439+ subj->device = subjcompat.device;
68440+ subj->mode = subjcompat.mode;
68441+ subj->cap_mask = subjcompat.cap_mask;
68442+ subj->cap_lower = subjcompat.cap_lower;
68443+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
68444+
68445+ for (i = 0; i < GR_NLIMITS; i++) {
68446+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
68447+ subj->res[i].rlim_cur = RLIM_INFINITY;
68448+ else
68449+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
68450+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
68451+ subj->res[i].rlim_max = RLIM_INFINITY;
68452+ else
68453+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
68454+ }
68455+ subj->resmask = subjcompat.resmask;
68456+
68457+ subj->user_trans_type = subjcompat.user_trans_type;
68458+ subj->group_trans_type = subjcompat.group_trans_type;
68459+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
68460+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
68461+ subj->user_trans_num = subjcompat.user_trans_num;
68462+ subj->group_trans_num = subjcompat.group_trans_num;
68463+
68464+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
68465+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
68466+ subj->ip_type = subjcompat.ip_type;
68467+ subj->ips = compat_ptr(subjcompat.ips);
68468+ subj->ip_num = subjcompat.ip_num;
68469+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
68470+
68471+ subj->crashes = subjcompat.crashes;
68472+ subj->expires = subjcompat.expires;
68473+
68474+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
68475+ subj->hash = compat_ptr(subjcompat.hash);
68476+ subj->prev = compat_ptr(subjcompat.prev);
68477+ subj->next = compat_ptr(subjcompat.next);
68478+
68479+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
68480+ subj->obj_hash_size = subjcompat.obj_hash_size;
68481+ subj->pax_flags = subjcompat.pax_flags;
68482+
68483+ return 0;
68484+}
68485+
68486+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
68487+{
68488+ struct acl_role_label_compat rolecompat;
68489+
68490+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
68491+ return -EFAULT;
68492+
68493+ role->rolename = compat_ptr(rolecompat.rolename);
68494+ role->uidgid = rolecompat.uidgid;
68495+ role->roletype = rolecompat.roletype;
68496+
68497+ role->auth_attempts = rolecompat.auth_attempts;
68498+ role->expires = rolecompat.expires;
68499+
68500+ role->root_label = compat_ptr(rolecompat.root_label);
68501+ role->hash = compat_ptr(rolecompat.hash);
68502+
68503+ role->prev = compat_ptr(rolecompat.prev);
68504+ role->next = compat_ptr(rolecompat.next);
68505+
68506+ role->transitions = compat_ptr(rolecompat.transitions);
68507+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
68508+ role->domain_children = compat_ptr(rolecompat.domain_children);
68509+ role->domain_child_num = rolecompat.domain_child_num;
68510+
68511+ role->umask = rolecompat.umask;
68512+
68513+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
68514+ role->subj_hash_size = rolecompat.subj_hash_size;
68515+
68516+ return 0;
68517+}
68518+
68519+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
68520+{
68521+ struct role_allowed_ip_compat roleip_compat;
68522+
68523+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
68524+ return -EFAULT;
68525+
68526+ roleip->addr = roleip_compat.addr;
68527+ roleip->netmask = roleip_compat.netmask;
68528+
68529+ roleip->prev = compat_ptr(roleip_compat.prev);
68530+ roleip->next = compat_ptr(roleip_compat.next);
68531+
68532+ return 0;
68533+}
68534+
68535+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
68536+{
68537+ struct role_transition_compat trans_compat;
68538+
68539+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
68540+ return -EFAULT;
68541+
68542+ trans->rolename = compat_ptr(trans_compat.rolename);
68543+
68544+ trans->prev = compat_ptr(trans_compat.prev);
68545+ trans->next = compat_ptr(trans_compat.next);
68546+
68547+ return 0;
68548+
68549+}
68550+
68551+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
68552+{
68553+ struct gr_hash_struct_compat hash_compat;
68554+
68555+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
68556+ return -EFAULT;
68557+
68558+ hash->table = compat_ptr(hash_compat.table);
68559+ hash->nametable = compat_ptr(hash_compat.nametable);
68560+ hash->first = compat_ptr(hash_compat.first);
68561+
68562+ hash->table_size = hash_compat.table_size;
68563+ hash->used_size = hash_compat.used_size;
68564+
68565+ hash->type = hash_compat.type;
68566+
68567+ return 0;
68568+}
68569+
68570+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
68571+{
68572+ compat_uptr_t ptrcompat;
68573+
68574+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
68575+ return -EFAULT;
68576+
68577+ *(void **)ptr = compat_ptr(ptrcompat);
68578+
68579+ return 0;
68580+}
68581+
68582+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
68583+{
68584+ struct acl_ip_label_compat ip_compat;
68585+
68586+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
68587+ return -EFAULT;
68588+
68589+ ip->iface = compat_ptr(ip_compat.iface);
68590+ ip->addr = ip_compat.addr;
68591+ ip->netmask = ip_compat.netmask;
68592+ ip->low = ip_compat.low;
68593+ ip->high = ip_compat.high;
68594+ ip->mode = ip_compat.mode;
68595+ ip->type = ip_compat.type;
68596+
68597+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
68598+
68599+ ip->prev = compat_ptr(ip_compat.prev);
68600+ ip->next = compat_ptr(ip_compat.next);
68601+
68602+ return 0;
68603+}
68604+
68605+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
68606+{
68607+ struct sprole_pw_compat pw_compat;
68608+
68609+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
68610+ return -EFAULT;
68611+
68612+ pw->rolename = compat_ptr(pw_compat.rolename);
68613+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
68614+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
68615+
68616+ return 0;
68617+}
68618+
68619+size_t get_gr_arg_wrapper_size_compat(void)
68620+{
68621+ return sizeof(struct gr_arg_wrapper_compat);
68622+}
68623+
68624diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
68625new file mode 100644
68626index 0000000..a89b1f4
68627--- /dev/null
68628+++ b/grsecurity/gracl_fs.c
68629@@ -0,0 +1,437 @@
68630+#include <linux/kernel.h>
68631+#include <linux/sched.h>
68632+#include <linux/types.h>
68633+#include <linux/fs.h>
68634+#include <linux/file.h>
68635+#include <linux/stat.h>
68636+#include <linux/grsecurity.h>
68637+#include <linux/grinternal.h>
68638+#include <linux/gracl.h>
68639+
68640+umode_t
68641+gr_acl_umask(void)
68642+{
68643+ if (unlikely(!gr_acl_is_enabled()))
68644+ return 0;
68645+
68646+ return current->role->umask;
68647+}
68648+
68649+__u32
68650+gr_acl_handle_hidden_file(const struct dentry * dentry,
68651+ const struct vfsmount * mnt)
68652+{
68653+ __u32 mode;
68654+
68655+ if (unlikely(d_is_negative(dentry)))
68656+ return GR_FIND;
68657+
68658+ mode =
68659+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
68660+
68661+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
68662+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
68663+ return mode;
68664+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
68665+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
68666+ return 0;
68667+ } else if (unlikely(!(mode & GR_FIND)))
68668+ return 0;
68669+
68670+ return GR_FIND;
68671+}
68672+
68673+__u32
68674+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
68675+ int acc_mode)
68676+{
68677+ __u32 reqmode = GR_FIND;
68678+ __u32 mode;
68679+
68680+ if (unlikely(d_is_negative(dentry)))
68681+ return reqmode;
68682+
68683+ if (acc_mode & MAY_APPEND)
68684+ reqmode |= GR_APPEND;
68685+ else if (acc_mode & MAY_WRITE)
68686+ reqmode |= GR_WRITE;
68687+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
68688+ reqmode |= GR_READ;
68689+
68690+ mode =
68691+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
68692+ mnt);
68693+
68694+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
68695+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
68696+ reqmode & GR_READ ? " reading" : "",
68697+ reqmode & GR_WRITE ? " writing" : reqmode &
68698+ GR_APPEND ? " appending" : "");
68699+ return reqmode;
68700+ } else
68701+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
68702+ {
68703+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
68704+ reqmode & GR_READ ? " reading" : "",
68705+ reqmode & GR_WRITE ? " writing" : reqmode &
68706+ GR_APPEND ? " appending" : "");
68707+ return 0;
68708+ } else if (unlikely((mode & reqmode) != reqmode))
68709+ return 0;
68710+
68711+ return reqmode;
68712+}
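+/* mapping example (expository, not part of the original patch): an
+ * open("/etc/shadow", O_RDWR | O_APPEND) arrives with acc_mode carrying
+ * MAY_READ, MAY_WRITE and MAY_APPEND; MAY_APPEND takes precedence over
+ * MAY_WRITE above, yielding reqmode = GR_FIND | GR_READ | GR_APPEND.
+ * Unless the subject's object for the file grants all three bits, the
+ * open is denied and, absent GR_SUPPRESS, logged.
+ */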
68713+
68714+__u32
68715+gr_acl_handle_creat(const struct dentry * dentry,
68716+ const struct dentry * p_dentry,
68717+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
68718+ const int imode)
68719+{
68720+ __u32 reqmode = GR_WRITE | GR_CREATE;
68721+ __u32 mode;
68722+
68723+ if (acc_mode & MAY_APPEND)
68724+ reqmode |= GR_APPEND;
68725+ // if a directory was required or the directory already exists, then
68726+ // don't count this open as a read
68727+ if ((acc_mode & MAY_READ) &&
68728+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
68729+ reqmode |= GR_READ;
68730+ if ((open_flags & O_CREAT) &&
68731+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
68732+ reqmode |= GR_SETID;
68733+
68734+ mode =
68735+ gr_check_create(dentry, p_dentry, p_mnt,
68736+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
68737+
68738+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
68739+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
68740+ reqmode & GR_READ ? " reading" : "",
68741+ reqmode & GR_WRITE ? " writing" : reqmode &
68742+ GR_APPEND ? " appending" : "");
68743+ return reqmode;
68744+ } else
68745+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
68746+ {
68747+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
68748+ reqmode & GR_READ ? " reading" : "",
68749+ reqmode & GR_WRITE ? " writing" : reqmode &
68750+ GR_APPEND ? " appending" : "");
68751+ return 0;
68752+ } else if (unlikely((mode & reqmode) != reqmode))
68753+ return 0;
68754+
68755+ return reqmode;
68756+}
68757+
68758+__u32
68759+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
68760+ const int fmode)
68761+{
68762+ __u32 mode, reqmode = GR_FIND;
68763+
68764+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
68765+ reqmode |= GR_EXEC;
68766+ if (fmode & S_IWOTH)
68767+ reqmode |= GR_WRITE;
68768+ if (fmode & S_IROTH)
68769+ reqmode |= GR_READ;
68770+
68771+ mode =
68772+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
68773+ mnt);
68774+
68775+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
68776+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
68777+ reqmode & GR_READ ? " reading" : "",
68778+ reqmode & GR_WRITE ? " writing" : "",
68779+ reqmode & GR_EXEC ? " executing" : "");
68780+ return reqmode;
68781+ } else
68782+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
68783+ {
68784+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
68785+ reqmode & GR_READ ? " reading" : "",
68786+ reqmode & GR_WRITE ? " writing" : "",
68787+ reqmode & GR_EXEC ? " executing" : "");
68788+ return 0;
68789+ } else if (unlikely((mode & reqmode) != reqmode))
68790+ return 0;
68791+
68792+ return reqmode;
68793+}
68794+
68795+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
68796+{
68797+ __u32 mode;
68798+
68799+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
68800+
68801+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
68802+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
68803+ return mode;
68804+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
68805+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
68806+ return 0;
68807+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
68808+ return 0;
68809+
68810+ return (reqmode);
68811+}
68812+
68813+__u32
68814+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
68815+{
68816+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
68817+}
68818+
68819+__u32
68820+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
68821+{
68822+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
68823+}
68824+
68825+__u32
68826+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
68827+{
68828+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
68829+}
68830+
68831+__u32
68832+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
68833+{
68834+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
68835+}
68836+
68837+__u32
68838+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
68839+ umode_t *modeptr)
68840+{
68841+ umode_t mode;
68842+
68843+ *modeptr &= ~gr_acl_umask();
68844+ mode = *modeptr;
68845+
68846+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
68847+ return 1;
68848+
68849+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
68850+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
68851+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
68852+ GR_CHMOD_ACL_MSG);
68853+ } else {
68854+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
68855+ }
68856+}
68857+
68858+__u32
68859+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
68860+{
68861+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
68862+}
68863+
68864+__u32
68865+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
68866+{
68867+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
68868+}
68869+
68870+__u32
68871+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
68872+{
68873+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
68874+}
68875+
68876+__u32
68877+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
68878+{
68879+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
68880+}
68881+
68882+__u32
68883+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
68884+{
68885+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
68886+ GR_UNIXCONNECT_ACL_MSG);
68887+}
68888+
68889+/* hardlinks require at minimum create and link permission;
68890+   any additional privilege required is based on the
68891+   privilege of the file being linked to
68892+*/
68893+__u32
68894+gr_acl_handle_link(const struct dentry * new_dentry,
68895+ const struct dentry * parent_dentry,
68896+ const struct vfsmount * parent_mnt,
68897+ const struct dentry * old_dentry,
68898+ const struct vfsmount * old_mnt, const struct filename *to)
68899+{
68900+ __u32 mode;
68901+ __u32 needmode = GR_CREATE | GR_LINK;
68902+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
68903+
68904+ mode =
68905+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
68906+ old_mnt);
68907+
68908+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
68909+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
68910+ return mode;
68911+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
68912+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
68913+ return 0;
68914+ } else if (unlikely((mode & needmode) != needmode))
68915+ return 0;
68916+
68917+ return 1;
68918+}
68919+
68920+__u32
68921+gr_acl_handle_symlink(const struct dentry * new_dentry,
68922+ const struct dentry * parent_dentry,
68923+ const struct vfsmount * parent_mnt, const struct filename *from)
68924+{
68925+ __u32 needmode = GR_WRITE | GR_CREATE;
68926+ __u32 mode;
68927+
68928+ mode =
68929+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
68930+ GR_CREATE | GR_AUDIT_CREATE |
68931+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
68932+
68933+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
68934+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
68935+ return mode;
68936+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
68937+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
68938+ return 0;
68939+ } else if (unlikely((mode & needmode) != needmode))
68940+ return 0;
68941+
68942+ return (GR_WRITE | GR_CREATE);
68943+}
68944+
68945+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
68946+{
68947+ __u32 mode;
68948+
68949+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
68950+
68951+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
68952+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
68953+ return mode;
68954+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
68955+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
68956+ return 0;
68957+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
68958+ return 0;
68959+
68960+ return (reqmode);
68961+}
68962+
68963+__u32
68964+gr_acl_handle_mknod(const struct dentry * new_dentry,
68965+ const struct dentry * parent_dentry,
68966+ const struct vfsmount * parent_mnt,
68967+ const int mode)
68968+{
68969+ __u32 reqmode = GR_WRITE | GR_CREATE;
68970+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
68971+ reqmode |= GR_SETID;
68972+
68973+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
68974+ reqmode, GR_MKNOD_ACL_MSG);
68975+}
68976+
68977+__u32
68978+gr_acl_handle_mkdir(const struct dentry *new_dentry,
68979+ const struct dentry *parent_dentry,
68980+ const struct vfsmount *parent_mnt)
68981+{
68982+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
68983+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
68984+}
68985+
68986+#define RENAME_CHECK_SUCCESS(old, new) \
68987+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
68988+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
68989+
68990+int
68991+gr_acl_handle_rename(struct dentry *new_dentry,
68992+ struct dentry *parent_dentry,
68993+ const struct vfsmount *parent_mnt,
68994+ struct dentry *old_dentry,
68995+ struct inode *old_parent_inode,
68996+ struct vfsmount *old_mnt, const struct filename *newname)
68997+{
68998+ __u32 comp1, comp2;
68999+ int error = 0;
69000+
69001+ if (unlikely(!gr_acl_is_enabled()))
69002+ return 0;
69003+
69004+ if (d_is_negative(new_dentry)) {
69005+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
69006+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
69007+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
69008+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
69009+ GR_DELETE | GR_AUDIT_DELETE |
69010+ GR_AUDIT_READ | GR_AUDIT_WRITE |
69011+ GR_SUPPRESS, old_mnt);
69012+ } else {
69013+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
69014+ GR_CREATE | GR_DELETE |
69015+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
69016+ GR_AUDIT_READ | GR_AUDIT_WRITE |
69017+ GR_SUPPRESS, parent_mnt);
69018+ comp2 =
69019+ gr_search_file(old_dentry,
69020+ GR_READ | GR_WRITE | GR_AUDIT_READ |
69021+ GR_DELETE | GR_AUDIT_DELETE |
69022+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
69023+ }
69024+
69025+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
69026+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
69027+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
69028+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
69029+ && !(comp2 & GR_SUPPRESS)) {
69030+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
69031+ error = -EACCES;
69032+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
69033+ error = -EACCES;
69034+
69035+ return error;
69036+}
69037+
69038+void
69039+gr_acl_handle_exit(void)
69040+{
69041+ u16 id;
69042+ char *rolename;
69043+
69044+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
69045+ !(current->role->roletype & GR_ROLE_PERSIST))) {
69046+ id = current->acl_role_id;
69047+ rolename = current->role->rolename;
69048+ gr_set_acls(1);
69049+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
69050+ }
69051+
69052+ gr_put_exec_file(current);
69053+ return;
69054+}
69055+
69056+int
69057+gr_acl_handle_procpidmem(const struct task_struct *task)
69058+{
69059+ if (unlikely(!gr_acl_is_enabled()))
69060+ return 0;
69061+
69062+ if (task != current && task->acl->mode & GR_PROTPROCFD)
69063+ return -EACCES;
69064+
69065+ return 0;
69066+}
69067diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
69068new file mode 100644
69069index 0000000..f056b81
69070--- /dev/null
69071+++ b/grsecurity/gracl_ip.c
69072@@ -0,0 +1,386 @@
69073+#include <linux/kernel.h>
69074+#include <asm/uaccess.h>
69075+#include <asm/errno.h>
69076+#include <net/sock.h>
69077+#include <linux/file.h>
69078+#include <linux/fs.h>
69079+#include <linux/net.h>
69080+#include <linux/in.h>
69081+#include <linux/skbuff.h>
69082+#include <linux/ip.h>
69083+#include <linux/udp.h>
69084+#include <linux/types.h>
69085+#include <linux/sched.h>
69086+#include <linux/netdevice.h>
69087+#include <linux/inetdevice.h>
69088+#include <linux/gracl.h>
69089+#include <linux/grsecurity.h>
69090+#include <linux/grinternal.h>
69091+
69092+#define GR_BIND 0x01
69093+#define GR_CONNECT 0x02
69094+#define GR_INVERT 0x04
69095+#define GR_BINDOVERRIDE 0x08
69096+#define GR_CONNECTOVERRIDE 0x10
69097+#define GR_SOCK_FAMILY 0x20
69098+
69099+static const char * gr_protocols[IPPROTO_MAX] = {
69100+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
69101+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
69102+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
69103+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
69104+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
69105+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
69106+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
69107+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
69108+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
69109+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
69110+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
69111+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
69112+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
69113+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
69114+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
69115+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
69116+	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
69117+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
69118+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
69119+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
69120+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
69121+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
69122+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
69123+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
69124+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
69125+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
69126+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
69127+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
69128+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
69129+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
69130+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
69131+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
69132+ };
69133+
69134+static const char * gr_socktypes[SOCK_MAX] = {
69135+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
69136+ "unknown:7", "unknown:8", "unknown:9", "packet"
69137+ };
69138+
69139+static const char * gr_sockfamilies[AF_MAX+1] = {
69140+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
69141+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
69142+	"econet", "atmsvc", "rds", "sna", "irda", "pppox", "wanpipe", "llc", "fam_27", "fam_28",
69143+	"tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "caif"
69144+ };
69145+
69146+const char *
69147+gr_proto_to_name(unsigned char proto)
69148+{
69149+ return gr_protocols[proto];
69150+}
69151+
69152+const char *
69153+gr_socktype_to_name(unsigned char type)
69154+{
69155+ return gr_socktypes[type];
69156+}
69157+
69158+const char *
69159+gr_sockfamily_to_name(unsigned char family)
69160+{
69161+ return gr_sockfamilies[family];
69162+}
69163+
69164+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
69165+
69166+int
69167+gr_search_socket(const int domain, const int type, const int protocol)
69168+{
69169+ struct acl_subject_label *curr;
69170+ const struct cred *cred = current_cred();
69171+
69172+ if (unlikely(!gr_acl_is_enabled()))
69173+ goto exit;
69174+
69175+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
69176+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
69177+ goto exit; // let the kernel handle it
69178+
69179+ curr = current->acl;
69180+
69181+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
69182+ /* the family is allowed, if this is PF_INET allow it only if
69183+ the extra sock type/protocol checks pass */
69184+ if (domain == PF_INET)
69185+ goto inet_check;
69186+ goto exit;
69187+ } else {
69188+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
69189+ __u32 fakeip = 0;
69190+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
69191+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
69192+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
69193+ gr_to_filename(current->exec_file->f_path.dentry,
69194+ current->exec_file->f_path.mnt) :
69195+ curr->filename, curr->filename,
69196+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
69197+ &current->signal->saved_ip);
69198+ goto exit;
69199+ }
69200+ goto exit_fail;
69201+ }
69202+
69203+inet_check:
69204+ /* the rest of this checking is for IPv4 only */
69205+ if (!curr->ips)
69206+ goto exit;
69207+
69208+ if ((curr->ip_type & (1U << type)) &&
69209+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
69210+ goto exit;
69211+
69212+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
69213+		/* we don't place acls on raw sockets, and sometimes
69214+		   dgram/ip sockets are opened for ioctl rather than
69215+		   bind/connect, so we fake a bind/connect learn log */
69216+ if (type == SOCK_RAW || type == SOCK_PACKET) {
69217+ __u32 fakeip = 0;
69218+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
69219+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
69220+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
69221+ gr_to_filename(current->exec_file->f_path.dentry,
69222+ current->exec_file->f_path.mnt) :
69223+ curr->filename, curr->filename,
69224+ &fakeip, 0, type,
69225+ protocol, GR_CONNECT, &current->signal->saved_ip);
69226+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
69227+ __u32 fakeip = 0;
69228+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
69229+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
69230+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
69231+ gr_to_filename(current->exec_file->f_path.dentry,
69232+ current->exec_file->f_path.mnt) :
69233+ curr->filename, curr->filename,
69234+ &fakeip, 0, type,
69235+ protocol, GR_BIND, &current->signal->saved_ip);
69236+ }
69237+ /* we'll log when they use connect or bind */
69238+ goto exit;
69239+ }
69240+
69241+exit_fail:
69242+ if (domain == PF_INET)
69243+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
69244+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
69245+ else if (rcu_access_pointer(net_families[domain]) != NULL)
69246+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
69247+ gr_socktype_to_name(type), protocol);
69248+
69249+ return 0;
69250+exit:
69251+ return 1;
69252+}
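+/* bitmask example (expository, not part of the original patch): allowed
+ * address families live in a bit array, one bit per AF_* value.  For
+ * PF_INET6 (10) the test above reads sock_families[10 / 32] and checks
+ * bit (1U << (10 % 32)), i.e. bit 10 of the first word; ip_proto is
+ * indexed the same way by IPPROTO_* number.
+ */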
69253+
69254+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
69255+{
69256+ if ((ip->mode & mode) &&
69257+ (ip_port >= ip->low) &&
69258+ (ip_port <= ip->high) &&
69259+ ((ntohl(ip_addr) & our_netmask) ==
69260+ (ntohl(our_addr) & our_netmask))
69261+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
69262+ && (ip->type & (1U << type))) {
69263+ if (ip->mode & GR_INVERT)
69264+ return 2; // specifically denied
69265+ else
69266+ return 1; // allowed
69267+ }
69268+
69269+ return 0; // not specifically allowed, may continue parsing
69270+}
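+/* worked example (illustrative, not part of the original patch): for a
+ * policy line like "connect 192.168.1.0/24:1024-65535 stream tcp", a TCP
+ * connect() to 192.168.1.5:8080 matches the mode, the port range, the
+ * masked address, the protocol bit and the type bit, so this returns 1
+ * (allowed); the same entry with GR_INVERT set would return 2 and deny
+ * the call outright.
+ */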
69271+
69272+static int
69273+gr_search_connectbind(const int full_mode, struct sock *sk,
69274+ struct sockaddr_in *addr, const int type)
69275+{
69276+ char iface[IFNAMSIZ] = {0};
69277+ struct acl_subject_label *curr;
69278+ struct acl_ip_label *ip;
69279+ struct inet_sock *isk;
69280+ struct net_device *dev;
69281+ struct in_device *idev;
69282+ unsigned long i;
69283+ int ret;
69284+ int mode = full_mode & (GR_BIND | GR_CONNECT);
69285+ __u32 ip_addr = 0;
69286+ __u32 our_addr;
69287+ __u32 our_netmask;
69288+ char *p;
69289+ __u16 ip_port = 0;
69290+ const struct cred *cred = current_cred();
69291+
69292+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
69293+ return 0;
69294+
69295+ curr = current->acl;
69296+ isk = inet_sk(sk);
69297+
69298+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
69299+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
69300+ addr->sin_addr.s_addr = curr->inaddr_any_override;
69301+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
69302+ struct sockaddr_in saddr;
69303+ int err;
69304+
69305+ saddr.sin_family = AF_INET;
69306+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
69307+ saddr.sin_port = isk->inet_sport;
69308+
69309+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
69310+ if (err)
69311+ return err;
69312+
69313+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
69314+ if (err)
69315+ return err;
69316+ }
69317+
69318+ if (!curr->ips)
69319+ return 0;
69320+
69321+ ip_addr = addr->sin_addr.s_addr;
69322+ ip_port = ntohs(addr->sin_port);
69323+
69324+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
69325+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
69326+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
69327+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
69328+ gr_to_filename(current->exec_file->f_path.dentry,
69329+ current->exec_file->f_path.mnt) :
69330+ curr->filename, curr->filename,
69331+ &ip_addr, ip_port, type,
69332+ sk->sk_protocol, mode, &current->signal->saved_ip);
69333+ return 0;
69334+ }
69335+
69336+ for (i = 0; i < curr->ip_num; i++) {
69337+ ip = *(curr->ips + i);
69338+ if (ip->iface != NULL) {
69339+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
69340+ p = strchr(iface, ':');
69341+ if (p != NULL)
69342+ *p = '\0';
69343+ dev = dev_get_by_name(sock_net(sk), iface);
69344+ if (dev == NULL)
69345+ continue;
69346+ idev = in_dev_get(dev);
69347+ if (idev == NULL) {
69348+ dev_put(dev);
69349+ continue;
69350+ }
69351+ rcu_read_lock();
69352+ for_ifa(idev) {
69353+ if (!strcmp(ip->iface, ifa->ifa_label)) {
69354+ our_addr = ifa->ifa_address;
69355+ our_netmask = 0xffffffff;
69356+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
69357+ if (ret == 1) {
69358+ rcu_read_unlock();
69359+ in_dev_put(idev);
69360+ dev_put(dev);
69361+ return 0;
69362+ } else if (ret == 2) {
69363+ rcu_read_unlock();
69364+ in_dev_put(idev);
69365+ dev_put(dev);
69366+ goto denied;
69367+ }
69368+ }
69369+ } endfor_ifa(idev);
69370+ rcu_read_unlock();
69371+ in_dev_put(idev);
69372+ dev_put(dev);
69373+ } else {
69374+ our_addr = ip->addr;
69375+ our_netmask = ip->netmask;
69376+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
69377+ if (ret == 1)
69378+ return 0;
69379+ else if (ret == 2)
69380+ goto denied;
69381+ }
69382+ }
69383+
69384+denied:
69385+ if (mode == GR_BIND)
69386+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
69387+ else if (mode == GR_CONNECT)
69388+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
69389+
69390+ return -EACCES;
69391+}
69392+
69393+int
69394+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
69395+{
69396+ /* always allow disconnection of dgram sockets with connect */
69397+ if (addr->sin_family == AF_UNSPEC)
69398+ return 0;
69399+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
69400+}
69401+
69402+int
69403+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
69404+{
69405+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
69406+}
69407+
69408+int gr_search_listen(struct socket *sock)
69409+{
69410+ struct sock *sk = sock->sk;
69411+ struct sockaddr_in addr;
69412+
69413+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
69414+ addr.sin_port = inet_sk(sk)->inet_sport;
69415+
69416+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
69417+}
69418+
69419+int gr_search_accept(struct socket *sock)
69420+{
69421+ struct sock *sk = sock->sk;
69422+ struct sockaddr_in addr;
69423+
69424+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
69425+ addr.sin_port = inet_sk(sk)->inet_sport;
69426+
69427+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
69428+}
69429+
69430+int
69431+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
69432+{
69433+ if (addr)
69434+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
69435+ else {
69436+ struct sockaddr_in sin;
69437+ const struct inet_sock *inet = inet_sk(sk);
69438+
69439+ sin.sin_addr.s_addr = inet->inet_daddr;
69440+ sin.sin_port = inet->inet_dport;
69441+
69442+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
69443+ }
69444+}
69445+
69446+int
69447+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
69448+{
69449+ struct sockaddr_in sin;
69450+
69451+ if (unlikely(skb->len < sizeof (struct udphdr)))
69452+ return 0; // skip this packet
69453+
69454+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
69455+ sin.sin_port = udp_hdr(skb)->source;
69456+
69457+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
69458+}
69459diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
69460new file mode 100644
69461index 0000000..25f54ef
69462--- /dev/null
69463+++ b/grsecurity/gracl_learn.c
69464@@ -0,0 +1,207 @@
69465+#include <linux/kernel.h>
69466+#include <linux/mm.h>
69467+#include <linux/sched.h>
69468+#include <linux/poll.h>
69469+#include <linux/string.h>
69470+#include <linux/file.h>
69471+#include <linux/types.h>
69472+#include <linux/vmalloc.h>
69473+#include <linux/grinternal.h>
69474+
69475+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
69476+ size_t count, loff_t *ppos);
69477+extern int gr_acl_is_enabled(void);
69478+
69479+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
69480+static int gr_learn_attached;
69481+
69482+/* use a 512k buffer */
69483+#define LEARN_BUFFER_SIZE (512 * 1024)
69484+
69485+static DEFINE_SPINLOCK(gr_learn_lock);
69486+static DEFINE_MUTEX(gr_learn_user_mutex);
69487+
69488+/* we need to maintain two buffers, so that the kernel context of grlearn
69489+   uses a mutex around the userspace copying, and the other kernel contexts
69490+   use a spinlock when copying into the buffer, since they cannot sleep
69491+*/
69492+static char *learn_buffer;
69493+static char *learn_buffer_user;
69494+static int learn_buffer_len;
69495+static int learn_buffer_user_len;
69496+
69497+static ssize_t
69498+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
69499+{
69500+ DECLARE_WAITQUEUE(wait, current);
69501+ ssize_t retval = 0;
69502+
69503+ add_wait_queue(&learn_wait, &wait);
69504+ set_current_state(TASK_INTERRUPTIBLE);
69505+ do {
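69506+		/* take both locks; if data is pending we break out of the loop
69506+		   still holding them, so the copy below runs under gr_learn_user_mutex */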
69506+ mutex_lock(&gr_learn_user_mutex);
69507+ spin_lock(&gr_learn_lock);
69508+ if (learn_buffer_len)
69509+ break;
69510+ spin_unlock(&gr_learn_lock);
69511+ mutex_unlock(&gr_learn_user_mutex);
69512+ if (file->f_flags & O_NONBLOCK) {
69513+ retval = -EAGAIN;
69514+ goto out;
69515+ }
69516+ if (signal_pending(current)) {
69517+ retval = -ERESTARTSYS;
69518+ goto out;
69519+ }
69520+
69521+ schedule();
69522+ } while (1);
69523+
69524+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
69525+ learn_buffer_user_len = learn_buffer_len;
69526+ retval = learn_buffer_len;
69527+ learn_buffer_len = 0;
69528+
69529+ spin_unlock(&gr_learn_lock);
69530+
69531+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
69532+ retval = -EFAULT;
69533+
69534+ mutex_unlock(&gr_learn_user_mutex);
69535+out:
69536+ set_current_state(TASK_RUNNING);
69537+ remove_wait_queue(&learn_wait, &wait);
69538+ return retval;
69539+}
69540+
69541+static unsigned int
69542+poll_learn(struct file * file, poll_table * wait)
69543+{
69544+ poll_wait(file, &learn_wait, wait);
69545+
69546+ if (learn_buffer_len)
69547+ return (POLLIN | POLLRDNORM);
69548+
69549+ return 0;
69550+}
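
Taken together, read_learn() and poll_learn() implement the usual blocking character-device protocol: poll() reports POLLIN once learn_buffer_len is non-zero, while read() either blocks, returns -EAGAIN for non-blocking opens, or drains the buffer in one shot. A minimal sketch of a userspace consumer for such an interface follows; grsecurity's grlearn reads from /dev/grsec, but treat the device path and buffer size here as illustrative assumptions.

    /* sketch: poll-driven consumer of a learn-style character device */
    #include <errno.h>
    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            static char buf[512 * 1024];    /* matches LEARN_BUFFER_SIZE above */
            struct pollfd pfd;
            ssize_t n;

            pfd.fd = open("/dev/grsec", O_RDONLY | O_NONBLOCK);
            if (pfd.fd < 0) {
                    perror("open");
                    return 1;
            }
            pfd.events = POLLIN;

            for (;;) {
                    /* sleep in poll() instead of read(), since the fd is
                       non-blocking and read() would return EAGAIN */
                    if (poll(&pfd, 1, -1) < 0)
                            break;
                    n = read(pfd.fd, buf, sizeof(buf));
                    if (n > 0)
                            fwrite(buf, 1, (size_t)n, stdout);
                    else if (n < 0 && errno != EAGAIN)
                            break;
            }
            close(pfd.fd);
            return 0;
    }
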
69551+
69552+void
69553+gr_clear_learn_entries(void)
69554+{
69555+ char *tmp;
69556+
69557+ mutex_lock(&gr_learn_user_mutex);
69558+ spin_lock(&gr_learn_lock);
69559+ tmp = learn_buffer;
69560+ learn_buffer = NULL;
69561+ spin_unlock(&gr_learn_lock);
69562+ if (tmp)
69563+ vfree(tmp);
69564+ if (learn_buffer_user != NULL) {
69565+ vfree(learn_buffer_user);
69566+ learn_buffer_user = NULL;
69567+ }
69568+ learn_buffer_len = 0;
69569+ mutex_unlock(&gr_learn_user_mutex);
69570+
69571+ return;
69572+}
69573+
69574+void
69575+gr_add_learn_entry(const char *fmt, ...)
69576+{
69577+ va_list args;
69578+ unsigned int len;
69579+
69580+ if (!gr_learn_attached)
69581+ return;
69582+
69583+ spin_lock(&gr_learn_lock);
69584+
69585+ /* leave a gap at the end so we know when it's "full" but don't have to
69586+ compute the exact length of the string we're trying to append
69587+ */
69588+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
69589+ spin_unlock(&gr_learn_lock);
69590+ wake_up_interruptible(&learn_wait);
69591+ return;
69592+ }
69593+ if (learn_buffer == NULL) {
69594+ spin_unlock(&gr_learn_lock);
69595+ return;
69596+ }
69597+
69598+ va_start(args, fmt);
69599+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
69600+ va_end(args);
69601+
69602+ learn_buffer_len += len + 1;
69603+
69604+ spin_unlock(&gr_learn_lock);
69605+ wake_up_interruptible(&learn_wait);
69606+
69607+ return;
69608+}
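
The high-watermark check above is a cheap way to bound appends without measuring each entry first: once less than 16KB of the 512KB buffer remains, the buffer is declared full and the reader is woken to drain it. A standalone sketch of the same technique, with sizes shrunk for illustration:

    /* sketch: append with a high watermark instead of exact length checks */
    #include <stdarg.h>
    #include <stdio.h>

    #define BUF_SIZE 256
    #define BUF_GAP   64    /* refuse appends once less than this remains */

    static char buffer[BUF_SIZE];
    static size_t buffer_len;

    static int buf_append(const char *fmt, ...)
    {
            va_list args;
            int len;

            if (buffer_len > BUF_SIZE - BUF_GAP)
                    return -1;      /* "full": time to drain the buffer */

            va_start(args, fmt);
            len = vsnprintf(buffer + buffer_len, BUF_SIZE - buffer_len, fmt, args);
            va_end(args);

            /* keep the NUL between entries, as the code above does (len + 1) */
            buffer_len += (size_t)len + 1;
            return 0;
    }

    int main(void)
    {
            while (buf_append("entry at %zu", buffer_len) == 0)
                    ;
            printf("stopped with %zu of %d bytes used\n", buffer_len, BUF_SIZE);
            return 0;
    }
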
69609+
69610+static int
69611+open_learn(struct inode *inode, struct file *file)
69612+{
69613+ if (file->f_mode & FMODE_READ && gr_learn_attached)
69614+ return -EBUSY;
69615+ if (file->f_mode & FMODE_READ) {
69616+ int retval = 0;
69617+ mutex_lock(&gr_learn_user_mutex);
69618+ if (learn_buffer == NULL)
69619+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
69620+ if (learn_buffer_user == NULL)
69621+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
69622+ if (learn_buffer == NULL) {
69623+ retval = -ENOMEM;
69624+ goto out_error;
69625+ }
69626+ if (learn_buffer_user == NULL) {
69627+ retval = -ENOMEM;
69628+ goto out_error;
69629+ }
69630+ learn_buffer_len = 0;
69631+ learn_buffer_user_len = 0;
69632+ gr_learn_attached = 1;
69633+out_error:
69634+ mutex_unlock(&gr_learn_user_mutex);
69635+ return retval;
69636+ }
69637+ return 0;
69638+}
69639+
69640+static int
69641+close_learn(struct inode *inode, struct file *file)
69642+{
69643+ if (file->f_mode & FMODE_READ) {
69644+ char *tmp = NULL;
69645+ mutex_lock(&gr_learn_user_mutex);
69646+ spin_lock(&gr_learn_lock);
69647+ tmp = learn_buffer;
69648+ learn_buffer = NULL;
69649+ spin_unlock(&gr_learn_lock);
69650+ if (tmp)
69651+ vfree(tmp);
69652+ if (learn_buffer_user != NULL) {
69653+ vfree(learn_buffer_user);
69654+ learn_buffer_user = NULL;
69655+ }
69656+ learn_buffer_len = 0;
69657+ learn_buffer_user_len = 0;
69658+ gr_learn_attached = 0;
69659+ mutex_unlock(&gr_learn_user_mutex);
69660+ }
69661+
69662+ return 0;
69663+}
69664+
69665+const struct file_operations grsec_fops = {
69666+ .read = read_learn,
69667+ .write = write_grsec_handler,
69668+ .open = open_learn,
69669+ .release = close_learn,
69670+ .poll = poll_learn,
69671+};
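
grsec_fops bundles the learn and policy-loading callbacks into a single struct file_operations; elsewhere in this patch the table is attached to the /dev/grsec device node. For reference, a minimal sketch of how such a fops table is typically exposed through the misc-device API (the device name and callbacks here are illustrative, not how grsecurity registers its node):

    #include <linux/fs.h>
    #include <linux/miscdevice.h>
    #include <linux/module.h>

    static ssize_t demo_read(struct file *file, char __user *buf,
                             size_t count, loff_t *ppos)
    {
            return 0;       /* always EOF in this sketch */
    }

    static const struct file_operations demo_fops = {
            .owner = THIS_MODULE,
            .read  = demo_read,
    };

    static struct miscdevice demo_dev = {
            .minor = MISC_DYNAMIC_MINOR,
            .name  = "demo",        /* appears as /dev/demo */
            .fops  = &demo_fops,
    };

    static int __init demo_init(void)
    {
            return misc_register(&demo_dev);
    }

    static void __exit demo_exit(void)
    {
            misc_deregister(&demo_dev);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
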
69672diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
69673new file mode 100644
69674index 0000000..361a099
69675--- /dev/null
69676+++ b/grsecurity/gracl_policy.c
69677@@ -0,0 +1,1782 @@
69678+#include <linux/kernel.h>
69679+#include <linux/module.h>
69680+#include <linux/sched.h>
69681+#include <linux/mm.h>
69682+#include <linux/file.h>
69683+#include <linux/fs.h>
69684+#include <linux/namei.h>
69685+#include <linux/mount.h>
69686+#include <linux/tty.h>
69687+#include <linux/proc_fs.h>
69688+#include <linux/lglock.h>
69689+#include <linux/slab.h>
69690+#include <linux/vmalloc.h>
69691+#include <linux/types.h>
69692+#include <linux/sysctl.h>
69693+#include <linux/netdevice.h>
69694+#include <linux/ptrace.h>
69695+#include <linux/gracl.h>
69696+#include <linux/gralloc.h>
69697+#include <linux/security.h>
69698+#include <linux/grinternal.h>
69699+#include <linux/pid_namespace.h>
69700+#include <linux/stop_machine.h>
69701+#include <linux/fdtable.h>
69702+#include <linux/percpu.h>
69703+#include <linux/lglock.h>
69704+#include <linux/hugetlb.h>
69705+#include <linux/posix-timers.h>
69706+#include "../fs/mount.h"
69707+
69708+#include <asm/uaccess.h>
69709+#include <asm/errno.h>
69710+#include <asm/mman.h>
69711+
69712+extern struct gr_policy_state *polstate;
69713+
69714+#define FOR_EACH_ROLE_START(role) \
69715+ role = polstate->role_list; \
69716+ while (role) {
69717+
69718+#define FOR_EACH_ROLE_END(role) \
69719+ role = role->prev; \
69720+ }
69721+
69722+struct path gr_real_root;
69723+
69724+extern struct gr_alloc_state *current_alloc_state;
69725+
69726+u16 acl_sp_role_value;
69727+
69728+static DEFINE_MUTEX(gr_dev_mutex);
69729+
69730+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
69731+extern void gr_clear_learn_entries(void);
69732+
69733+static struct gr_arg gr_usermode;
69734+static unsigned char gr_system_salt[GR_SALT_LEN];
69735+static unsigned char gr_system_sum[GR_SHA_LEN];
69736+
69737+static unsigned int gr_auth_attempts = 0;
69738+static unsigned long gr_auth_expires = 0UL;
69739+
69740+struct acl_object_label *fakefs_obj_rw;
69741+struct acl_object_label *fakefs_obj_rwx;
69742+
69743+extern int gr_init_uidset(void);
69744+extern void gr_free_uidset(void);
69745+extern void gr_remove_uid(uid_t uid);
69746+extern int gr_find_uid(uid_t uid);
69747+
69748+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename);
69749+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
69750+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
69751+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
69752+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
69753+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
69754+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
69755+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
69756+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
69757+extern struct acl_subject_label *lookup_acl_subj_label(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
69758+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
69759+extern void assign_special_role(const char *rolename);
69760+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
69761+extern int gr_rbac_disable(void *unused);
69762+extern void gr_enable_rbac_system(void);
69763+
69764+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
69765+{
69766+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
69767+ return -EFAULT;
69768+
69769+ return 0;
69770+}
69771+
69772+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
69773+{
69774+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
69775+ return -EFAULT;
69776+
69777+ return 0;
69778+}
69779+
69780+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
69781+{
69782+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
69783+ return -EFAULT;
69784+
69785+ return 0;
69786+}
69787+
69788+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
69789+{
69790+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
69791+ return -EFAULT;
69792+
69793+ return 0;
69794+}
69795+
69796+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
69797+{
69798+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
69799+ return -EFAULT;
69800+
69801+ return 0;
69802+}
69803+
69804+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
69805+{
69806+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
69807+ return -EFAULT;
69808+
69809+ return 0;
69810+}
69811+
69812+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
69813+{
69814+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
69815+ return -EFAULT;
69816+
69817+ return 0;
69818+}
69819+
69820+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
69821+{
69822+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
69823+ return -EFAULT;
69824+
69825+ return 0;
69826+}
69827+
69828+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
69829+{
69830+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
69831+ return -EFAULT;
69832+
69833+ return 0;
69834+}
69835+
69836+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
69837+{
69838+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
69839+ return -EFAULT;
69840+
69841+ if (((uwrap->version != GRSECURITY_VERSION) &&
69842+ (uwrap->version != 0x2901)) ||
69843+ (uwrap->size != sizeof(struct gr_arg)))
69844+ return -EINVAL;
69845+
69846+ return 0;
69847+}
69848+
69849+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
69850+{
69851+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
69852+ return -EFAULT;
69853+
69854+ return 0;
69855+}
69856+
69857+static size_t get_gr_arg_wrapper_size_normal(void)
69858+{
69859+ return sizeof(struct gr_arg_wrapper);
69860+}
69861+
69862+#ifdef CONFIG_COMPAT
69863+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
69864+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
69865+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
69866+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
69867+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
69868+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
69869+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
69870+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
69871+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
69872+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
69873+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
69874+extern size_t get_gr_arg_wrapper_size_compat(void);
69875+
69876+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
69877+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
69878+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
69879+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
69880+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
69881+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
69882+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
69883+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
69884+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
69885+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
69886+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
69887+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
69888+
69889+#else
69890+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
69891+#define copy_gr_arg copy_gr_arg_normal
69892+#define copy_gr_hash_struct copy_gr_hash_struct_normal
69893+#define copy_acl_object_label copy_acl_object_label_normal
69894+#define copy_acl_subject_label copy_acl_subject_label_normal
69895+#define copy_acl_role_label copy_acl_role_label_normal
69896+#define copy_acl_ip_label copy_acl_ip_label_normal
69897+#define copy_pointer_from_array copy_pointer_from_array_normal
69898+#define copy_sprole_pw copy_sprole_pw_normal
69899+#define copy_role_transition copy_role_transition_normal
69900+#define copy_role_allowed_ip copy_role_allowed_ip_normal
69901+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
69902+#endif
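
When CONFIG_COMPAT is enabled, each copy helper above becomes a function pointer that write_grsec_handler() (further below) points at either the native or the 32-bit compat implementation per call; the pointers live in __read_only memory and are only flipped between pax_open_kernel()/pax_close_kernel(). Without compat support the indirection disappears entirely behind the _normal macros. A condensed userspace sketch of this dual compile-time/run-time dispatch pattern, with made-up names:

    #include <stdio.h>

    static int copy_thing_normal(int x) { return x; }

    #ifdef HAVE_COMPAT
    static int copy_thing_compat(int x) { return x * 2; }

    /* run-time selectable; the kernel variant above is additionally
       write-protected (__read_only) outside the open/close window */
    static int (*copy_thing)(int) = copy_thing_normal;

    static void select_abi(int is_compat)
    {
            copy_thing = is_compat ? copy_thing_compat : copy_thing_normal;
    }
    #else
    /* compat off: zero cost, the call binds directly to the _normal helper */
    #define copy_thing copy_thing_normal
    #endif

    int main(void)
    {
    #ifdef HAVE_COMPAT
            select_abi(0);
    #endif
            printf("%d\n", copy_thing(21));
            return 0;
    }
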
69903+
69904+static struct acl_subject_label *
69905+lookup_subject_map(const struct acl_subject_label *userp)
69906+{
69907+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
69908+ struct subject_map *match;
69909+
69910+ match = polstate->subj_map_set.s_hash[index];
69911+
69912+ while (match && match->user != userp)
69913+ match = match->next;
69914+
69915+ if (match != NULL)
69916+ return match->kernel;
69917+ else
69918+ return NULL;
69919+}
69920+
69921+static void
69922+insert_subj_map_entry(struct subject_map *subjmap)
69923+{
69924+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
69925+ struct subject_map **curr;
69926+
69927+ subjmap->prev = NULL;
69928+
69929+ curr = &polstate->subj_map_set.s_hash[index];
69930+ if (*curr != NULL)
69931+ (*curr)->prev = subjmap;
69932+
69933+ subjmap->next = *curr;
69934+ *curr = subjmap;
69935+
69936+ return;
69937+}
69938+
69939+static void
69940+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
69941+{
69942+ unsigned int index =
69943+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
69944+ struct acl_role_label **curr;
69945+ struct acl_role_label *tmp, *tmp2;
69946+
69947+ curr = &polstate->acl_role_set.r_hash[index];
69948+
69949+ /* simple case, slot is empty, just set it to our role */
69950+ if (*curr == NULL) {
69951+ *curr = role;
69952+ } else {
69953+ /* example:
69954+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
69955+ 2 -> 3
69956+ */
69957+ /* first check to see if we can already be reached via this slot */
69958+ tmp = *curr;
69959+ while (tmp && tmp != role)
69960+ tmp = tmp->next;
69961+ if (tmp == role) {
69962+ /* we don't need to add ourselves to this slot's chain */
69963+ return;
69964+ }
69965+		/* we need to add ourselves to this chain; there are two cases */
69966+ if (role->next == NULL) {
69967+ /* simple case, append the current chain to our role */
69968+ role->next = *curr;
69969+ *curr = role;
69970+ } else {
69971+ /* 1 -> 2 -> 3 -> 4
69972+ 2 -> 3 -> 4
69973+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
69974+ */
69975+ /* trickier case: walk our role's chain until we find
69976+ the role for the start of the current slot's chain */
69977+ tmp = role;
69978+ tmp2 = *curr;
69979+ while (tmp->next && tmp->next != tmp2)
69980+ tmp = tmp->next;
69981+ if (tmp->next == tmp2) {
69982+ /* from example above, we found 3, so just
69983+ replace this slot's chain with ours */
69984+ *curr = role;
69985+ } else {
69986+ /* we didn't find a subset of our role's chain
69987+ in the current slot's chain, so append their
69988+ chain to ours, and set us as the first role in
69989+ the slot's chain
69990+
69991+ we could fold this case with the case above,
69992+ but making it explicit for clarity
69993+ */
69994+ tmp->next = tmp2;
69995+ *curr = role;
69996+ }
69997+ }
69998+ }
69999+
70000+ return;
70001+}
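
The slot-insertion logic above relies on chains through a hash slot sharing suffixes: a role that matches several uid/gid slots sits on several chains at once, so a naive append could create cycles or duplicate reachability. A self-contained sketch of the same suffix-aware merge over a plain singly linked list (nothing grsecurity-specific is assumed):

    #include <stdio.h>

    struct node {
            int id;
            struct node *next;
    };

    /* merge n's chain into the chain at *slot, mirroring the three
       cases handled in __insert_acl_role_label() above */
    static void chain_insert(struct node **slot, struct node *n)
    {
            struct node *tmp;

            if (*slot == NULL) {            /* empty slot */
                    *slot = n;
                    return;
            }
            for (tmp = *slot; tmp; tmp = tmp->next)
                    if (tmp == n)           /* already reachable from this slot */
                            return;
            if (n->next == NULL) {          /* single node: prepend */
                    n->next = *slot;
                    *slot = n;
                    return;
            }
            /* walk our chain looking for the head of the slot's chain */
            for (tmp = n; tmp->next && tmp->next != *slot; tmp = tmp->next)
                    ;
            if (tmp->next != *slot)
                    tmp->next = *slot;      /* no shared suffix: append theirs */
            *slot = n;                      /* either way, we now head the slot */
    }

    int main(void)
    {
            struct node n3 = { 3, NULL }, n2 = { 2, &n3 }, n1 = { 1, &n2 };
            struct node *slot = &n2;        /* slot already chains 2 -> 3 */
            struct node *p;

            chain_insert(&slot, &n1);       /* insert chain 1 -> 2 -> 3 */
            for (p = slot; p; p = p->next)
                    printf("%d ", p->id);   /* prints: 1 2 3 */
            printf("\n");
            return 0;
    }
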
70002+
70003+static void
70004+insert_acl_role_label(struct acl_role_label *role)
70005+{
70006+ int i;
70007+
70008+ if (polstate->role_list == NULL) {
70009+ polstate->role_list = role;
70010+ role->prev = NULL;
70011+ } else {
70012+ role->prev = polstate->role_list;
70013+ polstate->role_list = role;
70014+ }
70015+
70016+ /* used for hash chains */
70017+ role->next = NULL;
70018+
70019+ if (role->roletype & GR_ROLE_DOMAIN) {
70020+ for (i = 0; i < role->domain_child_num; i++)
70021+ __insert_acl_role_label(role, role->domain_children[i]);
70022+ } else
70023+ __insert_acl_role_label(role, role->uidgid);
70024+}
70025+
70026+static int
70027+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
70028+{
70029+ struct name_entry **curr, *nentry;
70030+ struct inodev_entry *ientry;
70031+ unsigned int len = strlen(name);
70032+ unsigned int key = full_name_hash(name, len);
70033+ unsigned int index = key % polstate->name_set.n_size;
70034+
70035+ curr = &polstate->name_set.n_hash[index];
70036+
70037+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
70038+ curr = &((*curr)->next);
70039+
70040+ if (*curr != NULL)
70041+ return 1;
70042+
70043+ nentry = acl_alloc(sizeof (struct name_entry));
70044+ if (nentry == NULL)
70045+ return 0;
70046+ ientry = acl_alloc(sizeof (struct inodev_entry));
70047+ if (ientry == NULL)
70048+ return 0;
70049+ ientry->nentry = nentry;
70050+
70051+ nentry->key = key;
70052+ nentry->name = name;
70053+ nentry->inode = inode;
70054+ nentry->device = device;
70055+ nentry->len = len;
70056+ nentry->deleted = deleted;
70057+
70058+ nentry->prev = NULL;
70059+ curr = &polstate->name_set.n_hash[index];
70060+ if (*curr != NULL)
70061+ (*curr)->prev = nentry;
70062+ nentry->next = *curr;
70063+ *curr = nentry;
70064+
70065+ /* insert us into the table searchable by inode/dev */
70066+ __insert_inodev_entry(polstate, ientry);
70067+
70068+ return 1;
70069+}
70070+
70071+/* we allocate chained hash tables, so the optimal size is where the load factor lambda ~ 1 */
70072+
70073+static void *
70074+create_table(__u32 * len, int elementsize)
70075+{
70076+ unsigned int table_sizes[] = {
70077+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
70078+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
70079+ 4194301, 8388593, 16777213, 33554393, 67108859
70080+ };
70081+ void *newtable = NULL;
70082+ unsigned int pwr = 0;
70083+
70084+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
70085+ table_sizes[pwr] <= *len)
70086+ pwr++;
70087+
70088+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
70089+ return newtable;
70090+
70091+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
70092+ newtable =
70093+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
70094+ else
70095+ newtable = vmalloc(table_sizes[pwr] * elementsize);
70096+
70097+ *len = table_sizes[pwr];
70098+
70099+ return newtable;
70100+}
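
create_table() rounds the requested entry count up to the next prime in a fixed list before allocating the bucket array, which keeps chained lookups near one probe on average. A quick sketch of just the rounding rule:

    #include <stdio.h>

    static const unsigned int table_sizes[] = {
            7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
    };

    /* smallest listed prime strictly greater than len, or 0 if len
       exceeds everything in the table (the "return NULL" case above) */
    static unsigned int round_up_size(unsigned int len)
    {
            unsigned int pwr = 0;

            while (pwr < (sizeof(table_sizes) / sizeof(table_sizes[0])) - 1 &&
                   table_sizes[pwr] <= len)
                    pwr++;
            if (table_sizes[pwr] <= len)
                    return 0;
            return table_sizes[pwr];
    }

    int main(void)
    {
            printf("%u\n", round_up_size(100));     /* prints 127 */
            printf("%u\n", round_up_size(1021));    /* prints 2039 */
            return 0;
    }
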
70101+
70102+static int
70103+init_variables(const struct gr_arg *arg, bool reload)
70104+{
70105+ struct task_struct *reaper = init_pid_ns.child_reaper;
70106+ unsigned int stacksize;
70107+
70108+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
70109+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
70110+ polstate->name_set.n_size = arg->role_db.num_objects;
70111+ polstate->inodev_set.i_size = arg->role_db.num_objects;
70112+
70113+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
70114+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
70115+ return 1;
70116+
70117+ if (!reload) {
70118+ if (!gr_init_uidset())
70119+ return 1;
70120+ }
70121+
70122+ /* set up the stack that holds allocation info */
70123+
70124+ stacksize = arg->role_db.num_pointers + 5;
70125+
70126+ if (!acl_alloc_stack_init(stacksize))
70127+ return 1;
70128+
70129+ if (!reload) {
70130+ /* grab reference for the real root dentry and vfsmount */
70131+ get_fs_root(reaper->fs, &gr_real_root);
70132+
70133+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70134+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
70135+#endif
70136+
70137+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
70138+ if (fakefs_obj_rw == NULL)
70139+ return 1;
70140+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
70141+
70142+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
70143+ if (fakefs_obj_rwx == NULL)
70144+ return 1;
70145+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
70146+ }
70147+
70148+ polstate->subj_map_set.s_hash =
70149+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
70150+ polstate->acl_role_set.r_hash =
70151+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
70152+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
70153+ polstate->inodev_set.i_hash =
70154+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
70155+
70156+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
70157+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
70158+ return 1;
70159+
70160+ memset(polstate->subj_map_set.s_hash, 0,
70161+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
70162+ memset(polstate->acl_role_set.r_hash, 0,
70163+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
70164+ memset(polstate->name_set.n_hash, 0,
70165+ sizeof (struct name_entry *) * polstate->name_set.n_size);
70166+ memset(polstate->inodev_set.i_hash, 0,
70167+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
70168+
70169+ return 0;
70170+}
70171+
70172+/* free information not needed after startup;
70173+   currently this contains the user->kernel pointer mappings for subjects
70174+*/
70175+
70176+static void
70177+free_init_variables(void)
70178+{
70179+ __u32 i;
70180+
70181+ if (polstate->subj_map_set.s_hash) {
70182+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
70183+ if (polstate->subj_map_set.s_hash[i]) {
70184+ kfree(polstate->subj_map_set.s_hash[i]);
70185+ polstate->subj_map_set.s_hash[i] = NULL;
70186+ }
70187+ }
70188+
70189+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
70190+ PAGE_SIZE)
70191+ kfree(polstate->subj_map_set.s_hash);
70192+ else
70193+ vfree(polstate->subj_map_set.s_hash);
70194+ }
70195+
70196+ return;
70197+}
70198+
70199+static void
70200+free_variables(bool reload)
70201+{
70202+ struct acl_subject_label *s;
70203+ struct acl_role_label *r;
70204+ struct task_struct *task, *task2;
70205+ unsigned int x;
70206+
70207+ if (!reload) {
70208+ gr_clear_learn_entries();
70209+
70210+ read_lock(&tasklist_lock);
70211+ do_each_thread(task2, task) {
70212+ task->acl_sp_role = 0;
70213+ task->acl_role_id = 0;
70214+ task->inherited = 0;
70215+ task->acl = NULL;
70216+ task->role = NULL;
70217+ } while_each_thread(task2, task);
70218+ read_unlock(&tasklist_lock);
70219+
70220+ kfree(fakefs_obj_rw);
70221+ fakefs_obj_rw = NULL;
70222+ kfree(fakefs_obj_rwx);
70223+ fakefs_obj_rwx = NULL;
70224+
70225+ /* release the reference to the real root dentry and vfsmount */
70226+ path_put(&gr_real_root);
70227+ memset(&gr_real_root, 0, sizeof(gr_real_root));
70228+ }
70229+
70230+ /* free all object hash tables */
70231+
70232+ FOR_EACH_ROLE_START(r)
70233+ if (r->subj_hash == NULL)
70234+ goto next_role;
70235+ FOR_EACH_SUBJECT_START(r, s, x)
70236+ if (s->obj_hash == NULL)
70237+ break;
70238+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
70239+ kfree(s->obj_hash);
70240+ else
70241+ vfree(s->obj_hash);
70242+ FOR_EACH_SUBJECT_END(s, x)
70243+ FOR_EACH_NESTED_SUBJECT_START(r, s)
70244+ if (s->obj_hash == NULL)
70245+ break;
70246+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
70247+ kfree(s->obj_hash);
70248+ else
70249+ vfree(s->obj_hash);
70250+ FOR_EACH_NESTED_SUBJECT_END(s)
70251+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
70252+ kfree(r->subj_hash);
70253+ else
70254+ vfree(r->subj_hash);
70255+ r->subj_hash = NULL;
70256+next_role:
70257+ FOR_EACH_ROLE_END(r)
70258+
70259+ acl_free_all();
70260+
70261+ if (polstate->acl_role_set.r_hash) {
70262+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
70263+ PAGE_SIZE)
70264+ kfree(polstate->acl_role_set.r_hash);
70265+ else
70266+ vfree(polstate->acl_role_set.r_hash);
70267+ }
70268+ if (polstate->name_set.n_hash) {
70269+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
70270+ PAGE_SIZE)
70271+ kfree(polstate->name_set.n_hash);
70272+ else
70273+ vfree(polstate->name_set.n_hash);
70274+ }
70275+
70276+ if (polstate->inodev_set.i_hash) {
70277+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
70278+ PAGE_SIZE)
70279+ kfree(polstate->inodev_set.i_hash);
70280+ else
70281+ vfree(polstate->inodev_set.i_hash);
70282+ }
70283+
70284+ if (!reload)
70285+ gr_free_uidset();
70286+
70287+ memset(&polstate->name_set, 0, sizeof (struct name_db));
70288+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
70289+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
70290+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
70291+
70292+ polstate->default_role = NULL;
70293+ polstate->kernel_role = NULL;
70294+ polstate->role_list = NULL;
70295+
70296+ return;
70297+}
70298+
70299+static struct acl_subject_label *
70300+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
70301+
70302+static int alloc_and_copy_string(char **name, unsigned int maxlen)
70303+{
70304+ unsigned int len = strnlen_user(*name, maxlen);
70305+ char *tmp;
70306+
70307+ if (!len || len >= maxlen)
70308+ return -EINVAL;
70309+
70310+ if ((tmp = (char *) acl_alloc(len)) == NULL)
70311+ return -ENOMEM;
70312+
70313+ if (copy_from_user(tmp, *name, len))
70314+ return -EFAULT;
70315+
70316+ tmp[len-1] = '\0';
70317+ *name = tmp;
70318+
70319+ return 0;
70320+}
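
alloc_and_copy_string() is the standard bounded pattern for pulling a NUL-terminated string in from userspace: strnlen_user() measures it (including the terminator), empty or over-long results are rejected, the bytes are copied, and termination is forced regardless of what userspace supplied. A userspace analogue, assuming strnlen() and malloc() stand in for the user-access helpers and acl_alloc():

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* bounded strdup mirroring the -EINVAL/-ENOMEM cases above */
    static char *copy_bounded(const char *src, size_t maxlen)
    {
            /* + 1 because strnlen_user(), unlike strnlen(), counts the NUL */
            size_t len = strnlen(src, maxlen) + 1;
            char *tmp;

            if (len == 1 || len >= maxlen) {
                    errno = EINVAL;
                    return NULL;
            }
            tmp = malloc(len);
            if (tmp == NULL)
                    return NULL;    /* errno is ENOMEM */
            memcpy(tmp, src, len);
            tmp[len - 1] = '\0';    /* force termination, as above */
            return tmp;
    }

    int main(void)
    {
            char *s = copy_bounded("/bin/sh", 4096);

            if (s != NULL) {
                    puts(s);
                    free(s);
            }
            return 0;
    }
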
70321+
70322+static int
70323+copy_user_glob(struct acl_object_label *obj)
70324+{
70325+ struct acl_object_label *g_tmp, **guser;
70326+ int error;
70327+
70328+ if (obj->globbed == NULL)
70329+ return 0;
70330+
70331+ guser = &obj->globbed;
70332+ while (*guser) {
70333+ g_tmp = (struct acl_object_label *)
70334+ acl_alloc(sizeof (struct acl_object_label));
70335+ if (g_tmp == NULL)
70336+ return -ENOMEM;
70337+
70338+ if (copy_acl_object_label(g_tmp, *guser))
70339+ return -EFAULT;
70340+
70341+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
70342+ if (error)
70343+ return error;
70344+
70345+ *guser = g_tmp;
70346+ guser = &(g_tmp->next);
70347+ }
70348+
70349+ return 0;
70350+}
70351+
70352+static int
70353+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
70354+ struct acl_role_label *role)
70355+{
70356+ struct acl_object_label *o_tmp;
70357+ int ret;
70358+
70359+ while (userp) {
70360+ if ((o_tmp = (struct acl_object_label *)
70361+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
70362+ return -ENOMEM;
70363+
70364+ if (copy_acl_object_label(o_tmp, userp))
70365+ return -EFAULT;
70366+
70367+ userp = o_tmp->prev;
70368+
70369+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
70370+ if (ret)
70371+ return ret;
70372+
70373+ insert_acl_obj_label(o_tmp, subj);
70374+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
70375+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
70376+ return -ENOMEM;
70377+
70378+ ret = copy_user_glob(o_tmp);
70379+ if (ret)
70380+ return ret;
70381+
70382+ if (o_tmp->nested) {
70383+ int already_copied;
70384+
70385+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
70386+ if (IS_ERR(o_tmp->nested))
70387+ return PTR_ERR(o_tmp->nested);
70388+
70389+ /* insert into nested subject list if we haven't copied this one yet
70390+ to prevent duplicate entries */
70391+ if (!already_copied) {
70392+ o_tmp->nested->next = role->hash->first;
70393+ role->hash->first = o_tmp->nested;
70394+ }
70395+ }
70396+ }
70397+
70398+ return 0;
70399+}
70400+
70401+static __u32
70402+count_user_subjs(struct acl_subject_label *userp)
70403+{
70404+ struct acl_subject_label s_tmp;
70405+ __u32 num = 0;
70406+
70407+ while (userp) {
70408+ if (copy_acl_subject_label(&s_tmp, userp))
70409+ break;
70410+
70411+		userp = s_tmp.prev;
70411+		num++;
70412+ }
70413+
70414+ return num;
70415+}
70416+
70417+static int
70418+copy_user_allowedips(struct acl_role_label *rolep)
70419+{
70420+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
70421+
70422+ ruserip = rolep->allowed_ips;
70423+
70424+ while (ruserip) {
70425+ rlast = rtmp;
70426+
70427+ if ((rtmp = (struct role_allowed_ip *)
70428+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
70429+ return -ENOMEM;
70430+
70431+ if (copy_role_allowed_ip(rtmp, ruserip))
70432+ return -EFAULT;
70433+
70434+ ruserip = rtmp->prev;
70435+
70436+ if (!rlast) {
70437+ rtmp->prev = NULL;
70438+ rolep->allowed_ips = rtmp;
70439+ } else {
70440+ rlast->next = rtmp;
70441+ rtmp->prev = rlast;
70442+ }
70443+
70444+ if (!ruserip)
70445+ rtmp->next = NULL;
70446+ }
70447+
70448+ return 0;
70449+}
70450+
70451+static int
70452+copy_user_transitions(struct acl_role_label *rolep)
70453+{
70454+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
70455+ int error;
70456+
70457+ rusertp = rolep->transitions;
70458+
70459+ while (rusertp) {
70460+ rlast = rtmp;
70461+
70462+ if ((rtmp = (struct role_transition *)
70463+ acl_alloc(sizeof (struct role_transition))) == NULL)
70464+ return -ENOMEM;
70465+
70466+ if (copy_role_transition(rtmp, rusertp))
70467+ return -EFAULT;
70468+
70469+ rusertp = rtmp->prev;
70470+
70471+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
70472+ if (error)
70473+ return error;
70474+
70475+ if (!rlast) {
70476+ rtmp->prev = NULL;
70477+ rolep->transitions = rtmp;
70478+ } else {
70479+ rlast->next = rtmp;
70480+ rtmp->prev = rlast;
70481+ }
70482+
70483+ if (!rusertp)
70484+ rtmp->next = NULL;
70485+ }
70486+
70487+ return 0;
70488+}
70489+
70490+static __u32 count_user_objs(const struct acl_object_label __user *userp)
70491+{
70492+ struct acl_object_label o_tmp;
70493+ __u32 num = 0;
70494+
70495+ while (userp) {
70496+ if (copy_acl_object_label(&o_tmp, userp))
70497+ break;
70498+
70499+ userp = o_tmp.prev;
70500+ num++;
70501+ }
70502+
70503+ return num;
70504+}
70505+
70506+static struct acl_subject_label *
70507+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
70508+{
70509+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
70510+ __u32 num_objs;
70511+ struct acl_ip_label **i_tmp, *i_utmp2;
70512+ struct gr_hash_struct ghash;
70513+ struct subject_map *subjmap;
70514+ unsigned int i_num;
70515+ int err;
70516+
70517+ if (already_copied != NULL)
70518+ *already_copied = 0;
70519+
70520+ s_tmp = lookup_subject_map(userp);
70521+
70522+	/* we've already copied this subject into the kernel; just return
70523+	   the reference to it rather than copying it over again
70524+	*/
70525+ if (s_tmp) {
70526+ if (already_copied != NULL)
70527+ *already_copied = 1;
70528+ return(s_tmp);
70529+ }
70530+
70531+ if ((s_tmp = (struct acl_subject_label *)
70532+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
70533+ return ERR_PTR(-ENOMEM);
70534+
70535+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
70536+ if (subjmap == NULL)
70537+ return ERR_PTR(-ENOMEM);
70538+
70539+ subjmap->user = userp;
70540+ subjmap->kernel = s_tmp;
70541+ insert_subj_map_entry(subjmap);
70542+
70543+ if (copy_acl_subject_label(s_tmp, userp))
70544+ return ERR_PTR(-EFAULT);
70545+
70546+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
70547+ if (err)
70548+ return ERR_PTR(err);
70549+
70550+ if (!strcmp(s_tmp->filename, "/"))
70551+ role->root_label = s_tmp;
70552+
70553+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
70554+ return ERR_PTR(-EFAULT);
70555+
70556+ /* copy user and group transition tables */
70557+
70558+ if (s_tmp->user_trans_num) {
70559+ uid_t *uidlist;
70560+
70561+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
70562+ if (uidlist == NULL)
70563+ return ERR_PTR(-ENOMEM);
70564+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
70565+ return ERR_PTR(-EFAULT);
70566+
70567+ s_tmp->user_transitions = uidlist;
70568+ }
70569+
70570+ if (s_tmp->group_trans_num) {
70571+ gid_t *gidlist;
70572+
70573+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
70574+ if (gidlist == NULL)
70575+ return ERR_PTR(-ENOMEM);
70576+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
70577+ return ERR_PTR(-EFAULT);
70578+
70579+ s_tmp->group_transitions = gidlist;
70580+ }
70581+
70582+ /* set up object hash table */
70583+ num_objs = count_user_objs(ghash.first);
70584+
70585+ s_tmp->obj_hash_size = num_objs;
70586+ s_tmp->obj_hash =
70587+ (struct acl_object_label **)
70588+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
70589+
70590+ if (!s_tmp->obj_hash)
70591+ return ERR_PTR(-ENOMEM);
70592+
70593+ memset(s_tmp->obj_hash, 0,
70594+ s_tmp->obj_hash_size *
70595+ sizeof (struct acl_object_label *));
70596+
70597+ /* add in objects */
70598+ err = copy_user_objs(ghash.first, s_tmp, role);
70599+
70600+ if (err)
70601+ return ERR_PTR(err);
70602+
70603+ /* set pointer for parent subject */
70604+ if (s_tmp->parent_subject) {
70605+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
70606+
70607+ if (IS_ERR(s_tmp2))
70608+ return s_tmp2;
70609+
70610+ s_tmp->parent_subject = s_tmp2;
70611+ }
70612+
70613+ /* add in ip acls */
70614+
70615+ if (!s_tmp->ip_num) {
70616+ s_tmp->ips = NULL;
70617+ goto insert;
70618+ }
70619+
70620+ i_tmp =
70621+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
70622+ sizeof (struct acl_ip_label *));
70623+
70624+ if (!i_tmp)
70625+ return ERR_PTR(-ENOMEM);
70626+
70627+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
70628+ *(i_tmp + i_num) =
70629+ (struct acl_ip_label *)
70630+ acl_alloc(sizeof (struct acl_ip_label));
70631+ if (!*(i_tmp + i_num))
70632+ return ERR_PTR(-ENOMEM);
70633+
70634+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
70635+ return ERR_PTR(-EFAULT);
70636+
70637+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
70638+ return ERR_PTR(-EFAULT);
70639+
70640+ if ((*(i_tmp + i_num))->iface == NULL)
70641+ continue;
70642+
70643+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
70644+ if (err)
70645+ return ERR_PTR(err);
70646+ }
70647+
70648+ s_tmp->ips = i_tmp;
70649+
70650+insert:
70651+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
70652+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
70653+ return ERR_PTR(-ENOMEM);
70654+
70655+ return s_tmp;
70656+}
70657+
70658+static int
70659+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
70660+{
70661+ struct acl_subject_label s_pre;
70662+ struct acl_subject_label * ret;
70663+ int err;
70664+
70665+ while (userp) {
70666+ if (copy_acl_subject_label(&s_pre, userp))
70667+ return -EFAULT;
70668+
70669+ ret = do_copy_user_subj(userp, role, NULL);
70670+
70671+ err = PTR_ERR(ret);
70672+ if (IS_ERR(ret))
70673+ return err;
70674+
70675+ insert_acl_subj_label(ret, role);
70676+
70677+ userp = s_pre.prev;
70678+ }
70679+
70680+ return 0;
70681+}
70682+
70683+static int
70684+copy_user_acl(struct gr_arg *arg)
70685+{
70686+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
70687+ struct acl_subject_label *subj_list;
70688+ struct sprole_pw *sptmp;
70689+ struct gr_hash_struct *ghash;
70690+ uid_t *domainlist;
70691+ unsigned int r_num;
70692+ int err = 0;
70693+ __u16 i;
70694+ __u32 num_subjs;
70695+
70696+ /* we need a default and kernel role */
70697+ if (arg->role_db.num_roles < 2)
70698+ return -EINVAL;
70699+
70700+ /* copy special role authentication info from userspace */
70701+
70702+ polstate->num_sprole_pws = arg->num_sprole_pws;
70703+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
70704+
70705+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
70706+ return -ENOMEM;
70707+
70708+ for (i = 0; i < polstate->num_sprole_pws; i++) {
70709+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
70710+ if (!sptmp)
70711+ return -ENOMEM;
70712+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
70713+ return -EFAULT;
70714+
70715+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
70716+ if (err)
70717+ return err;
70718+
70719+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70720+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
70721+#endif
70722+
70723+ polstate->acl_special_roles[i] = sptmp;
70724+ }
70725+
70726+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
70727+
70728+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
70729+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
70730+
70731+ if (!r_tmp)
70732+ return -ENOMEM;
70733+
70734+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
70735+ return -EFAULT;
70736+
70737+ if (copy_acl_role_label(r_tmp, r_utmp2))
70738+ return -EFAULT;
70739+
70740+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
70741+ if (err)
70742+ return err;
70743+
70744+ if (!strcmp(r_tmp->rolename, "default")
70745+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
70746+ polstate->default_role = r_tmp;
70747+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
70748+ polstate->kernel_role = r_tmp;
70749+ }
70750+
70751+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
70752+ return -ENOMEM;
70753+
70754+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
70755+ return -EFAULT;
70756+
70757+ r_tmp->hash = ghash;
70758+
70759+ num_subjs = count_user_subjs(r_tmp->hash->first);
70760+
70761+ r_tmp->subj_hash_size = num_subjs;
70762+ r_tmp->subj_hash =
70763+ (struct acl_subject_label **)
70764+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
70765+
70766+ if (!r_tmp->subj_hash)
70767+ return -ENOMEM;
70768+
70769+ err = copy_user_allowedips(r_tmp);
70770+ if (err)
70771+ return err;
70772+
70773+ /* copy domain info */
70774+ if (r_tmp->domain_children != NULL) {
70775+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
70776+ if (domainlist == NULL)
70777+ return -ENOMEM;
70778+
70779+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
70780+ return -EFAULT;
70781+
70782+ r_tmp->domain_children = domainlist;
70783+ }
70784+
70785+ err = copy_user_transitions(r_tmp);
70786+ if (err)
70787+ return err;
70788+
70789+ memset(r_tmp->subj_hash, 0,
70790+ r_tmp->subj_hash_size *
70791+ sizeof (struct acl_subject_label *));
70792+
70793+ /* acquire the list of subjects, then NULL out
70794+ the list prior to parsing the subjects for this role,
70795+ as during this parsing the list is replaced with a list
70796+ of *nested* subjects for the role
70797+ */
70798+ subj_list = r_tmp->hash->first;
70799+
70800+ /* set nested subject list to null */
70801+ r_tmp->hash->first = NULL;
70802+
70803+ err = copy_user_subjs(subj_list, r_tmp);
70804+
70805+ if (err)
70806+ return err;
70807+
70808+ insert_acl_role_label(r_tmp);
70809+ }
70810+
70811+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
70812+ return -EINVAL;
70813+
70814+ return err;
70815+}
70816+
70817+static int gracl_reload_apply_policies(void *reload)
70818+{
70819+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
70820+ struct task_struct *task, *task2;
70821+ struct acl_role_label *role, *rtmp;
70822+ struct acl_subject_label *subj;
70823+ const struct cred *cred;
70824+ int role_applied;
70825+ int ret = 0;
70826+
70827+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
70828+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
70829+
70830+ /* first make sure we'll be able to apply the new policy cleanly */
70831+ do_each_thread(task2, task) {
70832+ if (task->exec_file == NULL)
70833+ continue;
70834+ role_applied = 0;
70835+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
70836+ /* preserve special roles */
70837+ FOR_EACH_ROLE_START(role)
70838+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
70839+ rtmp = task->role;
70840+ task->role = role;
70841+ role_applied = 1;
70842+ break;
70843+ }
70844+ FOR_EACH_ROLE_END(role)
70845+ }
70846+ if (!role_applied) {
70847+ cred = __task_cred(task);
70848+ rtmp = task->role;
70849+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
70850+ }
70851+		/* this handles non-nested inherited subjects; nested subjects will still
70852+		   be dropped currently */
70853+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
70854+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL);
70855+ /* change the role back so that we've made no modifications to the policy */
70856+ task->role = rtmp;
70857+
70858+ if (subj == NULL || task->tmpacl == NULL) {
70859+ ret = -EINVAL;
70860+ goto out;
70861+ }
70862+ } while_each_thread(task2, task);
70863+
70864+ /* now actually apply the policy */
70865+
70866+ do_each_thread(task2, task) {
70867+ if (task->exec_file) {
70868+ role_applied = 0;
70869+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
70870+ /* preserve special roles */
70871+ FOR_EACH_ROLE_START(role)
70872+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
70873+ task->role = role;
70874+ role_applied = 1;
70875+ break;
70876+ }
70877+ FOR_EACH_ROLE_END(role)
70878+ }
70879+ if (!role_applied) {
70880+ cred = __task_cred(task);
70881+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
70882+ }
70883+			/* this handles non-nested inherited subjects; nested subjects will still
70884+			   be dropped currently */
70885+ if (!reload_state->oldmode && task->inherited)
70886+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
70887+ else {
70888+ /* looked up and tagged to the task previously */
70889+ subj = task->tmpacl;
70890+ }
70891+ /* subj will be non-null */
70892+ __gr_apply_subject_to_task(polstate, task, subj);
70893+ if (reload_state->oldmode) {
70894+ task->acl_role_id = 0;
70895+ task->acl_sp_role = 0;
70896+ task->inherited = 0;
70897+ }
70898+ } else {
70899+ // it's a kernel process
70900+ task->role = polstate->kernel_role;
70901+ task->acl = polstate->kernel_role->root_label;
70902+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
70903+ task->acl->mode &= ~GR_PROCFIND;
70904+#endif
70905+ }
70906+ } while_each_thread(task2, task);
70907+
70908+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
70909+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
70910+
70911+out:
70912+
70913+ return ret;
70914+}
70915+
70916+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
70917+{
70918+ struct gr_reload_state new_reload_state = { };
70919+ int err;
70920+
70921+ new_reload_state.oldpolicy_ptr = polstate;
70922+ new_reload_state.oldalloc_ptr = current_alloc_state;
70923+ new_reload_state.oldmode = oldmode;
70924+
70925+ current_alloc_state = &new_reload_state.newalloc;
70926+ polstate = &new_reload_state.newpolicy;
70927+
70928+	/* everything relevant is now saved off; copy in the new policy */
70929+ if (init_variables(args, true)) {
70930+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
70931+ err = -ENOMEM;
70932+ goto error;
70933+ }
70934+
70935+ err = copy_user_acl(args);
70936+ free_init_variables();
70937+ if (err)
70938+ goto error;
70939+	/* the new policy is copied in, with the old policy available via saved_state.
70940+	   First go through applying roles, making sure to preserve special roles,
70941+	   then apply new subjects, making sure to preserve inherited and nested subjects,
70942+	   though currently only inherited subjects will be preserved
70943+	*/
70944+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
70945+ if (err)
70946+ goto error;
70947+
70948+ /* we've now applied the new policy, so restore the old policy state to free it */
70949+ polstate = &new_reload_state.oldpolicy;
70950+ current_alloc_state = &new_reload_state.oldalloc;
70951+ free_variables(true);
70952+
70953+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
70954+ to running_polstate/current_alloc_state inside stop_machine
70955+ */
70956+ err = 0;
70957+ goto out;
70958+error:
70959+	/* if loading the new policy fails, we simply keep the previous
70960+	   policy set around
70961+	*/
70962+ free_variables(true);
70963+
70964+ /* doesn't affect runtime, but maintains consistent state */
70965+out:
70966+ polstate = new_reload_state.oldpolicy_ptr;
70967+ current_alloc_state = new_reload_state.oldalloc_ptr;
70968+
70969+ return err;
70970+}
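
gracl_reload() follows a build-aside/commit/rollback shape: the new policy is constructed under fresh polstate/alloc pointers, validated and applied atomically inside stop_machine(), and only then is the old state freed; on any failure the freshly built state is torn down and the old pointers are restored untouched. A heavily simplified sketch of that shape, assuming an ordinary heap in place of the ACL allocator and no concurrency:

    #include <stdio.h>
    #include <stdlib.h>

    struct state { int version; };

    static struct state *live;      /* stands in for polstate */

    static int reload(int new_version)
    {
            struct state *old = live;
            struct state *fresh = malloc(sizeof(*fresh));

            if (fresh == NULL)
                    return -1;
            fresh->version = new_version;

            if (new_version < 0) {  /* "validation" failed: keep old state */
                    free(fresh);
                    return -1;
            }
            live = fresh;           /* commit (stop_machine() in the kernel) */
            free(old);              /* old state freed only after the swap */
            return 0;
    }

    int main(void)
    {
            live = calloc(1, sizeof(*live));
            if (live == NULL)
                    return 1;
            reload(2);
            printf("live version: %d\n", live->version);
            free(live);
            return 0;
    }
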
70971+
70972+static int
70973+gracl_init(struct gr_arg *args)
70974+{
70975+ int error = 0;
70976+
70977+ memcpy(&gr_system_salt, args->salt, sizeof(gr_system_salt));
70978+ memcpy(&gr_system_sum, args->sum, sizeof(gr_system_sum));
70979+
70980+ if (init_variables(args, false)) {
70981+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
70982+ error = -ENOMEM;
70983+ goto out;
70984+ }
70985+
70986+ error = copy_user_acl(args);
70987+ free_init_variables();
70988+ if (error)
70989+ goto out;
70990+
70991+ error = gr_set_acls(0);
70992+ if (error)
70993+ goto out;
70994+
70995+ gr_enable_rbac_system();
70996+
70997+ return 0;
70998+
70999+out:
71000+ free_variables(false);
71001+ return error;
71002+}
71003+
71004+static int
71005+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
71006+ unsigned char **sum)
71007+{
71008+ struct acl_role_label *r;
71009+ struct role_allowed_ip *ipp;
71010+ struct role_transition *trans;
71011+ unsigned int i;
71012+ int found = 0;
71013+ u32 curr_ip = current->signal->curr_ip;
71014+
71015+ current->signal->saved_ip = curr_ip;
71016+
71017+ /* check transition table */
71018+
71019+ for (trans = current->role->transitions; trans; trans = trans->next) {
71020+ if (!strcmp(rolename, trans->rolename)) {
71021+ found = 1;
71022+ break;
71023+ }
71024+ }
71025+
71026+ if (!found)
71027+ return 0;
71028+
71029+ /* handle special roles that do not require authentication
71030+ and check ip */
71031+
71032+ FOR_EACH_ROLE_START(r)
71033+ if (!strcmp(rolename, r->rolename) &&
71034+ (r->roletype & GR_ROLE_SPECIAL)) {
71035+ found = 0;
71036+ if (r->allowed_ips != NULL) {
71037+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
71038+ if ((ntohl(curr_ip) & ipp->netmask) ==
71039+ (ntohl(ipp->addr) & ipp->netmask))
71040+ found = 1;
71041+ }
71042+ } else
71043+ found = 2;
71044+ if (!found)
71045+ return 0;
71046+
71047+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
71048+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
71049+ *salt = NULL;
71050+ *sum = NULL;
71051+ return 1;
71052+ }
71053+ }
71054+ FOR_EACH_ROLE_END(r)
71055+
71056+ for (i = 0; i < polstate->num_sprole_pws; i++) {
71057+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
71058+ *salt = polstate->acl_special_roles[i]->salt;
71059+ *sum = polstate->acl_special_roles[i]->sum;
71060+ return 1;
71061+ }
71062+ }
71063+
71064+ return 0;
71065+}
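
The allowed-IP walk above admits a client when its address and the ACL entry agree on every bit covered by the netmask; note that addr is converted with ntohl() while the stored netmask is used as-is. A tiny demonstration of the same masked comparison on dotted-quad inputs, using the standard inet_addr() (which returns network byte order, so all three values are converted here):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    /* same test as above: match if client and entry agree under the mask */
    static int ip_allowed(const char *client, const char *acl, const char *mask)
    {
            uint32_t c = ntohl(inet_addr(client));
            uint32_t a = ntohl(inet_addr(acl));
            uint32_t m = ntohl(inet_addr(mask));

            return (c & m) == (a & m);
    }

    int main(void)
    {
            /* 10.0.5.9 is inside 10.0.0.0/16 but outside 192.168.1.0/24 */
            printf("%d\n", ip_allowed("10.0.5.9", "10.0.0.0", "255.255.0.0"));
            printf("%d\n", ip_allowed("10.0.5.9", "192.168.1.0", "255.255.255.0"));
            return 0;
    }
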
71066+
71067+int gr_check_secure_terminal(struct task_struct *task)
71068+{
71069+ struct task_struct *p, *p2, *p3;
71070+ struct files_struct *files;
71071+ struct fdtable *fdt;
71072+ struct file *our_file = NULL, *file;
71073+ int i;
71074+
71075+ if (task->signal->tty == NULL)
71076+ return 1;
71077+
71078+ files = get_files_struct(task);
71079+ if (files != NULL) {
71080+ rcu_read_lock();
71081+ fdt = files_fdtable(files);
71082+ for (i=0; i < fdt->max_fds; i++) {
71083+ file = fcheck_files(files, i);
71084+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
71085+ get_file(file);
71086+ our_file = file;
71087+ }
71088+ }
71089+ rcu_read_unlock();
71090+ put_files_struct(files);
71091+ }
71092+
71093+ if (our_file == NULL)
71094+ return 1;
71095+
71096+ read_lock(&tasklist_lock);
71097+ do_each_thread(p2, p) {
71098+ files = get_files_struct(p);
71099+ if (files == NULL ||
71100+ (p->signal && p->signal->tty == task->signal->tty)) {
71101+ if (files != NULL)
71102+ put_files_struct(files);
71103+ continue;
71104+ }
71105+ rcu_read_lock();
71106+ fdt = files_fdtable(files);
71107+ for (i=0; i < fdt->max_fds; i++) {
71108+ file = fcheck_files(files, i);
71109+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
71110+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
71111+ p3 = task;
71112+ while (task_pid_nr(p3) > 0) {
71113+ if (p3 == p)
71114+ break;
71115+ p3 = p3->real_parent;
71116+ }
71117+ if (p3 == p)
71118+ break;
71119+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
71120+ gr_handle_alertkill(p);
71121+ rcu_read_unlock();
71122+ put_files_struct(files);
71123+ read_unlock(&tasklist_lock);
71124+ fput(our_file);
71125+ return 0;
71126+ }
71127+ }
71128+ rcu_read_unlock();
71129+ put_files_struct(files);
71130+ } while_each_thread(p2, p);
71131+ read_unlock(&tasklist_lock);
71132+
71133+ fput(our_file);
71134+ return 1;
71135+}
71136+
71137+ssize_t
71138+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
71139+{
71140+ struct gr_arg_wrapper uwrap;
71141+ unsigned char *sprole_salt = NULL;
71142+ unsigned char *sprole_sum = NULL;
71143+ int error = 0;
71144+ int error2 = 0;
71145+ size_t req_count = 0;
71146+ unsigned char oldmode = 0;
71147+
71148+ mutex_lock(&gr_dev_mutex);
71149+
71150+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
71151+ error = -EPERM;
71152+ goto out;
71153+ }
71154+
71155+#ifdef CONFIG_COMPAT
71156+ pax_open_kernel();
71157+ if (is_compat_task()) {
71158+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
71159+ copy_gr_arg = &copy_gr_arg_compat;
71160+ copy_acl_object_label = &copy_acl_object_label_compat;
71161+ copy_acl_subject_label = &copy_acl_subject_label_compat;
71162+ copy_acl_role_label = &copy_acl_role_label_compat;
71163+ copy_acl_ip_label = &copy_acl_ip_label_compat;
71164+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
71165+ copy_role_transition = &copy_role_transition_compat;
71166+ copy_sprole_pw = &copy_sprole_pw_compat;
71167+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
71168+ copy_pointer_from_array = &copy_pointer_from_array_compat;
71169+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
71170+ } else {
71171+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
71172+ copy_gr_arg = &copy_gr_arg_normal;
71173+ copy_acl_object_label = &copy_acl_object_label_normal;
71174+ copy_acl_subject_label = &copy_acl_subject_label_normal;
71175+ copy_acl_role_label = &copy_acl_role_label_normal;
71176+ copy_acl_ip_label = &copy_acl_ip_label_normal;
71177+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
71178+ copy_role_transition = &copy_role_transition_normal;
71179+ copy_sprole_pw = &copy_sprole_pw_normal;
71180+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
71181+ copy_pointer_from_array = &copy_pointer_from_array_normal;
71182+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
71183+ }
71184+ pax_close_kernel();
71185+#endif
71186+
71187+ req_count = get_gr_arg_wrapper_size();
71188+
71189+ if (count != req_count) {
71190+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
71191+ error = -EINVAL;
71192+ goto out;
71193+ }
71194+
71195+
71196+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
71197+ gr_auth_expires = 0;
71198+ gr_auth_attempts = 0;
71199+ }
71200+
71201+ error = copy_gr_arg_wrapper(buf, &uwrap);
71202+ if (error)
71203+ goto out;
71204+
71205+ error = copy_gr_arg(uwrap.arg, &gr_usermode);
71206+ if (error)
71207+ goto out;
71208+
71209+ if (gr_usermode.mode != GR_SPROLE && gr_usermode.mode != GR_SPROLEPAM &&
71210+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
71211+ time_after(gr_auth_expires, get_seconds())) {
71212+ error = -EBUSY;
71213+ goto out;
71214+ }
71215+
71216+	/* if a non-root user is trying to do anything other than use a special role,
71217+	   do not attempt authentication and do not count the attempt towards
71218+	   authentication locking
71219+	*/
71220+
71221+ if (gr_usermode.mode != GR_SPROLE && gr_usermode.mode != GR_STATUS &&
71222+ gr_usermode.mode != GR_UNSPROLE && gr_usermode.mode != GR_SPROLEPAM &&
71223+ gr_is_global_nonroot(current_uid())) {
71224+ error = -EPERM;
71225+ goto out;
71226+ }
71227+
71228+ /* ensure pw and special role name are null terminated */
71229+
71230+ gr_usermode.pw[GR_PW_LEN - 1] = '\0';
71231+ gr_usermode.sp_role[GR_SPROLE_LEN - 1] = '\0';
71232+
71233+	/* Okay.
71234+	 * We now have enough of the argument structure (we have yet
71235+	 * to copy_from_user the tables themselves). Copy the tables
71236+	 * only if we need them, i.e. for loading operations. */
71237+
71238+ switch (gr_usermode.mode) {
71239+ case GR_STATUS:
71240+ if (gr_acl_is_enabled()) {
71241+ error = 1;
71242+ if (!gr_check_secure_terminal(current))
71243+ error = 3;
71244+ } else
71245+ error = 2;
71246+ goto out;
71247+ case GR_SHUTDOWN:
71248+ if (gr_acl_is_enabled() && !(chkpw(&gr_usermode, (unsigned char *)&gr_system_salt, (unsigned char *)&gr_system_sum))) {
71249+ stop_machine(gr_rbac_disable, NULL, NULL);
71250+ free_variables(false);
71251+ memset(&gr_usermode, 0, sizeof(gr_usermode));
71252+ memset(&gr_system_salt, 0, sizeof(gr_system_salt));
71253+ memset(&gr_system_sum, 0, sizeof(gr_system_sum));
71254+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
71255+ } else if (gr_acl_is_enabled()) {
71256+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
71257+ error = -EPERM;
71258+ } else {
71259+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
71260+ error = -EAGAIN;
71261+ }
71262+ break;
71263+ case GR_ENABLE:
71264+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(&gr_usermode)))
71265+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
71266+ else {
71267+ if (gr_acl_is_enabled())
71268+ error = -EAGAIN;
71269+ else
71270+ error = error2;
71271+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
71272+ }
71273+ break;
71274+ case GR_OLDRELOAD:
71275+ oldmode = 1;
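+		/* fall through and share GR_RELOAD's handling */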
71276+ case GR_RELOAD:
71277+ if (!gr_acl_is_enabled()) {
71278+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
71279+ error = -EAGAIN;
71280+ } else if (!(chkpw(&gr_usermode, (unsigned char *)&gr_system_salt, (unsigned char *)&gr_system_sum))) {
71281+ error2 = gracl_reload(&gr_usermode, oldmode);
71282+ if (!error2)
71283+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
71284+ else {
71285+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
71286+ error = error2;
71287+ }
71288+ } else {
71289+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
71290+ error = -EPERM;
71291+ }
71292+ break;
71293+ case GR_SEGVMOD:
71294+ if (unlikely(!gr_acl_is_enabled())) {
71295+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
71296+ error = -EAGAIN;
71297+ break;
71298+ }
71299+
71300+ if (!(chkpw(&gr_usermode, (unsigned char *)&gr_system_salt, (unsigned char *)&gr_system_sum))) {
71301+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
71302+ if (gr_usermode.segv_device && gr_usermode.segv_inode) {
71303+ struct acl_subject_label *segvacl;
71304+ segvacl =
71305+ lookup_acl_subj_label(gr_usermode.segv_inode,
71306+ gr_usermode.segv_device,
71307+ current->role);
71308+ if (segvacl) {
71309+ segvacl->crashes = 0;
71310+ segvacl->expires = 0;
71311+ }
71312+ } else if (gr_find_uid(gr_usermode.segv_uid) >= 0) {
71313+ gr_remove_uid(gr_usermode.segv_uid);
71314+ }
71315+ } else {
71316+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
71317+ error = -EPERM;
71318+ }
71319+ break;
71320+ case GR_SPROLE:
71321+ case GR_SPROLEPAM:
71322+ if (unlikely(!gr_acl_is_enabled())) {
71323+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
71324+ error = -EAGAIN;
71325+ break;
71326+ }
71327+
71328+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
71329+ current->role->expires = 0;
71330+ current->role->auth_attempts = 0;
71331+ }
71332+
71333+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
71334+ time_after(current->role->expires, get_seconds())) {
71335+ error = -EBUSY;
71336+ goto out;
71337+ }
71338+
71339+ if (lookup_special_role_auth
71340+ (gr_usermode.mode, gr_usermode.sp_role, &sprole_salt, &sprole_sum)
71341+ && ((!sprole_salt && !sprole_sum)
71342+ || !(chkpw(&gr_usermode, sprole_salt, sprole_sum)))) {
71343+ char *p = "";
71344+ assign_special_role(gr_usermode.sp_role);
71345+ read_lock(&tasklist_lock);
71346+ if (current->real_parent)
71347+ p = current->real_parent->role->rolename;
71348+ read_unlock(&tasklist_lock);
71349+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
71350+ p, acl_sp_role_value);
71351+ } else {
71352+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode.sp_role);
71353+ error = -EPERM;
71354+			if (!(current->role->auth_attempts++))
71355+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
71356+
71357+ goto out;
71358+ }
71359+ break;
71360+ case GR_UNSPROLE:
71361+ if (unlikely(!gr_acl_is_enabled())) {
71362+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
71363+ error = -EAGAIN;
71364+ break;
71365+ }
71366+
71367+ if (current->role->roletype & GR_ROLE_SPECIAL) {
71368+ char *p = "";
71369+ int i = 0;
71370+
71371+ read_lock(&tasklist_lock);
71372+ if (current->real_parent) {
71373+ p = current->real_parent->role->rolename;
71374+ i = current->real_parent->acl_role_id;
71375+ }
71376+ read_unlock(&tasklist_lock);
71377+
71378+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
71379+ gr_set_acls(1);
71380+ } else {
71381+ error = -EPERM;
71382+ goto out;
71383+ }
71384+ break;
71385+ default:
71386+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode.mode);
71387+ error = -EINVAL;
71388+ break;
71389+ }
71390+
71391+ if (error != -EPERM)
71392+ goto out;
71393+
71394+	if (!(gr_auth_attempts++))
71395+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
71396+
71397+ out:
71398+ mutex_unlock(&gr_dev_mutex);
71399+
71400+ if (!error)
71401+ error = req_count;
71402+
71403+ return error;
71404+}
71405+
71406+int
71407+gr_set_acls(const int type)
71408+{
71409+ struct task_struct *task, *task2;
71410+ struct acl_role_label *role = current->role;
71411+ struct acl_subject_label *subj;
71412+ __u16 acl_role_id = current->acl_role_id;
71413+ const struct cred *cred;
71414+ int ret;
71415+
71416+ rcu_read_lock();
71417+ read_lock(&tasklist_lock);
71418+ read_lock(&grsec_exec_file_lock);
71419+ do_each_thread(task2, task) {
71420+		/* check whether we were called from the exit handler;
71421+		   if so, only replace ACLs that have inherited the admin
71422+		   ACL */
71423+
71424+ if (type && (task->role != role ||
71425+ task->acl_role_id != acl_role_id))
71426+ continue;
71427+
71428+ task->acl_role_id = 0;
71429+ task->acl_sp_role = 0;
71430+ task->inherited = 0;
71431+
71432+ if (task->exec_file) {
71433+ cred = __task_cred(task);
71434+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
71435+ subj = __gr_get_subject_for_task(polstate, task, NULL);
71436+ if (subj == NULL) {
71437+ ret = -EINVAL;
71438+ read_unlock(&grsec_exec_file_lock);
71439+ read_unlock(&tasklist_lock);
71440+ rcu_read_unlock();
71441+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
71442+ return ret;
71443+ }
71444+ __gr_apply_subject_to_task(polstate, task, subj);
71445+ } else {
71446+ // it's a kernel process
71447+ task->role = polstate->kernel_role;
71448+ task->acl = polstate->kernel_role->root_label;
71449+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
71450+ task->acl->mode &= ~GR_PROCFIND;
71451+#endif
71452+ }
71453+ } while_each_thread(task2, task);
71454+ read_unlock(&grsec_exec_file_lock);
71455+ read_unlock(&tasklist_lock);
71456+ rcu_read_unlock();
71457+
71458+ return 0;
71459+}
71460diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
71461new file mode 100644
71462index 0000000..39645c9
71463--- /dev/null
71464+++ b/grsecurity/gracl_res.c
71465@@ -0,0 +1,68 @@
71466+#include <linux/kernel.h>
71467+#include <linux/sched.h>
71468+#include <linux/gracl.h>
71469+#include <linux/grinternal.h>
71470+
71471+static const char *restab_log[] = {
71472+ [RLIMIT_CPU] = "RLIMIT_CPU",
71473+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
71474+ [RLIMIT_DATA] = "RLIMIT_DATA",
71475+ [RLIMIT_STACK] = "RLIMIT_STACK",
71476+ [RLIMIT_CORE] = "RLIMIT_CORE",
71477+ [RLIMIT_RSS] = "RLIMIT_RSS",
71478+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
71479+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
71480+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
71481+ [RLIMIT_AS] = "RLIMIT_AS",
71482+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
71483+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
71484+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
71485+ [RLIMIT_NICE] = "RLIMIT_NICE",
71486+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
71487+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
71488+ [GR_CRASH_RES] = "RLIMIT_CRASH"
71489+};
71490+
71491+void
71492+gr_log_resource(const struct task_struct *task,
71493+ const int res, const unsigned long wanted, const int gt)
71494+{
71495+ const struct cred *cred;
71496+ unsigned long rlim;
71497+
71498+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
71499+ return;
71500+
71501+ // not yet supported resource
71502+ if (unlikely(!restab_log[res]))
71503+ return;
71504+
71505+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
71506+ rlim = task_rlimit_max(task, res);
71507+ else
71508+ rlim = task_rlimit(task, res);
71509+
71510+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
71511+ return;
71512+
71513+ rcu_read_lock();
71514+ cred = __task_cred(task);
71515+
71516+ if (res == RLIMIT_NPROC &&
71517+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
71518+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
71519+ goto out_rcu_unlock;
71520+ else if (res == RLIMIT_MEMLOCK &&
71521+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
71522+ goto out_rcu_unlock;
71523+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
71524+ goto out_rcu_unlock;
71525+ rcu_read_unlock();
71526+
71527+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
71528+
71529+ return;
71530+out_rcu_unlock:
71531+ rcu_read_unlock();
71532+ return;
71533+}
71534diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
71535new file mode 100644
71536index 0000000..2040e61
71537--- /dev/null
71538+++ b/grsecurity/gracl_segv.c
71539@@ -0,0 +1,313 @@
71540+#include <linux/kernel.h>
71541+#include <linux/mm.h>
71542+#include <asm/uaccess.h>
71543+#include <asm/errno.h>
71544+#include <asm/mman.h>
71545+#include <net/sock.h>
71546+#include <linux/file.h>
71547+#include <linux/fs.h>
71548+#include <linux/net.h>
71549+#include <linux/in.h>
71550+#include <linux/slab.h>
71551+#include <linux/types.h>
71552+#include <linux/sched.h>
71553+#include <linux/timer.h>
71554+#include <linux/gracl.h>
71555+#include <linux/grsecurity.h>
71556+#include <linux/grinternal.h>
71557+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
71558+#include <linux/magic.h>
71559+#include <linux/pagemap.h>
71560+#include "../fs/btrfs/async-thread.h"
71561+#include "../fs/btrfs/ctree.h"
71562+#include "../fs/btrfs/btrfs_inode.h"
71563+#endif
71564+
71565+static struct crash_uid *uid_set;
71566+static unsigned short uid_used;
71567+static DEFINE_SPINLOCK(gr_uid_lock);
71568+extern rwlock_t gr_inode_lock;
71569+extern struct acl_subject_label *
71570+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
71571+ struct acl_role_label *role);
71572+
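+/* btrfs subvolumes share a single s_dev, so report the per-subvolume
+   anonymous device instead */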
71573+static inline dev_t __get_dev(const struct dentry *dentry)
71574+{
71575+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
71576+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
71577+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
71578+ else
71579+#endif
71580+ return dentry->d_sb->s_dev;
71581+}
71582+
71583+int
71584+gr_init_uidset(void)
71585+{
71586+ uid_set =
71587+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
71588+ uid_used = 0;
71589+
71590+ return uid_set ? 1 : 0;
71591+}
71592+
71593+void
71594+gr_free_uidset(void)
71595+{
71596+ if (uid_set) {
71597+ struct crash_uid *tmpset;
71598+ spin_lock(&gr_uid_lock);
71599+ tmpset = uid_set;
71600+ uid_set = NULL;
71601+ uid_used = 0;
71602+ spin_unlock(&gr_uid_lock);
71603+		kfree(tmpset);
71605+ }
71606+
71607+ return;
71608+}
71609+
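+/* binary search over uid_set, which gr_insertsort() keeps sorted by uid */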
71610+int
71611+gr_find_uid(const uid_t uid)
71612+{
71613+ struct crash_uid *tmp = uid_set;
71614+ uid_t buid;
71615+ int low = 0, high = uid_used - 1, mid;
71616+
71617+ while (high >= low) {
71618+ mid = (low + high) >> 1;
71619+ buid = tmp[mid].uid;
71620+ if (buid == uid)
71621+ return mid;
71622+ if (buid > uid)
71623+ high = mid - 1;
71624+ if (buid < uid)
71625+ low = mid + 1;
71626+ }
71627+
71628+ return -1;
71629+}
71630+
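+/* simple insertion sort; uid_used stays small (at most GR_UIDTABLE_MAX),
+   and keeping uid_set ordered lets gr_find_uid() binary-search it */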
71631+static __inline__ void
71632+gr_insertsort(void)
71633+{
71634+ unsigned short i, j;
71635+ struct crash_uid index;
71636+
71637+ for (i = 1; i < uid_used; i++) {
71638+ index = uid_set[i];
71639+ j = i;
71640+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
71641+ uid_set[j] = uid_set[j - 1];
71642+ j--;
71643+ }
71644+ uid_set[j] = index;
71645+ }
71646+
71647+ return;
71648+}
71649+
71650+static __inline__ void
71651+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
71652+{
71653+ int loc;
71654+ uid_t uid = GR_GLOBAL_UID(kuid);
71655+
71656+ if (uid_used == GR_UIDTABLE_MAX)
71657+ return;
71658+
71659+ loc = gr_find_uid(uid);
71660+
71661+ if (loc >= 0) {
71662+ uid_set[loc].expires = expires;
71663+ return;
71664+ }
71665+
71666+ uid_set[uid_used].uid = uid;
71667+ uid_set[uid_used].expires = expires;
71668+ uid_used++;
71669+
71670+ gr_insertsort();
71671+
71672+ return;
71673+}
71674+
71675+void
71676+gr_remove_uid(const unsigned short loc)
71677+{
71678+ unsigned short i;
71679+
71680+ for (i = loc + 1; i < uid_used; i++)
71681+ uid_set[i - 1] = uid_set[i];
71682+
71683+ uid_used--;
71684+
71685+ return;
71686+}
71687+
71688+int
71689+gr_check_crash_uid(const kuid_t kuid)
71690+{
71691+ int loc;
71692+ int ret = 0;
71693+ uid_t uid;
71694+
71695+ if (unlikely(!gr_acl_is_enabled()))
71696+ return 0;
71697+
71698+ uid = GR_GLOBAL_UID(kuid);
71699+
71700+ spin_lock(&gr_uid_lock);
71701+ loc = gr_find_uid(uid);
71702+
71703+ if (loc < 0)
71704+ goto out_unlock;
71705+
71706+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
71707+ gr_remove_uid(loc);
71708+ else
71709+ ret = 1;
71710+
71711+out_unlock:
71712+ spin_unlock(&gr_uid_lock);
71713+ return ret;
71714+}
71715+
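+/* true when the real, effective, saved and filesystem ids are not all
+   equal, i.e. the process has changed identity setuid/setgid-style */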
71716+static __inline__ int
71717+proc_is_setxid(const struct cred *cred)
71718+{
71719+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
71720+ !uid_eq(cred->uid, cred->fsuid))
71721+ return 1;
71722+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
71723+ !gid_eq(cred->gid, cred->fsgid))
71724+ return 1;
71725+
71726+ return 0;
71727+}
71728+
71729+extern int gr_fake_force_sig(int sig, struct task_struct *t);
71730+
71731+void
71732+gr_handle_crash(struct task_struct *task, const int sig)
71733+{
71734+ struct acl_subject_label *curr;
71735+ struct task_struct *tsk, *tsk2;
71736+ const struct cred *cred;
71737+ const struct cred *cred2;
71738+
71739+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
71740+ return;
71741+
71742+ if (unlikely(!gr_acl_is_enabled()))
71743+ return;
71744+
71745+ curr = task->acl;
71746+
71747+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
71748+ return;
71749+
71750+ if (time_before_eq(curr->expires, get_seconds())) {
71751+ curr->expires = 0;
71752+ curr->crashes = 0;
71753+ }
71754+
71755+ curr->crashes++;
71756+
71757+ if (!curr->expires)
71758+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
71759+
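+	/* once the crash count reaches the subject's RES_CRASH soft limit inside
+	   the ban window, either ban the uid (for non-root setxid processes) or
+	   kill every task running the same binary under the same subject */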
71760+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
71761+ time_after(curr->expires, get_seconds())) {
71762+ rcu_read_lock();
71763+ cred = __task_cred(task);
71764+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
71765+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
71766+ spin_lock(&gr_uid_lock);
71767+ gr_insert_uid(cred->uid, curr->expires);
71768+ spin_unlock(&gr_uid_lock);
71769+ curr->expires = 0;
71770+ curr->crashes = 0;
71771+ read_lock(&tasklist_lock);
71772+ do_each_thread(tsk2, tsk) {
71773+ cred2 = __task_cred(tsk);
71774+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
71775+ gr_fake_force_sig(SIGKILL, tsk);
71776+ } while_each_thread(tsk2, tsk);
71777+ read_unlock(&tasklist_lock);
71778+ } else {
71779+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
71780+ read_lock(&tasklist_lock);
71781+ read_lock(&grsec_exec_file_lock);
71782+ do_each_thread(tsk2, tsk) {
71783+ if (likely(tsk != task)) {
71784+ // if this thread has the same subject as the one that triggered
71785+ // RES_CRASH and it's the same binary, kill it
71786+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
71787+ gr_fake_force_sig(SIGKILL, tsk);
71788+ }
71789+ } while_each_thread(tsk2, tsk);
71790+ read_unlock(&grsec_exec_file_lock);
71791+ read_unlock(&tasklist_lock);
71792+ }
71793+ rcu_read_unlock();
71794+ }
71795+
71796+ return;
71797+}
71798+
71799+int
71800+gr_check_crash_exec(const struct file *filp)
71801+{
71802+ struct acl_subject_label *curr;
71803+
71804+ if (unlikely(!gr_acl_is_enabled()))
71805+ return 0;
71806+
71807+ read_lock(&gr_inode_lock);
71808+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
71809+ __get_dev(filp->f_path.dentry),
71810+ current->role);
71811+ read_unlock(&gr_inode_lock);
71812+
71813+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
71814+ (!curr->crashes && !curr->expires))
71815+ return 0;
71816+
71817+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
71818+ time_after(curr->expires, get_seconds()))
71819+ return 1;
71820+ else if (time_before_eq(curr->expires, get_seconds())) {
71821+ curr->crashes = 0;
71822+ curr->expires = 0;
71823+ }
71824+
71825+ return 0;
71826+}
71827+
71828+void
71829+gr_handle_alertkill(struct task_struct *task)
71830+{
71831+ struct acl_subject_label *curracl;
71832+ __u32 curr_ip;
71833+ struct task_struct *p, *p2;
71834+
71835+ if (unlikely(!gr_acl_is_enabled()))
71836+ return;
71837+
71838+ curracl = task->acl;
71839+ curr_ip = task->signal->curr_ip;
71840+
71841+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
71842+ read_lock(&tasklist_lock);
71843+ do_each_thread(p2, p) {
71844+ if (p->signal->curr_ip == curr_ip)
71845+ gr_fake_force_sig(SIGKILL, p);
71846+ } while_each_thread(p2, p);
71847+ read_unlock(&tasklist_lock);
71848+ } else if (curracl->mode & GR_KILLPROC)
71849+ gr_fake_force_sig(SIGKILL, task);
71850+
71851+ return;
71852+}
71853diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
71854new file mode 100644
71855index 0000000..98011b0
71856--- /dev/null
71857+++ b/grsecurity/gracl_shm.c
71858@@ -0,0 +1,40 @@
71859+#include <linux/kernel.h>
71860+#include <linux/mm.h>
71861+#include <linux/sched.h>
71862+#include <linux/file.h>
71863+#include <linux/ipc.h>
71864+#include <linux/gracl.h>
71865+#include <linux/grsecurity.h>
71866+#include <linux/grinternal.h>
71867+
71868+int
71869+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
71870+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
71871+{
71872+ struct task_struct *task;
71873+
71874+ if (!gr_acl_is_enabled())
71875+ return 1;
71876+
71877+ rcu_read_lock();
71878+ read_lock(&tasklist_lock);
71879+
71880+ task = find_task_by_vpid(shm_cprid);
71881+
71882+ if (unlikely(!task))
71883+ task = find_task_by_vpid(shm_lapid);
71884+
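+	/* deny when a task that created or last attached the segment still
+	   exists (a start time predating the segment guards against PID reuse),
+	   has GR_PROTSHM set, and runs under a different subject */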
71885+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
71886+ (task_pid_nr(task) == shm_lapid)) &&
71887+ (task->acl->mode & GR_PROTSHM) &&
71888+ (task->acl != current->acl))) {
71889+ read_unlock(&tasklist_lock);
71890+ rcu_read_unlock();
71891+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
71892+ return 0;
71893+ }
71894+ read_unlock(&tasklist_lock);
71895+ rcu_read_unlock();
71896+
71897+ return 1;
71898+}
71899diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
71900new file mode 100644
71901index 0000000..bc0be01
71902--- /dev/null
71903+++ b/grsecurity/grsec_chdir.c
71904@@ -0,0 +1,19 @@
71905+#include <linux/kernel.h>
71906+#include <linux/sched.h>
71907+#include <linux/fs.h>
71908+#include <linux/file.h>
71909+#include <linux/grsecurity.h>
71910+#include <linux/grinternal.h>
71911+
71912+void
71913+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
71914+{
71915+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
71916+ if ((grsec_enable_chdir && grsec_enable_group &&
71917+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
71918+ !grsec_enable_group)) {
71919+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
71920+ }
71921+#endif
71922+ return;
71923+}
71924diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
71925new file mode 100644
71926index 0000000..e10b319
71927--- /dev/null
71928+++ b/grsecurity/grsec_chroot.c
71929@@ -0,0 +1,370 @@
71930+#include <linux/kernel.h>
71931+#include <linux/module.h>
71932+#include <linux/sched.h>
71933+#include <linux/file.h>
71934+#include <linux/fs.h>
71935+#include <linux/mount.h>
71936+#include <linux/types.h>
71937+#include "../fs/mount.h"
71938+#include <linux/grsecurity.h>
71939+#include <linux/grinternal.h>
71940+
71941+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
71942+int gr_init_ran;
71943+#endif
71944+
71945+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
71946+{
71947+#ifdef CONFIG_GRKERNSEC
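+	/* any task other than init counts as chrooted when its root differs
+	   from both init's root and its mount namespace's root */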
71948+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
71949+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
71950+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
71951+ && gr_init_ran
71952+#endif
71953+ )
71954+ task->gr_is_chrooted = 1;
71955+ else {
71956+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
71957+ if (task_pid_nr(task) == 1 && !gr_init_ran)
71958+ gr_init_ran = 1;
71959+#endif
71960+ task->gr_is_chrooted = 0;
71961+ }
71962+
71963+ task->gr_chroot_dentry = path->dentry;
71964+#endif
71965+ return;
71966+}
71967+
71968+void gr_clear_chroot_entries(struct task_struct *task)
71969+{
71970+#ifdef CONFIG_GRKERNSEC
71971+ task->gr_is_chrooted = 0;
71972+ task->gr_chroot_dentry = NULL;
71973+#endif
71974+ return;
71975+}
71976+
71977+int
71978+gr_handle_chroot_unix(const pid_t pid)
71979+{
71980+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
71981+ struct task_struct *p;
71982+
71983+ if (unlikely(!grsec_enable_chroot_unix))
71984+ return 1;
71985+
71986+ if (likely(!proc_is_chrooted(current)))
71987+ return 1;
71988+
71989+ rcu_read_lock();
71990+ read_lock(&tasklist_lock);
71991+ p = find_task_by_vpid_unrestricted(pid);
71992+ if (unlikely(p && !have_same_root(current, p))) {
71993+ read_unlock(&tasklist_lock);
71994+ rcu_read_unlock();
71995+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
71996+ return 0;
71997+ }
71998+ read_unlock(&tasklist_lock);
71999+ rcu_read_unlock();
72000+#endif
72001+ return 1;
72002+}
72003+
72004+int
72005+gr_handle_chroot_nice(void)
72006+{
72007+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
72008+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
72009+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
72010+ return -EPERM;
72011+ }
72012+#endif
72013+ return 0;
72014+}
72015+
72016+int
72017+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
72018+{
72019+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
72020+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
72021+ && proc_is_chrooted(current)) {
72022+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
72023+ return -EACCES;
72024+ }
72025+#endif
72026+ return 0;
72027+}
72028+
72029+int
72030+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
72031+{
72032+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
72033+ struct task_struct *p;
72034+ int ret = 0;
72035+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
72036+ return ret;
72037+
72038+ read_lock(&tasklist_lock);
72039+ do_each_pid_task(pid, type, p) {
72040+ if (!have_same_root(current, p)) {
72041+ ret = 1;
72042+ goto out;
72043+ }
72044+ } while_each_pid_task(pid, type, p);
72045+out:
72046+ read_unlock(&tasklist_lock);
72047+ return ret;
72048+#endif
72049+ return 0;
72050+}
72051+
72052+int
72053+gr_pid_is_chrooted(struct task_struct *p)
72054+{
72055+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
72056+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
72057+ return 0;
72058+
72059+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
72060+ !have_same_root(current, p)) {
72061+ return 1;
72062+ }
72063+#endif
72064+ return 0;
72065+}
72066+
72067+EXPORT_SYMBOL(gr_pid_is_chrooted);
72068+
72069+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
72070+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
72071+{
72072+ struct path path, currentroot;
72073+ int ret = 0;
72074+
72075+ path.dentry = (struct dentry *)u_dentry;
72076+ path.mnt = (struct vfsmount *)u_mnt;
72077+ get_fs_root(current->fs, &currentroot);
72078+ if (path_is_under(&path, &currentroot))
72079+ ret = 1;
72080+ path_put(&currentroot);
72081+
72082+ return ret;
72083+}
72084+#endif
72085+
72086+int
72087+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
72088+{
72089+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
72090+ if (!grsec_enable_chroot_fchdir)
72091+ return 1;
72092+
72093+ if (!proc_is_chrooted(current))
72094+ return 1;
72095+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
72096+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
72097+ return 0;
72098+ }
72099+#endif
72100+ return 1;
72101+}
72102+
72103+int
72104+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
72105+ const time_t shm_createtime)
72106+{
72107+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
72108+ struct task_struct *p;
72109+ time_t starttime;
72110+
72111+ if (unlikely(!grsec_enable_chroot_shmat))
72112+ return 1;
72113+
72114+ if (likely(!proc_is_chrooted(current)))
72115+ return 1;
72116+
72117+ rcu_read_lock();
72118+ read_lock(&tasklist_lock);
72119+
72120+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
72121+ starttime = p->start_time.tv_sec;
72122+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
72123+ if (have_same_root(current, p)) {
72124+ goto allow;
72125+ } else {
72126+ read_unlock(&tasklist_lock);
72127+ rcu_read_unlock();
72128+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
72129+ return 0;
72130+ }
72131+ }
72132+ /* creator exited, pid reuse, fall through to next check */
72133+ }
72134+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
72135+ if (unlikely(!have_same_root(current, p))) {
72136+ read_unlock(&tasklist_lock);
72137+ rcu_read_unlock();
72138+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
72139+ return 0;
72140+ }
72141+ }
72142+
72143+allow:
72144+ read_unlock(&tasklist_lock);
72145+ rcu_read_unlock();
72146+#endif
72147+ return 1;
72148+}
72149+
72150+void
72151+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
72152+{
72153+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
72154+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
72155+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
72156+#endif
72157+ return;
72158+}
72159+
72160+int
72161+gr_handle_chroot_mknod(const struct dentry *dentry,
72162+ const struct vfsmount *mnt, const int mode)
72163+{
72164+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
72165+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
72166+ proc_is_chrooted(current)) {
72167+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
72168+ return -EPERM;
72169+ }
72170+#endif
72171+ return 0;
72172+}
72173+
72174+int
72175+gr_handle_chroot_mount(const struct dentry *dentry,
72176+ const struct vfsmount *mnt, const char *dev_name)
72177+{
72178+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
72179+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
72180+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
72181+ return -EPERM;
72182+ }
72183+#endif
72184+ return 0;
72185+}
72186+
72187+int
72188+gr_handle_chroot_pivot(void)
72189+{
72190+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
72191+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
72192+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
72193+ return -EPERM;
72194+ }
72195+#endif
72196+ return 0;
72197+}
72198+
72199+int
72200+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
72201+{
72202+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
72203+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
72204+ !gr_is_outside_chroot(dentry, mnt)) {
72205+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
72206+ return -EPERM;
72207+ }
72208+#endif
72209+ return 0;
72210+}
72211+
72212+extern const char *captab_log[];
72213+extern int captab_log_entries;
72214+
72215+int
72216+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
72217+{
72218+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
72219+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
72220+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
72221+ if (cap_raised(chroot_caps, cap)) {
72222+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
72223+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
72224+ }
72225+ return 0;
72226+ }
72227+ }
72228+#endif
72229+ return 1;
72230+}
72231+
72232+int
72233+gr_chroot_is_capable(const int cap)
72234+{
72235+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
72236+ return gr_task_chroot_is_capable(current, current_cred(), cap);
72237+#endif
72238+ return 1;
72239+}
72240+
72241+int
72242+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
72243+{
72244+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
72245+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
72246+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
72247+ if (cap_raised(chroot_caps, cap)) {
72248+ return 0;
72249+ }
72250+ }
72251+#endif
72252+ return 1;
72253+}
72254+
72255+int
72256+gr_chroot_is_capable_nolog(const int cap)
72257+{
72258+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
72259+ return gr_task_chroot_is_capable_nolog(current, cap);
72260+#endif
72261+ return 1;
72262+}
72263+
72264+int
72265+gr_handle_chroot_sysctl(const int op)
72266+{
72267+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
72268+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
72269+ proc_is_chrooted(current))
72270+ return -EACCES;
72271+#endif
72272+ return 0;
72273+}
72274+
72275+void
72276+gr_handle_chroot_chdir(const struct path *path)
72277+{
72278+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
72279+ if (grsec_enable_chroot_chdir)
72280+ set_fs_pwd(current->fs, path);
72281+#endif
72282+ return;
72283+}
72284+
72285+int
72286+gr_handle_chroot_chmod(const struct dentry *dentry,
72287+ const struct vfsmount *mnt, const int mode)
72288+{
72289+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
72290+ /* allow chmod +s on directories, but not files */
72291+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
72292+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
72293+ proc_is_chrooted(current)) {
72294+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
72295+ return -EPERM;
72296+ }
72297+#endif
72298+ return 0;
72299+}
72300diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
72301new file mode 100644
72302index 0000000..52b3e30
72303--- /dev/null
72304+++ b/grsecurity/grsec_disabled.c
72305@@ -0,0 +1,433 @@
72306+#include <linux/kernel.h>
72307+#include <linux/module.h>
72308+#include <linux/sched.h>
72309+#include <linux/file.h>
72310+#include <linux/fs.h>
72311+#include <linux/kdev_t.h>
72312+#include <linux/net.h>
72313+#include <linux/in.h>
72314+#include <linux/ip.h>
72315+#include <linux/skbuff.h>
72316+#include <linux/sysctl.h>
72317+
72318+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
72319+void
72320+pax_set_initial_flags(struct linux_binprm *bprm)
72321+{
72322+ return;
72323+}
72324+#endif
72325+
72326+#ifdef CONFIG_SYSCTL
72327+__u32
72328+gr_handle_sysctl(const struct ctl_table * table, const int op)
72329+{
72330+ return 0;
72331+}
72332+#endif
72333+
72334+#ifdef CONFIG_TASKSTATS
72335+int gr_is_taskstats_denied(int pid)
72336+{
72337+ return 0;
72338+}
72339+#endif
72340+
72341+int
72342+gr_acl_is_enabled(void)
72343+{
72344+ return 0;
72345+}
72346+
72347+void
72348+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
72349+{
72350+ return;
72351+}
72352+
72353+int
72354+gr_handle_rawio(const struct inode *inode)
72355+{
72356+ return 0;
72357+}
72358+
72359+void
72360+gr_acl_handle_psacct(struct task_struct *task, const long code)
72361+{
72362+ return;
72363+}
72364+
72365+int
72366+gr_handle_ptrace(struct task_struct *task, const long request)
72367+{
72368+ return 0;
72369+}
72370+
72371+int
72372+gr_handle_proc_ptrace(struct task_struct *task)
72373+{
72374+ return 0;
72375+}
72376+
72377+int
72378+gr_set_acls(const int type)
72379+{
72380+ return 0;
72381+}
72382+
72383+int
72384+gr_check_hidden_task(const struct task_struct *tsk)
72385+{
72386+ return 0;
72387+}
72388+
72389+int
72390+gr_check_protected_task(const struct task_struct *task)
72391+{
72392+ return 0;
72393+}
72394+
72395+int
72396+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
72397+{
72398+ return 0;
72399+}
72400+
72401+void
72402+gr_copy_label(struct task_struct *tsk)
72403+{
72404+ return;
72405+}
72406+
72407+void
72408+gr_set_pax_flags(struct task_struct *task)
72409+{
72410+ return;
72411+}
72412+
72413+int
72414+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
72415+ const int unsafe_share)
72416+{
72417+ return 0;
72418+}
72419+
72420+void
72421+gr_handle_delete(const ino_t ino, const dev_t dev)
72422+{
72423+ return;
72424+}
72425+
72426+void
72427+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
72428+{
72429+ return;
72430+}
72431+
72432+void
72433+gr_handle_crash(struct task_struct *task, const int sig)
72434+{
72435+ return;
72436+}
72437+
72438+int
72439+gr_check_crash_exec(const struct file *filp)
72440+{
72441+ return 0;
72442+}
72443+
72444+int
72445+gr_check_crash_uid(const kuid_t uid)
72446+{
72447+ return 0;
72448+}
72449+
72450+void
72451+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
72452+ struct dentry *old_dentry,
72453+ struct dentry *new_dentry,
72454+ struct vfsmount *mnt, const __u8 replace)
72455+{
72456+ return;
72457+}
72458+
72459+int
72460+gr_search_socket(const int family, const int type, const int protocol)
72461+{
72462+ return 1;
72463+}
72464+
72465+int
72466+gr_search_connectbind(const int mode, const struct socket *sock,
72467+ const struct sockaddr_in *addr)
72468+{
72469+ return 0;
72470+}
72471+
72472+void
72473+gr_handle_alertkill(struct task_struct *task)
72474+{
72475+ return;
72476+}
72477+
72478+__u32
72479+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
72480+{
72481+ return 1;
72482+}
72483+
72484+__u32
72485+gr_acl_handle_hidden_file(const struct dentry * dentry,
72486+ const struct vfsmount * mnt)
72487+{
72488+ return 1;
72489+}
72490+
72491+__u32
72492+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
72493+ int acc_mode)
72494+{
72495+ return 1;
72496+}
72497+
72498+__u32
72499+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
72500+{
72501+ return 1;
72502+}
72503+
72504+__u32
72505+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
72506+{
72507+ return 1;
72508+}
72509+
72510+int
72511+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
72512+ unsigned int *vm_flags)
72513+{
72514+ return 1;
72515+}
72516+
72517+__u32
72518+gr_acl_handle_truncate(const struct dentry * dentry,
72519+ const struct vfsmount * mnt)
72520+{
72521+ return 1;
72522+}
72523+
72524+__u32
72525+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
72526+{
72527+ return 1;
72528+}
72529+
72530+__u32
72531+gr_acl_handle_access(const struct dentry * dentry,
72532+ const struct vfsmount * mnt, const int fmode)
72533+{
72534+ return 1;
72535+}
72536+
72537+__u32
72538+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
72539+ umode_t *mode)
72540+{
72541+ return 1;
72542+}
72543+
72544+__u32
72545+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
72546+{
72547+ return 1;
72548+}
72549+
72550+__u32
72551+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
72552+{
72553+ return 1;
72554+}
72555+
72556+__u32
72557+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
72558+{
72559+ return 1;
72560+}
72561+
72562+void
72563+grsecurity_init(void)
72564+{
72565+ return;
72566+}
72567+
72568+umode_t gr_acl_umask(void)
72569+{
72570+ return 0;
72571+}
72572+
72573+__u32
72574+gr_acl_handle_mknod(const struct dentry * new_dentry,
72575+ const struct dentry * parent_dentry,
72576+ const struct vfsmount * parent_mnt,
72577+ const int mode)
72578+{
72579+ return 1;
72580+}
72581+
72582+__u32
72583+gr_acl_handle_mkdir(const struct dentry * new_dentry,
72584+ const struct dentry * parent_dentry,
72585+ const struct vfsmount * parent_mnt)
72586+{
72587+ return 1;
72588+}
72589+
72590+__u32
72591+gr_acl_handle_symlink(const struct dentry * new_dentry,
72592+ const struct dentry * parent_dentry,
72593+ const struct vfsmount * parent_mnt, const struct filename *from)
72594+{
72595+ return 1;
72596+}
72597+
72598+__u32
72599+gr_acl_handle_link(const struct dentry * new_dentry,
72600+ const struct dentry * parent_dentry,
72601+ const struct vfsmount * parent_mnt,
72602+ const struct dentry * old_dentry,
72603+ const struct vfsmount * old_mnt, const struct filename *to)
72604+{
72605+ return 1;
72606+}
72607+
72608+int
72609+gr_acl_handle_rename(const struct dentry *new_dentry,
72610+ const struct dentry *parent_dentry,
72611+ const struct vfsmount *parent_mnt,
72612+ const struct dentry *old_dentry,
72613+ const struct inode *old_parent_inode,
72614+ const struct vfsmount *old_mnt, const struct filename *newname)
72615+{
72616+ return 0;
72617+}
72618+
72619+int
72620+gr_acl_handle_filldir(const struct file *file, const char *name,
72621+ const int namelen, const ino_t ino)
72622+{
72623+ return 1;
72624+}
72625+
72626+int
72627+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
72628+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
72629+{
72630+ return 1;
72631+}
72632+
72633+int
72634+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
72635+{
72636+ return 0;
72637+}
72638+
72639+int
72640+gr_search_accept(const struct socket *sock)
72641+{
72642+ return 0;
72643+}
72644+
72645+int
72646+gr_search_listen(const struct socket *sock)
72647+{
72648+ return 0;
72649+}
72650+
72651+int
72652+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
72653+{
72654+ return 0;
72655+}
72656+
72657+__u32
72658+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
72659+{
72660+ return 1;
72661+}
72662+
72663+__u32
72664+gr_acl_handle_creat(const struct dentry * dentry,
72665+ const struct dentry * p_dentry,
72666+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
72667+ const int imode)
72668+{
72669+ return 1;
72670+}
72671+
72672+void
72673+gr_acl_handle_exit(void)
72674+{
72675+ return;
72676+}
72677+
72678+int
72679+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
72680+{
72681+ return 1;
72682+}
72683+
72684+void
72685+gr_set_role_label(const kuid_t uid, const kgid_t gid)
72686+{
72687+ return;
72688+}
72689+
72690+int
72691+gr_acl_handle_procpidmem(const struct task_struct *task)
72692+{
72693+ return 0;
72694+}
72695+
72696+int
72697+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
72698+{
72699+ return 0;
72700+}
72701+
72702+int
72703+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
72704+{
72705+ return 0;
72706+}
72707+
72708+int
72709+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
72710+{
72711+ return 0;
72712+}
72713+
72714+int
72715+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
72716+{
72717+ return 0;
72718+}
72719+
72720+int gr_acl_enable_at_secure(void)
72721+{
72722+ return 0;
72723+}
72724+
72725+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
72726+{
72727+ return dentry->d_sb->s_dev;
72728+}
72729+
72730+void gr_put_exec_file(struct task_struct *task)
72731+{
72732+ return;
72733+}
72734+
72735+#ifdef CONFIG_SECURITY
72736+EXPORT_SYMBOL(gr_check_user_change);
72737+EXPORT_SYMBOL(gr_check_group_change);
72738+#endif
72739diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
72740new file mode 100644
72741index 0000000..387032b
72742--- /dev/null
72743+++ b/grsecurity/grsec_exec.c
72744@@ -0,0 +1,187 @@
72745+#include <linux/kernel.h>
72746+#include <linux/sched.h>
72747+#include <linux/file.h>
72748+#include <linux/binfmts.h>
72749+#include <linux/fs.h>
72750+#include <linux/types.h>
72751+#include <linux/grdefs.h>
72752+#include <linux/grsecurity.h>
72753+#include <linux/grinternal.h>
72754+#include <linux/capability.h>
72755+#include <linux/module.h>
72756+#include <linux/compat.h>
72757+
72758+#include <asm/uaccess.h>
72759+
72760+#ifdef CONFIG_GRKERNSEC_EXECLOG
72761+static char gr_exec_arg_buf[132];
72762+static DEFINE_MUTEX(gr_exec_arg_mutex);
72763+#endif
72764+
72765+struct user_arg_ptr {
72766+#ifdef CONFIG_COMPAT
72767+ bool is_compat;
72768+#endif
72769+ union {
72770+ const char __user *const __user *native;
72771+#ifdef CONFIG_COMPAT
72772+ const compat_uptr_t __user *compat;
72773+#endif
72774+ } ptr;
72775+};
72776+
72777+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
72778+
72779+void
72780+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
72781+{
72782+#ifdef CONFIG_GRKERNSEC_EXECLOG
72783+ char *grarg = gr_exec_arg_buf;
72784+ unsigned int i, x, execlen = 0;
72785+ char c;
72786+
72787+ if (!((grsec_enable_execlog && grsec_enable_group &&
72788+ in_group_p(grsec_audit_gid))
72789+ || (grsec_enable_execlog && !grsec_enable_group)))
72790+ return;
72791+
72792+ mutex_lock(&gr_exec_arg_mutex);
72793+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
72794+
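+	/* copy up to 128 bytes of the argument vector into the log buffer,
+	   separating arguments with spaces */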
72795+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
72796+ const char __user *p;
72797+ unsigned int len;
72798+
72799+ p = get_user_arg_ptr(argv, i);
72800+ if (IS_ERR(p))
72801+ goto log;
72802+
72803+ len = strnlen_user(p, 128 - execlen);
72804+ if (len > 128 - execlen)
72805+ len = 128 - execlen;
72806+ else if (len > 0)
72807+ len--;
72808+ if (copy_from_user(grarg + execlen, p, len))
72809+ goto log;
72810+
72811+ /* rewrite unprintable characters */
72812+ for (x = 0; x < len; x++) {
72813+ c = *(grarg + execlen + x);
72814+ if (c < 32 || c > 126)
72815+ *(grarg + execlen + x) = ' ';
72816+ }
72817+
72818+ execlen += len;
72819+ *(grarg + execlen) = ' ';
72820+ *(grarg + execlen + 1) = '\0';
72821+ execlen++;
72822+ }
72823+
72824+ log:
72825+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
72826+ bprm->file->f_path.mnt, grarg);
72827+ mutex_unlock(&gr_exec_arg_mutex);
72828+#endif
72829+ return;
72830+}
72831+
72832+#ifdef CONFIG_GRKERNSEC
72833+extern int gr_acl_is_capable(const int cap);
72834+extern int gr_acl_is_capable_nolog(const int cap);
72835+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
72836+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
72837+extern int gr_chroot_is_capable(const int cap);
72838+extern int gr_chroot_is_capable_nolog(const int cap);
72839+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
72840+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
72841+#endif
72842+
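+/* capability names indexed by capability number, used for audit messages */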
72843+const char *captab_log[] = {
72844+ "CAP_CHOWN",
72845+ "CAP_DAC_OVERRIDE",
72846+ "CAP_DAC_READ_SEARCH",
72847+ "CAP_FOWNER",
72848+ "CAP_FSETID",
72849+ "CAP_KILL",
72850+ "CAP_SETGID",
72851+ "CAP_SETUID",
72852+ "CAP_SETPCAP",
72853+ "CAP_LINUX_IMMUTABLE",
72854+ "CAP_NET_BIND_SERVICE",
72855+ "CAP_NET_BROADCAST",
72856+ "CAP_NET_ADMIN",
72857+ "CAP_NET_RAW",
72858+ "CAP_IPC_LOCK",
72859+ "CAP_IPC_OWNER",
72860+ "CAP_SYS_MODULE",
72861+ "CAP_SYS_RAWIO",
72862+ "CAP_SYS_CHROOT",
72863+ "CAP_SYS_PTRACE",
72864+ "CAP_SYS_PACCT",
72865+ "CAP_SYS_ADMIN",
72866+ "CAP_SYS_BOOT",
72867+ "CAP_SYS_NICE",
72868+ "CAP_SYS_RESOURCE",
72869+ "CAP_SYS_TIME",
72870+ "CAP_SYS_TTY_CONFIG",
72871+ "CAP_MKNOD",
72872+ "CAP_LEASE",
72873+ "CAP_AUDIT_WRITE",
72874+ "CAP_AUDIT_CONTROL",
72875+ "CAP_SETFCAP",
72876+ "CAP_MAC_OVERRIDE",
72877+ "CAP_MAC_ADMIN",
72878+ "CAP_SYSLOG",
72879+ "CAP_WAKE_ALARM"
72880+};
72881+
72882+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
72883+
72884+int gr_is_capable(const int cap)
72885+{
72886+#ifdef CONFIG_GRKERNSEC
72887+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
72888+ return 1;
72889+ return 0;
72890+#else
72891+ return 1;
72892+#endif
72893+}
72894+
72895+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
72896+{
72897+#ifdef CONFIG_GRKERNSEC
72898+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
72899+ return 1;
72900+ return 0;
72901+#else
72902+ return 1;
72903+#endif
72904+}
72905+
72906+int gr_is_capable_nolog(const int cap)
72907+{
72908+#ifdef CONFIG_GRKERNSEC
72909+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
72910+ return 1;
72911+ return 0;
72912+#else
72913+ return 1;
72914+#endif
72915+}
72916+
72917+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
72918+{
72919+#ifdef CONFIG_GRKERNSEC
72920+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
72921+ return 1;
72922+ return 0;
72923+#else
72924+ return 1;
72925+#endif
72926+}
72927+
72928+EXPORT_SYMBOL(gr_is_capable);
72929+EXPORT_SYMBOL(gr_is_capable_nolog);
72930+EXPORT_SYMBOL(gr_task_is_capable);
72931+EXPORT_SYMBOL(gr_task_is_capable_nolog);
72932diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
72933new file mode 100644
72934index 0000000..06cc6ea
72935--- /dev/null
72936+++ b/grsecurity/grsec_fifo.c
72937@@ -0,0 +1,24 @@
72938+#include <linux/kernel.h>
72939+#include <linux/sched.h>
72940+#include <linux/fs.h>
72941+#include <linux/file.h>
72942+#include <linux/grinternal.h>
72943+
72944+int
72945+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
72946+ const struct dentry *dir, const int flag, const int acc_mode)
72947+{
72948+#ifdef CONFIG_GRKERNSEC_FIFO
72949+ const struct cred *cred = current_cred();
72950+
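+	/* in sticky directories, refuse to open (without O_EXCL) a FIFO owned
+	   by neither the directory owner nor the opener */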
72951+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
72952+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
72953+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
72954+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
72955+ if (!inode_permission(dentry->d_inode, acc_mode))
72956+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
72957+ return -EACCES;
72958+ }
72959+#endif
72960+ return 0;
72961+}
72962diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
72963new file mode 100644
72964index 0000000..8ca18bf
72965--- /dev/null
72966+++ b/grsecurity/grsec_fork.c
72967@@ -0,0 +1,23 @@
72968+#include <linux/kernel.h>
72969+#include <linux/sched.h>
72970+#include <linux/grsecurity.h>
72971+#include <linux/grinternal.h>
72972+#include <linux/errno.h>
72973+
72974+void
72975+gr_log_forkfail(const int retval)
72976+{
72977+#ifdef CONFIG_GRKERNSEC_FORKFAIL
72978+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
72979+ switch (retval) {
72980+ case -EAGAIN:
72981+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
72982+ break;
72983+ case -ENOMEM:
72984+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
72985+ break;
72986+ }
72987+ }
72988+#endif
72989+ return;
72990+}
72991diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
72992new file mode 100644
72993index 0000000..a88e901
72994--- /dev/null
72995+++ b/grsecurity/grsec_init.c
72996@@ -0,0 +1,272 @@
72997+#include <linux/kernel.h>
72998+#include <linux/sched.h>
72999+#include <linux/mm.h>
73000+#include <linux/gracl.h>
73001+#include <linux/slab.h>
73002+#include <linux/vmalloc.h>
73003+#include <linux/percpu.h>
73004+#include <linux/module.h>
73005+
73006+int grsec_enable_ptrace_readexec;
73007+int grsec_enable_setxid;
73008+int grsec_enable_symlinkown;
73009+kgid_t grsec_symlinkown_gid;
73010+int grsec_enable_brute;
73011+int grsec_enable_link;
73012+int grsec_enable_dmesg;
73013+int grsec_enable_harden_ptrace;
73014+int grsec_enable_harden_ipc;
73015+int grsec_enable_fifo;
73016+int grsec_enable_execlog;
73017+int grsec_enable_signal;
73018+int grsec_enable_forkfail;
73019+int grsec_enable_audit_ptrace;
73020+int grsec_enable_time;
73021+int grsec_enable_group;
73022+kgid_t grsec_audit_gid;
73023+int grsec_enable_chdir;
73024+int grsec_enable_mount;
73025+int grsec_enable_rofs;
73026+int grsec_deny_new_usb;
73027+int grsec_enable_chroot_findtask;
73028+int grsec_enable_chroot_mount;
73029+int grsec_enable_chroot_shmat;
73030+int grsec_enable_chroot_fchdir;
73031+int grsec_enable_chroot_double;
73032+int grsec_enable_chroot_pivot;
73033+int grsec_enable_chroot_chdir;
73034+int grsec_enable_chroot_chmod;
73035+int grsec_enable_chroot_mknod;
73036+int grsec_enable_chroot_nice;
73037+int grsec_enable_chroot_execlog;
73038+int grsec_enable_chroot_caps;
73039+int grsec_enable_chroot_sysctl;
73040+int grsec_enable_chroot_unix;
73041+int grsec_enable_tpe;
73042+kgid_t grsec_tpe_gid;
73043+int grsec_enable_blackhole;
73044+#ifdef CONFIG_IPV6_MODULE
73045+EXPORT_SYMBOL(grsec_enable_blackhole);
73046+#endif
73047+int grsec_lastack_retries;
73048+int grsec_enable_tpe_all;
73049+int grsec_enable_tpe_invert;
73050+int grsec_enable_socket_all;
73051+kgid_t grsec_socket_all_gid;
73052+int grsec_enable_socket_client;
73053+kgid_t grsec_socket_client_gid;
73054+int grsec_enable_socket_server;
73055+kgid_t grsec_socket_server_gid;
73056+int grsec_resource_logging;
73057+int grsec_disable_privio;
73058+int grsec_enable_log_rwxmaps;
73059+int grsec_lock;
73060+
73061+DEFINE_SPINLOCK(grsec_alert_lock);
73062+unsigned long grsec_alert_wtime = 0;
73063+unsigned long grsec_alert_fyet = 0;
73064+
73065+DEFINE_SPINLOCK(grsec_audit_lock);
73066+
73067+DEFINE_RWLOCK(grsec_exec_file_lock);
73068+
73069+char *gr_shared_page[4];
73070+
73071+char *gr_alert_log_fmt;
73072+char *gr_audit_log_fmt;
73073+char *gr_alert_log_buf;
73074+char *gr_audit_log_buf;
73075+
73076+void __init
73077+grsecurity_init(void)
73078+{
73079+ int j;
73080+ /* create the per-cpu shared pages */
73081+
73082+#ifdef CONFIG_X86
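+	/* clear the BIOS keyboard buffer (head/tail pointers and the 32-byte
+	   buffer at 0x41a-0x43d) so stale boot-time keystrokes cannot leak */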
73083+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
73084+#endif
73085+
73086+ for (j = 0; j < 4; j++) {
73087+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
73088+ if (gr_shared_page[j] == NULL) {
73089+ panic("Unable to allocate grsecurity shared page");
73090+ return;
73091+ }
73092+ }
73093+
73094+ /* allocate log buffers */
73095+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
73096+ if (!gr_alert_log_fmt) {
73097+ panic("Unable to allocate grsecurity alert log format buffer");
73098+ return;
73099+ }
73100+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
73101+ if (!gr_audit_log_fmt) {
73102+ panic("Unable to allocate grsecurity audit log format buffer");
73103+ return;
73104+ }
73105+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
73106+ if (!gr_alert_log_buf) {
73107+ panic("Unable to allocate grsecurity alert log buffer");
73108+ return;
73109+ }
73110+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
73111+ if (!gr_audit_log_buf) {
73112+ panic("Unable to allocate grsecurity audit log buffer");
73113+ return;
73114+ }
73115+
73116+#ifdef CONFIG_GRKERNSEC_IO
73117+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
73118+ grsec_disable_privio = 1;
73119+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
73120+ grsec_disable_privio = 1;
73121+#else
73122+ grsec_disable_privio = 0;
73123+#endif
73124+#endif
73125+
73126+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
73127+ /* for backward compatibility, tpe_invert always defaults to on if
73128+ enabled in the kernel
73129+ */
73130+ grsec_enable_tpe_invert = 1;
73131+#endif
73132+
73133+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
73134+#ifndef CONFIG_GRKERNSEC_SYSCTL
73135+ grsec_lock = 1;
73136+#endif
73137+
73138+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
73139+ grsec_enable_log_rwxmaps = 1;
73140+#endif
73141+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
73142+ grsec_enable_group = 1;
73143+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
73144+#endif
73145+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
73146+ grsec_enable_ptrace_readexec = 1;
73147+#endif
73148+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
73149+ grsec_enable_chdir = 1;
73150+#endif
73151+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
73152+ grsec_enable_harden_ptrace = 1;
73153+#endif
73154+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
73155+ grsec_enable_harden_ipc = 1;
73156+#endif
73157+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
73158+ grsec_enable_mount = 1;
73159+#endif
73160+#ifdef CONFIG_GRKERNSEC_LINK
73161+ grsec_enable_link = 1;
73162+#endif
73163+#ifdef CONFIG_GRKERNSEC_BRUTE
73164+ grsec_enable_brute = 1;
73165+#endif
73166+#ifdef CONFIG_GRKERNSEC_DMESG
73167+ grsec_enable_dmesg = 1;
73168+#endif
73169+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73170+ grsec_enable_blackhole = 1;
73171+ grsec_lastack_retries = 4;
73172+#endif
73173+#ifdef CONFIG_GRKERNSEC_FIFO
73174+ grsec_enable_fifo = 1;
73175+#endif
73176+#ifdef CONFIG_GRKERNSEC_EXECLOG
73177+ grsec_enable_execlog = 1;
73178+#endif
73179+#ifdef CONFIG_GRKERNSEC_SETXID
73180+ grsec_enable_setxid = 1;
73181+#endif
73182+#ifdef CONFIG_GRKERNSEC_SIGNAL
73183+ grsec_enable_signal = 1;
73184+#endif
73185+#ifdef CONFIG_GRKERNSEC_FORKFAIL
73186+ grsec_enable_forkfail = 1;
73187+#endif
73188+#ifdef CONFIG_GRKERNSEC_TIME
73189+ grsec_enable_time = 1;
73190+#endif
73191+#ifdef CONFIG_GRKERNSEC_RESLOG
73192+ grsec_resource_logging = 1;
73193+#endif
73194+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
73195+ grsec_enable_chroot_findtask = 1;
73196+#endif
73197+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
73198+ grsec_enable_chroot_unix = 1;
73199+#endif
73200+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
73201+ grsec_enable_chroot_mount = 1;
73202+#endif
73203+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
73204+ grsec_enable_chroot_fchdir = 1;
73205+#endif
73206+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
73207+ grsec_enable_chroot_shmat = 1;
73208+#endif
73209+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
73210+ grsec_enable_audit_ptrace = 1;
73211+#endif
73212+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
73213+ grsec_enable_chroot_double = 1;
73214+#endif
73215+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
73216+ grsec_enable_chroot_pivot = 1;
73217+#endif
73218+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
73219+ grsec_enable_chroot_chdir = 1;
73220+#endif
73221+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
73222+ grsec_enable_chroot_chmod = 1;
73223+#endif
73224+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
73225+ grsec_enable_chroot_mknod = 1;
73226+#endif
73227+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
73228+ grsec_enable_chroot_nice = 1;
73229+#endif
73230+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
73231+ grsec_enable_chroot_execlog = 1;
73232+#endif
73233+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
73234+ grsec_enable_chroot_caps = 1;
73235+#endif
73236+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
73237+ grsec_enable_chroot_sysctl = 1;
73238+#endif
73239+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
73240+ grsec_enable_symlinkown = 1;
73241+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
73242+#endif
73243+#ifdef CONFIG_GRKERNSEC_TPE
73244+ grsec_enable_tpe = 1;
73245+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
73246+#ifdef CONFIG_GRKERNSEC_TPE_ALL
73247+ grsec_enable_tpe_all = 1;
73248+#endif
73249+#endif
73250+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
73251+ grsec_enable_socket_all = 1;
73252+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
73253+#endif
73254+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
73255+ grsec_enable_socket_client = 1;
73256+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
73257+#endif
73258+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
73259+ grsec_enable_socket_server = 1;
73260+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
73261+#endif
73262+#endif
73263+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
73264+ grsec_deny_new_usb = 1;
73265+#endif
73266+
73267+ return;
73268+}
73269diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
73270new file mode 100644
73271index 0000000..78d1680
73272--- /dev/null
73273+++ b/grsecurity/grsec_ipc.c
73274@@ -0,0 +1,48 @@
73275+#include <linux/kernel.h>
73276+#include <linux/mm.h>
73277+#include <linux/sched.h>
73278+#include <linux/file.h>
73279+#include <linux/ipc.h>
73280+#include <linux/ipc_namespace.h>
73281+#include <linux/grsecurity.h>
73282+#include <linux/grinternal.h>
73283+
73284+int
73285+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
73286+{
73287+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
73288+ int write;
73289+ int orig_granted_mode;
73290+ kuid_t euid;
73291+ kgid_t egid;
73292+
73293+ if (!grsec_enable_harden_ipc)
73294+ return 0;
73295+
73296+ euid = current_euid();
73297+ egid = current_egid();
73298+
73299+ write = requested_mode & 00002;
73300+ orig_granted_mode = ipcp->mode;
73301+
73302+ if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
73303+ orig_granted_mode >>= 6;
73304+ else {
73305+		/* if the permissions look wrong (world bits set), lock access to the owner */
73306+		if (orig_granted_mode & 0007)
73307+			orig_granted_mode = 0;
73308+		/* otherwise do an egid-only check */
73309+ else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
73310+ orig_granted_mode >>= 3;
73311+ /* otherwise, no access */
73312+ else
73313+ orig_granted_mode = 0;
73314+ }
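+ /* deny only when the normal ipcperms() result (granted_mode) would
+    allow the request but the stricter owner/group-derived bits would not,
+    and the task lacks CAP_IPC_OWNER in the IPC namespace */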
73315+ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
73316+ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
73317+ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
73318+ return 0;
73319+ }
73320+#endif
73321+ return 1;
73322+}
73323diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
73324new file mode 100644
73325index 0000000..5e05e20
73326--- /dev/null
73327+++ b/grsecurity/grsec_link.c
73328@@ -0,0 +1,58 @@
73329+#include <linux/kernel.h>
73330+#include <linux/sched.h>
73331+#include <linux/fs.h>
73332+#include <linux/file.h>
73333+#include <linux/grinternal.h>
73334+
73335+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
73336+{
73337+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
73338+ const struct inode *link_inode = link->dentry->d_inode;
73339+
73340+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
73341+ /* ignore root-owned links, e.g. /proc/self */
73342+ gr_is_global_nonroot(link_inode->i_uid) && target &&
73343+ !uid_eq(link_inode->i_uid, target->i_uid)) {
73344+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
73345+ return 1;
73346+ }
73347+#endif
73348+ return 0;
73349+}
73350+
73351+int
73352+gr_handle_follow_link(const struct inode *parent,
73353+ const struct inode *inode,
73354+ const struct dentry *dentry, const struct vfsmount *mnt)
73355+{
73356+#ifdef CONFIG_GRKERNSEC_LINK
73357+ const struct cred *cred = current_cred();
73358+
73359+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
73360+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
73361+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
73362+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
73363+ return -EACCES;
73364+ }
73365+#endif
73366+ return 0;
73367+}
73368+
73369+int
73370+gr_handle_hardlink(const struct dentry *dentry,
73371+ const struct vfsmount *mnt,
73372+ struct inode *inode, const int mode, const struct filename *to)
73373+{
73374+#ifdef CONFIG_GRKERNSEC_LINK
73375+ const struct cred *cred = current_cred();
73376+
73377+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
73378+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
73379+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
73380+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
73381+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
73382+ return -EPERM;
73383+ }
73384+#endif
73385+ return 0;
73386+}
73387diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
73388new file mode 100644
73389index 0000000..dbe0a6b
73390--- /dev/null
73391+++ b/grsecurity/grsec_log.c
73392@@ -0,0 +1,341 @@
73393+#include <linux/kernel.h>
73394+#include <linux/sched.h>
73395+#include <linux/file.h>
73396+#include <linux/tty.h>
73397+#include <linux/fs.h>
73398+#include <linux/mm.h>
73399+#include <linux/grinternal.h>
73400+
73401+#ifdef CONFIG_TREE_PREEMPT_RCU
73402+#define DISABLE_PREEMPT() preempt_disable()
73403+#define ENABLE_PREEMPT() preempt_enable()
73404+#else
73405+#define DISABLE_PREEMPT()
73406+#define ENABLE_PREEMPT()
73407+#endif
73408+
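+/* log construction is fully serialized: alerts and audits each take their
+   own spinlock, and END_LOCKS additionally kills the offending task (via
+   gr_handle_alertkill) when the event class is GR_DONT_AUDIT */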
73409+#define BEGIN_LOCKS(x) \
73410+ DISABLE_PREEMPT(); \
73411+ rcu_read_lock(); \
73412+ read_lock(&tasklist_lock); \
73413+ read_lock(&grsec_exec_file_lock); \
73414+ if (x != GR_DO_AUDIT) \
73415+ spin_lock(&grsec_alert_lock); \
73416+ else \
73417+ spin_lock(&grsec_audit_lock)
73418+
73419+#define END_LOCKS(x) \
73420+ if (x != GR_DO_AUDIT) \
73421+ spin_unlock(&grsec_alert_lock); \
73422+ else \
73423+ spin_unlock(&grsec_audit_lock); \
73424+ read_unlock(&grsec_exec_file_lock); \
73425+ read_unlock(&tasklist_lock); \
73426+ rcu_read_unlock(); \
73427+ ENABLE_PREEMPT(); \
73428+ if (x == GR_DONT_AUDIT) \
73429+ gr_handle_alertkill(current)
73430+
73431+enum {
73432+ FLOODING,
73433+ NO_FLOODING
73434+};
73435+
73436+extern char *gr_alert_log_fmt;
73437+extern char *gr_audit_log_fmt;
73438+extern char *gr_alert_log_buf;
73439+extern char *gr_audit_log_buf;
73440+
73441+static int gr_log_start(int audit)
73442+{
73443+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
73444+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
73445+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
73446+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
73447+ unsigned long curr_secs = get_seconds();
73448+
73449+ if (audit == GR_DO_AUDIT)
73450+ goto set_fmt;
73451+
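+ /* flood control: permit up to CONFIG_GRKERNSEC_FLOODBURST alerts per
+    CONFIG_GRKERNSEC_FLOODTIME-second window, then suppress logging
+    until a new window begins */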
73452+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
73453+ grsec_alert_wtime = curr_secs;
73454+ grsec_alert_fyet = 0;
73455+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
73456+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
73457+ grsec_alert_fyet++;
73458+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
73459+ grsec_alert_wtime = curr_secs;
73460+ grsec_alert_fyet++;
73461+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
73462+ return FLOODING;
73463+ }
73464+ else return FLOODING;
73465+
73466+set_fmt:
73467+#endif
73468+ memset(buf, 0, PAGE_SIZE);
73469+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
73470+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
73471+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
73472+ } else if (current->signal->curr_ip) {
73473+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
73474+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
73475+ } else if (gr_acl_is_enabled()) {
73476+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
73477+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
73478+ } else {
73479+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
73480+ strcpy(buf, fmt);
73481+ }
73482+
73483+ return NO_FLOODING;
73484+}
73485+
73486+static void gr_log_middle(int audit, const char *msg, va_list ap)
73487+ __attribute__ ((format (printf, 2, 0)));
73488+
73489+static void gr_log_middle(int audit, const char *msg, va_list ap)
73490+{
73491+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
73492+ unsigned int len = strlen(buf);
73493+
73494+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
73495+
73496+ return;
73497+}
73498+
73499+static void gr_log_middle_varargs(int audit, const char *msg, ...)
73500+ __attribute__ ((format (printf, 2, 3)));
73501+
73502+static void gr_log_middle_varargs(int audit, const char *msg, ...)
73503+{
73504+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
73505+ unsigned int len = strlen(buf);
73506+ va_list ap;
73507+
73508+ va_start(ap, msg);
73509+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
73510+ va_end(ap);
73511+
73512+ return;
73513+}
73514+
73515+static void gr_log_end(int audit, int append_default)
73516+{
73517+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
73518+ if (append_default) {
73519+ struct task_struct *task = current;
73520+ struct task_struct *parent = task->real_parent;
73521+ const struct cred *cred = __task_cred(task);
73522+ const struct cred *pcred = __task_cred(parent);
73523+ unsigned int len = strlen(buf);
73524+
73525+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
73526+ }
73527+
73528+ printk("%s\n", buf);
73529+
73530+ return;
73531+}
73532+
73533+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
73534+{
73535+ int logtype;
73536+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
73537+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
73538+ void *voidptr = NULL;
73539+ int num1 = 0, num2 = 0;
73540+ unsigned long ulong1 = 0, ulong2 = 0;
73541+ struct dentry *dentry = NULL;
73542+ struct vfsmount *mnt = NULL;
73543+ struct file *file = NULL;
73544+ struct task_struct *task = NULL;
73545+ struct vm_area_struct *vma = NULL;
73546+ const struct cred *cred, *pcred;
73547+ va_list ap;
73548+
73549+ BEGIN_LOCKS(audit);
73550+ logtype = gr_log_start(audit);
73551+ if (logtype == FLOODING) {
73552+ END_LOCKS(audit);
73553+ return;
73554+ }
73555+ va_start(ap, argtypes);
73556+ switch (argtypes) {
73557+ case GR_TTYSNIFF:
73558+ task = va_arg(ap, struct task_struct *);
73559+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
73560+ break;
73561+ case GR_SYSCTL_HIDDEN:
73562+ str1 = va_arg(ap, char *);
73563+ gr_log_middle_varargs(audit, msg, result, str1);
73564+ break;
73565+ case GR_RBAC:
73566+ dentry = va_arg(ap, struct dentry *);
73567+ mnt = va_arg(ap, struct vfsmount *);
73568+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
73569+ break;
73570+ case GR_RBAC_STR:
73571+ dentry = va_arg(ap, struct dentry *);
73572+ mnt = va_arg(ap, struct vfsmount *);
73573+ str1 = va_arg(ap, char *);
73574+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
73575+ break;
73576+ case GR_STR_RBAC:
73577+ str1 = va_arg(ap, char *);
73578+ dentry = va_arg(ap, struct dentry *);
73579+ mnt = va_arg(ap, struct vfsmount *);
73580+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
73581+ break;
73582+ case GR_RBAC_MODE2:
73583+ dentry = va_arg(ap, struct dentry *);
73584+ mnt = va_arg(ap, struct vfsmount *);
73585+ str1 = va_arg(ap, char *);
73586+ str2 = va_arg(ap, char *);
73587+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
73588+ break;
73589+ case GR_RBAC_MODE3:
73590+ dentry = va_arg(ap, struct dentry *);
73591+ mnt = va_arg(ap, struct vfsmount *);
73592+ str1 = va_arg(ap, char *);
73593+ str2 = va_arg(ap, char *);
73594+ str3 = va_arg(ap, char *);
73595+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
73596+ break;
73597+ case GR_FILENAME:
73598+ dentry = va_arg(ap, struct dentry *);
73599+ mnt = va_arg(ap, struct vfsmount *);
73600+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
73601+ break;
73602+ case GR_STR_FILENAME:
73603+ str1 = va_arg(ap, char *);
73604+ dentry = va_arg(ap, struct dentry *);
73605+ mnt = va_arg(ap, struct vfsmount *);
73606+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
73607+ break;
73608+ case GR_FILENAME_STR:
73609+ dentry = va_arg(ap, struct dentry *);
73610+ mnt = va_arg(ap, struct vfsmount *);
73611+ str1 = va_arg(ap, char *);
73612+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
73613+ break;
73614+ case GR_FILENAME_TWO_INT:
73615+ dentry = va_arg(ap, struct dentry *);
73616+ mnt = va_arg(ap, struct vfsmount *);
73617+ num1 = va_arg(ap, int);
73618+ num2 = va_arg(ap, int);
73619+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
73620+ break;
73621+ case GR_FILENAME_TWO_INT_STR:
73622+ dentry = va_arg(ap, struct dentry *);
73623+ mnt = va_arg(ap, struct vfsmount *);
73624+ num1 = va_arg(ap, int);
73625+ num2 = va_arg(ap, int);
73626+ str1 = va_arg(ap, char *);
73627+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
73628+ break;
73629+ case GR_TEXTREL:
73630+ file = va_arg(ap, struct file *);
73631+ ulong1 = va_arg(ap, unsigned long);
73632+ ulong2 = va_arg(ap, unsigned long);
73633+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
73634+ break;
73635+ case GR_PTRACE:
73636+ task = va_arg(ap, struct task_struct *);
73637+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
73638+ break;
73639+ case GR_RESOURCE:
73640+ task = va_arg(ap, struct task_struct *);
73641+ cred = __task_cred(task);
73642+ pcred = __task_cred(task->real_parent);
73643+ ulong1 = va_arg(ap, unsigned long);
73644+ str1 = va_arg(ap, char *);
73645+ ulong2 = va_arg(ap, unsigned long);
73646+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
73647+ break;
73648+ case GR_CAP:
73649+ task = va_arg(ap, struct task_struct *);
73650+ cred = __task_cred(task);
73651+ pcred = __task_cred(task->real_parent);
73652+ str1 = va_arg(ap, char *);
73653+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
73654+ break;
73655+ case GR_SIG:
73656+ str1 = va_arg(ap, char *);
73657+ voidptr = va_arg(ap, void *);
73658+ gr_log_middle_varargs(audit, msg, str1, voidptr);
73659+ break;
73660+ case GR_SIG2:
73661+ task = va_arg(ap, struct task_struct *);
73662+ cred = __task_cred(task);
73663+ pcred = __task_cred(task->real_parent);
73664+ num1 = va_arg(ap, int);
73665+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
73666+ break;
73667+ case GR_CRASH1:
73668+ task = va_arg(ap, struct task_struct *);
73669+ cred = __task_cred(task);
73670+ pcred = __task_cred(task->real_parent);
73671+ ulong1 = va_arg(ap, unsigned long);
73672+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
73673+ break;
73674+ case GR_CRASH2:
73675+ task = va_arg(ap, struct task_struct *);
73676+ cred = __task_cred(task);
73677+ pcred = __task_cred(task->real_parent);
73678+ ulong1 = va_arg(ap, unsigned long);
73679+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
73680+ break;
73681+ case GR_RWXMAP:
73682+ file = va_arg(ap, struct file *);
73683+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
73684+ break;
73685+ case GR_RWXMAPVMA:
73686+ vma = va_arg(ap, struct vm_area_struct *);
73687+ if (vma->vm_file)
73688+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
73689+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
73690+ str1 = "<stack>";
73691+ else if (vma->vm_start <= current->mm->brk &&
73692+ vma->vm_end >= current->mm->start_brk)
73693+ str1 = "<heap>";
73694+ else
73695+ str1 = "<anonymous mapping>";
73696+ gr_log_middle_varargs(audit, msg, str1);
73697+ break;
73698+ case GR_PSACCT:
73699+ {
73700+ unsigned int wday, cday;
73701+ __u8 whr, chr;
73702+ __u8 wmin, cmin;
73703+ __u8 wsec, csec;
73704+ char cur_tty[64] = { 0 };
73705+ char parent_tty[64] = { 0 };
73706+
73707+ task = va_arg(ap, struct task_struct *);
73708+ wday = va_arg(ap, unsigned int);
73709+ cday = va_arg(ap, unsigned int);
73710+ whr = va_arg(ap, int);
73711+ chr = va_arg(ap, int);
73712+ wmin = va_arg(ap, int);
73713+ cmin = va_arg(ap, int);
73714+ wsec = va_arg(ap, int);
73715+ csec = va_arg(ap, int);
73716+ ulong1 = va_arg(ap, unsigned long);
73717+ cred = __task_cred(task);
73718+ pcred = __task_cred(task->real_parent);
73719+
73720+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
73721+ }
73722+ break;
73723+ default:
73724+ gr_log_middle(audit, msg, ap);
73725+ }
73726+ va_end(ap);
73727+ // these don't need DEFAULTSECARGS printed on the end
73728+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
73729+ gr_log_end(audit, 0);
73730+ else
73731+ gr_log_end(audit, 1);
73732+ END_LOCKS(audit);
73733+}
73734diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
73735new file mode 100644
73736index 0000000..f536303
73737--- /dev/null
73738+++ b/grsecurity/grsec_mem.c
73739@@ -0,0 +1,40 @@
73740+#include <linux/kernel.h>
73741+#include <linux/sched.h>
73742+#include <linux/mm.h>
73743+#include <linux/mman.h>
73744+#include <linux/grinternal.h>
73745+
73746+void
73747+gr_handle_ioperm(void)
73748+{
73749+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
73750+ return;
73751+}
73752+
73753+void
73754+gr_handle_iopl(void)
73755+{
73756+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
73757+ return;
73758+}
73759+
73760+void
73761+gr_handle_mem_readwrite(u64 from, u64 to)
73762+{
73763+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
73764+ return;
73765+}
73766+
73767+void
73768+gr_handle_vm86(void)
73769+{
73770+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
73771+ return;
73772+}
73773+
73774+void
73775+gr_log_badprocpid(const char *entry)
73776+{
73777+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
73778+ return;
73779+}
73780diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
73781new file mode 100644
73782index 0000000..cd9e124
73783--- /dev/null
73784+++ b/grsecurity/grsec_mount.c
73785@@ -0,0 +1,65 @@
73786+#include <linux/kernel.h>
73787+#include <linux/sched.h>
73788+#include <linux/mount.h>
73789+#include <linux/major.h>
73790+#include <linux/grsecurity.h>
73791+#include <linux/grinternal.h>
73792+
73793+void
73794+gr_log_remount(const char *devname, const int retval)
73795+{
73796+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
73797+ if (grsec_enable_mount && (retval >= 0))
73798+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
73799+#endif
73800+ return;
73801+}
73802+
73803+void
73804+gr_log_unmount(const char *devname, const int retval)
73805+{
73806+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
73807+ if (grsec_enable_mount && (retval >= 0))
73808+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
73809+#endif
73810+ return;
73811+}
73812+
73813+void
73814+gr_log_mount(const char *from, const char *to, const int retval)
73815+{
73816+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
73817+ if (grsec_enable_mount && (retval >= 0))
73818+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
73819+#endif
73820+ return;
73821+}
73822+
73823+int
73824+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
73825+{
73826+#ifdef CONFIG_GRKERNSEC_ROFS
73827+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
73828+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
73829+ return -EPERM;
73830+ } else
73831+ return 0;
73832+#endif
73833+ return 0;
73834+}
73835+
73836+int
73837+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
73838+{
73839+#ifdef CONFIG_GRKERNSEC_ROFS
73840+ struct inode *inode = dentry->d_inode;
73841+
73842+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
73843+ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
73844+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
73845+ return -EPERM;
73846+ } else
73847+ return 0;
73848+#endif
73849+ return 0;
73850+}
73851diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
73852new file mode 100644
73853index 0000000..6ee9d50
73854--- /dev/null
73855+++ b/grsecurity/grsec_pax.c
73856@@ -0,0 +1,45 @@
73857+#include <linux/kernel.h>
73858+#include <linux/sched.h>
73859+#include <linux/mm.h>
73860+#include <linux/file.h>
73861+#include <linux/grinternal.h>
73862+#include <linux/grsecurity.h>
73863+
73864+void
73865+gr_log_textrel(struct vm_area_struct * vma)
73866+{
73867+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
73868+ if (grsec_enable_log_rwxmaps)
73869+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
73870+#endif
73871+ return;
73872+}
73873+
73874+void gr_log_ptgnustack(struct file *file)
73875+{
73876+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
73877+ if (grsec_enable_log_rwxmaps)
73878+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
73879+#endif
73880+ return;
73881+}
73882+
73883+void
73884+gr_log_rwxmmap(struct file *file)
73885+{
73886+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
73887+ if (grsec_enable_log_rwxmaps)
73888+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
73889+#endif
73890+ return;
73891+}
73892+
73893+void
73894+gr_log_rwxmprotect(struct vm_area_struct *vma)
73895+{
73896+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
73897+ if (grsec_enable_log_rwxmaps)
73898+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
73899+#endif
73900+ return;
73901+}
73902diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
73903new file mode 100644
73904index 0000000..f7f29aa
73905--- /dev/null
73906+++ b/grsecurity/grsec_ptrace.c
73907@@ -0,0 +1,30 @@
73908+#include <linux/kernel.h>
73909+#include <linux/sched.h>
73910+#include <linux/grinternal.h>
73911+#include <linux/security.h>
73912+
73913+void
73914+gr_audit_ptrace(struct task_struct *task)
73915+{
73916+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
73917+ if (grsec_enable_audit_ptrace)
73918+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
73919+#endif
73920+ return;
73921+}
73922+
73923+int
73924+gr_ptrace_readexec(struct file *file, int unsafe_flags)
73925+{
73926+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
73927+ const struct dentry *dentry = file->f_path.dentry;
73928+ const struct vfsmount *mnt = file->f_path.mnt;
73929+
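+ /* when an exec occurs under an unsafe ptrace, deny it if the binary is
+    unreadable to the task (by DAC or by RBAC policy); otherwise the
+    tracer could dump the image of an execute-only binary through the
+    tracee */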
73930+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
73931+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
73932+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
73933+ return -EACCES;
73934+ }
73935+#endif
73936+ return 0;
73937+}
73938diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
73939new file mode 100644
73940index 0000000..3860c7e
73941--- /dev/null
73942+++ b/grsecurity/grsec_sig.c
73943@@ -0,0 +1,236 @@
73944+#include <linux/kernel.h>
73945+#include <linux/sched.h>
73946+#include <linux/fs.h>
73947+#include <linux/delay.h>
73948+#include <linux/grsecurity.h>
73949+#include <linux/grinternal.h>
73950+#include <linux/hardirq.h>
73951+
73952+char *signames[] = {
73953+ [SIGSEGV] = "Segmentation fault",
73954+ [SIGILL] = "Illegal instruction",
73955+ [SIGABRT] = "Abort",
73956+ [SIGBUS] = "Invalid alignment/Bus error"
73957+};
73958+
73959+void
73960+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
73961+{
73962+#ifdef CONFIG_GRKERNSEC_SIGNAL
73963+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
73964+ (sig == SIGABRT) || (sig == SIGBUS))) {
73965+ if (task_pid_nr(t) == task_pid_nr(current)) {
73966+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
73967+ } else {
73968+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
73969+ }
73970+ }
73971+#endif
73972+ return;
73973+}
73974+
73975+int
73976+gr_handle_signal(const struct task_struct *p, const int sig)
73977+{
73978+#ifdef CONFIG_GRKERNSEC
73979+ /* ignore the 0 signal for protected task checks */
73980+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
73981+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
73982+ return -EPERM;
73983+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
73984+ return -EPERM;
73985+ }
73986+#endif
73987+ return 0;
73988+}
73989+
73990+#ifdef CONFIG_GRKERNSEC
73991+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
73992+
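+/* deliver sig to t even if it is currently ignored or blocked: reset the
+   handler to SIG_DFL, unblock the signal, and strip SIGNAL_UNKILLABLE so
+   that otherwise unkillable tasks do not survive the forced signal */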
73993+int gr_fake_force_sig(int sig, struct task_struct *t)
73994+{
73995+ unsigned long flags;
73996+ int ret, blocked, ignored;
73997+ struct k_sigaction *action;
73998+
73999+ spin_lock_irqsave(&t->sighand->siglock, flags);
74000+ action = &t->sighand->action[sig-1];
74001+ ignored = action->sa.sa_handler == SIG_IGN;
74002+ blocked = sigismember(&t->blocked, sig);
74003+ if (blocked || ignored) {
74004+ action->sa.sa_handler = SIG_DFL;
74005+ if (blocked) {
74006+ sigdelset(&t->blocked, sig);
74007+ recalc_sigpending_and_wake(t);
74008+ }
74009+ }
74010+ if (action->sa.sa_handler == SIG_DFL)
74011+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
74012+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
74013+
74014+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
74015+
74016+ return ret;
74017+}
74018+#endif
74019+
74020+#define GR_USER_BAN_TIME (15 * 60)
74021+#define GR_DAEMON_BRUTE_TIME (30 * 60)
74022+
74023+void gr_handle_brute_attach(int dumpable)
74024+{
74025+#ifdef CONFIG_GRKERNSEC_BRUTE
74026+ struct task_struct *p = current;
74027+ kuid_t uid = GLOBAL_ROOT_UID;
74028+ int daemon = 0;
74029+
74030+ if (!grsec_enable_brute)
74031+ return;
74032+
74033+ rcu_read_lock();
74034+ read_lock(&tasklist_lock);
74035+ read_lock(&grsec_exec_file_lock);
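+ /* a crash in a process whose parent runs the same binary is treated as
+    a bruteforce attempt against a forking daemon: the parent's future
+    forks are throttled.  a crash in a privilege-changed
+    (non-SUID_DUMP_USER) process instead bans the user from suid/sgid
+    execution for a period and kills their other copies of the binary */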
74036+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
74037+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
74038+ p->real_parent->brute = 1;
74039+ daemon = 1;
74040+ } else {
74041+ const struct cred *cred = __task_cred(p), *cred2;
74042+ struct task_struct *tsk, *tsk2;
74043+
74044+ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
74045+ struct user_struct *user;
74046+
74047+ uid = cred->uid;
74048+
74049+ /* this reference is put (released) at exec time once the ban has expired */
74050+ user = find_user(uid);
74051+ if (user == NULL)
74052+ goto unlock;
74053+ user->suid_banned = 1;
74054+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
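+ /* ~0UL is reserved by suid_ban_expired() to mean "never expires", so
+    step back one second if the computed expiry happens to land on it */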
74055+ if (user->suid_ban_expires == ~0UL)
74056+ user->suid_ban_expires--;
74057+
74058+ /* only kill other threads of the same binary, from the same user */
74059+ do_each_thread(tsk2, tsk) {
74060+ cred2 = __task_cred(tsk);
74061+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
74062+ gr_fake_force_sig(SIGKILL, tsk);
74063+ } while_each_thread(tsk2, tsk);
74064+ }
74065+ }
74066+unlock:
74067+ read_unlock(&grsec_exec_file_lock);
74068+ read_unlock(&tasklist_lock);
74069+ rcu_read_unlock();
74070+
74071+ if (gr_is_global_nonroot(uid))
74072+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
74073+ else if (daemon)
74074+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
74075+
74076+#endif
74077+ return;
74078+}
74079+
74080+void gr_handle_brute_check(void)
74081+{
74082+#ifdef CONFIG_GRKERNSEC_BRUTE
74083+ struct task_struct *p = current;
74084+
74085+ if (unlikely(p->brute)) {
74086+ if (!grsec_enable_brute)
74087+ p->brute = 0;
74088+ else if (time_before(get_seconds(), p->brute_expires))
74089+ msleep(30 * 1000);
74090+ }
74091+#endif
74092+ return;
74093+}
74094+
74095+void gr_handle_kernel_exploit(void)
74096+{
74097+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
74098+ const struct cred *cred;
74099+ struct task_struct *tsk, *tsk2;
74100+ struct user_struct *user;
74101+ kuid_t uid;
74102+
74103+ if (in_irq() || in_serving_softirq() || in_nmi())
74104+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
74105+
74106+ uid = current_uid();
74107+
74108+ if (gr_is_global_root(uid))
74109+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
74110+ else {
74111+ /* kill all the processes of this user, hold a reference
74112+ to their user_struct, and prevent them from creating
74113+ another process until system reset
74114+ */
74115+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
74116+ GR_GLOBAL_UID(uid));
74117+ /* we intentionally leak this ref */
74118+ user = get_uid(current->cred->user);
74119+ if (user)
74120+ user->kernel_banned = 1;
74121+
74122+ /* kill all processes of this user */
74123+ read_lock(&tasklist_lock);
74124+ do_each_thread(tsk2, tsk) {
74125+ cred = __task_cred(tsk);
74126+ if (uid_eq(cred->uid, uid))
74127+ gr_fake_force_sig(SIGKILL, tsk);
74128+ } while_each_thread(tsk2, tsk);
74129+ read_unlock(&tasklist_lock);
74130+ }
74131+#endif
74132+}
74133+
74134+#ifdef CONFIG_GRKERNSEC_BRUTE
74135+static bool suid_ban_expired(struct user_struct *user)
74136+{
74137+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
74138+ user->suid_banned = 0;
74139+ user->suid_ban_expires = 0;
74140+ free_uid(user);
74141+ return true;
74142+ }
74143+
74144+ return false;
74145+}
74146+#endif
74147+
74148+int gr_process_kernel_exec_ban(void)
74149+{
74150+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
74151+ if (unlikely(current->cred->user->kernel_banned))
74152+ return -EPERM;
74153+#endif
74154+ return 0;
74155+}
74156+
74157+int gr_process_kernel_setuid_ban(struct user_struct *user)
74158+{
74159+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
74160+ if (unlikely(user->kernel_banned))
74161+ gr_fake_force_sig(SIGKILL, current);
74162+#endif
74163+ return 0;
74164+}
74165+
74166+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
74167+{
74168+#ifdef CONFIG_GRKERNSEC_BRUTE
74169+ struct user_struct *user = current->cred->user;
74170+ if (unlikely(user->suid_banned)) {
74171+ if (suid_ban_expired(user))
74172+ return 0;
74173+ /* disallow execution of suid binaries only */
74174+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
74175+ return -EPERM;
74176+ }
74177+#endif
74178+ return 0;
74179+}
74180diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
74181new file mode 100644
74182index 0000000..4030d57
74183--- /dev/null
74184+++ b/grsecurity/grsec_sock.c
74185@@ -0,0 +1,244 @@
74186+#include <linux/kernel.h>
74187+#include <linux/module.h>
74188+#include <linux/sched.h>
74189+#include <linux/file.h>
74190+#include <linux/net.h>
74191+#include <linux/in.h>
74192+#include <linux/ip.h>
74193+#include <net/sock.h>
74194+#include <net/inet_sock.h>
74195+#include <linux/grsecurity.h>
74196+#include <linux/grinternal.h>
74197+#include <linux/gracl.h>
74198+
74199+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
74200+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
74201+
74202+EXPORT_SYMBOL(gr_search_udp_recvmsg);
74203+EXPORT_SYMBOL(gr_search_udp_sendmsg);
74204+
74205+#ifdef CONFIG_UNIX_MODULE
74206+EXPORT_SYMBOL(gr_acl_handle_unix);
74207+EXPORT_SYMBOL(gr_acl_handle_mknod);
74208+EXPORT_SYMBOL(gr_handle_chroot_unix);
74209+EXPORT_SYMBOL(gr_handle_create);
74210+#endif
74211+
74212+#ifdef CONFIG_GRKERNSEC
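+/* fixed-size chained hash table mapping a TCP 4-tuple to the signal_struct
+   of the task that owns the connection; the prime table size helps the
+   modulo hash in conn_hash() spread entries across buckets */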
74213+#define gr_conn_table_size 32749
74214+struct conn_table_entry {
74215+ struct conn_table_entry *next;
74216+ struct signal_struct *sig;
74217+};
74218+
74219+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
74220+DEFINE_SPINLOCK(gr_conn_table_lock);
74221+
74222+extern const char * gr_socktype_to_name(unsigned char type);
74223+extern const char * gr_proto_to_name(unsigned char proto);
74224+extern const char * gr_sockfamily_to_name(unsigned char family);
74225+
74226+static __inline__ int
74227+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
74228+{
74229+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
74230+}
74231+
74232+static __inline__ int
74233+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
74234+ __u16 sport, __u16 dport)
74235+{
74236+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
74237+ sig->gr_sport == sport && sig->gr_dport == dport))
74238+ return 1;
74239+ else
74240+ return 0;
74241+}
74242+
74243+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
74244+{
74245+ struct conn_table_entry **match;
74246+ unsigned int index;
74247+
74248+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
74249+ sig->gr_sport, sig->gr_dport,
74250+ gr_conn_table_size);
74251+
74252+ newent->sig = sig;
74253+
74254+ match = &gr_conn_table[index];
74255+ newent->next = *match;
74256+ *match = newent;
74257+
74258+ return;
74259+}
74260+
74261+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
74262+{
74263+ struct conn_table_entry *match, *last = NULL;
74264+ unsigned int index;
74265+
74266+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
74267+ sig->gr_sport, sig->gr_dport,
74268+ gr_conn_table_size);
74269+
74270+ match = gr_conn_table[index];
74271+ while (match && !conn_match(match->sig,
74272+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
74273+ sig->gr_dport)) {
74274+ last = match;
74275+ match = match->next;
74276+ }
74277+
74278+ if (match) {
74279+ if (last)
74280+ last->next = match->next;
74281+ else
74282+ gr_conn_table[index] = NULL;
74283+ kfree(match);
74284+ }
74285+
74286+ return;
74287+}
74288+
74289+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
74290+ __u16 sport, __u16 dport)
74291+{
74292+ struct conn_table_entry *match;
74293+ unsigned int index;
74294+
74295+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
74296+
74297+ match = gr_conn_table[index];
74298+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
74299+ match = match->next;
74300+
74301+ if (match)
74302+ return match->sig;
74303+ else
74304+ return NULL;
74305+}
74306+
74307+#endif
74308+
74309+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
74310+{
74311+#ifdef CONFIG_GRKERNSEC
74312+ struct signal_struct *sig = task->signal;
74313+ struct conn_table_entry *newent;
74314+
74315+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
74316+ if (newent == NULL)
74317+ return;
74318+ /* no bh lock needed since we are called with bh disabled */
74319+ spin_lock(&gr_conn_table_lock);
74320+ gr_del_task_from_ip_table_nolock(sig);
74321+ sig->gr_saddr = inet->inet_rcv_saddr;
74322+ sig->gr_daddr = inet->inet_daddr;
74323+ sig->gr_sport = inet->inet_sport;
74324+ sig->gr_dport = inet->inet_dport;
74325+ gr_add_to_task_ip_table_nolock(sig, newent);
74326+ spin_unlock(&gr_conn_table_lock);
74327+#endif
74328+ return;
74329+}
74330+
74331+void gr_del_task_from_ip_table(struct task_struct *task)
74332+{
74333+#ifdef CONFIG_GRKERNSEC
74334+ spin_lock_bh(&gr_conn_table_lock);
74335+ gr_del_task_from_ip_table_nolock(task->signal);
74336+ spin_unlock_bh(&gr_conn_table_lock);
74337+#endif
74338+ return;
74339+}
74340+
74341+void
74342+gr_attach_curr_ip(const struct sock *sk)
74343+{
74344+#ifdef CONFIG_GRKERNSEC
74345+ struct signal_struct *p, *set;
74346+ const struct inet_sock *inet = inet_sk(sk);
74347+
74348+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
74349+ return;
74350+
74351+ set = current->signal;
74352+
74353+ spin_lock_bh(&gr_conn_table_lock);
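+ /* look up the entry with source and destination swapped: our destination
+    address/port is the peer's source, so a match means the peer endpoint
+    is a local task whose recorded origin IP we inherit */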
74354+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
74355+ inet->inet_dport, inet->inet_sport);
74356+ if (unlikely(p != NULL)) {
74357+ set->curr_ip = p->curr_ip;
74358+ set->used_accept = 1;
74359+ gr_del_task_from_ip_table_nolock(p);
74360+ spin_unlock_bh(&gr_conn_table_lock);
74361+ return;
74362+ }
74363+ spin_unlock_bh(&gr_conn_table_lock);
74364+
74365+ set->curr_ip = inet->inet_daddr;
74366+ set->used_accept = 1;
74367+#endif
74368+ return;
74369+}
74370+
74371+int
74372+gr_handle_sock_all(const int family, const int type, const int protocol)
74373+{
74374+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
74375+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
74376+ (family != AF_UNIX)) {
74377+ if (family == AF_INET)
74378+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
74379+ else
74380+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
74381+ return -EACCES;
74382+ }
74383+#endif
74384+ return 0;
74385+}
74386+
74387+int
74388+gr_handle_sock_server(const struct sockaddr *sck)
74389+{
74390+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
74391+ if (grsec_enable_socket_server &&
74392+ in_group_p(grsec_socket_server_gid) &&
74393+ sck && (sck->sa_family != AF_UNIX) &&
74394+ (sck->sa_family != AF_LOCAL)) {
74395+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
74396+ return -EACCES;
74397+ }
74398+#endif
74399+ return 0;
74400+}
74401+
74402+int
74403+gr_handle_sock_server_other(const struct sock *sck)
74404+{
74405+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
74406+ if (grsec_enable_socket_server &&
74407+ in_group_p(grsec_socket_server_gid) &&
74408+ sck && (sck->sk_family != AF_UNIX) &&
74409+ (sck->sk_family != AF_LOCAL)) {
74410+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
74411+ return -EACCES;
74412+ }
74413+#endif
74414+ return 0;
74415+}
74416+
74417+int
74418+gr_handle_sock_client(const struct sockaddr *sck)
74419+{
74420+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
74421+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
74422+ sck && (sck->sa_family != AF_UNIX) &&
74423+ (sck->sa_family != AF_LOCAL)) {
74424+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
74425+ return -EACCES;
74426+ }
74427+#endif
74428+ return 0;
74429+}
74430diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
74431new file mode 100644
74432index 0000000..8159888
74433--- /dev/null
74434+++ b/grsecurity/grsec_sysctl.c
74435@@ -0,0 +1,479 @@
74436+#include <linux/kernel.h>
74437+#include <linux/sched.h>
74438+#include <linux/sysctl.h>
74439+#include <linux/grsecurity.h>
74440+#include <linux/grinternal.h>
74441+
74442+int
74443+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
74444+{
74445+#ifdef CONFIG_GRKERNSEC_SYSCTL
74446+ if (dirname == NULL || name == NULL)
74447+ return 0;
74448+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
74449+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
74450+ return -EACCES;
74451+ }
74452+#endif
74453+ return 0;
74454+}
74455+
74456+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
74457+static int __maybe_unused __read_only one = 1;
74458+#endif
74459+
74460+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
74461+ defined(CONFIG_GRKERNSEC_DENYUSB)
74462+struct ctl_table grsecurity_table[] = {
74463+#ifdef CONFIG_GRKERNSEC_SYSCTL
74464+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
74465+#ifdef CONFIG_GRKERNSEC_IO
74466+ {
74467+ .procname = "disable_priv_io",
74468+ .data = &grsec_disable_privio,
74469+ .maxlen = sizeof(int),
74470+ .mode = 0600,
74471+ .proc_handler = &proc_dointvec,
74472+ },
74473+#endif
74474+#endif
74475+#ifdef CONFIG_GRKERNSEC_LINK
74476+ {
74477+ .procname = "linking_restrictions",
74478+ .data = &grsec_enable_link,
74479+ .maxlen = sizeof(int),
74480+ .mode = 0600,
74481+ .proc_handler = &proc_dointvec,
74482+ },
74483+#endif
74484+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
74485+ {
74486+ .procname = "enforce_symlinksifowner",
74487+ .data = &grsec_enable_symlinkown,
74488+ .maxlen = sizeof(int),
74489+ .mode = 0600,
74490+ .proc_handler = &proc_dointvec,
74491+ },
74492+ {
74493+ .procname = "symlinkown_gid",
74494+ .data = &grsec_symlinkown_gid,
74495+ .maxlen = sizeof(int),
74496+ .mode = 0600,
74497+ .proc_handler = &proc_dointvec,
74498+ },
74499+#endif
74500+#ifdef CONFIG_GRKERNSEC_BRUTE
74501+ {
74502+ .procname = "deter_bruteforce",
74503+ .data = &grsec_enable_brute,
74504+ .maxlen = sizeof(int),
74505+ .mode = 0600,
74506+ .proc_handler = &proc_dointvec,
74507+ },
74508+#endif
74509+#ifdef CONFIG_GRKERNSEC_FIFO
74510+ {
74511+ .procname = "fifo_restrictions",
74512+ .data = &grsec_enable_fifo,
74513+ .maxlen = sizeof(int),
74514+ .mode = 0600,
74515+ .proc_handler = &proc_dointvec,
74516+ },
74517+#endif
74518+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
74519+ {
74520+ .procname = "ptrace_readexec",
74521+ .data = &grsec_enable_ptrace_readexec,
74522+ .maxlen = sizeof(int),
74523+ .mode = 0600,
74524+ .proc_handler = &proc_dointvec,
74525+ },
74526+#endif
74527+#ifdef CONFIG_GRKERNSEC_SETXID
74528+ {
74529+ .procname = "consistent_setxid",
74530+ .data = &grsec_enable_setxid,
74531+ .maxlen = sizeof(int),
74532+ .mode = 0600,
74533+ .proc_handler = &proc_dointvec,
74534+ },
74535+#endif
74536+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74537+ {
74538+ .procname = "ip_blackhole",
74539+ .data = &grsec_enable_blackhole,
74540+ .maxlen = sizeof(int),
74541+ .mode = 0600,
74542+ .proc_handler = &proc_dointvec,
74543+ },
74544+ {
74545+ .procname = "lastack_retries",
74546+ .data = &grsec_lastack_retries,
74547+ .maxlen = sizeof(int),
74548+ .mode = 0600,
74549+ .proc_handler = &proc_dointvec,
74550+ },
74551+#endif
74552+#ifdef CONFIG_GRKERNSEC_EXECLOG
74553+ {
74554+ .procname = "exec_logging",
74555+ .data = &grsec_enable_execlog,
74556+ .maxlen = sizeof(int),
74557+ .mode = 0600,
74558+ .proc_handler = &proc_dointvec,
74559+ },
74560+#endif
74561+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
74562+ {
74563+ .procname = "rwxmap_logging",
74564+ .data = &grsec_enable_log_rwxmaps,
74565+ .maxlen = sizeof(int),
74566+ .mode = 0600,
74567+ .proc_handler = &proc_dointvec,
74568+ },
74569+#endif
74570+#ifdef CONFIG_GRKERNSEC_SIGNAL
74571+ {
74572+ .procname = "signal_logging",
74573+ .data = &grsec_enable_signal,
74574+ .maxlen = sizeof(int),
74575+ .mode = 0600,
74576+ .proc_handler = &proc_dointvec,
74577+ },
74578+#endif
74579+#ifdef CONFIG_GRKERNSEC_FORKFAIL
74580+ {
74581+ .procname = "forkfail_logging",
74582+ .data = &grsec_enable_forkfail,
74583+ .maxlen = sizeof(int),
74584+ .mode = 0600,
74585+ .proc_handler = &proc_dointvec,
74586+ },
74587+#endif
74588+#ifdef CONFIG_GRKERNSEC_TIME
74589+ {
74590+ .procname = "timechange_logging",
74591+ .data = &grsec_enable_time,
74592+ .maxlen = sizeof(int),
74593+ .mode = 0600,
74594+ .proc_handler = &proc_dointvec,
74595+ },
74596+#endif
74597+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
74598+ {
74599+ .procname = "chroot_deny_shmat",
74600+ .data = &grsec_enable_chroot_shmat,
74601+ .maxlen = sizeof(int),
74602+ .mode = 0600,
74603+ .proc_handler = &proc_dointvec,
74604+ },
74605+#endif
74606+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
74607+ {
74608+ .procname = "chroot_deny_unix",
74609+ .data = &grsec_enable_chroot_unix,
74610+ .maxlen = sizeof(int),
74611+ .mode = 0600,
74612+ .proc_handler = &proc_dointvec,
74613+ },
74614+#endif
74615+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
74616+ {
74617+ .procname = "chroot_deny_mount",
74618+ .data = &grsec_enable_chroot_mount,
74619+ .maxlen = sizeof(int),
74620+ .mode = 0600,
74621+ .proc_handler = &proc_dointvec,
74622+ },
74623+#endif
74624+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
74625+ {
74626+ .procname = "chroot_deny_fchdir",
74627+ .data = &grsec_enable_chroot_fchdir,
74628+ .maxlen = sizeof(int),
74629+ .mode = 0600,
74630+ .proc_handler = &proc_dointvec,
74631+ },
74632+#endif
74633+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
74634+ {
74635+ .procname = "chroot_deny_chroot",
74636+ .data = &grsec_enable_chroot_double,
74637+ .maxlen = sizeof(int),
74638+ .mode = 0600,
74639+ .proc_handler = &proc_dointvec,
74640+ },
74641+#endif
74642+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
74643+ {
74644+ .procname = "chroot_deny_pivot",
74645+ .data = &grsec_enable_chroot_pivot,
74646+ .maxlen = sizeof(int),
74647+ .mode = 0600,
74648+ .proc_handler = &proc_dointvec,
74649+ },
74650+#endif
74651+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
74652+ {
74653+ .procname = "chroot_enforce_chdir",
74654+ .data = &grsec_enable_chroot_chdir,
74655+ .maxlen = sizeof(int),
74656+ .mode = 0600,
74657+ .proc_handler = &proc_dointvec,
74658+ },
74659+#endif
74660+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
74661+ {
74662+ .procname = "chroot_deny_chmod",
74663+ .data = &grsec_enable_chroot_chmod,
74664+ .maxlen = sizeof(int),
74665+ .mode = 0600,
74666+ .proc_handler = &proc_dointvec,
74667+ },
74668+#endif
74669+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
74670+ {
74671+ .procname = "chroot_deny_mknod",
74672+ .data = &grsec_enable_chroot_mknod,
74673+ .maxlen = sizeof(int),
74674+ .mode = 0600,
74675+ .proc_handler = &proc_dointvec,
74676+ },
74677+#endif
74678+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
74679+ {
74680+ .procname = "chroot_restrict_nice",
74681+ .data = &grsec_enable_chroot_nice,
74682+ .maxlen = sizeof(int),
74683+ .mode = 0600,
74684+ .proc_handler = &proc_dointvec,
74685+ },
74686+#endif
74687+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
74688+ {
74689+ .procname = "chroot_execlog",
74690+ .data = &grsec_enable_chroot_execlog,
74691+ .maxlen = sizeof(int),
74692+ .mode = 0600,
74693+ .proc_handler = &proc_dointvec,
74694+ },
74695+#endif
74696+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
74697+ {
74698+ .procname = "chroot_caps",
74699+ .data = &grsec_enable_chroot_caps,
74700+ .maxlen = sizeof(int),
74701+ .mode = 0600,
74702+ .proc_handler = &proc_dointvec,
74703+ },
74704+#endif
74705+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
74706+ {
74707+ .procname = "chroot_deny_sysctl",
74708+ .data = &grsec_enable_chroot_sysctl,
74709+ .maxlen = sizeof(int),
74710+ .mode = 0600,
74711+ .proc_handler = &proc_dointvec,
74712+ },
74713+#endif
74714+#ifdef CONFIG_GRKERNSEC_TPE
74715+ {
74716+ .procname = "tpe",
74717+ .data = &grsec_enable_tpe,
74718+ .maxlen = sizeof(int),
74719+ .mode = 0600,
74720+ .proc_handler = &proc_dointvec,
74721+ },
74722+ {
74723+ .procname = "tpe_gid",
74724+ .data = &grsec_tpe_gid,
74725+ .maxlen = sizeof(int),
74726+ .mode = 0600,
74727+ .proc_handler = &proc_dointvec,
74728+ },
74729+#endif
74730+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
74731+ {
74732+ .procname = "tpe_invert",
74733+ .data = &grsec_enable_tpe_invert,
74734+ .maxlen = sizeof(int),
74735+ .mode = 0600,
74736+ .proc_handler = &proc_dointvec,
74737+ },
74738+#endif
74739+#ifdef CONFIG_GRKERNSEC_TPE_ALL
74740+ {
74741+ .procname = "tpe_restrict_all",
74742+ .data = &grsec_enable_tpe_all,
74743+ .maxlen = sizeof(int),
74744+ .mode = 0600,
74745+ .proc_handler = &proc_dointvec,
74746+ },
74747+#endif
74748+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
74749+ {
74750+ .procname = "socket_all",
74751+ .data = &grsec_enable_socket_all,
74752+ .maxlen = sizeof(int),
74753+ .mode = 0600,
74754+ .proc_handler = &proc_dointvec,
74755+ },
74756+ {
74757+ .procname = "socket_all_gid",
74758+ .data = &grsec_socket_all_gid,
74759+ .maxlen = sizeof(int),
74760+ .mode = 0600,
74761+ .proc_handler = &proc_dointvec,
74762+ },
74763+#endif
74764+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
74765+ {
74766+ .procname = "socket_client",
74767+ .data = &grsec_enable_socket_client,
74768+ .maxlen = sizeof(int),
74769+ .mode = 0600,
74770+ .proc_handler = &proc_dointvec,
74771+ },
74772+ {
74773+ .procname = "socket_client_gid",
74774+ .data = &grsec_socket_client_gid,
74775+ .maxlen = sizeof(int),
74776+ .mode = 0600,
74777+ .proc_handler = &proc_dointvec,
74778+ },
74779+#endif
74780+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
74781+ {
74782+ .procname = "socket_server",
74783+ .data = &grsec_enable_socket_server,
74784+ .maxlen = sizeof(int),
74785+ .mode = 0600,
74786+ .proc_handler = &proc_dointvec,
74787+ },
74788+ {
74789+ .procname = "socket_server_gid",
74790+ .data = &grsec_socket_server_gid,
74791+ .maxlen = sizeof(int),
74792+ .mode = 0600,
74793+ .proc_handler = &proc_dointvec,
74794+ },
74795+#endif
74796+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
74797+ {
74798+ .procname = "audit_group",
74799+ .data = &grsec_enable_group,
74800+ .maxlen = sizeof(int),
74801+ .mode = 0600,
74802+ .proc_handler = &proc_dointvec,
74803+ },
74804+ {
74805+ .procname = "audit_gid",
74806+ .data = &grsec_audit_gid,
74807+ .maxlen = sizeof(int),
74808+ .mode = 0600,
74809+ .proc_handler = &proc_dointvec,
74810+ },
74811+#endif
74812+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
74813+ {
74814+ .procname = "audit_chdir",
74815+ .data = &grsec_enable_chdir,
74816+ .maxlen = sizeof(int),
74817+ .mode = 0600,
74818+ .proc_handler = &proc_dointvec,
74819+ },
74820+#endif
74821+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
74822+ {
74823+ .procname = "audit_mount",
74824+ .data = &grsec_enable_mount,
74825+ .maxlen = sizeof(int),
74826+ .mode = 0600,
74827+ .proc_handler = &proc_dointvec,
74828+ },
74829+#endif
74830+#ifdef CONFIG_GRKERNSEC_DMESG
74831+ {
74832+ .procname = "dmesg",
74833+ .data = &grsec_enable_dmesg,
74834+ .maxlen = sizeof(int),
74835+ .mode = 0600,
74836+ .proc_handler = &proc_dointvec,
74837+ },
74838+#endif
74839+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
74840+ {
74841+ .procname = "chroot_findtask",
74842+ .data = &grsec_enable_chroot_findtask,
74843+ .maxlen = sizeof(int),
74844+ .mode = 0600,
74845+ .proc_handler = &proc_dointvec,
74846+ },
74847+#endif
74848+#ifdef CONFIG_GRKERNSEC_RESLOG
74849+ {
74850+ .procname = "resource_logging",
74851+ .data = &grsec_resource_logging,
74852+ .maxlen = sizeof(int),
74853+ .mode = 0600,
74854+ .proc_handler = &proc_dointvec,
74855+ },
74856+#endif
74857+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
74858+ {
74859+ .procname = "audit_ptrace",
74860+ .data = &grsec_enable_audit_ptrace,
74861+ .maxlen = sizeof(int),
74862+ .mode = 0600,
74863+ .proc_handler = &proc_dointvec,
74864+ },
74865+#endif
74866+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
74867+ {
74868+ .procname = "harden_ptrace",
74869+ .data = &grsec_enable_harden_ptrace,
74870+ .maxlen = sizeof(int),
74871+ .mode = 0600,
74872+ .proc_handler = &proc_dointvec,
74873+ },
74874+#endif
74875+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
74876+ {
74877+ .procname = "harden_ipc",
74878+ .data = &grsec_enable_harden_ipc,
74879+ .maxlen = sizeof(int),
74880+ .mode = 0600,
74881+ .proc_handler = &proc_dointvec,
74882+ },
74883+#endif
74884+ {
74885+ .procname = "grsec_lock",
74886+ .data = &grsec_lock,
74887+ .maxlen = sizeof(int),
74888+ .mode = 0600,
74889+ .proc_handler = &proc_dointvec,
74890+ },
74891+#endif
74892+#ifdef CONFIG_GRKERNSEC_ROFS
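+ /* romount_protect can only ever be written to 1: proc_dointvec_minmax
+    clamps the value to the range [1, 1], making the switch one-way until
+    reboot */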
74893+ {
74894+ .procname = "romount_protect",
74895+ .data = &grsec_enable_rofs,
74896+ .maxlen = sizeof(int),
74897+ .mode = 0600,
74898+ .proc_handler = &proc_dointvec_minmax,
74899+ .extra1 = &one,
74900+ .extra2 = &one,
74901+ },
74902+#endif
74903+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
74904+ {
74905+ .procname = "deny_new_usb",
74906+ .data = &grsec_deny_new_usb,
74907+ .maxlen = sizeof(int),
74908+ .mode = 0600,
74909+ .proc_handler = &proc_dointvec,
74910+ },
74911+#endif
74912+ { }
74913+};
74914+#endif
74915diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
74916new file mode 100644
74917index 0000000..0dc13c3
74918--- /dev/null
74919+++ b/grsecurity/grsec_time.c
74920@@ -0,0 +1,16 @@
74921+#include <linux/kernel.h>
74922+#include <linux/sched.h>
74923+#include <linux/grinternal.h>
74924+#include <linux/module.h>
74925+
74926+void
74927+gr_log_timechange(void)
74928+{
74929+#ifdef CONFIG_GRKERNSEC_TIME
74930+ if (grsec_enable_time)
74931+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
74932+#endif
74933+ return;
74934+}
74935+
74936+EXPORT_SYMBOL(gr_log_timechange);
74937diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
74938new file mode 100644
74939index 0000000..ee57dcf
74940--- /dev/null
74941+++ b/grsecurity/grsec_tpe.c
74942@@ -0,0 +1,73 @@
74943+#include <linux/kernel.h>
74944+#include <linux/sched.h>
74945+#include <linux/file.h>
74946+#include <linux/fs.h>
74947+#include <linux/grinternal.h>
74948+
74949+extern int gr_acl_tpe_check(void);
74950+
74951+int
74952+gr_tpe_allow(const struct file *file)
74953+{
74954+#ifdef CONFIG_GRKERNSEC
74955+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
74956+ const struct cred *cred = current_cred();
74957+ char *msg = NULL;
74958+ char *msg2 = NULL;
74959+
74960+ // never restrict root
74961+ if (gr_is_global_root(cred->uid))
74962+ return 1;
74963+
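+ /* stage 1: tasks in the untrusted group (or outside the trusted group
+    with TPE_INVERT, or in an untrusted RBAC role) may only execute files
+    from root-owned directories that are not group- or world-writable;
+    stage 2 below (TPE_ALL) extends a similar directory check to every
+    non-root user */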
74964+ if (grsec_enable_tpe) {
74965+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
74966+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
74967+ msg = "not being in trusted group";
74968+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
74969+ msg = "being in untrusted group";
74970+#else
74971+ if (in_group_p(grsec_tpe_gid))
74972+ msg = "being in untrusted group";
74973+#endif
74974+ }
74975+ if (!msg && gr_acl_tpe_check())
74976+ msg = "being in untrusted role";
74977+
74978+ // not in any affected group/role
74979+ if (!msg)
74980+ goto next_check;
74981+
74982+ if (gr_is_global_nonroot(inode->i_uid))
74983+ msg2 = "file in non-root-owned directory";
74984+ else if (inode->i_mode & S_IWOTH)
74985+ msg2 = "file in world-writable directory";
74986+ else if (inode->i_mode & S_IWGRP)
74987+ msg2 = "file in group-writable directory";
74988+
74989+ if (msg && msg2) {
74990+ char fullmsg[70] = {0};
74991+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
74992+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
74993+ return 0;
74994+ }
74995+ msg = NULL;
74996+next_check:
74997+#ifdef CONFIG_GRKERNSEC_TPE_ALL
74998+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
74999+ return 1;
75000+
75001+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
75002+ msg = "directory not owned by user";
75003+ else if (inode->i_mode & S_IWOTH)
75004+ msg = "file in world-writable directory";
75005+ else if (inode->i_mode & S_IWGRP)
75006+ msg = "file in group-writable directory";
75007+
75008+ if (msg) {
75009+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
75010+ return 0;
75011+ }
75012+#endif
75013+#endif
75014+ return 1;
75015+}
75016diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
75017new file mode 100644
75018index 0000000..ae02d8e
75019--- /dev/null
75020+++ b/grsecurity/grsec_usb.c
75021@@ -0,0 +1,15 @@
75022+#include <linux/kernel.h>
75023+#include <linux/grinternal.h>
75024+#include <linux/module.h>
75025+
75026+int gr_handle_new_usb(void)
75027+{
75028+#ifdef CONFIG_GRKERNSEC_DENYUSB
75029+ if (grsec_deny_new_usb) {
75030+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
75031+ return 1;
75032+ }
75033+#endif
75034+ return 0;
75035+}
75036+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
75037diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
75038new file mode 100644
75039index 0000000..9f7b1ac
75040--- /dev/null
75041+++ b/grsecurity/grsum.c
75042@@ -0,0 +1,61 @@
75043+#include <linux/err.h>
75044+#include <linux/kernel.h>
75045+#include <linux/sched.h>
75046+#include <linux/mm.h>
75047+#include <linux/scatterlist.h>
75048+#include <linux/crypto.h>
75049+#include <linux/gracl.h>
75050+
75051+
75052+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
75053+#error "crypto and sha256 must be built into the kernel"
75054+#endif
75055+
75056+int
75057+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
75058+{
75059+ char *p;
75060+ struct crypto_hash *tfm;
75061+ struct hash_desc desc;
75062+ struct scatterlist sg;
75063+ unsigned char temp_sum[GR_SHA_LEN];
75064+ volatile int retval = 0;
75065+ volatile int dummy = 0;
75066+ unsigned int i;
75067+
75068+ sg_init_table(&sg, 1);
75069+
75070+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
75071+ if (IS_ERR(tfm)) {
75072+ /* should never happen, since sha256 should be built in */
75073+ return 1;
75074+ }
75075+
75076+ desc.tfm = tfm;
75077+ desc.flags = 0;
75078+
75079+ crypto_hash_init(&desc);
75080+
75081+ p = salt;
75082+ sg_set_buf(&sg, p, GR_SALT_LEN);
75083+ crypto_hash_update(&desc, &sg, sg.length);
75084+
75085+ p = entry->pw;
75086+ sg_set_buf(&sg, p, strlen(p));
75087+
75088+ crypto_hash_update(&desc, &sg, sg.length);
75089+
75090+ crypto_hash_final(&desc, temp_sum);
75091+
75092+ memset(entry->pw, 0, GR_PW_LEN);
75093+
75094+ for (i = 0; i < GR_SHA_LEN; i++)
75095+ if (sum[i] != temp_sum[i])
75096+ retval = 1;
75097+ else
75098+ dummy = 1; // waste a cycle
75099+
75100+ crypto_free_hash(tfm);
75101+
75102+ return retval;
75103+}
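
[editor's note] Note how the final comparison in chkpw() above walks all GR_SHA_LEN bytes instead of returning at the first mismatch; together with the volatile retval/dummy pair this keeps the runtime independent of how many leading bytes match, so a caller cannot recover the stored hash byte-by-byte from timing. The same idea as a freestanding sketch (not the kernel helper itself):

#include <stddef.h>

/* Returns 0 iff a and b match over len bytes; the loop always runs to
 * completion so timing does not depend on where a mismatch occurs. */
static int consttime_memcmp(const unsigned char *a, const unsigned char *b,
			    size_t len)
{
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < len; i++)
		diff |= a[i] ^ b[i];
	return diff != 0;
}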
75104diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
75105index 77ff547..181834f 100644
75106--- a/include/asm-generic/4level-fixup.h
75107+++ b/include/asm-generic/4level-fixup.h
75108@@ -13,8 +13,10 @@
75109 #define pmd_alloc(mm, pud, address) \
75110 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
75111 NULL: pmd_offset(pud, address))
75112+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
75113
75114 #define pud_alloc(mm, pgd, address) (pgd)
75115+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
75116 #define pud_offset(pgd, start) (pgd)
75117 #define pud_none(pud) 0
75118 #define pud_bad(pud) 0
75119diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
75120index b7babf0..97f4c4f 100644
75121--- a/include/asm-generic/atomic-long.h
75122+++ b/include/asm-generic/atomic-long.h
75123@@ -22,6 +22,12 @@
75124
75125 typedef atomic64_t atomic_long_t;
75126
75127+#ifdef CONFIG_PAX_REFCOUNT
75128+typedef atomic64_unchecked_t atomic_long_unchecked_t;
75129+#else
75130+typedef atomic64_t atomic_long_unchecked_t;
75131+#endif
75132+
75133 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
75134
75135 static inline long atomic_long_read(atomic_long_t *l)
75136@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
75137 return (long)atomic64_read(v);
75138 }
75139
75140+#ifdef CONFIG_PAX_REFCOUNT
75141+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
75142+{
75143+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
75144+
75145+ return (long)atomic64_read_unchecked(v);
75146+}
75147+#endif
75148+
75149 static inline void atomic_long_set(atomic_long_t *l, long i)
75150 {
75151 atomic64_t *v = (atomic64_t *)l;
75152@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
75153 atomic64_set(v, i);
75154 }
75155
75156+#ifdef CONFIG_PAX_REFCOUNT
75157+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
75158+{
75159+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
75160+
75161+ atomic64_set_unchecked(v, i);
75162+}
75163+#endif
75164+
75165 static inline void atomic_long_inc(atomic_long_t *l)
75166 {
75167 atomic64_t *v = (atomic64_t *)l;
75168@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
75169 atomic64_inc(v);
75170 }
75171
75172+#ifdef CONFIG_PAX_REFCOUNT
75173+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
75174+{
75175+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
75176+
75177+ atomic64_inc_unchecked(v);
75178+}
75179+#endif
75180+
75181 static inline void atomic_long_dec(atomic_long_t *l)
75182 {
75183 atomic64_t *v = (atomic64_t *)l;
75184@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
75185 atomic64_dec(v);
75186 }
75187
75188+#ifdef CONFIG_PAX_REFCOUNT
75189+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
75190+{
75191+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
75192+
75193+ atomic64_dec_unchecked(v);
75194+}
75195+#endif
75196+
75197 static inline void atomic_long_add(long i, atomic_long_t *l)
75198 {
75199 atomic64_t *v = (atomic64_t *)l;
75200@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
75201 atomic64_add(i, v);
75202 }
75203
75204+#ifdef CONFIG_PAX_REFCOUNT
75205+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
75206+{
75207+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
75208+
75209+ atomic64_add_unchecked(i, v);
75210+}
75211+#endif
75212+
75213 static inline void atomic_long_sub(long i, atomic_long_t *l)
75214 {
75215 atomic64_t *v = (atomic64_t *)l;
75216@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
75217 atomic64_sub(i, v);
75218 }
75219
75220+#ifdef CONFIG_PAX_REFCOUNT
75221+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
75222+{
75223+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
75224+
75225+ atomic64_sub_unchecked(i, v);
75226+}
75227+#endif
75228+
75229 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
75230 {
75231 atomic64_t *v = (atomic64_t *)l;
75232@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
75233 return atomic64_add_negative(i, v);
75234 }
75235
75236-static inline long atomic_long_add_return(long i, atomic_long_t *l)
75237+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
75238 {
75239 atomic64_t *v = (atomic64_t *)l;
75240
75241 return (long)atomic64_add_return(i, v);
75242 }
75243
75244+#ifdef CONFIG_PAX_REFCOUNT
75245+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
75246+{
75247+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
75248+
75249+ return (long)atomic64_add_return_unchecked(i, v);
75250+}
75251+#endif
75252+
75253 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
75254 {
75255 atomic64_t *v = (atomic64_t *)l;
75256@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
75257 return (long)atomic64_inc_return(v);
75258 }
75259
75260+#ifdef CONFIG_PAX_REFCOUNT
75261+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
75262+{
75263+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
75264+
75265+ return (long)atomic64_inc_return_unchecked(v);
75266+}
75267+#endif
75268+
75269 static inline long atomic_long_dec_return(atomic_long_t *l)
75270 {
75271 atomic64_t *v = (atomic64_t *)l;
75272@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
75273
75274 typedef atomic_t atomic_long_t;
75275
75276+#ifdef CONFIG_PAX_REFCOUNT
75277+typedef atomic_unchecked_t atomic_long_unchecked_t;
75278+#else
75279+typedef atomic_t atomic_long_unchecked_t;
75280+#endif
75281+
75282 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
75283 static inline long atomic_long_read(atomic_long_t *l)
75284 {
75285@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
75286 return (long)atomic_read(v);
75287 }
75288
75289+#ifdef CONFIG_PAX_REFCOUNT
75290+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
75291+{
75292+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
75293+
75294+ return (long)atomic_read_unchecked(v);
75295+}
75296+#endif
75297+
75298 static inline void atomic_long_set(atomic_long_t *l, long i)
75299 {
75300 atomic_t *v = (atomic_t *)l;
75301@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
75302 atomic_set(v, i);
75303 }
75304
75305+#ifdef CONFIG_PAX_REFCOUNT
75306+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
75307+{
75308+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
75309+
75310+ atomic_set_unchecked(v, i);
75311+}
75312+#endif
75313+
75314 static inline void atomic_long_inc(atomic_long_t *l)
75315 {
75316 atomic_t *v = (atomic_t *)l;
75317@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
75318 atomic_inc(v);
75319 }
75320
75321+#ifdef CONFIG_PAX_REFCOUNT
75322+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
75323+{
75324+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
75325+
75326+ atomic_inc_unchecked(v);
75327+}
75328+#endif
75329+
75330 static inline void atomic_long_dec(atomic_long_t *l)
75331 {
75332 atomic_t *v = (atomic_t *)l;
75333@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
75334 atomic_dec(v);
75335 }
75336
75337+#ifdef CONFIG_PAX_REFCOUNT
75338+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
75339+{
75340+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
75341+
75342+ atomic_dec_unchecked(v);
75343+}
75344+#endif
75345+
75346 static inline void atomic_long_add(long i, atomic_long_t *l)
75347 {
75348 atomic_t *v = (atomic_t *)l;
75349@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
75350 atomic_add(i, v);
75351 }
75352
75353+#ifdef CONFIG_PAX_REFCOUNT
75354+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
75355+{
75356+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
75357+
75358+ atomic_add_unchecked(i, v);
75359+}
75360+#endif
75361+
75362 static inline void atomic_long_sub(long i, atomic_long_t *l)
75363 {
75364 atomic_t *v = (atomic_t *)l;
75365@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
75366 atomic_sub(i, v);
75367 }
75368
75369+#ifdef CONFIG_PAX_REFCOUNT
75370+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
75371+{
75372+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
75373+
75374+ atomic_sub_unchecked(i, v);
75375+}
75376+#endif
75377+
75378 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
75379 {
75380 atomic_t *v = (atomic_t *)l;
75381@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
75382 return (long)atomic_add_return(i, v);
75383 }
75384
75385+#ifdef CONFIG_PAX_REFCOUNT
75386+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
75387+{
75388+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
75389+
75390+ return (long)atomic_add_return_unchecked(i, v);
75391+}
75392+
75393+#endif
75394+
75395 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
75396 {
75397 atomic_t *v = (atomic_t *)l;
75398@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
75399 return (long)atomic_inc_return(v);
75400 }
75401
75402+#ifdef CONFIG_PAX_REFCOUNT
75403+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
75404+{
75405+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
75406+
75407+ return (long)atomic_inc_return_unchecked(v);
75408+}
75409+#endif
75410+
75411 static inline long atomic_long_dec_return(atomic_long_t *l)
75412 {
75413 atomic_t *v = (atomic_t *)l;
75414@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
75415
75416 #endif /* BITS_PER_LONG == 64 */
75417
75418+#ifdef CONFIG_PAX_REFCOUNT
75419+static inline void pax_refcount_needs_these_functions(void)
75420+{
75421+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
75422+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
75423+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
75424+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
75425+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
75426+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
75427+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
75428+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
75429+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
75430+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
75431+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
75432+#ifdef CONFIG_X86
75433+ atomic_clear_mask_unchecked(0, NULL);
75434+ atomic_set_mask_unchecked(0, NULL);
75435+#endif
75436+
75437+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
75438+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
75439+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
75440+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
75441+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
75442+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
75443+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
75444+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
75445+}
75446+#else
75447+#define atomic_read_unchecked(v) atomic_read(v)
75448+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
75449+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
75450+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
75451+#define atomic_inc_unchecked(v) atomic_inc(v)
75452+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
75453+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
75454+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
75455+#define atomic_dec_unchecked(v) atomic_dec(v)
75456+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
75457+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
75458+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
75459+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
75460+
75461+#define atomic_long_read_unchecked(v) atomic_long_read(v)
75462+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
75463+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
75464+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
75465+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
75466+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
75467+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
75468+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
75469+#endif
75470+
75471 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
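
[editor's note] The pattern repeated throughout this header is the core of PAX_REFCOUNT: with the feature enabled, plain atomic_t/atomic_long_t operations trap on overflow (catching reference-count wraparounds before they become use-after-frees), while the *_unchecked variants keep the old wrapping semantics for counters where overflow is expected and harmless, such as statistics. With PAX_REFCOUNT off, both spellings collapse to the same type, and pax_refcount_needs_these_functions() merely forces every architecture to provide the full *_unchecked set. A sketch of how calling code is expected to choose (the struct and names are illustrative, not from this patch):

struct obj {
	atomic_t refcnt;			/* overflow must trap     */
	atomic_unchecked_t rx_packets;		/* wrapping is acceptable */
};

static inline void obj_get(struct obj *o)
{
	atomic_inc(&o->refcnt);			/* instrumented: traps on wrap */
}

static inline void obj_count_rx(struct obj *o)
{
	atomic_inc_unchecked(&o->rx_packets);	/* never instrumented */
}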
75472diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
75473index 33bd2de..f31bff97 100644
75474--- a/include/asm-generic/atomic.h
75475+++ b/include/asm-generic/atomic.h
75476@@ -153,7 +153,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
75477 * Atomically clears the bits set in @mask from @v
75478 */
75479 #ifndef atomic_clear_mask
75480-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
75481+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
75482 {
75483 unsigned long flags;
75484
75485diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
75486index b18ce4f..2ee2843 100644
75487--- a/include/asm-generic/atomic64.h
75488+++ b/include/asm-generic/atomic64.h
75489@@ -16,6 +16,8 @@ typedef struct {
75490 long long counter;
75491 } atomic64_t;
75492
75493+typedef atomic64_t atomic64_unchecked_t;
75494+
75495 #define ATOMIC64_INIT(i) { (i) }
75496
75497 extern long long atomic64_read(const atomic64_t *v);
75498@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
75499 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
75500 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
75501
75502+#define atomic64_read_unchecked(v) atomic64_read(v)
75503+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
75504+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
75505+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
75506+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
75507+#define atomic64_inc_unchecked(v) atomic64_inc(v)
75508+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
75509+#define atomic64_dec_unchecked(v) atomic64_dec(v)
75510+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
75511+
75512 #endif /* _ASM_GENERIC_ATOMIC64_H */
75513diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
75514index a60a7cc..0fe12f2 100644
75515--- a/include/asm-generic/bitops/__fls.h
75516+++ b/include/asm-generic/bitops/__fls.h
75517@@ -9,7 +9,7 @@
75518 *
75519 * Undefined if no set bit exists, so code should check against 0 first.
75520 */
75521-static __always_inline unsigned long __fls(unsigned long word)
75522+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
75523 {
75524 int num = BITS_PER_LONG - 1;
75525
75526diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
75527index 0576d1f..dad6c71 100644
75528--- a/include/asm-generic/bitops/fls.h
75529+++ b/include/asm-generic/bitops/fls.h
75530@@ -9,7 +9,7 @@
75531 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
75532 */
75533
75534-static __always_inline int fls(int x)
75535+static __always_inline int __intentional_overflow(-1) fls(int x)
75536 {
75537 int r = 32;
75538
75539diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
75540index b097cf8..3d40e14 100644
75541--- a/include/asm-generic/bitops/fls64.h
75542+++ b/include/asm-generic/bitops/fls64.h
75543@@ -15,7 +15,7 @@
75544 * at position 64.
75545 */
75546 #if BITS_PER_LONG == 32
75547-static __always_inline int fls64(__u64 x)
75548+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
75549 {
75550 __u32 h = x >> 32;
75551 if (h)
75552@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
75553 return fls(x);
75554 }
75555 #elif BITS_PER_LONG == 64
75556-static __always_inline int fls64(__u64 x)
75557+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
75558 {
75559 if (x == 0)
75560 return 0;
75561diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
75562index 1bfcfe5..e04c5c9 100644
75563--- a/include/asm-generic/cache.h
75564+++ b/include/asm-generic/cache.h
75565@@ -6,7 +6,7 @@
75566 * cache lines need to provide their own cache.h.
75567 */
75568
75569-#define L1_CACHE_SHIFT 5
75570-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
75571+#define L1_CACHE_SHIFT 5UL
75572+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
75573
75574 #endif /* __ASM_GENERIC_CACHE_H */
75575diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
75576index 0d68a1e..b74a761 100644
75577--- a/include/asm-generic/emergency-restart.h
75578+++ b/include/asm-generic/emergency-restart.h
75579@@ -1,7 +1,7 @@
75580 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
75581 #define _ASM_GENERIC_EMERGENCY_RESTART_H
75582
75583-static inline void machine_emergency_restart(void)
75584+static inline __noreturn void machine_emergency_restart(void)
75585 {
75586 machine_restart(NULL);
75587 }
75588diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
75589index 90f99c7..00ce236 100644
75590--- a/include/asm-generic/kmap_types.h
75591+++ b/include/asm-generic/kmap_types.h
75592@@ -2,9 +2,9 @@
75593 #define _ASM_GENERIC_KMAP_TYPES_H
75594
75595 #ifdef __WITH_KM_FENCE
75596-# define KM_TYPE_NR 41
75597+# define KM_TYPE_NR 42
75598 #else
75599-# define KM_TYPE_NR 20
75600+# define KM_TYPE_NR 21
75601 #endif
75602
75603 #endif
75604diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
75605index 9ceb03b..62b0b8f 100644
75606--- a/include/asm-generic/local.h
75607+++ b/include/asm-generic/local.h
75608@@ -23,24 +23,37 @@ typedef struct
75609 atomic_long_t a;
75610 } local_t;
75611
75612+typedef struct {
75613+ atomic_long_unchecked_t a;
75614+} local_unchecked_t;
75615+
75616 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
75617
75618 #define local_read(l) atomic_long_read(&(l)->a)
75619+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
75620 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
75621+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
75622 #define local_inc(l) atomic_long_inc(&(l)->a)
75623+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
75624 #define local_dec(l) atomic_long_dec(&(l)->a)
75625+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
75626 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
75627+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
75628 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
75629+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
75630
75631 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
75632 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
75633 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
75634 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
75635 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
75636+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
75637 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
75638 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
75639+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
75640
75641 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
75642+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
75643 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
75644 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
75645 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
75646diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
75647index 725612b..9cc513a 100644
75648--- a/include/asm-generic/pgtable-nopmd.h
75649+++ b/include/asm-generic/pgtable-nopmd.h
75650@@ -1,14 +1,19 @@
75651 #ifndef _PGTABLE_NOPMD_H
75652 #define _PGTABLE_NOPMD_H
75653
75654-#ifndef __ASSEMBLY__
75655-
75656 #include <asm-generic/pgtable-nopud.h>
75657
75658-struct mm_struct;
75659-
75660 #define __PAGETABLE_PMD_FOLDED
75661
75662+#define PMD_SHIFT PUD_SHIFT
75663+#define PTRS_PER_PMD 1
75664+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
75665+#define PMD_MASK (~(PMD_SIZE-1))
75666+
75667+#ifndef __ASSEMBLY__
75668+
75669+struct mm_struct;
75670+
75671 /*
75672 * Having the pmd type consist of a pud gets the size right, and allows
75673 * us to conceptually access the pud entry that this pmd is folded into
75674@@ -16,11 +21,6 @@ struct mm_struct;
75675 */
75676 typedef struct { pud_t pud; } pmd_t;
75677
75678-#define PMD_SHIFT PUD_SHIFT
75679-#define PTRS_PER_PMD 1
75680-#define PMD_SIZE (1UL << PMD_SHIFT)
75681-#define PMD_MASK (~(PMD_SIZE-1))
75682-
75683 /*
75684 * The "pud_xxx()" functions here are trivial for a folded two-level
75685 * setup: the pmd is never bad, and a pmd always exists (as it's folded
75686diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
75687index 810431d..0ec4804f 100644
75688--- a/include/asm-generic/pgtable-nopud.h
75689+++ b/include/asm-generic/pgtable-nopud.h
75690@@ -1,10 +1,15 @@
75691 #ifndef _PGTABLE_NOPUD_H
75692 #define _PGTABLE_NOPUD_H
75693
75694-#ifndef __ASSEMBLY__
75695-
75696 #define __PAGETABLE_PUD_FOLDED
75697
75698+#define PUD_SHIFT PGDIR_SHIFT
75699+#define PTRS_PER_PUD 1
75700+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
75701+#define PUD_MASK (~(PUD_SIZE-1))
75702+
75703+#ifndef __ASSEMBLY__
75704+
75705 /*
75706 * Having the pud type consist of a pgd gets the size right, and allows
75707 * us to conceptually access the pgd entry that this pud is folded into
75708@@ -12,11 +17,6 @@
75709 */
75710 typedef struct { pgd_t pgd; } pud_t;
75711
75712-#define PUD_SHIFT PGDIR_SHIFT
75713-#define PTRS_PER_PUD 1
75714-#define PUD_SIZE (1UL << PUD_SHIFT)
75715-#define PUD_MASK (~(PUD_SIZE-1))
75716-
75717 /*
75718 * The "pgd_xxx()" functions here are trivial for a folded two-level
75719 * setup: the pud is never bad, and a pud always exists (as it's folded
75720@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
75721 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
75722
75723 #define pgd_populate(mm, pgd, pud) do { } while (0)
75724+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
75725 /*
75726 * (puds are folded into pgds so this doesn't get actually called,
75727 * but the define is needed for a generic inline function.)
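
[editor's note] Both folded-page-table headers above make the same two changes: the PMD/PUD geometry macros move outside the #ifndef __ASSEMBLY__ guard so assembly files can use them, and the size constants switch from 1UL to _AC(1,UL), since a UL suffix is meaningless to the assembler. _AC comes from include/uapi/linux/const.h and simply drops the suffix when preprocessing assembly (paraphrased):

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)
#endif

/* So in C:    PMD_SIZE == (1UL << PMD_SHIFT)
 * and in asm: PMD_SIZE ==  1   << PMD_SHIFT   */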
75728diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
75729index db09234..86683e3 100644
75730--- a/include/asm-generic/pgtable.h
75731+++ b/include/asm-generic/pgtable.h
75732@@ -736,6 +736,22 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
75733 }
75734 #endif /* CONFIG_NUMA_BALANCING */
75735
75736+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
75737+#ifdef CONFIG_PAX_KERNEXEC
75738+#error KERNEXEC requires pax_open_kernel
75739+#else
75740+static inline unsigned long pax_open_kernel(void) { return 0; }
75741+#endif
75742+#endif
75743+
75744+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
75745+#ifdef CONFIG_PAX_KERNEXEC
75746+#error KERNEXEC requires pax_close_kernel
75747+#else
75748+static inline unsigned long pax_close_kernel(void) { return 0; }
75749+#endif
75750+#endif
75751+
75752 #endif /* CONFIG_MMU */
75753
75754 #endif /* !__ASSEMBLY__ */
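
[editor's note] pax_open_kernel()/pax_close_kernel() are the KERNEXEC hooks for temporarily lifting kernel write protection (on x86 by toggling CR0.WP); on configurations without KERNEXEC the stubs above make them free no-ops, and the #error lines guarantee an architecture cannot enable KERNEXEC without supplying real implementations. Typical usage, as a sketch (some_limit is a hypothetical variable; __read_only is defined in the linux/cache.h hunk later in this patch):

static unsigned long some_limit __read_only = 100;

static void set_some_limit(unsigned long v)
{
	pax_open_kernel();	/* make protected data briefly writable */
	some_limit = v;
	pax_close_kernel();	/* restore write protection */
}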
75755diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
75756index dc1269c..48a4f51 100644
75757--- a/include/asm-generic/uaccess.h
75758+++ b/include/asm-generic/uaccess.h
75759@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
75760 return __clear_user(to, n);
75761 }
75762
75763+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
75764+#ifdef CONFIG_PAX_MEMORY_UDEREF
75765+#error UDEREF requires pax_open_userland
75766+#else
75767+static inline unsigned long pax_open_userland(void) { return 0; }
75768+#endif
75769+#endif
75770+
75771+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
75772+#ifdef CONFIG_PAX_MEMORY_UDEREF
75773+#error UDEREF requires pax_close_userland
75774+#else
75775+static inline unsigned long pax_close_userland(void) { return 0; }
75776+#endif
75777+#endif
75778+
75779 #endif /* __ASM_GENERIC_UACCESS_H */
75780diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
75781index bc2121f..2f41f9a 100644
75782--- a/include/asm-generic/vmlinux.lds.h
75783+++ b/include/asm-generic/vmlinux.lds.h
75784@@ -232,6 +232,7 @@
75785 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
75786 VMLINUX_SYMBOL(__start_rodata) = .; \
75787 *(.rodata) *(.rodata.*) \
75788+ *(.data..read_only) \
75789 *(__vermagic) /* Kernel version magic */ \
75790 . = ALIGN(8); \
75791 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
75792@@ -716,17 +717,18 @@
75793 * section in the linker script will go there too. @phdr should have
75794 * a leading colon.
75795 *
75796- * Note that this macros defines __per_cpu_load as an absolute symbol.
75797+ * Note that this macro defines per_cpu_load as an absolute symbol.
75798 * If there is no need to put the percpu section at a predetermined
75799 * address, use PERCPU_SECTION.
75800 */
75801 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
75802- VMLINUX_SYMBOL(__per_cpu_load) = .; \
75803- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
75804+ per_cpu_load = .; \
75805+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
75806 - LOAD_OFFSET) { \
75807+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
75808 PERCPU_INPUT(cacheline) \
75809 } phdr \
75810- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
75811+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
75812
75813 /**
75814 * PERCPU_SECTION - define output section for percpu area, simple version
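
[editor's note] The one-line .rodata change folds a new .data..read_only input section into the kernel's read-only output section, so anything placed there is write-protected after boot; it is the landing zone for the __read_only annotation introduced in the linux/cache.h hunk below. The PERCPU_VADDR change replaces the absolute __per_cpu_load symbol with a section-relative per_cpu_load plus an offset, which appears intended to keep the symbol meaningful under KERNEXEC's relocated kernel layout. On x86 the patch defines __read_only elsewhere as a section attribute, roughly (sketch):

#define __read_only __attribute__((__section__(".data..read_only")))

static int hardening_enabled __read_only = 1;	/* read-only after boot */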
75815diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
75816index e73c19e..5b89e00 100644
75817--- a/include/crypto/algapi.h
75818+++ b/include/crypto/algapi.h
75819@@ -34,7 +34,7 @@ struct crypto_type {
75820 unsigned int maskclear;
75821 unsigned int maskset;
75822 unsigned int tfmsize;
75823-};
75824+} __do_const;
75825
75826 struct crypto_instance {
75827 struct crypto_alg alg;
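
[editor's note] This is the first of many __do_const annotations in the patch: it marks structures consisting (almost) entirely of function pointers so the constify gcc plugin can force every instance into read-only memory, closing off a classic exploitation target (overwriting an ops table to hijack a function pointer). __no_const is the escape hatch for instances that legitimately need runtime modification. Roughly what constify achieves, sketched without the plugin:

#include <stdio.h>

/* With the plugin, every instance behaves as if declared const, so the
 * function pointers land in .rodata and cannot be overwritten at runtime. */
struct ops_like {
	int (*show)(const char *msg);
};

static int real_show(const char *msg)
{
	return printf("%s\n", msg);
}

static const struct ops_like my_ops = {		/* placed in .rodata */
	.show = real_show,
};

int main(void)
{
	return my_ops.show("ops tables become read-only") < 0;
}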
75828diff --git a/include/drm/drmP.h b/include/drm/drmP.h
75829index 1d4a920..da65658 100644
75830--- a/include/drm/drmP.h
75831+++ b/include/drm/drmP.h
75832@@ -66,6 +66,7 @@
75833 #include <linux/workqueue.h>
75834 #include <linux/poll.h>
75835 #include <asm/pgalloc.h>
75836+#include <asm/local.h>
75837 #include <drm/drm.h>
75838 #include <drm/drm_sarea.h>
75839 #include <drm/drm_vma_manager.h>
75840@@ -278,10 +279,12 @@ do { \
75841 * \param cmd command.
75842 * \param arg argument.
75843 */
75844-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
75845+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
75846+ struct drm_file *file_priv);
75847+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
75848 struct drm_file *file_priv);
75849
75850-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
75851+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
75852 unsigned long arg);
75853
75854 #define DRM_IOCTL_NR(n) _IOC_NR(n)
75855@@ -297,10 +300,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
75856 struct drm_ioctl_desc {
75857 unsigned int cmd;
75858 int flags;
75859- drm_ioctl_t *func;
75860+ drm_ioctl_t func;
75861 unsigned int cmd_drv;
75862 const char *name;
75863-};
75864+} __do_const;
75865
75866 /**
75867 * Creates a driver or general drm_ioctl_desc array entry for the given
75868@@ -1013,7 +1016,8 @@ struct drm_info_list {
75869 int (*show)(struct seq_file*, void*); /** show callback */
75870 u32 driver_features; /**< Required driver features for this entry */
75871 void *data;
75872-};
75873+} __do_const;
75874+typedef struct drm_info_list __no_const drm_info_list_no_const;
75875
75876 /**
75877 * debugfs node structure. This structure represents a debugfs file.
75878@@ -1097,7 +1101,7 @@ struct drm_device {
75879
75880 /** \name Usage Counters */
75881 /*@{ */
75882- int open_count; /**< Outstanding files open */
75883+ local_t open_count; /**< Outstanding files open */
75884 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
75885 atomic_t vma_count; /**< Outstanding vma areas open */
75886 int buf_use; /**< Buffers in use -- cannot alloc */
75887diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
75888index ef6ad3a..be34b16 100644
75889--- a/include/drm/drm_crtc_helper.h
75890+++ b/include/drm/drm_crtc_helper.h
75891@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
75892 struct drm_connector *connector);
75893 /* disable encoder when not in use - more explicit than dpms off */
75894 void (*disable)(struct drm_encoder *encoder);
75895-};
75896+} __no_const;
75897
75898 /**
75899 * drm_connector_helper_funcs - helper operations for connectors
75900diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
75901index 940ece4..8cb727f 100644
75902--- a/include/drm/i915_pciids.h
75903+++ b/include/drm/i915_pciids.h
75904@@ -37,7 +37,7 @@
75905 */
75906 #define INTEL_VGA_DEVICE(id, info) { \
75907 0x8086, id, \
75908- ~0, ~0, \
75909+ PCI_ANY_ID, PCI_ANY_ID, \
75910 0x030000, 0xff0000, \
75911 (unsigned long) info }
75912
75913diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
75914index 72dcbe8..8db58d7 100644
75915--- a/include/drm/ttm/ttm_memory.h
75916+++ b/include/drm/ttm/ttm_memory.h
75917@@ -48,7 +48,7 @@
75918
75919 struct ttm_mem_shrink {
75920 int (*do_shrink) (struct ttm_mem_shrink *);
75921-};
75922+} __no_const;
75923
75924 /**
75925 * struct ttm_mem_global - Global memory accounting structure.
75926diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
75927index d1f61bf..2239439 100644
75928--- a/include/drm/ttm/ttm_page_alloc.h
75929+++ b/include/drm/ttm/ttm_page_alloc.h
75930@@ -78,6 +78,7 @@ void ttm_dma_page_alloc_fini(void);
75931 */
75932 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
75933
75934+struct device;
75935 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
75936 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
75937
75938diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
75939index 4b840e8..155d235 100644
75940--- a/include/keys/asymmetric-subtype.h
75941+++ b/include/keys/asymmetric-subtype.h
75942@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
75943 /* Verify the signature on a key of this subtype (optional) */
75944 int (*verify_signature)(const struct key *key,
75945 const struct public_key_signature *sig);
75946-};
75947+} __do_const;
75948
75949 /**
75950 * asymmetric_key_subtype - Get the subtype from an asymmetric key
75951diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
75952index c1da539..1dcec55 100644
75953--- a/include/linux/atmdev.h
75954+++ b/include/linux/atmdev.h
75955@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
75956 #endif
75957
75958 struct k_atm_aal_stats {
75959-#define __HANDLE_ITEM(i) atomic_t i
75960+#define __HANDLE_ITEM(i) atomic_unchecked_t i
75961 __AAL_STAT_ITEMS
75962 #undef __HANDLE_ITEM
75963 };
75964@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
75965 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
75966 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
75967 struct module *owner;
75968-};
75969+} __do_const;
75970
75971 struct atmphy_ops {
75972 int (*start)(struct atm_dev *dev);
75973diff --git a/include/linux/audit.h b/include/linux/audit.h
75974index a406419..c2bb164 100644
75975--- a/include/linux/audit.h
75976+++ b/include/linux/audit.h
75977@@ -195,7 +195,7 @@ static inline void audit_ptrace(struct task_struct *t)
75978 extern unsigned int audit_serial(void);
75979 extern int auditsc_get_stamp(struct audit_context *ctx,
75980 struct timespec *t, unsigned int *serial);
75981-extern int audit_set_loginuid(kuid_t loginuid);
75982+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
75983
75984 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
75985 {
75986diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
75987index fd8bf32..2cccd5a 100644
75988--- a/include/linux/binfmts.h
75989+++ b/include/linux/binfmts.h
75990@@ -74,8 +74,10 @@ struct linux_binfmt {
75991 int (*load_binary)(struct linux_binprm *);
75992 int (*load_shlib)(struct file *);
75993 int (*core_dump)(struct coredump_params *cprm);
75994+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
75995+ void (*handle_mmap)(struct file *);
75996 unsigned long min_coredump; /* minimal dump size */
75997-};
75998+} __do_const;
75999
76000 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
76001
76002diff --git a/include/linux/bitops.h b/include/linux/bitops.h
76003index abc9ca7..e54ee27 100644
76004--- a/include/linux/bitops.h
76005+++ b/include/linux/bitops.h
76006@@ -102,7 +102,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
76007 * @word: value to rotate
76008 * @shift: bits to roll
76009 */
76010-static inline __u32 rol32(__u32 word, unsigned int shift)
76011+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
76012 {
76013 return (word << shift) | (word >> (32 - shift));
76014 }
76015@@ -112,7 +112,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
76016 * @word: value to rotate
76017 * @shift: bits to roll
76018 */
76019-static inline __u32 ror32(__u32 word, unsigned int shift)
76020+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
76021 {
76022 return (word >> shift) | (word << (32 - shift));
76023 }
76024@@ -168,7 +168,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
76025 return (__s32)(value << shift) >> shift;
76026 }
76027
76028-static inline unsigned fls_long(unsigned long l)
76029+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
76030 {
76031 if (sizeof(l) == 4)
76032 return fls(l);
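
[editor's note] __intentional_overflow(-1) tells the size_overflow gcc plugin not to instrument a function's return value: rotates like rol32/ror32 and the fls family compute results whose arithmetic may legitimately wrap, so flagging them would be pure noise. Worth noting in passing: with shift == 0, `word >> (32 - shift)` is a shift by 32, which is undefined in C, so callers are expected to pass 1..31. A masked variant that is defined for every shift, as a sketch:

#include <stdint.h>

/* Masking the complementary shift count keeps both shifts in 0..31,
 * so rol32_safe(word, 0) is simply word. */
static inline uint32_t rol32_safe(uint32_t word, unsigned int shift)
{
	return (word << (shift & 31)) | (word >> ((32 - shift) & 31));
}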
76033diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
76034index 1b135d4..59fc876 100644
76035--- a/include/linux/blkdev.h
76036+++ b/include/linux/blkdev.h
76037@@ -1578,7 +1578,7 @@ struct block_device_operations {
76038 /* this callback is with swap_lock and sometimes page table lock held */
76039 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
76040 struct module *owner;
76041-};
76042+} __do_const;
76043
76044 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
76045 unsigned long);
76046diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
76047index afc1343..9735539 100644
76048--- a/include/linux/blktrace_api.h
76049+++ b/include/linux/blktrace_api.h
76050@@ -25,7 +25,7 @@ struct blk_trace {
76051 struct dentry *dropped_file;
76052 struct dentry *msg_file;
76053 struct list_head running_list;
76054- atomic_t dropped;
76055+ atomic_unchecked_t dropped;
76056 };
76057
76058 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
76059diff --git a/include/linux/cache.h b/include/linux/cache.h
76060index 4c57065..40346da 100644
76061--- a/include/linux/cache.h
76062+++ b/include/linux/cache.h
76063@@ -16,6 +16,14 @@
76064 #define __read_mostly
76065 #endif
76066
76067+#ifndef __read_only
76068+#ifdef CONFIG_PAX_KERNEXEC
76069+#error KERNEXEC requires __read_only
76070+#else
76071+#define __read_only __read_mostly
76072+#endif
76073+#endif
76074+
76075 #ifndef ____cacheline_aligned
76076 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
76077 #endif
76078diff --git a/include/linux/capability.h b/include/linux/capability.h
76079index a6ee1f9..e1ca49d 100644
76080--- a/include/linux/capability.h
76081+++ b/include/linux/capability.h
76082@@ -212,8 +212,13 @@ extern bool capable(int cap);
76083 extern bool ns_capable(struct user_namespace *ns, int cap);
76084 extern bool inode_capable(const struct inode *inode, int cap);
76085 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
76086+extern bool capable_nolog(int cap);
76087+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
76088+extern bool inode_capable_nolog(const struct inode *inode, int cap);
76089
76090 /* audit system wants to get cap info from files as well */
76091 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
76092
76093+extern int is_privileged_binary(const struct dentry *dentry);
76094+
76095 #endif /* !_LINUX_CAPABILITY_H */
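
[editor's note] The _nolog capability variants appear intended to let grsecurity probe for a capability without emitting an audit/grsec log entry, for checks that are expected to fail during normal operation; is_privileged_binary() lets callers treat setuid/setcap binaries specially. A usage sketch (illustrative caller, not from the patch):

/* A check that is allowed to fail quietly. */
static int can_tune_without_noise(void)
{
	if (capable_nolog(CAP_SYS_ADMIN))
		return 1;	/* privileged: proceed */
	return 0;		/* unprivileged: fall back, no log entry */
}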
76096diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
76097index 8609d57..86e4d79 100644
76098--- a/include/linux/cdrom.h
76099+++ b/include/linux/cdrom.h
76100@@ -87,7 +87,6 @@ struct cdrom_device_ops {
76101
76102 /* driver specifications */
76103 const int capability; /* capability flags */
76104- int n_minors; /* number of active minor devices */
76105 /* handle uniform packets for scsi type devices (scsi,atapi) */
76106 int (*generic_packet) (struct cdrom_device_info *,
76107 struct packet_command *);
76108diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
76109index 4ce9056..86caac6 100644
76110--- a/include/linux/cleancache.h
76111+++ b/include/linux/cleancache.h
76112@@ -31,7 +31,7 @@ struct cleancache_ops {
76113 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
76114 void (*invalidate_inode)(int, struct cleancache_filekey);
76115 void (*invalidate_fs)(int);
76116-};
76117+} __no_const;
76118
76119 extern struct cleancache_ops *
76120 cleancache_register_ops(struct cleancache_ops *ops);
76121diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
76122index 7e59253..d6e4cae 100644
76123--- a/include/linux/clk-provider.h
76124+++ b/include/linux/clk-provider.h
76125@@ -141,6 +141,7 @@ struct clk_ops {
76126 unsigned long);
76127 void (*init)(struct clk_hw *hw);
76128 };
76129+typedef struct clk_ops __no_const clk_ops_no_const;
76130
76131 /**
76132 * struct clk_init_data - holds init data that's common to all clocks and is
76133diff --git a/include/linux/compat.h b/include/linux/compat.h
76134index eb8a49d..6b66ed9 100644
76135--- a/include/linux/compat.h
76136+++ b/include/linux/compat.h
76137@@ -313,7 +313,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
76138 compat_size_t __user *len_ptr);
76139
76140 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
76141-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
76142+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
76143 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
76144 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
76145 compat_ssize_t msgsz, int msgflg);
76146@@ -420,7 +420,7 @@ extern int compat_ptrace_request(struct task_struct *child,
76147 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
76148 compat_ulong_t addr, compat_ulong_t data);
76149 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
76150- compat_long_t addr, compat_long_t data);
76151+ compat_ulong_t addr, compat_ulong_t data);
76152
76153 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, size_t);
76154 /*
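
[editor's note] The compat_sys_ptrace change above swaps compat_long_t for compat_ulong_t in the addr/data arguments. The point is sign extension: a 32-bit user address with the top bit set, widened through a signed type, becomes a bogus 64-bit value. A standalone demonstration (values are illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t  s = (int32_t)0xbfff0000;	/* plausible 32-bit address */
	uint32_t u = 0xbfff0000u;

	printf("signed  : %#lx\n", (unsigned long)(int64_t)s);  /* 0xffffffffbfff0000 */
	printf("unsigned: %#lx\n", (unsigned long)(uint64_t)u); /* 0xbfff0000 */
	return 0;
}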
76155diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
76156index ded4299..ddcbe31 100644
76157--- a/include/linux/compiler-gcc4.h
76158+++ b/include/linux/compiler-gcc4.h
76159@@ -39,9 +39,34 @@
76160 # define __compiletime_warning(message) __attribute__((warning(message)))
76161 # define __compiletime_error(message) __attribute__((error(message)))
76162 #endif /* __CHECKER__ */
76163+
76164+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
76165+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
76166+#define __bos0(ptr) __bos((ptr), 0)
76167+#define __bos1(ptr) __bos((ptr), 1)
76168 #endif /* GCC_VERSION >= 40300 */
76169
76170 #if GCC_VERSION >= 40500
76171+
76172+#ifdef RANDSTRUCT_PLUGIN
76173+#define __randomize_layout __attribute__((randomize_layout))
76174+#define __no_randomize_layout __attribute__((no_randomize_layout))
76175+#endif
76176+
76177+#ifdef CONSTIFY_PLUGIN
76178+#define __no_const __attribute__((no_const))
76179+#define __do_const __attribute__((do_const))
76180+#endif
76181+
76182+#ifdef SIZE_OVERFLOW_PLUGIN
76183+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
76184+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
76185+#endif
76186+
76187+#ifdef LATENT_ENTROPY_PLUGIN
76188+#define __latent_entropy __attribute__((latent_entropy))
76189+#endif
76190+
76191 /*
76192 * Mark a position in code as unreachable. This can be used to
76193 * suppress control flow warnings after asm blocks that transfer
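
[editor's note] These are the attribute hooks for the gcc hardening plugins shipped with the patch: randomize_layout/no_randomize_layout (RANDSTRUCT), no_const/do_const (constify), size_overflow/intentional_overflow (size_overflow), latent_entropy, plus alloc_size and the __bos helpers wrapping __builtin_object_size for bounds checks. How the alloc_size/__bos pair cooperates, as a standalone sketch:

#include <stdlib.h>
#include <stdio.h>

/* alloc_size(1) tells gcc the returned object is arg-1 bytes long, so
 * __builtin_object_size can see through the call. */
static void *my_alloc(size_t n) __attribute__((alloc_size(1)));

static void *my_alloc(size_t n)
{
	return malloc(n);
}

int main(void)
{
	char *p = my_alloc(16);

	/* With optimization enabled, gcc can evaluate this to 16. */
	printf("object size: %zu\n", __builtin_object_size(p, 0));
	free(p);
	return 0;
}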
76194diff --git a/include/linux/compiler.h b/include/linux/compiler.h
76195index 92669cd..cc564c0 100644
76196--- a/include/linux/compiler.h
76197+++ b/include/linux/compiler.h
76198@@ -5,11 +5,14 @@
76199
76200 #ifdef __CHECKER__
76201 # define __user __attribute__((noderef, address_space(1)))
76202+# define __force_user __force __user
76203 # define __kernel __attribute__((address_space(0)))
76204+# define __force_kernel __force __kernel
76205 # define __safe __attribute__((safe))
76206 # define __force __attribute__((force))
76207 # define __nocast __attribute__((nocast))
76208 # define __iomem __attribute__((noderef, address_space(2)))
76209+# define __force_iomem __force __iomem
76210 # define __must_hold(x) __attribute__((context(x,1,1)))
76211 # define __acquires(x) __attribute__((context(x,0,1)))
76212 # define __releases(x) __attribute__((context(x,1,0)))
76213@@ -17,20 +20,37 @@
76214 # define __release(x) __context__(x,-1)
76215 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
76216 # define __percpu __attribute__((noderef, address_space(3)))
76217+# define __force_percpu __force __percpu
76218 #ifdef CONFIG_SPARSE_RCU_POINTER
76219 # define __rcu __attribute__((noderef, address_space(4)))
76220+# define __force_rcu __force __rcu
76221 #else
76222 # define __rcu
76223+# define __force_rcu
76224 #endif
76225 extern void __chk_user_ptr(const volatile void __user *);
76226 extern void __chk_io_ptr(const volatile void __iomem *);
76227 #else
76228-# define __user
76229-# define __kernel
76230+# ifdef CHECKER_PLUGIN
76231+//# define __user
76232+//# define __force_user
76233+//# define __kernel
76234+//# define __force_kernel
76235+# else
76236+# ifdef STRUCTLEAK_PLUGIN
76237+# define __user __attribute__((user))
76238+# else
76239+# define __user
76240+# endif
76241+# define __force_user
76242+# define __kernel
76243+# define __force_kernel
76244+# endif
76245 # define __safe
76246 # define __force
76247 # define __nocast
76248 # define __iomem
76249+# define __force_iomem
76250 # define __chk_user_ptr(x) (void)0
76251 # define __chk_io_ptr(x) (void)0
76252 # define __builtin_warning(x, y...) (1)
76253@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
76254 # define __release(x) (void)0
76255 # define __cond_lock(x,c) (c)
76256 # define __percpu
76257+# define __force_percpu
76258 # define __rcu
76259+# define __force_rcu
76260 #endif
76261
76262 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
76263@@ -275,6 +297,34 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
76264 # define __attribute_const__ /* unimplemented */
76265 #endif
76266
76267+#ifndef __randomize_layout
76268+# define __randomize_layout
76269+#endif
76270+
76271+#ifndef __no_randomize_layout
76272+# define __no_randomize_layout
76273+#endif
76274+
76275+#ifndef __no_const
76276+# define __no_const
76277+#endif
76278+
76279+#ifndef __do_const
76280+# define __do_const
76281+#endif
76282+
76283+#ifndef __size_overflow
76284+# define __size_overflow(...)
76285+#endif
76286+
76287+#ifndef __intentional_overflow
76288+# define __intentional_overflow(...)
76289+#endif
76290+
76291+#ifndef __latent_entropy
76292+# define __latent_entropy
76293+#endif
76294+
76295 /*
76296 * Tell gcc if a function is cold. The compiler will assume any path
76297 * directly leading to the call is unlikely.
76298@@ -284,6 +334,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
76299 #define __cold
76300 #endif
76301
76302+#ifndef __alloc_size
76303+#define __alloc_size(...)
76304+#endif
76305+
76306+#ifndef __bos
76307+#define __bos(ptr, arg)
76308+#endif
76309+
76310+#ifndef __bos0
76311+#define __bos0(ptr)
76312+#endif
76313+
76314+#ifndef __bos1
76315+#define __bos1(ptr)
76316+#endif
76317+
76318 /* Simple shorthand for a section definition */
76319 #ifndef __section
76320 # define __section(S) __attribute__ ((__section__(#S)))
76321@@ -349,7 +415,8 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
76322 * use is to mediate communication between process-level code and irq/NMI
76323 * handlers, all running on the same CPU.
76324 */
76325-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
76326+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
76327+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
76328
76329 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
76330 #ifdef CONFIG_KPROBES
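
[editor's note] The ACCESS_ONCE split is a deliberate hardening trade-off: the default macro gains a const qualifier so it can only read, and writes must go through the new ACCESS_ONCE_RW. That makes every lockless store grep-able and prevents constified data from being written through the old macro by accident. Call sites change like this (sketch):

static int shared_flag;

static void reader_writer(void)
{
	int v = ACCESS_ONCE(shared_flag);	/* reads: unchanged        */

	ACCESS_ONCE_RW(shared_flag) = v + 1;	/* writes: RW variant only */
}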
76331diff --git a/include/linux/completion.h b/include/linux/completion.h
76332index 5d5aaae..0ea9b84 100644
76333--- a/include/linux/completion.h
76334+++ b/include/linux/completion.h
76335@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
76336
76337 extern void wait_for_completion(struct completion *);
76338 extern void wait_for_completion_io(struct completion *);
76339-extern int wait_for_completion_interruptible(struct completion *x);
76340-extern int wait_for_completion_killable(struct completion *x);
76341+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
76342+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
76343 extern unsigned long wait_for_completion_timeout(struct completion *x,
76344- unsigned long timeout);
76345+ unsigned long timeout) __intentional_overflow(-1);
76346 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
76347- unsigned long timeout);
76348+ unsigned long timeout) __intentional_overflow(-1);
76349 extern long wait_for_completion_interruptible_timeout(
76350- struct completion *x, unsigned long timeout);
76351+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
76352 extern long wait_for_completion_killable_timeout(
76353- struct completion *x, unsigned long timeout);
76354+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
76355 extern bool try_wait_for_completion(struct completion *x);
76356 extern bool completion_done(struct completion *x);
76357
76358diff --git a/include/linux/configfs.h b/include/linux/configfs.h
76359index 34025df..d94bbbc 100644
76360--- a/include/linux/configfs.h
76361+++ b/include/linux/configfs.h
76362@@ -125,7 +125,7 @@ struct configfs_attribute {
76363 const char *ca_name;
76364 struct module *ca_owner;
76365 umode_t ca_mode;
76366-};
76367+} __do_const;
76368
76369 /*
76370 * Users often need to create attribute structures for their configurable
76371diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
76372index dc196bb..c55a50f 100644
76373--- a/include/linux/cpufreq.h
76374+++ b/include/linux/cpufreq.h
76375@@ -189,6 +189,7 @@ struct global_attr {
76376 ssize_t (*store)(struct kobject *a, struct attribute *b,
76377 const char *c, size_t count);
76378 };
76379+typedef struct global_attr __no_const global_attr_no_const;
76380
76381 #define define_one_global_ro(_name) \
76382 static struct global_attr _name = \
76383@@ -225,7 +226,7 @@ struct cpufreq_driver {
76384 int (*suspend) (struct cpufreq_policy *policy);
76385 int (*resume) (struct cpufreq_policy *policy);
76386 struct freq_attr **attr;
76387-};
76388+} __do_const;
76389
76390 /* flags */
76391 #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
76392diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
76393index 50fcbb0..9d2dbd9 100644
76394--- a/include/linux/cpuidle.h
76395+++ b/include/linux/cpuidle.h
76396@@ -50,7 +50,8 @@ struct cpuidle_state {
76397 int index);
76398
76399 int (*enter_dead) (struct cpuidle_device *dev, int index);
76400-};
76401+} __do_const;
76402+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
76403
76404 /* Idle State Flags */
76405 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
76406@@ -192,7 +193,7 @@ struct cpuidle_governor {
76407 void (*reflect) (struct cpuidle_device *dev, int index);
76408
76409 struct module *owner;
76410-};
76411+} __do_const;
76412
76413 #ifdef CONFIG_CPU_IDLE
76414 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
76415diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
76416index d08e4d2..95fad61 100644
76417--- a/include/linux/cpumask.h
76418+++ b/include/linux/cpumask.h
76419@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
76420 }
76421
76422 /* Valid inputs for n are -1 and 0. */
76423-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
76424+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
76425 {
76426 return n+1;
76427 }
76428
76429-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
76430+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
76431 {
76432 return n+1;
76433 }
76434
76435-static inline unsigned int cpumask_next_and(int n,
76436+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
76437 const struct cpumask *srcp,
76438 const struct cpumask *andp)
76439 {
76440@@ -167,7 +167,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
76441 *
76442 * Returns >= nr_cpu_ids if no further cpus set.
76443 */
76444-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
76445+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
76446 {
76447 /* -1 is a legal arg here. */
76448 if (n != -1)
76449@@ -182,7 +182,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
76450 *
76451 * Returns >= nr_cpu_ids if no further cpus unset.
76452 */
76453-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
76454+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
76455 {
76456 /* -1 is a legal arg here. */
76457 if (n != -1)
76458@@ -190,7 +190,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
76459 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
76460 }
76461
76462-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
76463+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
76464 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
76465
76466 /**
76467diff --git a/include/linux/cred.h b/include/linux/cred.h
76468index 04421e8..117e17a 100644
76469--- a/include/linux/cred.h
76470+++ b/include/linux/cred.h
76471@@ -35,7 +35,7 @@ struct group_info {
76472 int nblocks;
76473 kgid_t small_block[NGROUPS_SMALL];
76474 kgid_t *blocks[0];
76475-};
76476+} __randomize_layout;
76477
76478 /**
76479 * get_group_info - Get a reference to a group info structure
76480@@ -136,7 +136,7 @@ struct cred {
76481 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
76482 struct group_info *group_info; /* supplementary groups for euid/fsgid */
76483 struct rcu_head rcu; /* RCU deletion hook */
76484-};
76485+} __randomize_layout;
76486
76487 extern void __put_cred(struct cred *);
76488 extern void exit_creds(struct task_struct *);
76489@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
76490 static inline void validate_process_creds(void)
76491 {
76492 }
76493+static inline void validate_task_creds(struct task_struct *task)
76494+{
76495+}
76496 #endif
76497
76498 /**
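
[editor's note] __randomize_layout, backed by the RANDSTRUCT plugin hooked in via the compiler-gcc4.h hunk above, shuffles the member order of security-sensitive structures such as struct cred and struct dentry at build time, so an attacker with an arbitrary-write primitive cannot rely on fixed field offsets across kernels. Since offsets become build-specific, correct code must locate members via offsetof()/container_of() rather than hard-coded numbers. A sketch with an illustrative stand-in struct:

#include <stdio.h>
#include <stddef.h>

/* With randomized layout the only portable way to find a member is
 * offsetof(); the numeric value differs per build. */
struct cred_like {
	unsigned int uid;
	unsigned int gid;
	void *security;
};

int main(void)
{
	printf("security lives at offset %zu\n",
	       offsetof(struct cred_like, security));
	return 0;
}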
76499diff --git a/include/linux/crypto.h b/include/linux/crypto.h
76500index b92eadf..b4ecdc1 100644
76501--- a/include/linux/crypto.h
76502+++ b/include/linux/crypto.h
76503@@ -373,7 +373,7 @@ struct cipher_tfm {
76504 const u8 *key, unsigned int keylen);
76505 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
76506 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
76507-};
76508+} __no_const;
76509
76510 struct hash_tfm {
76511 int (*init)(struct hash_desc *desc);
76512@@ -394,13 +394,13 @@ struct compress_tfm {
76513 int (*cot_decompress)(struct crypto_tfm *tfm,
76514 const u8 *src, unsigned int slen,
76515 u8 *dst, unsigned int *dlen);
76516-};
76517+} __no_const;
76518
76519 struct rng_tfm {
76520 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
76521 unsigned int dlen);
76522 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
76523-};
76524+} __no_const;
76525
76526 #define crt_ablkcipher crt_u.ablkcipher
76527 #define crt_aead crt_u.aead
76528diff --git a/include/linux/ctype.h b/include/linux/ctype.h
76529index 653589e..4ef254a 100644
76530--- a/include/linux/ctype.h
76531+++ b/include/linux/ctype.h
76532@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
76533 * Fast implementation of tolower() for internal usage. Do not use in your
76534 * code.
76535 */
76536-static inline char _tolower(const char c)
76537+static inline unsigned char _tolower(const unsigned char c)
76538 {
76539 return c | 0x20;
76540 }
76541diff --git a/include/linux/dcache.h b/include/linux/dcache.h
76542index bf72e9a..4ca7927 100644
76543--- a/include/linux/dcache.h
76544+++ b/include/linux/dcache.h
76545@@ -133,7 +133,7 @@ struct dentry {
76546 } d_u;
76547 struct list_head d_subdirs; /* our children */
76548 struct hlist_node d_alias; /* inode alias list */
76549-};
76550+} __randomize_layout;
76551
76552 /*
76553 * dentry->d_lock spinlock nesting subclasses:
76554diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
76555index 7925bf0..d5143d2 100644
76556--- a/include/linux/decompress/mm.h
76557+++ b/include/linux/decompress/mm.h
76558@@ -77,7 +77,7 @@ static void free(void *where)
76559 * warnings when not needed (indeed large_malloc / large_free are not
76560 * needed by inflate */
76561
76562-#define malloc(a) kmalloc(a, GFP_KERNEL)
76563+#define malloc(a) kmalloc((a), GFP_KERNEL)
76564 #define free(a) kfree(a)
76565
76566 #define large_malloc(a) vmalloc(a)
76567diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
76568index d48dc00..211ee54 100644
76569--- a/include/linux/devfreq.h
76570+++ b/include/linux/devfreq.h
76571@@ -114,7 +114,7 @@ struct devfreq_governor {
76572 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
76573 int (*event_handler)(struct devfreq *devfreq,
76574 unsigned int event, void *data);
76575-};
76576+} __do_const;
76577
76578 /**
76579 * struct devfreq - Device devfreq structure
76580diff --git a/include/linux/device.h b/include/linux/device.h
76581index 952b010..d5b7691 100644
76582--- a/include/linux/device.h
76583+++ b/include/linux/device.h
76584@@ -310,7 +310,7 @@ struct subsys_interface {
76585 struct list_head node;
76586 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
76587 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
76588-};
76589+} __do_const;
76590
76591 int subsys_interface_register(struct subsys_interface *sif);
76592 void subsys_interface_unregister(struct subsys_interface *sif);
76593@@ -506,7 +506,7 @@ struct device_type {
76594 void (*release)(struct device *dev);
76595
76596 const struct dev_pm_ops *pm;
76597-};
76598+} __do_const;
76599
76600 /* interface for exporting device attributes */
76601 struct device_attribute {
76602@@ -516,11 +516,12 @@ struct device_attribute {
76603 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
76604 const char *buf, size_t count);
76605 };
76606+typedef struct device_attribute __no_const device_attribute_no_const;
76607
76608 struct dev_ext_attribute {
76609 struct device_attribute attr;
76610 void *var;
76611-};
76612+} __do_const;
76613
76614 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
76615 char *buf);
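
The __do_const and __no_const markers seen throughout these hunks drive grsecurity's constify GCC plugin: ops-style structures that hold only function pointers are forced read-only at compile time, while the *_no_const typedef (as for device_attribute above) opts specific instances back out when a driver legitimately fills the callbacks in at runtime. A rough sketch of the idea in plain C, approximating the plugin's effect with an explicit const qualifier (hypothetical demo):

#include <stdio.h>

struct ops {
    void (*show)(void);
};

static void hello(void) { puts("hello"); }

/* what the constify plugin effectively does to a __do_const struct:
 * the instance lives in .rodata and cannot be retargeted at runtime */
static const struct ops fixed_ops = { .show = hello };

/* the __no_const escape hatch: an instance that is legitimately
 * written after initialization, e.g. built from runtime data */
static struct ops runtime_ops;

int main(void)
{
    runtime_ops.show = hello;  /* allowed: opted out of constification */
    fixed_ops.show();
    runtime_ops.show();
    /* fixed_ops.show = hello;  would not compile */
    return 0;
}
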
76616diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
76617index fd4aee2..1f28db9 100644
76618--- a/include/linux/dma-mapping.h
76619+++ b/include/linux/dma-mapping.h
76620@@ -54,7 +54,7 @@ struct dma_map_ops {
76621 u64 (*get_required_mask)(struct device *dev);
76622 #endif
76623 int is_phys;
76624-};
76625+} __do_const;
76626
76627 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
76628
76629diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
76630index 41cf0c3..f3b771c 100644
76631--- a/include/linux/dmaengine.h
76632+++ b/include/linux/dmaengine.h
76633@@ -1114,9 +1114,9 @@ struct dma_pinned_list {
76634 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
76635 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
76636
76637-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
76638+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
76639 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
76640-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
76641+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
76642 struct dma_pinned_list *pinned_list, struct page *page,
76643 unsigned int offset, size_t len);
76644
76645diff --git a/include/linux/efi.h b/include/linux/efi.h
76646index 11ce678..7b8c69c 100644
76647--- a/include/linux/efi.h
76648+++ b/include/linux/efi.h
76649@@ -764,6 +764,7 @@ struct efivar_operations {
76650 efi_set_variable_t *set_variable;
76651 efi_query_variable_store_t *query_variable_store;
76652 };
76653+typedef struct efivar_operations __no_const efivar_operations_no_const;
76654
76655 struct efivars {
76656 /*
76657diff --git a/include/linux/elf.h b/include/linux/elf.h
76658index 67a5fa7..b817372 100644
76659--- a/include/linux/elf.h
76660+++ b/include/linux/elf.h
76661@@ -24,6 +24,7 @@ extern Elf32_Dyn _DYNAMIC [];
76662 #define elf_note elf32_note
76663 #define elf_addr_t Elf32_Off
76664 #define Elf_Half Elf32_Half
76665+#define elf_dyn Elf32_Dyn
76666
76667 #else
76668
76669@@ -34,6 +35,7 @@ extern Elf64_Dyn _DYNAMIC [];
76670 #define elf_note elf64_note
76671 #define elf_addr_t Elf64_Off
76672 #define Elf_Half Elf64_Half
76673+#define elf_dyn Elf64_Dyn
76674
76675 #endif
76676
76677diff --git a/include/linux/err.h b/include/linux/err.h
76678index 15f92e0..e825a8e 100644
76679--- a/include/linux/err.h
76680+++ b/include/linux/err.h
76681@@ -19,12 +19,12 @@
76682
76683 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
76684
76685-static inline void * __must_check ERR_PTR(long error)
76686+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
76687 {
76688 return (void *) error;
76689 }
76690
76691-static inline long __must_check PTR_ERR(__force const void *ptr)
76692+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
76693 {
76694 return (long) ptr;
76695 }
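
ERR_PTR()/PTR_ERR() deliberately round-trip small negative errnos through a pointer, which is exactly the kind of value-range conversion the size_overflow plugin would otherwise flag; __intentional_overflow(-1) whitelists it. The pattern itself, as a self-contained userspace sketch (hypothetical demo; MAX_ERRNO and the helper names mirror the kernel's):

#include <stdio.h>

#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static void *ERR_PTR_demo(long error)      { return (void *)error; }
static long  PTR_ERR_demo(const void *ptr) { return (long)ptr; }
static int   IS_ERR_demo(const void *ptr)  { return IS_ERR_VALUE((unsigned long)ptr); }

static void *lookup(int fail)
{
    static int obj = 42;
    return fail ? ERR_PTR_demo(-2 /* -ENOENT */) : (void *)&obj;
}

int main(void)
{
    void *p = lookup(1);
    if (IS_ERR_demo(p))
        printf("error: %ld\n", PTR_ERR_demo(p));   /* prints -2 */
    else
        printf("value: %d\n", *(int *)p);
    return 0;
}
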
76696diff --git a/include/linux/extcon.h b/include/linux/extcon.h
76697index 21c59af..6057a03 100644
76698--- a/include/linux/extcon.h
76699+++ b/include/linux/extcon.h
76700@@ -135,7 +135,7 @@ struct extcon_dev {
76701 /* /sys/class/extcon/.../mutually_exclusive/... */
76702 struct attribute_group attr_g_muex;
76703 struct attribute **attrs_muex;
76704- struct device_attribute *d_attrs_muex;
76705+ device_attribute_no_const *d_attrs_muex;
76706 };
76707
76708 /**
76709diff --git a/include/linux/fb.h b/include/linux/fb.h
76710index 70c4836..ff3daec 100644
76711--- a/include/linux/fb.h
76712+++ b/include/linux/fb.h
76713@@ -304,7 +304,7 @@ struct fb_ops {
76714 /* called at KDB enter and leave time to prepare the console */
76715 int (*fb_debug_enter)(struct fb_info *info);
76716 int (*fb_debug_leave)(struct fb_info *info);
76717-};
76718+} __do_const;
76719
76720 #ifdef CONFIG_FB_TILEBLITTING
76721 #define FB_TILE_CURSOR_NONE 0
76722diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
76723index 085197b..0fa6f0b 100644
76724--- a/include/linux/fdtable.h
76725+++ b/include/linux/fdtable.h
76726@@ -95,7 +95,7 @@ struct files_struct *get_files_struct(struct task_struct *);
76727 void put_files_struct(struct files_struct *fs);
76728 void reset_files_struct(struct files_struct *);
76729 int unshare_files(struct files_struct **);
76730-struct files_struct *dup_fd(struct files_struct *, int *);
76731+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
76732 void do_close_on_exec(struct files_struct *);
76733 int iterate_fd(struct files_struct *, unsigned,
76734 int (*)(const void *, struct file *, unsigned),
76735diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
76736index 8293262..2b3b8bd 100644
76737--- a/include/linux/frontswap.h
76738+++ b/include/linux/frontswap.h
76739@@ -11,7 +11,7 @@ struct frontswap_ops {
76740 int (*load)(unsigned, pgoff_t, struct page *);
76741 void (*invalidate_page)(unsigned, pgoff_t);
76742 void (*invalidate_area)(unsigned);
76743-};
76744+} __no_const;
76745
76746 extern bool frontswap_enabled;
76747 extern struct frontswap_ops *
76748diff --git a/include/linux/fs.h b/include/linux/fs.h
76749index 121f11f..0f2a863 100644
76750--- a/include/linux/fs.h
76751+++ b/include/linux/fs.h
76752@@ -423,7 +423,7 @@ struct address_space {
76753 spinlock_t private_lock; /* for use by the address_space */
76754 struct list_head private_list; /* ditto */
76755 void *private_data; /* ditto */
76756-} __attribute__((aligned(sizeof(long))));
76757+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
76758 /*
76759 * On most architectures that alignment is already the case; but
76760 * must be enforced here for CRIS, to let the least significant bit
76761@@ -466,7 +466,7 @@ struct block_device {
76762 int bd_fsfreeze_count;
76763 /* Mutex for freeze */
76764 struct mutex bd_fsfreeze_mutex;
76765-};
76766+} __randomize_layout;
76767
76768 /*
76769 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
76770@@ -610,7 +610,7 @@ struct inode {
76771 atomic_t i_readcount; /* struct files open RO */
76772 #endif
76773 void *i_private; /* fs or device private pointer */
76774-};
76775+} __randomize_layout;
76776
76777 static inline int inode_unhashed(struct inode *inode)
76778 {
76779@@ -808,7 +808,7 @@ struct file {
76780 #ifdef CONFIG_DEBUG_WRITECOUNT
76781 unsigned long f_mnt_write_state;
76782 #endif
76783-};
76784+} __randomize_layout;
76785
76786 struct file_handle {
76787 __u32 handle_bytes;
76788@@ -978,7 +978,7 @@ struct file_lock {
76789 int state; /* state of grant or error if -ve */
76790 } afs;
76791 } fl_u;
76792-};
76793+} __randomize_layout;
76794
76795 /* The following constant reflects the upper bound of the file/locking space */
76796 #ifndef OFFSET_MAX
76797@@ -1325,7 +1325,7 @@ struct super_block {
76798 struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
76799 struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
76800 struct rcu_head rcu;
76801-};
76802+} __randomize_layout;
76803
76804 extern struct timespec current_fs_time(struct super_block *sb);
76805
76806@@ -1547,7 +1547,8 @@ struct file_operations {
76807 long (*fallocate)(struct file *file, int mode, loff_t offset,
76808 loff_t len);
76809 int (*show_fdinfo)(struct seq_file *m, struct file *f);
76810-};
76811+} __do_const __randomize_layout;
76812+typedef struct file_operations __no_const file_operations_no_const;
76813
76814 struct inode_operations {
76815 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
76816@@ -2808,4 +2809,14 @@ static inline bool dir_relax(struct inode *inode)
76817 return !IS_DEADDIR(inode);
76818 }
76819
76820+static inline bool is_sidechannel_device(const struct inode *inode)
76821+{
76822+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
76823+ umode_t mode = inode->i_mode;
76824+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
76825+#else
76826+ return false;
76827+#endif
76828+}
76829+
76830 #endif /* _LINUX_FS_H */
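
is_sidechannel_device() flags world-readable or world-writable character and block devices; the fsnotify hunks further below skip access/modify events for them, so a watcher cannot use inotify on a device such as a pty as a keystroke timing side channel. A userspace approximation of the same mode test, assuming POSIX <sys/stat.h> (hypothetical demo):

#include <stdio.h>
#include <sys/stat.h>

/* mirrors the kernel check: chr/blk device with o+r or o+w bits set */
static int is_sidechannel_device_mode(mode_t mode)
{
    return (S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH));
}

int main(int argc, char **argv)
{
    struct stat st;
    const char *path = argc > 1 ? argv[1] : "/dev/tty";

    if (stat(path, &st) != 0) {
        perror("stat");
        return 1;
    }
    printf("%s: %s\n", path,
           is_sidechannel_device_mode(st.st_mode) ? "side-channel candidate"
                                                  : "not flagged");
    return 0;
}
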
76831diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
76832index 0efc3e6..fd23610 100644
76833--- a/include/linux/fs_struct.h
76834+++ b/include/linux/fs_struct.h
76835@@ -6,13 +6,13 @@
76836 #include <linux/seqlock.h>
76837
76838 struct fs_struct {
76839- int users;
76840+ atomic_t users;
76841 spinlock_t lock;
76842 seqcount_t seq;
76843 int umask;
76844 int in_exec;
76845 struct path root, pwd;
76846-};
76847+} __randomize_layout;
76848
76849 extern struct kmem_cache *fs_cachep;
76850
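
Converting fs_struct.users from a plain int into an atomic_t lets the reference count be taken and dropped atomically rather than only under fs->lock. The classic get/put shape, as a minimal sketch in C11 atomics (hypothetical demo; the kernel uses atomic_inc()/atomic_dec_and_test(), noted in the comments):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fs_struct_demo {
    atomic_int users;   /* stands in for the kernel's atomic_t */
};

static struct fs_struct_demo *fs_get(struct fs_struct_demo *fs)
{
    atomic_fetch_add(&fs->users, 1);          /* atomic_inc() */
    return fs;
}

static void fs_put(struct fs_struct_demo *fs)
{
    if (atomic_fetch_sub(&fs->users, 1) == 1) /* atomic_dec_and_test() */
        free(fs);                             /* last reference dropped */
}

int main(void)
{
    struct fs_struct_demo *fs = calloc(1, sizeof(*fs));
    if (!fs)
        return 1;
    atomic_store(&fs->users, 1);
    fs_get(fs);
    fs_put(fs);
    fs_put(fs);   /* count hits zero, object is freed */
    puts("done");
    return 0;
}
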
76851diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
76852index 7714849..a4a5c7a 100644
76853--- a/include/linux/fscache-cache.h
76854+++ b/include/linux/fscache-cache.h
76855@@ -113,7 +113,7 @@ struct fscache_operation {
76856 fscache_operation_release_t release;
76857 };
76858
76859-extern atomic_t fscache_op_debug_id;
76860+extern atomic_unchecked_t fscache_op_debug_id;
76861 extern void fscache_op_work_func(struct work_struct *work);
76862
76863 extern void fscache_enqueue_operation(struct fscache_operation *);
76864@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
76865 INIT_WORK(&op->work, fscache_op_work_func);
76866 atomic_set(&op->usage, 1);
76867 op->state = FSCACHE_OP_ST_INITIALISED;
76868- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
76869+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
76870 op->processor = processor;
76871 op->release = release;
76872 INIT_LIST_HEAD(&op->pend_link);
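
fscache_op_debug_id is a monotonically increasing identifier, not a reference count. Under PaX's REFCOUNT hardening a plain atomic_t traps on wraparound, so counters whose wraparound is harmless are switched to atomic_unchecked_t, as here. A sketch of the distinction (hypothetical demo; the helper names are illustrative, not the kernel API):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* checked increment: the hardened-atomic_t behaviour, approximated */
static int inc_return_checked(int *v)
{
    if (*v == INT_MAX) {           /* PaX would detect the overflow here */
        fprintf(stderr, "refcount overflow detected\n");
        abort();
    }
    return ++*v;
}

/* unchecked increment: atomic_inc_return_unchecked(), wrap is fine */
static unsigned int inc_return_unchecked(unsigned int *v)
{
    return ++*v;                   /* defined wraparound for unsigned */
}

int main(void)
{
    unsigned int debug_id = UINT_MAX;  /* about to wrap, and that's OK */
    printf("next debug id: %u\n", inc_return_unchecked(&debug_id));

    int refs = 1;
    printf("refs: %d\n", inc_return_checked(&refs));
    return 0;
}
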
76873diff --git a/include/linux/fscache.h b/include/linux/fscache.h
76874index 115bb81..e7b812b 100644
76875--- a/include/linux/fscache.h
76876+++ b/include/linux/fscache.h
76877@@ -152,7 +152,7 @@ struct fscache_cookie_def {
76878 * - this is mandatory for any object that may have data
76879 */
76880 void (*now_uncached)(void *cookie_netfs_data);
76881-};
76882+} __do_const;
76883
76884 /*
76885 * fscache cached network filesystem type
76886diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
76887index 1c804b0..1432c2b 100644
76888--- a/include/linux/fsnotify.h
76889+++ b/include/linux/fsnotify.h
76890@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
76891 struct inode *inode = file_inode(file);
76892 __u32 mask = FS_ACCESS;
76893
76894+ if (is_sidechannel_device(inode))
76895+ return;
76896+
76897 if (S_ISDIR(inode->i_mode))
76898 mask |= FS_ISDIR;
76899
76900@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
76901 struct inode *inode = file_inode(file);
76902 __u32 mask = FS_MODIFY;
76903
76904+ if (is_sidechannel_device(inode))
76905+ return;
76906+
76907 if (S_ISDIR(inode->i_mode))
76908 mask |= FS_ISDIR;
76909
76910@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
76911 */
76912 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
76913 {
76914- return kstrdup(name, GFP_KERNEL);
76915+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
76916 }
76917
76918 /*
76919diff --git a/include/linux/genhd.h b/include/linux/genhd.h
76920index 9f3c275..8bdff5d 100644
76921--- a/include/linux/genhd.h
76922+++ b/include/linux/genhd.h
76923@@ -194,7 +194,7 @@ struct gendisk {
76924 struct kobject *slave_dir;
76925
76926 struct timer_rand_state *random;
76927- atomic_t sync_io; /* RAID */
76928+ atomic_unchecked_t sync_io; /* RAID */
76929 struct disk_events *ev;
76930 #ifdef CONFIG_BLK_DEV_INTEGRITY
76931 struct blk_integrity *integrity;
76932@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
76933 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
76934
76935 /* drivers/char/random.c */
76936-extern void add_disk_randomness(struct gendisk *disk);
76937+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
76938 extern void rand_initialize_disk(struct gendisk *disk);
76939
76940 static inline sector_t get_start_sect(struct block_device *bdev)
76941diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
76942index c0894dd..2fbf10c 100644
76943--- a/include/linux/genl_magic_func.h
76944+++ b/include/linux/genl_magic_func.h
76945@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
76946 },
76947
76948 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
76949-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
76950+static struct genl_ops ZZZ_genl_ops[] = {
76951 #include GENL_MAGIC_INCLUDE_FILE
76952 };
76953
76954diff --git a/include/linux/gfp.h b/include/linux/gfp.h
76955index 9b4dd49..61fd41d 100644
76956--- a/include/linux/gfp.h
76957+++ b/include/linux/gfp.h
76958@@ -35,6 +35,13 @@ struct vm_area_struct;
76959 #define ___GFP_NO_KSWAPD 0x400000u
76960 #define ___GFP_OTHER_NODE 0x800000u
76961 #define ___GFP_WRITE 0x1000000u
76962+
76963+#ifdef CONFIG_PAX_USERCOPY_SLABS
76964+#define ___GFP_USERCOPY 0x2000000u
76965+#else
76966+#define ___GFP_USERCOPY 0
76967+#endif
76968+
76969 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
76970
76971 /*
76972@@ -92,6 +99,7 @@ struct vm_area_struct;
76973 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
76974 #define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
76975 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
76976+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
76977
76978 /*
76979 * This may seem redundant, but it's a way of annotating false positives vs.
76980@@ -99,7 +107,7 @@ struct vm_area_struct;
76981 */
76982 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
76983
76984-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
76985+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
76986 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
76987
76988 /* This equals 0, but use constants in case they ever change */
76989@@ -153,6 +161,8 @@ struct vm_area_struct;
76990 /* 4GB DMA on some platforms */
76991 #define GFP_DMA32 __GFP_DMA32
76992
76993+#define GFP_USERCOPY __GFP_USERCOPY
76994+
76995 /* Convert GFP flags to their corresponding migrate type */
76996 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
76997 {
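
The arithmetic behind this hunk: ___GFP_WRITE was the highest flag at 0x1000000u (bit 24), so ___GFP_USERCOPY claims bit 25 (0x2000000u) and __GFP_BITS_SHIFT must grow from 25 to 26 so that __GFP_BITS_MASK still covers every flag. A quick self-checking sketch in plain C (hypothetical demo):

#include <assert.h>
#include <stdio.h>

#define ___GFP_WRITE    0x1000000u   /* bit 24, previous top flag */
#define ___GFP_USERCOPY 0x2000000u   /* bit 25, the new flag */

int main(void)
{
    unsigned int shift = 26;                       /* new __GFP_BITS_SHIFT */
    unsigned int mask  = (1u << shift) - 1;        /* __GFP_BITS_MASK */

    assert(___GFP_USERCOPY == ___GFP_WRITE << 1);  /* next free bit */
    assert(___GFP_USERCOPY & mask);                /* covered by new mask */
    assert(!(___GFP_USERCOPY & ((1u << 25) - 1))); /* not covered by old one */

    printf("mask = 0x%x\n", mask);                 /* prints 0x3ffffff */
    return 0;
}
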
76998diff --git a/include/linux/gracl.h b/include/linux/gracl.h
76999new file mode 100644
77000index 0000000..edb2cb6
77001--- /dev/null
77002+++ b/include/linux/gracl.h
77003@@ -0,0 +1,340 @@
77004+#ifndef GR_ACL_H
77005+#define GR_ACL_H
77006+
77007+#include <linux/grdefs.h>
77008+#include <linux/resource.h>
77009+#include <linux/capability.h>
77010+#include <linux/dcache.h>
77011+#include <asm/resource.h>
77012+
77013+/* Major status information */
77014+
77015+#define GR_VERSION "grsecurity 3.0"
77016+#define GRSECURITY_VERSION 0x3000
77017+
77018+enum {
77019+ GR_SHUTDOWN = 0,
77020+ GR_ENABLE = 1,
77021+ GR_SPROLE = 2,
77022+ GR_OLDRELOAD = 3,
77023+ GR_SEGVMOD = 4,
77024+ GR_STATUS = 5,
77025+ GR_UNSPROLE = 6,
77026+ GR_PASSSET = 7,
77027+ GR_SPROLEPAM = 8,
77028+ GR_RELOAD = 9,
77029+};
77030+
77031+/* Password setup definitions
77032+ * kernel/grhash.c */
77033+enum {
77034+ GR_PW_LEN = 128,
77035+ GR_SALT_LEN = 16,
77036+ GR_SHA_LEN = 32,
77037+};
77038+
77039+enum {
77040+ GR_SPROLE_LEN = 64,
77041+};
77042+
77043+enum {
77044+ GR_NO_GLOB = 0,
77045+ GR_REG_GLOB,
77046+ GR_CREATE_GLOB
77047+};
77048+
77049+#define GR_NLIMITS 32
77050+
77051+/* Begin Data Structures */
77052+
77053+struct sprole_pw {
77054+ unsigned char *rolename;
77055+ unsigned char salt[GR_SALT_LEN];
77056+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
77057+};
77058+
77059+struct name_entry {
77060+ __u32 key;
77061+ ino_t inode;
77062+ dev_t device;
77063+ char *name;
77064+ __u16 len;
77065+ __u8 deleted;
77066+ struct name_entry *prev;
77067+ struct name_entry *next;
77068+};
77069+
77070+struct inodev_entry {
77071+ struct name_entry *nentry;
77072+ struct inodev_entry *prev;
77073+ struct inodev_entry *next;
77074+};
77075+
77076+struct acl_role_db {
77077+ struct acl_role_label **r_hash;
77078+ __u32 r_size;
77079+};
77080+
77081+struct inodev_db {
77082+ struct inodev_entry **i_hash;
77083+ __u32 i_size;
77084+};
77085+
77086+struct name_db {
77087+ struct name_entry **n_hash;
77088+ __u32 n_size;
77089+};
77090+
77091+struct crash_uid {
77092+ uid_t uid;
77093+ unsigned long expires;
77094+};
77095+
77096+struct gr_hash_struct {
77097+ void **table;
77098+ void **nametable;
77099+ void *first;
77100+ __u32 table_size;
77101+ __u32 used_size;
77102+ int type;
77103+};
77104+
77105+/* Userspace Grsecurity ACL data structures */
77106+
77107+struct acl_subject_label {
77108+ char *filename;
77109+ ino_t inode;
77110+ dev_t device;
77111+ __u32 mode;
77112+ kernel_cap_t cap_mask;
77113+ kernel_cap_t cap_lower;
77114+ kernel_cap_t cap_invert_audit;
77115+
77116+ struct rlimit res[GR_NLIMITS];
77117+ __u32 resmask;
77118+
77119+ __u8 user_trans_type;
77120+ __u8 group_trans_type;
77121+ uid_t *user_transitions;
77122+ gid_t *group_transitions;
77123+ __u16 user_trans_num;
77124+ __u16 group_trans_num;
77125+
77126+ __u32 sock_families[2];
77127+ __u32 ip_proto[8];
77128+ __u32 ip_type;
77129+ struct acl_ip_label **ips;
77130+ __u32 ip_num;
77131+ __u32 inaddr_any_override;
77132+
77133+ __u32 crashes;
77134+ unsigned long expires;
77135+
77136+ struct acl_subject_label *parent_subject;
77137+ struct gr_hash_struct *hash;
77138+ struct acl_subject_label *prev;
77139+ struct acl_subject_label *next;
77140+
77141+ struct acl_object_label **obj_hash;
77142+ __u32 obj_hash_size;
77143+ __u16 pax_flags;
77144+};
77145+
77146+struct role_allowed_ip {
77147+ __u32 addr;
77148+ __u32 netmask;
77149+
77150+ struct role_allowed_ip *prev;
77151+ struct role_allowed_ip *next;
77152+};
77153+
77154+struct role_transition {
77155+ char *rolename;
77156+
77157+ struct role_transition *prev;
77158+ struct role_transition *next;
77159+};
77160+
77161+struct acl_role_label {
77162+ char *rolename;
77163+ uid_t uidgid;
77164+ __u16 roletype;
77165+
77166+ __u16 auth_attempts;
77167+ unsigned long expires;
77168+
77169+ struct acl_subject_label *root_label;
77170+ struct gr_hash_struct *hash;
77171+
77172+ struct acl_role_label *prev;
77173+ struct acl_role_label *next;
77174+
77175+ struct role_transition *transitions;
77176+ struct role_allowed_ip *allowed_ips;
77177+ uid_t *domain_children;
77178+ __u16 domain_child_num;
77179+
77180+ umode_t umask;
77181+
77182+ struct acl_subject_label **subj_hash;
77183+ __u32 subj_hash_size;
77184+};
77185+
77186+struct user_acl_role_db {
77187+ struct acl_role_label **r_table;
77188+ __u32 num_pointers; /* Number of allocations to track */
77189+ __u32 num_roles; /* Number of roles */
77190+ __u32 num_domain_children; /* Number of domain children */
77191+ __u32 num_subjects; /* Number of subjects */
77192+ __u32 num_objects; /* Number of objects */
77193+};
77194+
77195+struct acl_object_label {
77196+ char *filename;
77197+ ino_t inode;
77198+ dev_t device;
77199+ __u32 mode;
77200+
77201+ struct acl_subject_label *nested;
77202+ struct acl_object_label *globbed;
77203+
77204+ /* next two structures not used */
77205+
77206+ struct acl_object_label *prev;
77207+ struct acl_object_label *next;
77208+};
77209+
77210+struct acl_ip_label {
77211+ char *iface;
77212+ __u32 addr;
77213+ __u32 netmask;
77214+ __u16 low, high;
77215+ __u8 mode;
77216+ __u32 type;
77217+ __u32 proto[8];
77218+
77219+ /* next two structures not used */
77220+
77221+ struct acl_ip_label *prev;
77222+ struct acl_ip_label *next;
77223+};
77224+
77225+struct gr_arg {
77226+ struct user_acl_role_db role_db;
77227+ unsigned char pw[GR_PW_LEN];
77228+ unsigned char salt[GR_SALT_LEN];
77229+ unsigned char sum[GR_SHA_LEN];
77230+ unsigned char sp_role[GR_SPROLE_LEN];
77231+ struct sprole_pw *sprole_pws;
77232+ dev_t segv_device;
77233+ ino_t segv_inode;
77234+ uid_t segv_uid;
77235+ __u16 num_sprole_pws;
77236+ __u16 mode;
77237+};
77238+
77239+struct gr_arg_wrapper {
77240+ struct gr_arg *arg;
77241+ __u32 version;
77242+ __u32 size;
77243+};
77244+
77245+struct subject_map {
77246+ struct acl_subject_label *user;
77247+ struct acl_subject_label *kernel;
77248+ struct subject_map *prev;
77249+ struct subject_map *next;
77250+};
77251+
77252+struct acl_subj_map_db {
77253+ struct subject_map **s_hash;
77254+ __u32 s_size;
77255+};
77256+
77257+struct gr_policy_state {
77258+ struct sprole_pw **acl_special_roles;
77259+ __u16 num_sprole_pws;
77260+ struct acl_role_label *kernel_role;
77261+ struct acl_role_label *role_list;
77262+ struct acl_role_label *default_role;
77263+ struct acl_role_db acl_role_set;
77264+ struct acl_subj_map_db subj_map_set;
77265+ struct name_db name_set;
77266+ struct inodev_db inodev_set;
77267+};
77268+
77269+struct gr_alloc_state {
77270+ unsigned long alloc_stack_next;
77271+ unsigned long alloc_stack_size;
77272+ void **alloc_stack;
77273+};
77274+
77275+struct gr_reload_state {
77276+ struct gr_policy_state oldpolicy;
77277+ struct gr_alloc_state oldalloc;
77278+ struct gr_policy_state newpolicy;
77279+ struct gr_alloc_state newalloc;
77280+ struct gr_policy_state *oldpolicy_ptr;
77281+ struct gr_alloc_state *oldalloc_ptr;
77282+ unsigned char oldmode;
77283+};
77284+
77285+/* End Data Structures Section */
77286+
77287+/* Hash functions generated by empirical testing by Brad Spengler.
77288+ Makes good use of the low bits of the inode. Generally 0-1 loop
77289+ iterations for a successful match, 0-3 for an unsuccessful one.
77290+ Shift/add algorithm with modulus of table size and an XOR. */
77291+
77292+static __inline__ unsigned int
77293+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
77294+{
77295+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
77296+}
77297+
77298+static __inline__ unsigned int
77299+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
77300+{
77301+ return ((const unsigned long)userp % sz);
77302+}
77303+
77304+static __inline__ unsigned int
77305+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
77306+{
77307+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
77308+}
77309+
77310+static __inline__ unsigned int
77311+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
77312+{
77313+ return full_name_hash((const unsigned char *)name, len) % sz;
77314+}
77315+
77316+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
77317+ subj = NULL; \
77318+ iter = 0; \
77319+ while (iter < role->subj_hash_size) { \
77320+ if (subj == NULL) \
77321+ subj = role->subj_hash[iter]; \
77322+ if (subj == NULL) { \
77323+ iter++; \
77324+ continue; \
77325+ }
77326+
77327+#define FOR_EACH_SUBJECT_END(subj,iter) \
77328+ subj = subj->next; \
77329+ if (subj == NULL) \
77330+ iter++; \
77331+ }
77332+
77333+
77334+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
77335+ subj = role->hash->first; \
77336+ while (subj != NULL) {
77337+
77338+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
77339+ subj = subj->next; \
77340+ }
77341+
77342+#endif
77343+
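
To make the hash comment above concrete: gr_fhash() mixes the inode and device numbers with shifts and adds, XORs the two halves, then reduces modulo the table size. A standalone sketch that reproduces the expression and prints a few bucket indices (hypothetical demo; ino_t/dev_t narrowed to unsigned long):

#include <stdio.h>

/* same expression as gr_fhash() in gracl.h */
static unsigned int gr_fhash_demo(unsigned long ino, unsigned long dev,
                                  unsigned int sz)
{
    return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
}

int main(void)
{
    unsigned int sz = 256;      /* example obj_hash_size */
    unsigned long dev = 0x801;  /* e.g. device 8:1 */
    unsigned long ino;

    /* consecutive inodes should scatter across buckets */
    for (ino = 1000; ino < 1008; ino++)
        printf("ino %lu -> bucket %u\n", ino, gr_fhash_demo(ino, dev, sz));
    return 0;
}
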
77344diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
77345new file mode 100644
77346index 0000000..33ebd1f
77347--- /dev/null
77348+++ b/include/linux/gracl_compat.h
77349@@ -0,0 +1,156 @@
77350+#ifndef GR_ACL_COMPAT_H
77351+#define GR_ACL_COMPAT_H
77352+
77353+#include <linux/resource.h>
77354+#include <asm/resource.h>
77355+
77356+struct sprole_pw_compat {
77357+ compat_uptr_t rolename;
77358+ unsigned char salt[GR_SALT_LEN];
77359+ unsigned char sum[GR_SHA_LEN];
77360+};
77361+
77362+struct gr_hash_struct_compat {
77363+ compat_uptr_t table;
77364+ compat_uptr_t nametable;
77365+ compat_uptr_t first;
77366+ __u32 table_size;
77367+ __u32 used_size;
77368+ int type;
77369+};
77370+
77371+struct acl_subject_label_compat {
77372+ compat_uptr_t filename;
77373+ compat_ino_t inode;
77374+ __u32 device;
77375+ __u32 mode;
77376+ kernel_cap_t cap_mask;
77377+ kernel_cap_t cap_lower;
77378+ kernel_cap_t cap_invert_audit;
77379+
77380+ struct compat_rlimit res[GR_NLIMITS];
77381+ __u32 resmask;
77382+
77383+ __u8 user_trans_type;
77384+ __u8 group_trans_type;
77385+ compat_uptr_t user_transitions;
77386+ compat_uptr_t group_transitions;
77387+ __u16 user_trans_num;
77388+ __u16 group_trans_num;
77389+
77390+ __u32 sock_families[2];
77391+ __u32 ip_proto[8];
77392+ __u32 ip_type;
77393+ compat_uptr_t ips;
77394+ __u32 ip_num;
77395+ __u32 inaddr_any_override;
77396+
77397+ __u32 crashes;
77398+ compat_ulong_t expires;
77399+
77400+ compat_uptr_t parent_subject;
77401+ compat_uptr_t hash;
77402+ compat_uptr_t prev;
77403+ compat_uptr_t next;
77404+
77405+ compat_uptr_t obj_hash;
77406+ __u32 obj_hash_size;
77407+ __u16 pax_flags;
77408+};
77409+
77410+struct role_allowed_ip_compat {
77411+ __u32 addr;
77412+ __u32 netmask;
77413+
77414+ compat_uptr_t prev;
77415+ compat_uptr_t next;
77416+};
77417+
77418+struct role_transition_compat {
77419+ compat_uptr_t rolename;
77420+
77421+ compat_uptr_t prev;
77422+ compat_uptr_t next;
77423+};
77424+
77425+struct acl_role_label_compat {
77426+ compat_uptr_t rolename;
77427+ uid_t uidgid;
77428+ __u16 roletype;
77429+
77430+ __u16 auth_attempts;
77431+ compat_ulong_t expires;
77432+
77433+ compat_uptr_t root_label;
77434+ compat_uptr_t hash;
77435+
77436+ compat_uptr_t prev;
77437+ compat_uptr_t next;
77438+
77439+ compat_uptr_t transitions;
77440+ compat_uptr_t allowed_ips;
77441+ compat_uptr_t domain_children;
77442+ __u16 domain_child_num;
77443+
77444+ umode_t umask;
77445+
77446+ compat_uptr_t subj_hash;
77447+ __u32 subj_hash_size;
77448+};
77449+
77450+struct user_acl_role_db_compat {
77451+ compat_uptr_t r_table;
77452+ __u32 num_pointers;
77453+ __u32 num_roles;
77454+ __u32 num_domain_children;
77455+ __u32 num_subjects;
77456+ __u32 num_objects;
77457+};
77458+
77459+struct acl_object_label_compat {
77460+ compat_uptr_t filename;
77461+ compat_ino_t inode;
77462+ __u32 device;
77463+ __u32 mode;
77464+
77465+ compat_uptr_t nested;
77466+ compat_uptr_t globbed;
77467+
77468+ compat_uptr_t prev;
77469+ compat_uptr_t next;
77470+};
77471+
77472+struct acl_ip_label_compat {
77473+ compat_uptr_t iface;
77474+ __u32 addr;
77475+ __u32 netmask;
77476+ __u16 low, high;
77477+ __u8 mode;
77478+ __u32 type;
77479+ __u32 proto[8];
77480+
77481+ compat_uptr_t prev;
77482+ compat_uptr_t next;
77483+};
77484+
77485+struct gr_arg_compat {
77486+ struct user_acl_role_db_compat role_db;
77487+ unsigned char pw[GR_PW_LEN];
77488+ unsigned char salt[GR_SALT_LEN];
77489+ unsigned char sum[GR_SHA_LEN];
77490+ unsigned char sp_role[GR_SPROLE_LEN];
77491+ compat_uptr_t sprole_pws;
77492+ __u32 segv_device;
77493+ compat_ino_t segv_inode;
77494+ uid_t segv_uid;
77495+ __u16 num_sprole_pws;
77496+ __u16 mode;
77497+};
77498+
77499+struct gr_arg_wrapper_compat {
77500+ compat_uptr_t arg;
77501+ __u32 version;
77502+ __u32 size;
77503+};
77504+
77505+#endif
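
In these _compat mirrors every kernel pointer becomes a compat_uptr_t (a 32-bit user pointer) and every long a compat_ulong_t, so a 64-bit kernel can parse a policy blob assembled by 32-bit gracl userland; the loader then widens each handle before dereferencing it. A sketch of that widening step, assuming a 64-bit host (hypothetical demo; compat_ptr() is the real kernel helper, reimplemented here only for illustration):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t compat_uptr_t;   /* 32-bit user pointer, as in the kernel */

/* what the kernel's compat_ptr() does on most arches: zero-extend */
static void *compat_ptr_demo(compat_uptr_t uptr)
{
    return (void *)(uintptr_t)uptr;
}

struct role_transition_compat_demo {
    compat_uptr_t rolename;   /* 4 bytes, not 8 */
};

int main(void)
{
    struct role_transition_compat_demo rt = { .rolename = 0x08048000u };
    void *p = compat_ptr_demo(rt.rolename);

    printf("sizeof(compat field) = %zu, widened pointer = %p\n",
           sizeof(rt.rolename), p);
    return 0;
}
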
77506diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
77507new file mode 100644
77508index 0000000..323ecf2
77509--- /dev/null
77510+++ b/include/linux/gralloc.h
77511@@ -0,0 +1,9 @@
77512+#ifndef __GRALLOC_H
77513+#define __GRALLOC_H
77514+
77515+void acl_free_all(void);
77516+int acl_alloc_stack_init(unsigned long size);
77517+void *acl_alloc(unsigned long len);
77518+void *acl_alloc_num(unsigned long num, unsigned long len);
77519+
77520+#endif
77521diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
77522new file mode 100644
77523index 0000000..be66033
77524--- /dev/null
77525+++ b/include/linux/grdefs.h
77526@@ -0,0 +1,140 @@
77527+#ifndef GRDEFS_H
77528+#define GRDEFS_H
77529+
77530+/* Begin grsecurity status declarations */
77531+
77532+enum {
77533+ GR_READY = 0x01,
77534+ GR_STATUS_INIT = 0x00 // disabled state
77535+};
77536+
77537+/* Begin ACL declarations */
77538+
77539+/* Role flags */
77540+
77541+enum {
77542+ GR_ROLE_USER = 0x0001,
77543+ GR_ROLE_GROUP = 0x0002,
77544+ GR_ROLE_DEFAULT = 0x0004,
77545+ GR_ROLE_SPECIAL = 0x0008,
77546+ GR_ROLE_AUTH = 0x0010,
77547+ GR_ROLE_NOPW = 0x0020,
77548+ GR_ROLE_GOD = 0x0040,
77549+ GR_ROLE_LEARN = 0x0080,
77550+ GR_ROLE_TPE = 0x0100,
77551+ GR_ROLE_DOMAIN = 0x0200,
77552+ GR_ROLE_PAM = 0x0400,
77553+ GR_ROLE_PERSIST = 0x0800
77554+};
77555+
77556+/* ACL Subject and Object mode flags */
77557+enum {
77558+ GR_DELETED = 0x80000000
77559+};
77560+
77561+/* ACL Object-only mode flags */
77562+enum {
77563+ GR_READ = 0x00000001,
77564+ GR_APPEND = 0x00000002,
77565+ GR_WRITE = 0x00000004,
77566+ GR_EXEC = 0x00000008,
77567+ GR_FIND = 0x00000010,
77568+ GR_INHERIT = 0x00000020,
77569+ GR_SETID = 0x00000040,
77570+ GR_CREATE = 0x00000080,
77571+ GR_DELETE = 0x00000100,
77572+ GR_LINK = 0x00000200,
77573+ GR_AUDIT_READ = 0x00000400,
77574+ GR_AUDIT_APPEND = 0x00000800,
77575+ GR_AUDIT_WRITE = 0x00001000,
77576+ GR_AUDIT_EXEC = 0x00002000,
77577+ GR_AUDIT_FIND = 0x00004000,
77578+ GR_AUDIT_INHERIT= 0x00008000,
77579+ GR_AUDIT_SETID = 0x00010000,
77580+ GR_AUDIT_CREATE = 0x00020000,
77581+ GR_AUDIT_DELETE = 0x00040000,
77582+ GR_AUDIT_LINK = 0x00080000,
77583+ GR_PTRACERD = 0x00100000,
77584+ GR_NOPTRACE = 0x00200000,
77585+ GR_SUPPRESS = 0x00400000,
77586+ GR_NOLEARN = 0x00800000,
77587+ GR_INIT_TRANSFER= 0x01000000
77588+};
77589+
77590+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
77591+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
77592+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
77593+
77594+/* ACL subject-only mode flags */
77595+enum {
77596+ GR_KILL = 0x00000001,
77597+ GR_VIEW = 0x00000002,
77598+ GR_PROTECTED = 0x00000004,
77599+ GR_LEARN = 0x00000008,
77600+ GR_OVERRIDE = 0x00000010,
77601+ /* just a placeholder, this mode is only used in userspace */
77602+ GR_DUMMY = 0x00000020,
77603+ GR_PROTSHM = 0x00000040,
77604+ GR_KILLPROC = 0x00000080,
77605+ GR_KILLIPPROC = 0x00000100,
77606+ /* just a placeholder, this mode is only used in userspace */
77607+ GR_NOTROJAN = 0x00000200,
77608+ GR_PROTPROCFD = 0x00000400,
77609+ GR_PROCACCT = 0x00000800,
77610+ GR_RELAXPTRACE = 0x00001000,
77611+ //GR_NESTED = 0x00002000,
77612+ GR_INHERITLEARN = 0x00004000,
77613+ GR_PROCFIND = 0x00008000,
77614+ GR_POVERRIDE = 0x00010000,
77615+ GR_KERNELAUTH = 0x00020000,
77616+ GR_ATSECURE = 0x00040000,
77617+ GR_SHMEXEC = 0x00080000
77618+};
77619+
77620+enum {
77621+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
77622+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
77623+ GR_PAX_ENABLE_MPROTECT = 0x0004,
77624+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
77625+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
77626+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
77627+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
77628+ GR_PAX_DISABLE_MPROTECT = 0x0400,
77629+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
77630+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
77631+};
77632+
77633+enum {
77634+ GR_ID_USER = 0x01,
77635+ GR_ID_GROUP = 0x02,
77636+};
77637+
77638+enum {
77639+ GR_ID_ALLOW = 0x01,
77640+ GR_ID_DENY = 0x02,
77641+};
77642+
77643+#define GR_CRASH_RES 31
77644+#define GR_UIDTABLE_MAX 500
77645+
77646+/* begin resource learning section */
77647+enum {
77648+ GR_RLIM_CPU_BUMP = 60,
77649+ GR_RLIM_FSIZE_BUMP = 50000,
77650+ GR_RLIM_DATA_BUMP = 10000,
77651+ GR_RLIM_STACK_BUMP = 1000,
77652+ GR_RLIM_CORE_BUMP = 10000,
77653+ GR_RLIM_RSS_BUMP = 500000,
77654+ GR_RLIM_NPROC_BUMP = 1,
77655+ GR_RLIM_NOFILE_BUMP = 5,
77656+ GR_RLIM_MEMLOCK_BUMP = 50000,
77657+ GR_RLIM_AS_BUMP = 500000,
77658+ GR_RLIM_LOCKS_BUMP = 2,
77659+ GR_RLIM_SIGPENDING_BUMP = 5,
77660+ GR_RLIM_MSGQUEUE_BUMP = 10000,
77661+ GR_RLIM_NICE_BUMP = 1,
77662+ GR_RLIM_RTPRIO_BUMP = 1,
77663+ GR_RLIM_RTTIME_BUMP = 1000000
77664+};
77665+
77666+#endif
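
The role, object, and subject modes above are all single-bit flags, so one role can combine several of them and the RBAC code tests membership with a bitwise AND. A small sketch reusing a few of the GR_ROLE_* values (hypothetical demo):

#include <stdio.h>

enum {
    GR_ROLE_USER    = 0x0001,
    GR_ROLE_GROUP   = 0x0002,
    GR_ROLE_SPECIAL = 0x0008,
    GR_ROLE_NOPW    = 0x0020,
};

int main(void)
{
    /* a special role that authenticates without a password */
    unsigned short roletype = GR_ROLE_SPECIAL | GR_ROLE_NOPW;

    if (roletype & GR_ROLE_SPECIAL)
        puts("special role");
    if (!(roletype & GR_ROLE_USER))
        puts("not a per-user role");
    return 0;
}
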
77667diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
77668new file mode 100644
77669index 0000000..d25522e
77670--- /dev/null
77671+++ b/include/linux/grinternal.h
77672@@ -0,0 +1,229 @@
77673+#ifndef __GRINTERNAL_H
77674+#define __GRINTERNAL_H
77675+
77676+#ifdef CONFIG_GRKERNSEC
77677+
77678+#include <linux/fs.h>
77679+#include <linux/mnt_namespace.h>
77680+#include <linux/nsproxy.h>
77681+#include <linux/gracl.h>
77682+#include <linux/grdefs.h>
77683+#include <linux/grmsg.h>
77684+
77685+void gr_add_learn_entry(const char *fmt, ...)
77686+ __attribute__ ((format (printf, 1, 2)));
77687+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
77688+ const struct vfsmount *mnt);
77689+__u32 gr_check_create(const struct dentry *new_dentry,
77690+ const struct dentry *parent,
77691+ const struct vfsmount *mnt, const __u32 mode);
77692+int gr_check_protected_task(const struct task_struct *task);
77693+__u32 to_gr_audit(const __u32 reqmode);
77694+int gr_set_acls(const int type);
77695+int gr_acl_is_enabled(void);
77696+char gr_roletype_to_char(void);
77697+
77698+void gr_handle_alertkill(struct task_struct *task);
77699+char *gr_to_filename(const struct dentry *dentry,
77700+ const struct vfsmount *mnt);
77701+char *gr_to_filename1(const struct dentry *dentry,
77702+ const struct vfsmount *mnt);
77703+char *gr_to_filename2(const struct dentry *dentry,
77704+ const struct vfsmount *mnt);
77705+char *gr_to_filename3(const struct dentry *dentry,
77706+ const struct vfsmount *mnt);
77707+
77708+extern int grsec_enable_ptrace_readexec;
77709+extern int grsec_enable_harden_ptrace;
77710+extern int grsec_enable_link;
77711+extern int grsec_enable_fifo;
77712+extern int grsec_enable_execve;
77713+extern int grsec_enable_shm;
77714+extern int grsec_enable_execlog;
77715+extern int grsec_enable_signal;
77716+extern int grsec_enable_audit_ptrace;
77717+extern int grsec_enable_forkfail;
77718+extern int grsec_enable_time;
77719+extern int grsec_enable_rofs;
77720+extern int grsec_deny_new_usb;
77721+extern int grsec_enable_chroot_shmat;
77722+extern int grsec_enable_chroot_mount;
77723+extern int grsec_enable_chroot_double;
77724+extern int grsec_enable_chroot_pivot;
77725+extern int grsec_enable_chroot_chdir;
77726+extern int grsec_enable_chroot_chmod;
77727+extern int grsec_enable_chroot_mknod;
77728+extern int grsec_enable_chroot_fchdir;
77729+extern int grsec_enable_chroot_nice;
77730+extern int grsec_enable_chroot_execlog;
77731+extern int grsec_enable_chroot_caps;
77732+extern int grsec_enable_chroot_sysctl;
77733+extern int grsec_enable_chroot_unix;
77734+extern int grsec_enable_symlinkown;
77735+extern kgid_t grsec_symlinkown_gid;
77736+extern int grsec_enable_tpe;
77737+extern kgid_t grsec_tpe_gid;
77738+extern int grsec_enable_tpe_all;
77739+extern int grsec_enable_tpe_invert;
77740+extern int grsec_enable_socket_all;
77741+extern kgid_t grsec_socket_all_gid;
77742+extern int grsec_enable_socket_client;
77743+extern kgid_t grsec_socket_client_gid;
77744+extern int grsec_enable_socket_server;
77745+extern kgid_t grsec_socket_server_gid;
77746+extern kgid_t grsec_audit_gid;
77747+extern int grsec_enable_group;
77748+extern int grsec_enable_log_rwxmaps;
77749+extern int grsec_enable_mount;
77750+extern int grsec_enable_chdir;
77751+extern int grsec_resource_logging;
77752+extern int grsec_enable_blackhole;
77753+extern int grsec_lastack_retries;
77754+extern int grsec_enable_brute;
77755+extern int grsec_enable_harden_ipc;
77756+extern int grsec_lock;
77757+
77758+extern spinlock_t grsec_alert_lock;
77759+extern unsigned long grsec_alert_wtime;
77760+extern unsigned long grsec_alert_fyet;
77761+
77762+extern spinlock_t grsec_audit_lock;
77763+
77764+extern rwlock_t grsec_exec_file_lock;
77765+
77766+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
77767+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
77768+ (tsk)->exec_file->f_path.mnt) : "/")
77769+
77770+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
77771+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
77772+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
77773+
77774+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
77775+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
77776+ (tsk)->exec_file->f_path.mnt) : "/")
77777+
77778+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
77779+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
77780+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
77781+
77782+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
77783+
77784+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
77785+
77786+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
77787+{
77788+ if (file1 && file2) {
77789+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
77790+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
77791+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
77792+ return true;
77793+ }
77794+
77795+ return false;
77796+}
77797+
77798+#define GR_CHROOT_CAPS {{ \
77799+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
77800+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
77801+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
77802+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
77803+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
77804+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
77805+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
77806+
77807+#define security_learn(normal_msg,args...) \
77808+({ \
77809+ read_lock(&grsec_exec_file_lock); \
77810+ gr_add_learn_entry(normal_msg "\n", ## args); \
77811+ read_unlock(&grsec_exec_file_lock); \
77812+})
77813+
77814+enum {
77815+ GR_DO_AUDIT,
77816+ GR_DONT_AUDIT,
77817+	/* used for non-audit messages for which we shouldn't kill the task */
77818+ GR_DONT_AUDIT_GOOD
77819+};
77820+
77821+enum {
77822+ GR_TTYSNIFF,
77823+ GR_RBAC,
77824+ GR_RBAC_STR,
77825+ GR_STR_RBAC,
77826+ GR_RBAC_MODE2,
77827+ GR_RBAC_MODE3,
77828+ GR_FILENAME,
77829+ GR_SYSCTL_HIDDEN,
77830+ GR_NOARGS,
77831+ GR_ONE_INT,
77832+ GR_ONE_INT_TWO_STR,
77833+ GR_ONE_STR,
77834+ GR_STR_INT,
77835+ GR_TWO_STR_INT,
77836+ GR_TWO_INT,
77837+ GR_TWO_U64,
77838+ GR_THREE_INT,
77839+ GR_FIVE_INT_TWO_STR,
77840+ GR_TWO_STR,
77841+ GR_THREE_STR,
77842+ GR_FOUR_STR,
77843+ GR_STR_FILENAME,
77844+ GR_FILENAME_STR,
77845+ GR_FILENAME_TWO_INT,
77846+ GR_FILENAME_TWO_INT_STR,
77847+ GR_TEXTREL,
77848+ GR_PTRACE,
77849+ GR_RESOURCE,
77850+ GR_CAP,
77851+ GR_SIG,
77852+ GR_SIG2,
77853+ GR_CRASH1,
77854+ GR_CRASH2,
77855+ GR_PSACCT,
77856+ GR_RWXMAP,
77857+ GR_RWXMAPVMA
77858+};
77859+
77860+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
77861+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
77862+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
77863+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
77864+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
77865+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
77866+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
77867+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
77868+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
77869+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
77870+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
77871+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
77872+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
77873+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
77874+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
77875+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
77876+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
77877+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
77878+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
77879+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
77880+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
77881+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
77882+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
77883+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
77884+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
77885+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
77886+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
77887+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
77888+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
77889+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
77890+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
77891+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
77892+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
77893+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
77894+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
77895+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
77896+
77897+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
77898+
77899+#endif
77900+
77901+#endif
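
security_learn() above uses a GNU C statement expression, ({ ... }), so the lock/log/unlock sequence behaves like a single expression at every call site. A minimal sketch of the construct with the same shape (hypothetical demo; compile with gcc or clang and link with -lpthread):

#include <stdio.h>
#include <pthread.h>

static pthread_rwlock_t demo_lock = PTHREAD_RWLOCK_INITIALIZER;

/* same shape as security_learn(): take lock, emit entry, drop lock */
#define demo_learn(fmt, args...)                 \
({                                               \
    pthread_rwlock_rdlock(&demo_lock);           \
    printf(fmt "\n", ## args);                   \
    pthread_rwlock_unlock(&demo_lock);           \
})

int main(void)
{
    demo_learn("subject %s used resource %d", "/bin/demo", 7);
    return 0;
}
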
77902diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
77903new file mode 100644
77904index 0000000..195cbe4
77905--- /dev/null
77906+++ b/include/linux/grmsg.h
77907@@ -0,0 +1,115 @@
77908+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
77909+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
77910+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
77911+#define GR_STOPMOD_MSG "denied modification of module state by "
77912+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
77913+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
77914+#define GR_IOPERM_MSG "denied use of ioperm() by "
77915+#define GR_IOPL_MSG "denied use of iopl() by "
77916+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
77917+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
77918+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
77919+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
77920+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
77921+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
77922+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
77923+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
77924+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
77925+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
77926+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
77927+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
77928+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
77929+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
77930+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
77931+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
77932+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
77933+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
77934+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
77935+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
77936+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
77937+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
77938+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
77939+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
77940+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
77941+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
77942+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
77943+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
77944+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
77945+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
77946+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
77947+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
77948+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
77949+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
77950+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
77951+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
77952+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
77953+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
77954+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
77955+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
77956+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
77957+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
77958+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
77959+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
77960+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
77961+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
77962+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
77963+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
77964+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
77965+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
77966+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
77967+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
77968+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
77969+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
77970+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
77971+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
77972+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
77973+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
77974+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
77975+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
77976+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
77977+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
77978+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
77979+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
77980+#define GR_FAILFORK_MSG "failed fork with errno %s by "
77981+#define GR_NICE_CHROOT_MSG "denied priority change by "
77982+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
77983+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
77984+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
77985+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
77986+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
77987+#define GR_TIME_MSG "time set by "
77988+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
77989+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
77990+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
77991+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
77992+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
77993+#define GR_BIND_MSG "denied bind() by "
77994+#define GR_CONNECT_MSG "denied connect() by "
77995+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
77996+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
77997+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
77998+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
77999+#define GR_CAP_ACL_MSG "use of %s denied for "
78000+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
78001+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
78002+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
78003+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
78004+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
78005+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
78006+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
78007+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
78008+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
78009+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
78010+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
78011+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
78012+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
78013+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
78014+#define GR_VM86_MSG "denied use of vm86 by "
78015+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
78016+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
78017+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
78018+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
78019+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
78020+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
78021+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
78022+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
78023diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
78024new file mode 100644
78025index 0000000..d8b5b48
78026--- /dev/null
78027+++ b/include/linux/grsecurity.h
78028@@ -0,0 +1,245 @@
78029+#ifndef GR_SECURITY_H
78030+#define GR_SECURITY_H
78031+#include <linux/fs.h>
78032+#include <linux/fs_struct.h>
78033+#include <linux/binfmts.h>
78034+#include <linux/gracl.h>
78035+
78036+/* notify of brain-dead configs */
78037+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78038+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
78039+#endif
78040+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
78041+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
78042+#endif
78043+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
78044+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
78045+#endif
78046+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
78047+#error "CONFIG_PAX enabled, but no PaX options are enabled."
78048+#endif
78049+
78050+int gr_handle_new_usb(void);
78051+
78052+void gr_handle_brute_attach(int dumpable);
78053+void gr_handle_brute_check(void);
78054+void gr_handle_kernel_exploit(void);
78055+
78056+char gr_roletype_to_char(void);
78057+
78058+int gr_acl_enable_at_secure(void);
78059+
78060+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
78061+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
78062+
78063+void gr_del_task_from_ip_table(struct task_struct *p);
78064+
78065+int gr_pid_is_chrooted(struct task_struct *p);
78066+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
78067+int gr_handle_chroot_nice(void);
78068+int gr_handle_chroot_sysctl(const int op);
78069+int gr_handle_chroot_setpriority(struct task_struct *p,
78070+ const int niceval);
78071+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
78072+int gr_handle_chroot_chroot(const struct dentry *dentry,
78073+ const struct vfsmount *mnt);
78074+void gr_handle_chroot_chdir(const struct path *path);
78075+int gr_handle_chroot_chmod(const struct dentry *dentry,
78076+ const struct vfsmount *mnt, const int mode);
78077+int gr_handle_chroot_mknod(const struct dentry *dentry,
78078+ const struct vfsmount *mnt, const int mode);
78079+int gr_handle_chroot_mount(const struct dentry *dentry,
78080+ const struct vfsmount *mnt,
78081+ const char *dev_name);
78082+int gr_handle_chroot_pivot(void);
78083+int gr_handle_chroot_unix(const pid_t pid);
78084+
78085+int gr_handle_rawio(const struct inode *inode);
78086+
78087+void gr_handle_ioperm(void);
78088+void gr_handle_iopl(void);
78089+
78090+umode_t gr_acl_umask(void);
78091+
78092+int gr_tpe_allow(const struct file *file);
78093+
78094+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
78095+void gr_clear_chroot_entries(struct task_struct *task);
78096+
78097+void gr_log_forkfail(const int retval);
78098+void gr_log_timechange(void);
78099+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
78100+void gr_log_chdir(const struct dentry *dentry,
78101+ const struct vfsmount *mnt);
78102+void gr_log_chroot_exec(const struct dentry *dentry,
78103+ const struct vfsmount *mnt);
78104+void gr_log_remount(const char *devname, const int retval);
78105+void gr_log_unmount(const char *devname, const int retval);
78106+void gr_log_mount(const char *from, const char *to, const int retval);
78107+void gr_log_textrel(struct vm_area_struct *vma);
78108+void gr_log_ptgnustack(struct file *file);
78109+void gr_log_rwxmmap(struct file *file);
78110+void gr_log_rwxmprotect(struct vm_area_struct *vma);
78111+
78112+int gr_handle_follow_link(const struct inode *parent,
78113+ const struct inode *inode,
78114+ const struct dentry *dentry,
78115+ const struct vfsmount *mnt);
78116+int gr_handle_fifo(const struct dentry *dentry,
78117+ const struct vfsmount *mnt,
78118+ const struct dentry *dir, const int flag,
78119+ const int acc_mode);
78120+int gr_handle_hardlink(const struct dentry *dentry,
78121+ const struct vfsmount *mnt,
78122+ struct inode *inode,
78123+ const int mode, const struct filename *to);
78124+
78125+int gr_is_capable(const int cap);
78126+int gr_is_capable_nolog(const int cap);
78127+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
78128+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
78129+
78130+void gr_copy_label(struct task_struct *tsk);
78131+void gr_handle_crash(struct task_struct *task, const int sig);
78132+int gr_handle_signal(const struct task_struct *p, const int sig);
78133+int gr_check_crash_uid(const kuid_t uid);
78134+int gr_check_protected_task(const struct task_struct *task);
78135+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
78136+int gr_acl_handle_mmap(const struct file *file,
78137+ const unsigned long prot);
78138+int gr_acl_handle_mprotect(const struct file *file,
78139+ const unsigned long prot);
78140+int gr_check_hidden_task(const struct task_struct *tsk);
78141+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
78142+ const struct vfsmount *mnt);
78143+__u32 gr_acl_handle_utime(const struct dentry *dentry,
78144+ const struct vfsmount *mnt);
78145+__u32 gr_acl_handle_access(const struct dentry *dentry,
78146+ const struct vfsmount *mnt, const int fmode);
78147+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
78148+ const struct vfsmount *mnt, umode_t *mode);
78149+__u32 gr_acl_handle_chown(const struct dentry *dentry,
78150+ const struct vfsmount *mnt);
78151+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
78152+ const struct vfsmount *mnt);
78153+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
78154+ const struct vfsmount *mnt);
78155+int gr_handle_ptrace(struct task_struct *task, const long request);
78156+int gr_handle_proc_ptrace(struct task_struct *task);
78157+__u32 gr_acl_handle_execve(const struct dentry *dentry,
78158+ const struct vfsmount *mnt);
78159+int gr_check_crash_exec(const struct file *filp);
78160+int gr_acl_is_enabled(void);
78161+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
78162+ const kgid_t gid);
78163+int gr_set_proc_label(const struct dentry *dentry,
78164+ const struct vfsmount *mnt,
78165+ const int unsafe_flags);
78166+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
78167+ const struct vfsmount *mnt);
78168+__u32 gr_acl_handle_open(const struct dentry *dentry,
78169+ const struct vfsmount *mnt, int acc_mode);
78170+__u32 gr_acl_handle_creat(const struct dentry *dentry,
78171+ const struct dentry *p_dentry,
78172+ const struct vfsmount *p_mnt,
78173+ int open_flags, int acc_mode, const int imode);
78174+void gr_handle_create(const struct dentry *dentry,
78175+ const struct vfsmount *mnt);
78176+void gr_handle_proc_create(const struct dentry *dentry,
78177+ const struct inode *inode);
78178+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
78179+ const struct dentry *parent_dentry,
78180+ const struct vfsmount *parent_mnt,
78181+ const int mode);
78182+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
78183+ const struct dentry *parent_dentry,
78184+ const struct vfsmount *parent_mnt);
78185+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
78186+ const struct vfsmount *mnt);
78187+void gr_handle_delete(const ino_t ino, const dev_t dev);
78188+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
78189+ const struct vfsmount *mnt);
78190+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
78191+ const struct dentry *parent_dentry,
78192+ const struct vfsmount *parent_mnt,
78193+ const struct filename *from);
78194+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
78195+ const struct dentry *parent_dentry,
78196+ const struct vfsmount *parent_mnt,
78197+ const struct dentry *old_dentry,
78198+ const struct vfsmount *old_mnt, const struct filename *to);
78199+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
78200+int gr_acl_handle_rename(struct dentry *new_dentry,
78201+ struct dentry *parent_dentry,
78202+ const struct vfsmount *parent_mnt,
78203+ struct dentry *old_dentry,
78204+ struct inode *old_parent_inode,
78205+ struct vfsmount *old_mnt, const struct filename *newname);
78206+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
78207+ struct dentry *old_dentry,
78208+ struct dentry *new_dentry,
78209+ struct vfsmount *mnt, const __u8 replace);
78210+__u32 gr_check_link(const struct dentry *new_dentry,
78211+ const struct dentry *parent_dentry,
78212+ const struct vfsmount *parent_mnt,
78213+ const struct dentry *old_dentry,
78214+ const struct vfsmount *old_mnt);
78215+int gr_acl_handle_filldir(const struct file *file, const char *name,
78216+ const unsigned int namelen, const ino_t ino);
78217+
78218+__u32 gr_acl_handle_unix(const struct dentry *dentry,
78219+ const struct vfsmount *mnt);
78220+void gr_acl_handle_exit(void);
78221+void gr_acl_handle_psacct(struct task_struct *task, const long code);
78222+int gr_acl_handle_procpidmem(const struct task_struct *task);
78223+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
78224+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
78225+void gr_audit_ptrace(struct task_struct *task);
78226+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
78227+void gr_put_exec_file(struct task_struct *task);
78228+
78229+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
78230+
78231+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
78232+extern void gr_learn_resource(const struct task_struct *task, const int res,
78233+ const unsigned long wanted, const int gt);
78234+#else
78235+static inline void gr_learn_resource(const struct task_struct *task, const int res,
78236+ const unsigned long wanted, const int gt)
78237+{
78238+}
78239+#endif
78240+
78241+#ifdef CONFIG_GRKERNSEC_RESLOG
78242+extern void gr_log_resource(const struct task_struct *task, const int res,
78243+ const unsigned long wanted, const int gt);
78244+#else
78245+static inline void gr_log_resource(const struct task_struct *task, const int res,
78246+ const unsigned long wanted, const int gt)
78247+{
78248+}
78249+#endif
78250+
78251+#ifdef CONFIG_GRKERNSEC
78252+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
78253+void gr_handle_vm86(void);
78254+void gr_handle_mem_readwrite(u64 from, u64 to);
78255+
78256+void gr_log_badprocpid(const char *entry);
78257+
78258+extern int grsec_enable_dmesg;
78259+extern int grsec_disable_privio;
78260+
78261+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
78262+extern kgid_t grsec_proc_gid;
78263+#endif
78264+
78265+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
78266+extern int grsec_enable_chroot_findtask;
78267+#endif
78268+#ifdef CONFIG_GRKERNSEC_SETXID
78269+extern int grsec_enable_setxid;
78270+#endif
78271+#endif
78272+
78273+#endif
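Two conventions in this header are worth noting. The #error block near the top turns inconsistent Kconfig combinations (a master switch enabled with every backend off) into build failures rather than silently weakened hardening, and the gr_learn_resource/gr_log_resource tail shows the stub idiom that keeps call sites free of #ifdefs. The idiom, generalized (CONFIG_EXAMPLE_FEATURE and example_hook are placeholders):

#ifdef CONFIG_EXAMPLE_FEATURE
extern void example_hook(struct task_struct *task);
#else
static inline void example_hook(struct task_struct *task)
{
	/* feature compiled out: callers still compile, the call is a no-op */
}
#endif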
78274diff --git a/include/linux/grsock.h b/include/linux/grsock.h
78275new file mode 100644
78276index 0000000..e7ffaaf
78277--- /dev/null
78278+++ b/include/linux/grsock.h
78279@@ -0,0 +1,19 @@
78280+#ifndef __GRSOCK_H
78281+#define __GRSOCK_H
78282+
78283+extern void gr_attach_curr_ip(const struct sock *sk);
78284+extern int gr_handle_sock_all(const int family, const int type,
78285+ const int protocol);
78286+extern int gr_handle_sock_server(const struct sockaddr *sck);
78287+extern int gr_handle_sock_server_other(const struct sock *sck);
78288+extern int gr_handle_sock_client(const struct sockaddr *sck);
78289+extern int gr_search_connect(struct socket * sock,
78290+ struct sockaddr_in * addr);
78291+extern int gr_search_bind(struct socket * sock,
78292+ struct sockaddr_in * addr);
78293+extern int gr_search_listen(struct socket * sock);
78294+extern int gr_search_accept(struct socket * sock);
78295+extern int gr_search_socket(const int domain, const int type,
78296+ const int protocol);
78297+
78298+#endif
78299diff --git a/include/linux/highmem.h b/include/linux/highmem.h
78300index 7fb31da..08b5114 100644
78301--- a/include/linux/highmem.h
78302+++ b/include/linux/highmem.h
78303@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
78304 kunmap_atomic(kaddr);
78305 }
78306
78307+static inline void sanitize_highpage(struct page *page)
78308+{
78309+ void *kaddr;
78310+ unsigned long flags;
78311+
78312+ local_irq_save(flags);
78313+ kaddr = kmap_atomic(page);
78314+ clear_page(kaddr);
78315+ kunmap_atomic(kaddr);
78316+ local_irq_restore(flags);
78317+}
78318+
78319 static inline void zero_user_segments(struct page *page,
78320 unsigned start1, unsigned end1,
78321 unsigned start2, unsigned end2)
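sanitize_highpage() is clear_highpage() plus local_irq_save(), so PAX_MEMORY_SANITIZE can scrub page contents on free paths regardless of the caller's interrupt state. A hedged usage sketch (free_one_page_example is a placeholder, not a kernel API):

static void free_one_page_example(struct page *page)
{
	sanitize_highpage(page);	/* zero the page via kmap_atomic, IRQs off */
	__free_page(page);		/* stale data never reaches the allocator */
}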
78322diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
78323index 1c7b89a..7dda400 100644
78324--- a/include/linux/hwmon-sysfs.h
78325+++ b/include/linux/hwmon-sysfs.h
78326@@ -25,7 +25,8 @@
78327 struct sensor_device_attribute{
78328 struct device_attribute dev_attr;
78329 int index;
78330-};
78331+} __do_const;
78332+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
78333 #define to_sensor_dev_attr(_dev_attr) \
78334 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
78335
78336@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
78337 struct device_attribute dev_attr;
78338 u8 index;
78339 u8 nr;
78340-};
78341+} __do_const;
78342+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
78343 #define to_sensor_dev_attr_2(_dev_attr) \
78344 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
78345
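This is the standard constify pattern used throughout the patch: __do_const tells the constify GCC plugin to move all instances of the struct into read-only memory, and the *_no_const typedef is the escape hatch for the rare driver that must fill one in at runtime. A sketch of the split, with placeholder names:

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf);

/* ordinary users: the instance becomes read-only at build time */
static SENSOR_DEVICE_ATTR(temp1_input, 0444, example_show, NULL, 0);

/* runtime-constructed attribute: must use the mutable typedef */
static sensor_device_attribute_no_const example_dyn_attr;

static void example_probe(void)
{
	example_dyn_attr = (struct sensor_device_attribute)
		SENSOR_ATTR(temp2_input, 0444, example_show, NULL, 1);
}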
78346diff --git a/include/linux/i2c.h b/include/linux/i2c.h
78347index d9c8dbd3..def6e5a 100644
78348--- a/include/linux/i2c.h
78349+++ b/include/linux/i2c.h
78350@@ -364,6 +364,7 @@ struct i2c_algorithm {
78351 /* To determine what the adapter supports */
78352 u32 (*functionality) (struct i2c_adapter *);
78353 };
78354+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
78355
78356 /**
78357 * struct i2c_bus_recovery_info - I2C bus recovery information
78358diff --git a/include/linux/i2o.h b/include/linux/i2o.h
78359index d23c3c2..eb63c81 100644
78360--- a/include/linux/i2o.h
78361+++ b/include/linux/i2o.h
78362@@ -565,7 +565,7 @@ struct i2o_controller {
78363 struct i2o_device *exec; /* Executive */
78364 #if BITS_PER_LONG == 64
78365 spinlock_t context_list_lock; /* lock for context_list */
78366- atomic_t context_list_counter; /* needed for unique contexts */
78367+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
78368 struct list_head context_list; /* list of context id's
78369 and pointers */
78370 #endif
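Under PAX_REFCOUNT, plain atomic_t is instrumented to trap on overflow, closing the classic refcount-overflow-to-use-after-free pattern; counters that are allowed to wrap, like this context-ID source, are converted to atomic_unchecked_t to opt out. A hedged sketch of the distinction (example names are placeholders):

static atomic_unchecked_t example_ctx_counter = ATOMIC_INIT(0);

static u32 example_next_ctx(void)
{
	/* wrapping at 2^32 is harmless for an ID generator; with a
	 * checked atomic_t this increment would eventually trap */
	return (u32)atomic_inc_return_unchecked(&example_ctx_counter);
}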
78371diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
78372index aff7ad8..3942bbd 100644
78373--- a/include/linux/if_pppox.h
78374+++ b/include/linux/if_pppox.h
78375@@ -76,7 +76,7 @@ struct pppox_proto {
78376 int (*ioctl)(struct socket *sock, unsigned int cmd,
78377 unsigned long arg);
78378 struct module *owner;
78379-};
78380+} __do_const;
78381
78382 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
78383 extern void unregister_pppox_proto(int proto_num);
78384diff --git a/include/linux/init.h b/include/linux/init.h
78385index 8e68a64..3f977a0 100644
78386--- a/include/linux/init.h
78387+++ b/include/linux/init.h
78388@@ -37,9 +37,17 @@
78389 * section.
78390 */
78391
78392+#define add_init_latent_entropy __latent_entropy
78393+
78394+#ifdef CONFIG_MEMORY_HOTPLUG
78395+#define add_meminit_latent_entropy
78396+#else
78397+#define add_meminit_latent_entropy __latent_entropy
78398+#endif
78399+
78400 /* These are for everybody (although not all archs will actually
78401 discard it in modules) */
78402-#define __init __section(.init.text) __cold notrace
78403+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
78404 #define __initdata __section(.init.data)
78405 #define __initconst __constsection(.init.rodata)
78406 #define __exitdata __section(.exit.data)
78407@@ -100,7 +108,7 @@
78408 #define __cpuexitconst
78409
78410 /* Used for MEMORY_HOTPLUG */
78411-#define __meminit __section(.meminit.text) __cold notrace
78412+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
78413 #define __meminitdata __section(.meminit.data)
78414 #define __meminitconst __constsection(.meminit.rodata)
78415 #define __memexit __section(.memexit.text) __exitused __cold notrace
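This wires the latent_entropy GCC plugin into __init: boot-time-only code gets instrumented to mix per-build random constants into an entropy pool, cheaply seeding early randomness. .meminit text is annotated only when memory hotplug is off, since with hotplug that code can run again long after boot. Roughly, and only roughly (this is a model of the plugin's effect, not its literal output, and the pool variable name is an assumption):

extern volatile u64 latent_entropy;		/* global pool the plugin updates */

static void __init example_boot_step(void)	/* __init now implies __latent_entropy */
{
	/* conceptually inserted by the plugin: */
	u64 mix = 0x9e3779b97f4a7c15ULL;	/* stands in for a per-build random constant */
	latent_entropy ^= mix;
}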
78416diff --git a/include/linux/init_task.h b/include/linux/init_task.h
78417index b0ed422..d79ea23 100644
78418--- a/include/linux/init_task.h
78419+++ b/include/linux/init_task.h
78420@@ -154,6 +154,12 @@ extern struct task_group root_task_group;
78421
78422 #define INIT_TASK_COMM "swapper"
78423
78424+#ifdef CONFIG_X86
78425+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
78426+#else
78427+#define INIT_TASK_THREAD_INFO
78428+#endif
78429+
78430 /*
78431 * INIT_TASK is used to set up the first task table, touch at
78432 * your own risk!. Base=0, limit=0x1fffff (=2MB)
78433@@ -193,6 +199,7 @@ extern struct task_group root_task_group;
78434 RCU_POINTER_INITIALIZER(cred, &init_cred), \
78435 .comm = INIT_TASK_COMM, \
78436 .thread = INIT_THREAD, \
78437+ INIT_TASK_THREAD_INFO \
78438 .fs = &init_fs, \
78439 .files = &init_files, \
78440 .signal = &init_signals, \
78441diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
78442index db43b58..5d5084b 100644
78443--- a/include/linux/interrupt.h
78444+++ b/include/linux/interrupt.h
78445@@ -360,7 +360,7 @@ enum
78446 /* map softirq index to softirq name. update 'softirq_to_name' in
78447 * kernel/softirq.c when adding a new softirq.
78448 */
78449-extern char *softirq_to_name[NR_SOFTIRQS];
78450+extern const char * const softirq_to_name[NR_SOFTIRQS];
78451
78452 /* softirq mask and active fields moved to irq_cpustat_t in
78453 * asm/hardirq.h to get better cache usage. KAO
78454@@ -368,8 +368,8 @@ extern char *softirq_to_name[NR_SOFTIRQS];
78455
78456 struct softirq_action
78457 {
78458- void (*action)(struct softirq_action *);
78459-};
78460+ void (*action)(void);
78461+} __no_const;
78462
78463 asmlinkage void do_softirq(void);
78464 asmlinkage void __do_softirq(void);
78465@@ -383,7 +383,7 @@ static inline void do_softirq_own_stack(void)
78466 }
78467 #endif
78468
78469-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
78470+extern void open_softirq(int nr, void (*action)(void));
78471 extern void softirq_init(void);
78472 extern void __raise_softirq_irqoff(unsigned int nr);
78473
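Softirq handlers never used their struct softirq_action * argument (it only pointed back at the handler's own slot), so the signature is narrowed to void (*)(void); the table keeps __no_const because open_softirq() fills it at runtime. Usage under the new signature (the handler name is a placeholder and the slot shown is illustrative only):

static void example_action(void)
{
	/* bottom-half work; note: no softirq_action argument anymore */
}

static void __init example_softirq_setup(void)
{
	open_softirq(NET_RX_SOFTIRQ, example_action);	/* illustrative slot */
}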
78474diff --git a/include/linux/iommu.h b/include/linux/iommu.h
78475index a444c79..8c41ea9 100644
78476--- a/include/linux/iommu.h
78477+++ b/include/linux/iommu.h
78478@@ -130,7 +130,7 @@ struct iommu_ops {
78479 u32 (*domain_get_windows)(struct iommu_domain *domain);
78480
78481 unsigned long pgsize_bitmap;
78482-};
78483+} __do_const;
78484
78485 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
78486 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
78487diff --git a/include/linux/ioport.h b/include/linux/ioport.h
78488index 89b7c24..382af74 100644
78489--- a/include/linux/ioport.h
78490+++ b/include/linux/ioport.h
78491@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
78492 int adjust_resource(struct resource *res, resource_size_t start,
78493 resource_size_t size);
78494 resource_size_t resource_alignment(struct resource *res);
78495-static inline resource_size_t resource_size(const struct resource *res)
78496+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
78497 {
78498 return res->end - res->start + 1;
78499 }
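__intentional_overflow(-1) whitelists the return value for the size_overflow GCC plugin: end - start + 1 wraps by design for a resource spanning the whole address space, and that wrap must not be reported. Concretely, where resource_size_t is 32 bits:

static bool example_is_whole_space(const struct resource *res)
{
	/* start = 0x0, end = 0xffffffff: 0xffffffff - 0 + 1 == 0 (mod 2^32),
	 * exactly the overflow the annotation declares intentional */
	return resource_size(res) == 0;
}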
78500diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
78501index f6c82de..de8619e 100644
78502--- a/include/linux/ipc_namespace.h
78503+++ b/include/linux/ipc_namespace.h
78504@@ -70,7 +70,7 @@ struct ipc_namespace {
78505 struct user_namespace *user_ns;
78506
78507 unsigned int proc_inum;
78508-};
78509+} __randomize_layout;
78510
78511 extern struct ipc_namespace init_ipc_ns;
78512 extern atomic_t nr_ipc_ns;
78513diff --git a/include/linux/irq.h b/include/linux/irq.h
78514index 7dc1003..407327b 100644
78515--- a/include/linux/irq.h
78516+++ b/include/linux/irq.h
78517@@ -338,7 +338,8 @@ struct irq_chip {
78518 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
78519
78520 unsigned long flags;
78521-};
78522+} __do_const;
78523+typedef struct irq_chip __no_const irq_chip_no_const;
78524
78525 /*
78526 * irq_chip specific flags
78527diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
78528index cac496b..ffa0567 100644
78529--- a/include/linux/irqchip/arm-gic.h
78530+++ b/include/linux/irqchip/arm-gic.h
78531@@ -61,9 +61,11 @@
78532
78533 #ifndef __ASSEMBLY__
78534
78535+#include <linux/irq.h>
78536+
78537 struct device_node;
78538
78539-extern struct irq_chip gic_arch_extn;
78540+extern irq_chip_no_const gic_arch_extn;
78541
78542 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
78543 u32 offset, struct device_node *);
78544diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
78545index d235e88..8ccbe74 100644
78546--- a/include/linux/jiffies.h
78547+++ b/include/linux/jiffies.h
78548@@ -292,14 +292,14 @@ extern unsigned long preset_lpj;
78549 /*
78550 * Convert various time units to each other:
78551 */
78552-extern unsigned int jiffies_to_msecs(const unsigned long j);
78553-extern unsigned int jiffies_to_usecs(const unsigned long j);
78554-extern unsigned long msecs_to_jiffies(const unsigned int m);
78555-extern unsigned long usecs_to_jiffies(const unsigned int u);
78556-extern unsigned long timespec_to_jiffies(const struct timespec *value);
78557+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
78558+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
78559+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
78560+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
78561+extern unsigned long timespec_to_jiffies(const struct timespec *value) __intentional_overflow(-1);
78562 extern void jiffies_to_timespec(const unsigned long jiffies,
78563 struct timespec *value);
78564-extern unsigned long timeval_to_jiffies(const struct timeval *value);
78565+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
78566 extern void jiffies_to_timeval(const unsigned long jiffies,
78567 struct timeval *value);
78568
78569diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
78570index 6883e19..e854fcb 100644
78571--- a/include/linux/kallsyms.h
78572+++ b/include/linux/kallsyms.h
78573@@ -15,7 +15,8 @@
78574
78575 struct module;
78576
78577-#ifdef CONFIG_KALLSYMS
78578+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
78579+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
78580 /* Lookup the address for a symbol. Returns 0 if not found. */
78581 unsigned long kallsyms_lookup_name(const char *name);
78582
78583@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
78584 /* Stupid that this does nothing, but I didn't create this mess. */
78585 #define __print_symbol(fmt, addr)
78586 #endif /*CONFIG_KALLSYMS*/
78587+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
78588+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
78589+extern unsigned long kallsyms_lookup_name(const char *name);
78590+extern void __print_symbol(const char *fmt, unsigned long address);
78591+extern int sprint_backtrace(char *buffer, unsigned long address);
78592+extern int sprint_symbol(char *buffer, unsigned long address);
78593+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
78594+const char *kallsyms_lookup(unsigned long addr,
78595+ unsigned long *symbolsize,
78596+ unsigned long *offset,
78597+ char **modname, char *namebuf);
78598+extern int kallsyms_lookup_size_offset(unsigned long addr,
78599+ unsigned long *symbolsize,
78600+ unsigned long *offset);
78601+#endif
78602
78603 /* This macro allows us to keep printk typechecking */
78604 static __printf(1, 2)
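With GRKERNSEC_HIDESYM the usual kallsyms prototypes are hidden from most of the tree so code cannot casually resolve or print symbol addresses; the files listed in the comment above opt back in by defining the guard before the include:

/* opt-in pattern used by the whitelisted files (kallsyms.c, vsnprintf.c, ...) */
#define __INCLUDED_BY_HIDESYM 1
#include <linux/kallsyms.h>
/* the real extern prototypes are now visible instead of the stubs */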
78605diff --git a/include/linux/key-type.h b/include/linux/key-type.h
78606index a74c3a8..28d3f21 100644
78607--- a/include/linux/key-type.h
78608+++ b/include/linux/key-type.h
78609@@ -131,7 +131,7 @@ struct key_type {
78610 /* internal fields */
78611 struct list_head link; /* link in types list */
78612 struct lock_class_key lock_class; /* key->sem lock class */
78613-};
78614+} __do_const;
78615
78616 extern struct key_type key_type_keyring;
78617
78618diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
78619index dfb4f2f..7927e62 100644
78620--- a/include/linux/kgdb.h
78621+++ b/include/linux/kgdb.h
78622@@ -52,7 +52,7 @@ extern int kgdb_connected;
78623 extern int kgdb_io_module_registered;
78624
78625 extern atomic_t kgdb_setting_breakpoint;
78626-extern atomic_t kgdb_cpu_doing_single_step;
78627+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
78628
78629 extern struct task_struct *kgdb_usethread;
78630 extern struct task_struct *kgdb_contthread;
78631@@ -254,7 +254,7 @@ struct kgdb_arch {
78632 void (*correct_hw_break)(void);
78633
78634 void (*enable_nmi)(bool on);
78635-};
78636+} __do_const;
78637
78638 /**
78639 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
78640@@ -279,7 +279,7 @@ struct kgdb_io {
78641 void (*pre_exception) (void);
78642 void (*post_exception) (void);
78643 int is_console;
78644-};
78645+} __do_const;
78646
78647 extern struct kgdb_arch arch_kgdb_ops;
78648
78649diff --git a/include/linux/kmod.h b/include/linux/kmod.h
78650index 0555cc6..40116ce 100644
78651--- a/include/linux/kmod.h
78652+++ b/include/linux/kmod.h
78653@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
78654 * usually useless though. */
78655 extern __printf(2, 3)
78656 int __request_module(bool wait, const char *name, ...);
78657+extern __printf(3, 4)
78658+int ___request_module(bool wait, char *param_name, const char *name, ...);
78659 #define request_module(mod...) __request_module(true, mod)
78660 #define request_module_nowait(mod...) __request_module(false, mod)
78661 #define try_then_request_module(x, mod...) \
78662@@ -57,6 +59,9 @@ struct subprocess_info {
78663 struct work_struct work;
78664 struct completion *complete;
78665 char *path;
78666+#ifdef CONFIG_GRKERNSEC
78667+ char *origpath;
78668+#endif
78669 char **argv;
78670 char **envp;
78671 int wait;
78672diff --git a/include/linux/kobject.h b/include/linux/kobject.h
78673index e7ba650..0af3acb 100644
78674--- a/include/linux/kobject.h
78675+++ b/include/linux/kobject.h
78676@@ -116,7 +116,7 @@ struct kobj_type {
78677 struct attribute **default_attrs;
78678 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
78679 const void *(*namespace)(struct kobject *kobj);
78680-};
78681+} __do_const;
78682
78683 struct kobj_uevent_env {
78684 char *envp[UEVENT_NUM_ENVP];
78685@@ -139,6 +139,7 @@ struct kobj_attribute {
78686 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
78687 const char *buf, size_t count);
78688 };
78689+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
78690
78691 extern const struct sysfs_ops kobj_sysfs_ops;
78692
78693@@ -166,7 +167,7 @@ struct kset {
78694 spinlock_t list_lock;
78695 struct kobject kobj;
78696 const struct kset_uevent_ops *uevent_ops;
78697-};
78698+} __randomize_layout;
78699
78700 extern void kset_init(struct kset *kset);
78701 extern int __must_check kset_register(struct kset *kset);
78702diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
78703index df32d25..fb52e27 100644
78704--- a/include/linux/kobject_ns.h
78705+++ b/include/linux/kobject_ns.h
78706@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
78707 const void *(*netlink_ns)(struct sock *sk);
78708 const void *(*initial_ns)(void);
78709 void (*drop_ns)(void *);
78710-};
78711+} __do_const;
78712
78713 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
78714 int kobj_ns_type_registered(enum kobj_ns_type type);
78715diff --git a/include/linux/kref.h b/include/linux/kref.h
78716index 484604d..0f6c5b6 100644
78717--- a/include/linux/kref.h
78718+++ b/include/linux/kref.h
78719@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
78720 static inline int kref_sub(struct kref *kref, unsigned int count,
78721 void (*release)(struct kref *kref))
78722 {
78723- WARN_ON(release == NULL);
78724+ BUG_ON(release == NULL);
78725
78726 if (atomic_sub_and_test((int) count, &kref->refcount)) {
78727 release(kref);
78728diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
78729index 9523d2a..16c0424 100644
78730--- a/include/linux/kvm_host.h
78731+++ b/include/linux/kvm_host.h
78732@@ -457,7 +457,7 @@ static inline void kvm_irqfd_exit(void)
78733 {
78734 }
78735 #endif
78736-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
78737+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
78738 struct module *module);
78739 void kvm_exit(void);
78740
78741@@ -632,7 +632,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
78742 struct kvm_guest_debug *dbg);
78743 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
78744
78745-int kvm_arch_init(void *opaque);
78746+int kvm_arch_init(const void *opaque);
78747 void kvm_arch_exit(void);
78748
78749 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
78750diff --git a/include/linux/libata.h b/include/linux/libata.h
78751index bec6dbe..2873d64 100644
78752--- a/include/linux/libata.h
78753+++ b/include/linux/libata.h
78754@@ -975,7 +975,7 @@ struct ata_port_operations {
78755 * fields must be pointers.
78756 */
78757 const struct ata_port_operations *inherits;
78758-};
78759+} __do_const;
78760
78761 struct ata_port_info {
78762 unsigned long flags;
78763diff --git a/include/linux/linkage.h b/include/linux/linkage.h
78764index d3e8ad2..a949f68 100644
78765--- a/include/linux/linkage.h
78766+++ b/include/linux/linkage.h
78767@@ -31,6 +31,7 @@
78768 #endif
78769
78770 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
78771+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
78772 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
78773
78774 /*
78775diff --git a/include/linux/list.h b/include/linux/list.h
78776index ef95941..82db65a 100644
78777--- a/include/linux/list.h
78778+++ b/include/linux/list.h
78779@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
78780 extern void list_del(struct list_head *entry);
78781 #endif
78782
78783+extern void __pax_list_add(struct list_head *new,
78784+ struct list_head *prev,
78785+ struct list_head *next);
78786+static inline void pax_list_add(struct list_head *new, struct list_head *head)
78787+{
78788+ __pax_list_add(new, head, head->next);
78789+}
78790+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
78791+{
78792+ __pax_list_add(new, head->prev, head);
78793+}
78794+extern void pax_list_del(struct list_head *entry);
78795+
78796 /**
78797 * list_replace - replace old entry by new one
78798 * @old : the element to be replaced
78799@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
78800 INIT_LIST_HEAD(entry);
78801 }
78802
78803+extern void pax_list_del_init(struct list_head *entry);
78804+
78805 /**
78806 * list_move - delete from one list and add as another's head
78807 * @list: the entry to move
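The pax_list_* variants exist for lists whose nodes sit in memory that KERNEXEC or the constify plugin has made read-only: the out-of-line helpers briefly lift write protection around the pointer updates. A hedged sketch of the helper (the real definition, in lib/list_debug.c elsewhere in this patch, also retains the list-corruption checks):

void __pax_list_add(struct list_head *new, struct list_head *prev,
		    struct list_head *next)
{
	pax_open_kernel();	/* temporarily allow writes to protected memory */
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
	pax_close_kernel();
}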
78808diff --git a/include/linux/math64.h b/include/linux/math64.h
78809index c45c089..298841c 100644
78810--- a/include/linux/math64.h
78811+++ b/include/linux/math64.h
78812@@ -15,7 +15,7 @@
78813 * This is commonly provided by 32bit archs to provide an optimized 64bit
78814 * divide.
78815 */
78816-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
78817+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
78818 {
78819 *remainder = dividend % divisor;
78820 return dividend / divisor;
78821@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
78822 /**
78823 * div64_u64 - unsigned 64bit divide with 64bit divisor
78824 */
78825-static inline u64 div64_u64(u64 dividend, u64 divisor)
78826+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
78827 {
78828 return dividend / divisor;
78829 }
78830@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
78831 #define div64_ul(x, y) div_u64((x), (y))
78832
78833 #ifndef div_u64_rem
78834-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
78835+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
78836 {
78837 *remainder = do_div(dividend, divisor);
78838 return dividend;
78839@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
78840 #endif
78841
78842 #ifndef div64_u64
78843-extern u64 div64_u64(u64 dividend, u64 divisor);
78844+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
78845 #endif
78846
78847 #ifndef div64_s64
78848@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
78849 * divide.
78850 */
78851 #ifndef div_u64
78852-static inline u64 div_u64(u64 dividend, u32 divisor)
78853+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
78854 {
78855 u32 remainder;
78856 return div_u64_rem(dividend, divisor, &remainder);
78857diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
78858index 9fe426b..8148be6 100644
78859--- a/include/linux/mempolicy.h
78860+++ b/include/linux/mempolicy.h
78861@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
78862 }
78863
78864 #define vma_policy(vma) ((vma)->vm_policy)
78865+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
78866+{
78867+ vma->vm_policy = pol;
78868+}
78869
78870 static inline void mpol_get(struct mempolicy *pol)
78871 {
78872@@ -241,6 +245,9 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
78873 }
78874
78875 #define vma_policy(vma) NULL
78876+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
78877+{
78878+}
78879
78880 static inline int
78881 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
78882diff --git a/include/linux/mm.h b/include/linux/mm.h
78883index 9fac6dd..158ca43 100644
78884--- a/include/linux/mm.h
78885+++ b/include/linux/mm.h
78886@@ -117,6 +117,11 @@ extern unsigned int kobjsize(const void *objp);
78887 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
78888 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
78889 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
78890+
78891+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
78892+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
78893+#endif
78894+
78895 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
78896
78897 #ifdef CONFIG_MEM_SOFT_DIRTY
78898@@ -219,8 +224,8 @@ struct vm_operations_struct {
78899 /* called by access_process_vm when get_user_pages() fails, typically
78900 * for use by special VMAs that can switch between memory and hardware
78901 */
78902- int (*access)(struct vm_area_struct *vma, unsigned long addr,
78903- void *buf, int len, int write);
78904+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
78905+ void *buf, size_t len, int write);
78906 #ifdef CONFIG_NUMA
78907 /*
78908 * set_policy() op must add a reference to any non-NULL @new mempolicy
78909@@ -250,6 +255,7 @@ struct vm_operations_struct {
78910 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
78911 unsigned long size, pgoff_t pgoff);
78912 };
78913+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
78914
78915 struct mmu_gather;
78916 struct inode;
78917@@ -1064,8 +1070,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
78918 unsigned long *pfn);
78919 int follow_phys(struct vm_area_struct *vma, unsigned long address,
78920 unsigned int flags, unsigned long *prot, resource_size_t *phys);
78921-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
78922- void *buf, int len, int write);
78923+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
78924+ void *buf, size_t len, int write);
78925
78926 static inline void unmap_shared_mapping_range(struct address_space *mapping,
78927 loff_t const holebegin, loff_t const holelen)
78928@@ -1104,9 +1110,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
78929 }
78930 #endif
78931
78932-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
78933-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
78934- void *buf, int len, int write);
78935+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
78936+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
78937+ void *buf, size_t len, int write);
78938
78939 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
78940 unsigned long start, unsigned long nr_pages,
78941@@ -1138,34 +1144,6 @@ int set_page_dirty(struct page *page);
78942 int set_page_dirty_lock(struct page *page);
78943 int clear_page_dirty_for_io(struct page *page);
78944
78945-/* Is the vma a continuation of the stack vma above it? */
78946-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
78947-{
78948- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
78949-}
78950-
78951-static inline int stack_guard_page_start(struct vm_area_struct *vma,
78952- unsigned long addr)
78953-{
78954- return (vma->vm_flags & VM_GROWSDOWN) &&
78955- (vma->vm_start == addr) &&
78956- !vma_growsdown(vma->vm_prev, addr);
78957-}
78958-
78959-/* Is the vma a continuation of the stack vma below it? */
78960-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
78961-{
78962- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
78963-}
78964-
78965-static inline int stack_guard_page_end(struct vm_area_struct *vma,
78966- unsigned long addr)
78967-{
78968- return (vma->vm_flags & VM_GROWSUP) &&
78969- (vma->vm_end == addr) &&
78970- !vma_growsup(vma->vm_next, addr);
78971-}
78972-
78973 extern pid_t
78974 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
78975
78976@@ -1265,6 +1243,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
78977 }
78978 #endif
78979
78980+#ifdef CONFIG_MMU
78981+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
78982+#else
78983+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
78984+{
78985+ return __pgprot(0);
78986+}
78987+#endif
78988+
78989 int vma_wants_writenotify(struct vm_area_struct *vma);
78990
78991 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
78992@@ -1283,8 +1270,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
78993 {
78994 return 0;
78995 }
78996+
78997+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
78998+ unsigned long address)
78999+{
79000+ return 0;
79001+}
79002 #else
79003 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
79004+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
79005 #endif
79006
79007 #ifdef __PAGETABLE_PMD_FOLDED
79008@@ -1293,8 +1287,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
79009 {
79010 return 0;
79011 }
79012+
79013+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
79014+ unsigned long address)
79015+{
79016+ return 0;
79017+}
79018 #else
79019 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
79020+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
79021 #endif
79022
79023 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
79024@@ -1312,11 +1313,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
79025 NULL: pud_offset(pgd, address);
79026 }
79027
79028+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
79029+{
79030+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
79031+ NULL: pud_offset(pgd, address);
79032+}
79033+
79034 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
79035 {
79036 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
79037 NULL: pmd_offset(pud, address);
79038 }
79039+
79040+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
79041+{
79042+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
79043+ NULL: pmd_offset(pud, address);
79044+}
79045 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
79046
79047 #if USE_SPLIT_PTE_PTLOCKS
79048@@ -1694,7 +1707,7 @@ extern int install_special_mapping(struct mm_struct *mm,
79049 unsigned long addr, unsigned long len,
79050 unsigned long flags, struct page **pages);
79051
79052-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
79053+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
79054
79055 extern unsigned long mmap_region(struct file *file, unsigned long addr,
79056 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
79057@@ -1702,6 +1715,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
79058 unsigned long len, unsigned long prot, unsigned long flags,
79059 unsigned long pgoff, unsigned long *populate);
79060 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
79061+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
79062
79063 #ifdef CONFIG_MMU
79064 extern int __mm_populate(unsigned long addr, unsigned long len,
79065@@ -1730,10 +1744,11 @@ struct vm_unmapped_area_info {
79066 unsigned long high_limit;
79067 unsigned long align_mask;
79068 unsigned long align_offset;
79069+ unsigned long threadstack_offset;
79070 };
79071
79072-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
79073-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
79074+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
79075+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
79076
79077 /*
79078 * Search for an unmapped address range.
79079@@ -1745,7 +1760,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
79080 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
79081 */
79082 static inline unsigned long
79083-vm_unmapped_area(struct vm_unmapped_area_info *info)
79084+vm_unmapped_area(const struct vm_unmapped_area_info *info)
79085 {
79086 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
79087 return unmapped_area(info);
79088@@ -1808,6 +1823,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
79089 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
79090 struct vm_area_struct **pprev);
79091
79092+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
79093+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
79094+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
79095+
79096 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
79097 NULL if none. Assume start_addr < end_addr. */
79098 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
79099@@ -1836,15 +1855,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
79100 return vma;
79101 }
79102
79103-#ifdef CONFIG_MMU
79104-pgprot_t vm_get_page_prot(unsigned long vm_flags);
79105-#else
79106-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
79107-{
79108- return __pgprot(0);
79109-}
79110-#endif
79111-
79112 #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
79113 unsigned long change_prot_numa(struct vm_area_struct *vma,
79114 unsigned long start, unsigned long end);
79115@@ -1896,6 +1906,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
79116 static inline void vm_stat_account(struct mm_struct *mm,
79117 unsigned long flags, struct file *file, long pages)
79118 {
79119+
79120+#ifdef CONFIG_PAX_RANDMMAP
79121+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
79122+#endif
79123+
79124 mm->total_vm += pages;
79125 }
79126 #endif /* CONFIG_PROC_FS */
79127@@ -1977,7 +1992,7 @@ extern int unpoison_memory(unsigned long pfn);
79128 extern int sysctl_memory_failure_early_kill;
79129 extern int sysctl_memory_failure_recovery;
79130 extern void shake_page(struct page *p, int access);
79131-extern atomic_long_t num_poisoned_pages;
79132+extern atomic_long_unchecked_t num_poisoned_pages;
79133 extern int soft_offline_page(struct page *page, int flags);
79134
79135 extern void dump_page(struct page *page);
79136@@ -2014,5 +2029,11 @@ void __init setup_nr_node_ids(void);
79137 static inline void setup_nr_node_ids(void) {}
79138 #endif
79139
79140+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
79141+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
79142+#else
79143+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
79144+#endif
79145+
79146 #endif /* __KERNEL__ */
79147 #endif /* _LINUX_MM_H */
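A recurring theme in this mm.h hunk is widening buffer lengths from int to size_t/ssize_t in access_process_vm(), access_remote_vm() and the ->access() vma op: with int, a length of 2 GiB or more goes negative and can slip past sign-based bounds checks. The failure mode, in isolation:

static void example_truncation(void)
{
	size_t len = 0x80000000UL;	/* 2 GiB request */
	int as_int = (int)len;		/* typically INT_MIN: negative */

	/* any `if (as_int > limit)` guard now passes for free */
	(void)as_int;
}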
79148diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
79149index 290901a..e99b01c 100644
79150--- a/include/linux/mm_types.h
79151+++ b/include/linux/mm_types.h
79152@@ -307,7 +307,9 @@ struct vm_area_struct {
79153 #ifdef CONFIG_NUMA
79154 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
79155 #endif
79156-};
79157+
79158+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
79159+} __randomize_layout;
79160
79161 struct core_thread {
79162 struct task_struct *task;
79163@@ -453,7 +455,25 @@ struct mm_struct {
79164 bool tlb_flush_pending;
79165 #endif
79166 struct uprobes_state uprobes_state;
79167-};
79168+
79169+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
79170+ unsigned long pax_flags;
79171+#endif
79172+
79173+#ifdef CONFIG_PAX_DLRESOLVE
79174+ unsigned long call_dl_resolve;
79175+#endif
79176+
79177+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
79178+ unsigned long call_syscall;
79179+#endif
79180+
79181+#ifdef CONFIG_PAX_ASLR
79182+ unsigned long delta_mmap; /* randomized offset */
79183+ unsigned long delta_stack; /* randomized offset */
79184+#endif
79185+
79186+} __randomize_layout;
79187
79188 static inline void mm_init_cpumask(struct mm_struct *mm)
79189 {
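The new mm_struct fields carry per-address-space PaX state: pax_flags records which features apply, and delta_mmap/delta_stack hold the random offsets PAX_RANDMMAP picks once per exec. A hedged sketch of how the arch layout code (patched elsewhere in this diff) consumes them:

static unsigned long example_mmap_base(const struct mm_struct *mm)
{
	unsigned long base = TASK_UNMAPPED_BASE;

#ifdef CONFIG_PAX_RANDMMAP
	if (mm->pax_flags & MF_PAX_RANDMMAP)
		base += mm->delta_mmap;		/* randomized once per exec */
#endif
	return base;
}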
79190diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
79191index c5d5278..f0b68c8 100644
79192--- a/include/linux/mmiotrace.h
79193+++ b/include/linux/mmiotrace.h
79194@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
79195 /* Called from ioremap.c */
79196 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
79197 void __iomem *addr);
79198-extern void mmiotrace_iounmap(volatile void __iomem *addr);
79199+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
79200
79201 /* For anyone to insert markers. Remember trailing newline. */
79202 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
79203@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
79204 {
79205 }
79206
79207-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
79208+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
79209 {
79210 }
79211
79212diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
79213index bd791e4..8617c34f 100644
79214--- a/include/linux/mmzone.h
79215+++ b/include/linux/mmzone.h
79216@@ -396,7 +396,7 @@ struct zone {
79217 unsigned long flags; /* zone flags, see below */
79218
79219 /* Zone statistics */
79220- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
79221+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
79222
79223 /*
79224 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
79225diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
79226index 45e9214..a7227d6 100644
79227--- a/include/linux/mod_devicetable.h
79228+++ b/include/linux/mod_devicetable.h
79229@@ -13,7 +13,7 @@
79230 typedef unsigned long kernel_ulong_t;
79231 #endif
79232
79233-#define PCI_ANY_ID (~0)
79234+#define PCI_ANY_ID ((__u16)~0)
79235
79236 struct pci_device_id {
79237 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
79238@@ -139,7 +139,7 @@ struct usb_device_id {
79239 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
79240 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
79241
79242-#define HID_ANY_ID (~0)
79243+#define HID_ANY_ID (~0U)
79244 #define HID_BUS_ANY 0xffff
79245 #define HID_GROUP_ANY 0x0000
79246
79247@@ -467,7 +467,7 @@ struct dmi_system_id {
79248 const char *ident;
79249 struct dmi_strmatch matches[4];
79250 void *driver_data;
79251-};
79252+} __do_const;
79253 /*
79254 * struct dmi_device_id appears during expansion of
79255 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
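Both wildcard changes give the previously untyped ~0 an explicit width matching the 16-bit IDs it stands in for, avoiding sign-extension surprises under C's integer promotions (and, presumably, keeping the overflow plugin quiet about signed/unsigned mixes). The promotion trap, in isolation:

static bool example_matches(u16 dev_vendor)
{
	/* with a bare (~0) wildcard: dev_vendor promotes to int (0..0xffff),
	 * ~0 is int -1 (0xffffffff) -- the comparison can never be true */
	return dev_vendor == (u16)~0;	/* 0xffff: compares as intended */
}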
79256diff --git a/include/linux/module.h b/include/linux/module.h
79257index 15cd6b1..f6e2e6a 100644
79258--- a/include/linux/module.h
79259+++ b/include/linux/module.h
79260@@ -17,9 +17,11 @@
79261 #include <linux/moduleparam.h>
79262 #include <linux/tracepoint.h>
79263 #include <linux/export.h>
79264+#include <linux/fs.h>
79265
79266 #include <linux/percpu.h>
79267 #include <asm/module.h>
79268+#include <asm/pgtable.h>
79269
79270 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
79271 #define MODULE_SIG_STRING "~Module signature appended~\n"
79272@@ -43,7 +45,7 @@ struct module_kobject {
79273 struct kobject *drivers_dir;
79274 struct module_param_attrs *mp;
79275 struct completion *kobj_completion;
79276-};
79277+} __randomize_layout;
79278
79279 struct module_attribute {
79280 struct attribute attr;
79281@@ -55,12 +57,13 @@ struct module_attribute {
79282 int (*test)(struct module *);
79283 void (*free)(struct module *);
79284 };
79285+typedef struct module_attribute __no_const module_attribute_no_const;
79286
79287 struct module_version_attribute {
79288 struct module_attribute mattr;
79289 const char *module_name;
79290 const char *version;
79291-} __attribute__ ((__aligned__(sizeof(void *))));
79292+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
79293
79294 extern ssize_t __modver_version_show(struct module_attribute *,
79295 struct module_kobject *, char *);
79296@@ -238,7 +241,7 @@ struct module
79297
79298 /* Sysfs stuff. */
79299 struct module_kobject mkobj;
79300- struct module_attribute *modinfo_attrs;
79301+ module_attribute_no_const *modinfo_attrs;
79302 const char *version;
79303 const char *srcversion;
79304 struct kobject *holders_dir;
79305@@ -287,19 +290,16 @@ struct module
79306 int (*init)(void);
79307
79308 /* If this is non-NULL, vfree after init() returns */
79309- void *module_init;
79310+ void *module_init_rx, *module_init_rw;
79311
79312 /* Here is the actual code + data, vfree'd on unload. */
79313- void *module_core;
79314+ void *module_core_rx, *module_core_rw;
79315
79316 /* Here are the sizes of the init and core sections */
79317- unsigned int init_size, core_size;
79318+ unsigned int init_size_rw, core_size_rw;
79319
79320 /* The size of the executable code in each section. */
79321- unsigned int init_text_size, core_text_size;
79322-
79323- /* Size of RO sections of the module (text+rodata) */
79324- unsigned int init_ro_size, core_ro_size;
79325+ unsigned int init_size_rx, core_size_rx;
79326
79327 /* Arch-specific module values */
79328 struct mod_arch_specific arch;
79329@@ -355,6 +355,10 @@ struct module
79330 #ifdef CONFIG_EVENT_TRACING
79331 struct ftrace_event_call **trace_events;
79332 unsigned int num_trace_events;
79333+ struct file_operations trace_id;
79334+ struct file_operations trace_enable;
79335+ struct file_operations trace_format;
79336+ struct file_operations trace_filter;
79337 #endif
79338 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
79339 unsigned int num_ftrace_callsites;
79340@@ -378,7 +382,7 @@ struct module
79341 ctor_fn_t *ctors;
79342 unsigned int num_ctors;
79343 #endif
79344-};
79345+} __randomize_layout;
79346 #ifndef MODULE_ARCH_INIT
79347 #define MODULE_ARCH_INIT {}
79348 #endif
79349@@ -399,16 +403,46 @@ bool is_module_address(unsigned long addr);
79350 bool is_module_percpu_address(unsigned long addr);
79351 bool is_module_text_address(unsigned long addr);
79352
79353+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
79354+{
79355+
79356+#ifdef CONFIG_PAX_KERNEXEC
79357+ if (ktla_ktva(addr) >= (unsigned long)start &&
79358+ ktla_ktva(addr) < (unsigned long)start + size)
79359+ return 1;
79360+#endif
79361+
79362+ return ((void *)addr >= start && (void *)addr < start + size);
79363+}
79364+
79365+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
79366+{
79367+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
79368+}
79369+
79370+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
79371+{
79372+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
79373+}
79374+
79375+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
79376+{
79377+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
79378+}
79379+
79380+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
79381+{
79382+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
79383+}
79384+
79385 static inline int within_module_core(unsigned long addr, const struct module *mod)
79386 {
79387- return (unsigned long)mod->module_core <= addr &&
79388- addr < (unsigned long)mod->module_core + mod->core_size;
79389+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
79390 }
79391
79392 static inline int within_module_init(unsigned long addr, const struct module *mod)
79393 {
79394- return (unsigned long)mod->module_init <= addr &&
79395- addr < (unsigned long)mod->module_init + mod->init_size;
79396+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
79397 }
79398
79399 /* Search for module by name: must hold module_mutex. */
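This is the core of KERNEXEC's module handling: the single module_core/module_init allocations are split into an RX half (text and rodata, mapped read-only and executable) and an RW half (data, writable and non-executable), and ktla_ktva() in within_module_range() translates addresses where KERNEXEC relocates the kernel text. The rewritten helpers compose naturally; a usage sketch:

static const char *example_classify(unsigned long addr, const struct module *mod)
{
	if (within_module_core_rx(addr, mod))
		return "module text/rodata (RX)";
	if (within_module_core_rw(addr, mod))
		return "module data (RW, NX)";
	return "outside this module's core";
}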
79400diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
79401index 560ca53..ef621ef 100644
79402--- a/include/linux/moduleloader.h
79403+++ b/include/linux/moduleloader.h
79404@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
79405 sections. Returns NULL on failure. */
79406 void *module_alloc(unsigned long size);
79407
79408+#ifdef CONFIG_PAX_KERNEXEC
79409+void *module_alloc_exec(unsigned long size);
79410+#else
79411+#define module_alloc_exec(x) module_alloc(x)
79412+#endif
79413+
79414 /* Free memory returned from module_alloc. */
79415 void module_free(struct module *mod, void *module_region);
79416
79417+#ifdef CONFIG_PAX_KERNEXEC
79418+void module_free_exec(struct module *mod, void *module_region);
79419+#else
79420+#define module_free_exec(x, y) module_free((x), (y))
79421+#endif
79422+
79423 /*
79424 * Apply the given relocation to the (simplified) ELF. Return -error
79425 * or 0.
79426@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
79427 unsigned int relsec,
79428 struct module *me)
79429 {
79430+#ifdef CONFIG_MODULES
79431 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
79432+#endif
79433 return -ENOEXEC;
79434 }
79435 #endif
79436@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
79437 unsigned int relsec,
79438 struct module *me)
79439 {
79440+#ifdef CONFIG_MODULES
79441 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
79442+#endif
79443 return -ENOEXEC;
79444 }
79445 #endif
79446diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
79447index c3eb102..073c4a6 100644
79448--- a/include/linux/moduleparam.h
79449+++ b/include/linux/moduleparam.h
79450@@ -295,7 +295,7 @@ static inline void __kernel_param_unlock(void)
79451 * @len is usually just sizeof(string).
79452 */
79453 #define module_param_string(name, string, len, perm) \
79454- static const struct kparam_string __param_string_##name \
79455+ static const struct kparam_string __param_string_##name __used \
79456 = { len, string }; \
79457 __module_param_call(MODULE_PARAM_PREFIX, name, \
79458 &param_ops_string, \
79459@@ -434,7 +434,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
79460 */
79461 #define module_param_array_named(name, array, type, nump, perm) \
79462 param_check_##type(name, &(array)[0]); \
79463- static const struct kparam_array __param_arr_##name \
79464+ static const struct kparam_array __param_arr_##name __used \
79465 = { .max = ARRAY_SIZE(array), .num = nump, \
79466 .ops = &param_ops_##type, \
79467 .elemsize = sizeof(array[0]), .elem = array }; \
79468diff --git a/include/linux/mount.h b/include/linux/mount.h
79469index 371d346..fba2819 100644
79470--- a/include/linux/mount.h
79471+++ b/include/linux/mount.h
79472@@ -56,7 +56,7 @@ struct vfsmount {
79473 struct dentry *mnt_root; /* root of the mounted tree */
79474 struct super_block *mnt_sb; /* pointer to superblock */
79475 int mnt_flags;
79476-};
79477+} __randomize_layout;
79478
79479 struct file; /* forward dec */
79480
79481diff --git a/include/linux/namei.h b/include/linux/namei.h
79482index 492de72..1bddcd4 100644
79483--- a/include/linux/namei.h
79484+++ b/include/linux/namei.h
79485@@ -19,7 +19,7 @@ struct nameidata {
79486 unsigned seq, m_seq;
79487 int last_type;
79488 unsigned depth;
79489- char *saved_names[MAX_NESTED_LINKS + 1];
79490+ const char *saved_names[MAX_NESTED_LINKS + 1];
79491 };
79492
79493 /*
79494@@ -83,12 +83,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
79495
79496 extern void nd_jump_link(struct nameidata *nd, struct path *path);
79497
79498-static inline void nd_set_link(struct nameidata *nd, char *path)
79499+static inline void nd_set_link(struct nameidata *nd, const char *path)
79500 {
79501 nd->saved_names[nd->depth] = path;
79502 }
79503
79504-static inline char *nd_get_link(struct nameidata *nd)
79505+static inline const char *nd_get_link(const struct nameidata *nd)
79506 {
79507 return nd->saved_names[nd->depth];
79508 }
79509diff --git a/include/linux/net.h b/include/linux/net.h
79510index 69be3e6..0fb422d 100644
79511--- a/include/linux/net.h
79512+++ b/include/linux/net.h
79513@@ -192,7 +192,7 @@ struct net_proto_family {
79514 int (*create)(struct net *net, struct socket *sock,
79515 int protocol, int kern);
79516 struct module *owner;
79517-};
79518+} __do_const;
79519
79520 struct iovec;
79521 struct kvec;
79522diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
79523index ce2a1f5..cb9bc8c 100644
79524--- a/include/linux/netdevice.h
79525+++ b/include/linux/netdevice.h
79526@@ -1129,6 +1129,7 @@ struct net_device_ops {
79527 struct net_device *dev,
79528 void *priv);
79529 };
79530+typedef struct net_device_ops __no_const net_device_ops_no_const;
79531
79532 /*
79533 * The DEVICE structure.
79534@@ -1211,7 +1212,7 @@ struct net_device {
79535 int iflink;
79536
79537 struct net_device_stats stats;
79538- atomic_long_t rx_dropped; /* dropped packets by core network
79539+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
79540 * Do not use this in drivers.
79541 */
79542
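
[annotation] The net_device_ops hunk introduces the __do_const/__no_const split used throughout this patch: ops tables become const (read-only after boot) by default, and the rare driver that must patch one at runtime declares the explicitly mutable typedef instead. A rough standalone analogue, with plain const standing in for the constify plugin machinery:

#include <stdio.h>

struct ops {
	int (*handler)(int);
};

typedef struct ops ops_no_const;	/* mutable alias for dynamic users */

static int h1(int x) { return x + 1; }
static int h2(int x) { return x + 2; }

static const struct ops fixed_ops = { .handler = h1 };	/* lands in .rodata */

int main(void)
{
	ops_no_const runtime_ops = { .handler = h1 };
	runtime_ops.handler = h2;	/* legal only on the mutable alias */
	printf("%d %d\n", fixed_ops.handler(1), runtime_ops.handler(1));
	return 0;
}
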
79543diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
79544index 2077489..a15e561 100644
79545--- a/include/linux/netfilter.h
79546+++ b/include/linux/netfilter.h
79547@@ -84,7 +84,7 @@ struct nf_sockopt_ops {
79548 #endif
79549 /* Use the module struct to lock set/get code in place */
79550 struct module *owner;
79551-};
79552+} __do_const;
79553
79554 /* Function to register/unregister hook points. */
79555 int nf_register_hook(struct nf_hook_ops *reg);
79556diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
79557index 28c7436..2d6156a 100644
79558--- a/include/linux/netfilter/nfnetlink.h
79559+++ b/include/linux/netfilter/nfnetlink.h
79560@@ -19,7 +19,7 @@ struct nfnl_callback {
79561 const struct nlattr * const cda[]);
79562 const struct nla_policy *policy; /* netlink attribute policy */
79563 const u_int16_t attr_count; /* number of nlattr's */
79564-};
79565+} __do_const;
79566
79567 struct nfnetlink_subsystem {
79568 const char *name;
79569diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
79570new file mode 100644
79571index 0000000..33f4af8
79572--- /dev/null
79573+++ b/include/linux/netfilter/xt_gradm.h
79574@@ -0,0 +1,9 @@
79575+#ifndef _LINUX_NETFILTER_XT_GRADM_H
79576+#define _LINUX_NETFILTER_XT_GRADM_H 1
79577+
79578+struct xt_gradm_mtinfo {
79579+ __u16 flags;
79580+ __u16 invflags;
79581+};
79582+
79583+#endif
79584diff --git a/include/linux/nls.h b/include/linux/nls.h
79585index 5dc635f..35f5e11 100644
79586--- a/include/linux/nls.h
79587+++ b/include/linux/nls.h
79588@@ -31,7 +31,7 @@ struct nls_table {
79589 const unsigned char *charset2upper;
79590 struct module *owner;
79591 struct nls_table *next;
79592-};
79593+} __do_const;
79594
79595 /* this value hold the maximum octet of charset */
79596 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
79597diff --git a/include/linux/notifier.h b/include/linux/notifier.h
79598index d14a4c3..a078786 100644
79599--- a/include/linux/notifier.h
79600+++ b/include/linux/notifier.h
79601@@ -54,7 +54,8 @@ struct notifier_block {
79602 notifier_fn_t notifier_call;
79603 struct notifier_block __rcu *next;
79604 int priority;
79605-};
79606+} __do_const;
79607+typedef struct notifier_block __no_const notifier_block_no_const;
79608
79609 struct atomic_notifier_head {
79610 spinlock_t lock;
79611diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
79612index b2a0f15..4d7da32 100644
79613--- a/include/linux/oprofile.h
79614+++ b/include/linux/oprofile.h
79615@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
79616 int oprofilefs_create_ro_ulong(struct dentry * root,
79617 char const * name, ulong * val);
79618
79619-/** Create a file for read-only access to an atomic_t. */
79620+/** Create a file for read-only access to an atomic_unchecked_t. */
79621 int oprofilefs_create_ro_atomic(struct dentry * root,
79622- char const * name, atomic_t * val);
79623+ char const * name, atomic_unchecked_t * val);
79624
79625 /** create a directory */
79626 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
79627diff --git a/include/linux/padata.h b/include/linux/padata.h
79628index 4386946..f50c615 100644
79629--- a/include/linux/padata.h
79630+++ b/include/linux/padata.h
79631@@ -129,7 +129,7 @@ struct parallel_data {
79632 struct padata_serial_queue __percpu *squeue;
79633 atomic_t reorder_objects;
79634 atomic_t refcnt;
79635- atomic_t seq_nr;
79636+ atomic_unchecked_t seq_nr;
79637 struct padata_cpumask cpumask;
79638 spinlock_t lock ____cacheline_aligned;
79639 unsigned int processed;
79640diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
79641index a2e2f1d..8a391d2 100644
79642--- a/include/linux/pci_hotplug.h
79643+++ b/include/linux/pci_hotplug.h
79644@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
79645 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
79646 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
79647 int (*reset_slot) (struct hotplug_slot *slot, int probe);
79648-};
79649+} __do_const;
79650+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
79651
79652 /**
79653 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
79654diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
79655index 2e069d1..27054b8 100644
79656--- a/include/linux/perf_event.h
79657+++ b/include/linux/perf_event.h
79658@@ -327,8 +327,8 @@ struct perf_event {
79659
79660 enum perf_event_active_state state;
79661 unsigned int attach_state;
79662- local64_t count;
79663- atomic64_t child_count;
79664+ local64_t count; /* PaX: fix it one day */
79665+ atomic64_unchecked_t child_count;
79666
79667 /*
79668 * These are the total time in nanoseconds that the event
79669@@ -379,8 +379,8 @@ struct perf_event {
79670 * These accumulate total time (in nanoseconds) that children
79671 * events have been enabled and running, respectively.
79672 */
79673- atomic64_t child_total_time_enabled;
79674- atomic64_t child_total_time_running;
79675+ atomic64_unchecked_t child_total_time_enabled;
79676+ atomic64_unchecked_t child_total_time_running;
79677
79678 /*
79679 * Protect attach/detach and child_list:
79680@@ -707,7 +707,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
79681 entry->ip[entry->nr++] = ip;
79682 }
79683
79684-extern int sysctl_perf_event_paranoid;
79685+extern int sysctl_perf_event_legitimately_concerned;
79686 extern int sysctl_perf_event_mlock;
79687 extern int sysctl_perf_event_sample_rate;
79688 extern int sysctl_perf_cpu_time_max_percent;
79689@@ -722,19 +722,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
79690 loff_t *ppos);
79691
79692
79693+static inline bool perf_paranoid_any(void)
79694+{
79695+ return sysctl_perf_event_legitimately_concerned > 2;
79696+}
79697+
79698 static inline bool perf_paranoid_tracepoint_raw(void)
79699 {
79700- return sysctl_perf_event_paranoid > -1;
79701+ return sysctl_perf_event_legitimately_concerned > -1;
79702 }
79703
79704 static inline bool perf_paranoid_cpu(void)
79705 {
79706- return sysctl_perf_event_paranoid > 0;
79707+ return sysctl_perf_event_legitimately_concerned > 0;
79708 }
79709
79710 static inline bool perf_paranoid_kernel(void)
79711 {
79712- return sysctl_perf_event_paranoid > 1;
79713+ return sysctl_perf_event_legitimately_concerned > 1;
79714 }
79715
79716 extern void perf_event_init(void);
79717@@ -850,7 +855,7 @@ struct perf_pmu_events_attr {
79718 struct device_attribute attr;
79719 u64 id;
79720 const char *event_str;
79721-};
79722+} __do_const;
79723
79724 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
79725 static struct perf_pmu_events_attr _var = { \
79726diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
79727index 7246ef3..1539ea4 100644
79728--- a/include/linux/pid_namespace.h
79729+++ b/include/linux/pid_namespace.h
79730@@ -43,7 +43,7 @@ struct pid_namespace {
79731 int hide_pid;
79732 int reboot; /* group exit code if this pidns was rebooted */
79733 unsigned int proc_inum;
79734-};
79735+} __randomize_layout;
79736
79737 extern struct pid_namespace init_pid_ns;
79738
79739diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
79740index b8809fe..ae4ccd0 100644
79741--- a/include/linux/pipe_fs_i.h
79742+++ b/include/linux/pipe_fs_i.h
79743@@ -47,10 +47,10 @@ struct pipe_inode_info {
79744 struct mutex mutex;
79745 wait_queue_head_t wait;
79746 unsigned int nrbufs, curbuf, buffers;
79747- unsigned int readers;
79748- unsigned int writers;
79749- unsigned int files;
79750- unsigned int waiting_writers;
79751+ atomic_t readers;
79752+ atomic_t writers;
79753+ atomic_t files;
79754+ atomic_t waiting_writers;
79755 unsigned int r_counter;
79756 unsigned int w_counter;
79757 struct page *tmp_page;
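
[annotation] The pipe_inode_info hunk converts plain reader/writer counts to atomic_t because concurrent non-atomic increments can be lost. A minimal userspace sketch of the same conversion, with C11 stdatomic standing in for the kernel's atomic_t:

/* build with: gcc -O2 -Wall -pthread counters.c */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static atomic_int readers;	/* analogous to atomic_t readers */

static void *open_reader(void *arg)
{
	(void)arg;
	atomic_fetch_add(&readers, 1);	/* like atomic_inc(&pipe->readers) */
	return NULL;
}

int main(void)
{
	pthread_t t[8];
	for (int i = 0; i < 8; i++)
		pthread_create(&t[i], NULL, open_reader, NULL);
	for (int i = 0; i < 8; i++)
		pthread_join(&t[i], NULL);
	printf("readers=%d\n", atomic_load(&readers));	/* always 8; a plain int could lose updates */
	return 0;
}
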
79758diff --git a/include/linux/pm.h b/include/linux/pm.h
79759index a224c7f..92d8a97 100644
79760--- a/include/linux/pm.h
79761+++ b/include/linux/pm.h
79762@@ -576,6 +576,7 @@ extern int dev_pm_put_subsys_data(struct device *dev);
79763 struct dev_pm_domain {
79764 struct dev_pm_ops ops;
79765 };
79766+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
79767
79768 /*
79769 * The PM_EVENT_ messages are also used by drivers implementing the legacy
79770diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
79771index 7c1d252..c5c773e 100644
79772--- a/include/linux/pm_domain.h
79773+++ b/include/linux/pm_domain.h
79774@@ -48,7 +48,7 @@ struct gpd_dev_ops {
79775
79776 struct gpd_cpu_data {
79777 unsigned int saved_exit_latency;
79778- struct cpuidle_state *idle_state;
79779+ cpuidle_state_no_const *idle_state;
79780 };
79781
79782 struct generic_pm_domain {
79783diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
79784index 6fa7cea..7bf6415 100644
79785--- a/include/linux/pm_runtime.h
79786+++ b/include/linux/pm_runtime.h
79787@@ -103,7 +103,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
79788
79789 static inline void pm_runtime_mark_last_busy(struct device *dev)
79790 {
79791- ACCESS_ONCE(dev->power.last_busy) = jiffies;
79792+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
79793 }
79794
79795 #else /* !CONFIG_PM_RUNTIME */
79796diff --git a/include/linux/pnp.h b/include/linux/pnp.h
79797index 195aafc..49a7bc2 100644
79798--- a/include/linux/pnp.h
79799+++ b/include/linux/pnp.h
79800@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
79801 struct pnp_fixup {
79802 char id[7];
79803 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
79804-};
79805+} __do_const;
79806
79807 /* config parameters */
79808 #define PNP_CONFIG_NORMAL 0x0001
79809diff --git a/include/linux/poison.h b/include/linux/poison.h
79810index 2110a81..13a11bb 100644
79811--- a/include/linux/poison.h
79812+++ b/include/linux/poison.h
79813@@ -19,8 +19,8 @@
79814 * under normal circumstances, used to verify that nobody uses
79815 * non-initialized list entries.
79816 */
79817-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
79818-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
79819+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
79820+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
79821
79822 /********** include/linux/timer.h **********/
79823 /*
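
[annotation] The poison.h hunk pins the list poison values to high addresses that cannot be mapped, so a stale user of an unlinked node faults deterministically at the poison address instead of silently walking memory an attacker may control. A standalone sketch of pointer poisoning; the values mirror the hunk and are simply unmapped in a normal userspace process:

#include <stdio.h>

#define LIST_POISON1 ((void *)(long)0xFFFFFF01)
#define LIST_POISON2 ((void *)(long)0xFFFFFF02)

struct node {
	struct node *next, *prev;
};

static void node_unlink(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->next = LIST_POISON1;	/* any later n->next-> dereference faults */
	n->prev = LIST_POISON2;
}

int main(void)
{
	struct node a, b, c;
	a.next = &b; b.prev = &a; b.next = &c; c.prev = &b;
	c.next = &a; a.prev = &c;	/* circular list a-b-c */
	node_unlink(&b);
	printf("b.next=%p b.prev=%p\n", (void *)b.next, (void *)b.prev);
	return 0;
}
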
79824diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
79825index d8b187c3..9a9257a 100644
79826--- a/include/linux/power/smartreflex.h
79827+++ b/include/linux/power/smartreflex.h
79828@@ -238,7 +238,7 @@ struct omap_sr_class_data {
79829 int (*notify)(struct omap_sr *sr, u32 status);
79830 u8 notify_flags;
79831 u8 class_type;
79832-};
79833+} __do_const;
79834
79835 /**
79836 * struct omap_sr_nvalue_table - Smartreflex n-target value info
79837diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
79838index 4ea1d37..80f4b33 100644
79839--- a/include/linux/ppp-comp.h
79840+++ b/include/linux/ppp-comp.h
79841@@ -84,7 +84,7 @@ struct compressor {
79842 struct module *owner;
79843 /* Extra skb space needed by the compressor algorithm */
79844 unsigned int comp_extra;
79845-};
79846+} __do_const;
79847
79848 /*
79849 * The return value from decompress routine is the length of the
79850diff --git a/include/linux/preempt.h b/include/linux/preempt.h
79851index a3d9dc8..8af9922 100644
79852--- a/include/linux/preempt.h
79853+++ b/include/linux/preempt.h
79854@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
79855 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
79856 #endif
79857
79858+#define raw_preempt_count_add(val) __preempt_count_add(val)
79859+#define raw_preempt_count_sub(val) __preempt_count_sub(val)
79860+
79861 #define __preempt_count_inc() __preempt_count_add(1)
79862 #define __preempt_count_dec() __preempt_count_sub(1)
79863
79864 #define preempt_count_inc() preempt_count_add(1)
79865+#define raw_preempt_count_inc() raw_preempt_count_add(1)
79866 #define preempt_count_dec() preempt_count_sub(1)
79867+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
79868
79869 #ifdef CONFIG_PREEMPT_COUNT
79870
79871@@ -41,6 +46,12 @@ do { \
79872 barrier(); \
79873 } while (0)
79874
79875+#define raw_preempt_disable() \
79876+do { \
79877+ raw_preempt_count_inc(); \
79878+ barrier(); \
79879+} while (0)
79880+
79881 #define sched_preempt_enable_no_resched() \
79882 do { \
79883 barrier(); \
79884@@ -49,6 +60,12 @@ do { \
79885
79886 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
79887
79888+#define raw_preempt_enable_no_resched() \
79889+do { \
79890+ barrier(); \
79891+ raw_preempt_count_dec(); \
79892+} while (0)
79893+
79894 #ifdef CONFIG_PREEMPT
79895 #define preempt_enable() \
79896 do { \
79897@@ -105,8 +122,10 @@ do { \
79898 * region.
79899 */
79900 #define preempt_disable() barrier()
79901+#define raw_preempt_disable() barrier()
79902 #define sched_preempt_enable_no_resched() barrier()
79903 #define preempt_enable_no_resched() barrier()
79904+#define raw_preempt_enable_no_resched() barrier()
79905 #define preempt_enable() barrier()
79906 #define preempt_check_resched() do { } while (0)
79907
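
[annotation] The raw_ preempt variants added above follow the same count-plus-compiler-barrier shape as the regular ones, minus instrumentation hooks. A minimal sketch of that shape, assuming GCC inline asm; this is an illustration of the pattern, not the kernel's per-cpu implementation:

#define barrier() __asm__ __volatile__("" : : : "memory")

static int preempt_count;

#define my_preempt_disable()	do { preempt_count++; barrier(); } while (0)
#define my_preempt_enable()	do { barrier(); preempt_count--; } while (0)

int main(void)
{
	my_preempt_disable();
	/* critical section: the compiler cannot move memory accesses across barrier() */
	my_preempt_enable();
	return preempt_count;	/* 0: the pairs are balanced */
}
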
79908diff --git a/include/linux/printk.h b/include/linux/printk.h
79909index 6949258..7c4730e 100644
79910--- a/include/linux/printk.h
79911+++ b/include/linux/printk.h
79912@@ -106,6 +106,8 @@ static inline __printf(1, 2) __cold
79913 void early_printk(const char *s, ...) { }
79914 #endif
79915
79916+extern int kptr_restrict;
79917+
79918 #ifdef CONFIG_PRINTK
79919 asmlinkage __printf(5, 0)
79920 int vprintk_emit(int facility, int level,
79921@@ -140,7 +142,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
79922
79923 extern int printk_delay_msec;
79924 extern int dmesg_restrict;
79925-extern int kptr_restrict;
79926
79927 extern void wake_up_klogd(void);
79928
79929diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
79930index 608e60a..c26f864 100644
79931--- a/include/linux/proc_fs.h
79932+++ b/include/linux/proc_fs.h
79933@@ -34,6 +34,19 @@ static inline struct proc_dir_entry *proc_create(
79934 return proc_create_data(name, mode, parent, proc_fops, NULL);
79935 }
79936
79937+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
79938+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
79939+{
79940+#ifdef CONFIG_GRKERNSEC_PROC_USER
79941+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
79942+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
79943+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
79944+#else
79945+ return proc_create_data(name, mode, parent, proc_fops, NULL);
79946+#endif
79947+}
79948+
79949+
79950 extern void proc_set_size(struct proc_dir_entry *, loff_t);
79951 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
79952 extern void *PDE_DATA(const struct inode *);
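
[annotation] proc_create_grsec() above tightens the caller's requested mode at compile time depending on the configured policy. A userspace analogue of the same idea, with hypothetical policy macros (POLICY_OWNER_ONLY, POLICY_OWNER_GROUP) and the standard S_I* mode bits:

#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

static int create_restricted(const char *name, mode_t requested)
{
#ifdef POLICY_OWNER_ONLY
	mode_t mode = S_IRUSR;			/* 0400: owner read only */
#elif defined(POLICY_OWNER_GROUP)
	mode_t mode = S_IRUSR | S_IRGRP;	/* 0440: owner and group */
#else
	mode_t mode = requested;		/* policy off: honour the caller */
#endif
	return open(name, O_CREAT | O_WRONLY, mode);
}

int main(void)
{
	int fd = create_restricted("/tmp/demo_restricted", 0644);
	if (fd >= 0)
		close(fd);
	return 0;
}
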
79953diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
79954index 34a1e10..70f6bde 100644
79955--- a/include/linux/proc_ns.h
79956+++ b/include/linux/proc_ns.h
79957@@ -14,7 +14,7 @@ struct proc_ns_operations {
79958 void (*put)(void *ns);
79959 int (*install)(struct nsproxy *nsproxy, void *ns);
79960 unsigned int (*inum)(void *ns);
79961-};
79962+} __do_const __randomize_layout;
79963
79964 struct proc_ns {
79965 void *ns;
79966diff --git a/include/linux/quota.h b/include/linux/quota.h
79967index cc7494a..1e27036 100644
79968--- a/include/linux/quota.h
79969+++ b/include/linux/quota.h
79970@@ -70,7 +70,7 @@ struct kqid { /* Type in which we store the quota identifier */
79971
79972 extern bool qid_eq(struct kqid left, struct kqid right);
79973 extern bool qid_lt(struct kqid left, struct kqid right);
79974-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
79975+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
79976 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
79977 extern bool qid_valid(struct kqid qid);
79978
79979diff --git a/include/linux/random.h b/include/linux/random.h
79980index 4002b3d..d5ad855 100644
79981--- a/include/linux/random.h
79982+++ b/include/linux/random.h
79983@@ -10,9 +10,19 @@
79984
79985
79986 extern void add_device_randomness(const void *, unsigned int);
79987+
79988+static inline void add_latent_entropy(void)
79989+{
79990+
79991+#ifdef LATENT_ENTROPY_PLUGIN
79992+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
79993+#endif
79994+
79995+}
79996+
79997 extern void add_input_randomness(unsigned int type, unsigned int code,
79998- unsigned int value);
79999-extern void add_interrupt_randomness(int irq, int irq_flags);
80000+ unsigned int value) __latent_entropy;
80001+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
80002
80003 extern void get_random_bytes(void *buf, int nbytes);
80004 extern void get_random_bytes_arch(void *buf, int nbytes);
80005@@ -23,10 +33,10 @@ extern int random_int_secret_init(void);
80006 extern const struct file_operations random_fops, urandom_fops;
80007 #endif
80008
80009-unsigned int get_random_int(void);
80010+unsigned int __intentional_overflow(-1) get_random_int(void);
80011 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
80012
80013-u32 prandom_u32(void);
80014+u32 prandom_u32(void) __intentional_overflow(-1);
80015 void prandom_bytes(void *buf, int nbytes);
80016 void prandom_seed(u32 seed);
80017 void prandom_reseed_late(void);
80018@@ -38,6 +48,11 @@ struct rnd_state {
80019 u32 prandom_u32_state(struct rnd_state *state);
80020 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
80021
80022+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
80023+{
80024+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
80025+}
80026+
80027 /*
80028 * Handle minimum values for seeds
80029 */
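
[annotation] pax_get_random_long() above builds a register-width random value from two 32-bit draws; the sizeof(long) > 4 test folds away at compile time, so 32-bit targets get a single draw. A sketch of the two-halves trick using a fixed 64-bit type, with rand() standing in for prandom_u32() purely for illustration:

#include <stdlib.h>
#include <stdio.h>

static unsigned long long rand64(void)
{
	unsigned long long lo = (unsigned int)rand();
	unsigned long long hi = (unsigned long long)(unsigned int)rand() << 32;
	return hi | lo;	/* low word from one draw, high word from a second */
}

int main(void)
{
	srand(1);
	printf("%llx\n", rand64());
	return 0;
}
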
80030diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
80031index fea49b5..2ac22bb 100644
80032--- a/include/linux/rbtree_augmented.h
80033+++ b/include/linux/rbtree_augmented.h
80034@@ -80,7 +80,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
80035 old->rbaugmented = rbcompute(old); \
80036 } \
80037 rbstatic const struct rb_augment_callbacks rbname = { \
80038- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
80039+ .propagate = rbname ## _propagate, \
80040+ .copy = rbname ## _copy, \
80041+ .rotate = rbname ## _rotate \
80042 };
80043
80044
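
[annotation] The rbtree hunk switches positional initializers to designated ones. Positional initializers silently bind to the wrong members if fields are ever reordered (e.g. by structure layout randomization elsewhere in this patch); naming each field is order-independent. A standalone illustration:

struct callbacks {
	void (*propagate)(int);
	void (*copy)(int);
	void (*rotate)(int);
};

static void prop_fn(int x) { (void)x; }
static void copy_fn(int x) { (void)x; }
static void rot_fn(int x)  { (void)x; }

/* initializer order no longer has to match the declaration order */
static const struct callbacks cbs = {
	.rotate = rot_fn,
	.propagate = prop_fn,
	.copy = copy_fn,
};

int main(void)
{
	cbs.propagate(0);
	return 0;
}
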
80045diff --git a/include/linux/rculist.h b/include/linux/rculist.h
80046index 45a0a9e..e83788e 100644
80047--- a/include/linux/rculist.h
80048+++ b/include/linux/rculist.h
80049@@ -29,8 +29,8 @@
80050 */
80051 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
80052 {
80053- ACCESS_ONCE(list->next) = list;
80054- ACCESS_ONCE(list->prev) = list;
80055+ ACCESS_ONCE_RW(list->next) = list;
80056+ ACCESS_ONCE_RW(list->prev) = list;
80057 }
80058
80059 /*
80060@@ -59,6 +59,9 @@ extern void __list_add_rcu(struct list_head *new,
80061 struct list_head *prev, struct list_head *next);
80062 #endif
80063
80064+extern void __pax_list_add_rcu(struct list_head *new,
80065+ struct list_head *prev, struct list_head *next);
80066+
80067 /**
80068 * list_add_rcu - add a new entry to rcu-protected list
80069 * @new: new entry to be added
80070@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
80071 __list_add_rcu(new, head, head->next);
80072 }
80073
80074+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
80075+{
80076+ __pax_list_add_rcu(new, head, head->next);
80077+}
80078+
80079 /**
80080 * list_add_tail_rcu - add a new entry to rcu-protected list
80081 * @new: new entry to be added
80082@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
80083 __list_add_rcu(new, head->prev, head);
80084 }
80085
80086+static inline void pax_list_add_tail_rcu(struct list_head *new,
80087+ struct list_head *head)
80088+{
80089+ __pax_list_add_rcu(new, head->prev, head);
80090+}
80091+
80092 /**
80093 * list_del_rcu - deletes entry from list without re-initialization
80094 * @entry: the element to delete from the list.
80095@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
80096 entry->prev = LIST_POISON2;
80097 }
80098
80099+extern void pax_list_del_rcu(struct list_head *entry);
80100+
80101 /**
80102 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
80103 * @n: the element to delete from the hash list.
80104diff --git a/include/linux/reboot.h b/include/linux/reboot.h
80105index 9e7db9e..7d4fd72 100644
80106--- a/include/linux/reboot.h
80107+++ b/include/linux/reboot.h
80108@@ -44,9 +44,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
80109 */
80110
80111 extern void migrate_to_reboot_cpu(void);
80112-extern void machine_restart(char *cmd);
80113-extern void machine_halt(void);
80114-extern void machine_power_off(void);
80115+extern void machine_restart(char *cmd) __noreturn;
80116+extern void machine_halt(void) __noreturn;
80117+extern void machine_power_off(void) __noreturn;
80118
80119 extern void machine_shutdown(void);
80120 struct pt_regs;
80121@@ -57,9 +57,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
80122 */
80123
80124 extern void kernel_restart_prepare(char *cmd);
80125-extern void kernel_restart(char *cmd);
80126-extern void kernel_halt(void);
80127-extern void kernel_power_off(void);
80128+extern void kernel_restart(char *cmd) __noreturn;
80129+extern void kernel_halt(void) __noreturn;
80130+extern void kernel_power_off(void) __noreturn;
80131
80132 extern int C_A_D; /* for sysctl */
80133 void ctrl_alt_del(void);
80134@@ -73,7 +73,7 @@ extern int orderly_poweroff(bool force);
80135 * Emergency restart, callable from an interrupt handler.
80136 */
80137
80138-extern void emergency_restart(void);
80139+extern void emergency_restart(void) __noreturn;
80140 #include <asm/emergency-restart.h>
80141
80142 #endif /* _LINUX_REBOOT_H */
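
[annotation] The reboot.h hunk annotates the halt/restart entry points __noreturn, letting the compiler prune unreachable code after the call and drop missing-return warnings on paths that end in them. A standalone sketch using _Noreturn, the C11 spelling of the same attribute:

#include <stdlib.h>

static _Noreturn void die(int code)
{
	exit(code);	/* never returns to the caller */
}

static int pick(int x)
{
	if (x < 0)
		die(1);
	return x;	/* no "control reaches end" warning for the x < 0 path */
}

int main(void)
{
	return pick(0);
}
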
80143diff --git a/include/linux/regset.h b/include/linux/regset.h
80144index 8e0c9fe..ac4d221 100644
80145--- a/include/linux/regset.h
80146+++ b/include/linux/regset.h
80147@@ -161,7 +161,8 @@ struct user_regset {
80148 unsigned int align;
80149 unsigned int bias;
80150 unsigned int core_note_type;
80151-};
80152+} __do_const;
80153+typedef struct user_regset __no_const user_regset_no_const;
80154
80155 /**
80156 * struct user_regset_view - available regsets
80157diff --git a/include/linux/relay.h b/include/linux/relay.h
80158index d7c8359..818daf5 100644
80159--- a/include/linux/relay.h
80160+++ b/include/linux/relay.h
80161@@ -157,7 +157,7 @@ struct rchan_callbacks
80162 * The callback should return 0 if successful, negative if not.
80163 */
80164 int (*remove_buf_file)(struct dentry *dentry);
80165-};
80166+} __no_const;
80167
80168 /*
80169 * CONFIG_RELAY kernel API, kernel/relay.c
80170diff --git a/include/linux/rio.h b/include/linux/rio.h
80171index b71d573..2f940bd 100644
80172--- a/include/linux/rio.h
80173+++ b/include/linux/rio.h
80174@@ -355,7 +355,7 @@ struct rio_ops {
80175 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
80176 u64 rstart, u32 size, u32 flags);
80177 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
80178-};
80179+} __no_const;
80180
80181 #define RIO_RESOURCE_MEM 0x00000100
80182 #define RIO_RESOURCE_DOORBELL 0x00000200
80183diff --git a/include/linux/rmap.h b/include/linux/rmap.h
80184index 6dacb93..6174423 100644
80185--- a/include/linux/rmap.h
80186+++ b/include/linux/rmap.h
80187@@ -145,8 +145,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
80188 void anon_vma_init(void); /* create anon_vma_cachep */
80189 int anon_vma_prepare(struct vm_area_struct *);
80190 void unlink_anon_vmas(struct vm_area_struct *);
80191-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
80192-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
80193+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
80194+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
80195
80196 static inline void anon_vma_merge(struct vm_area_struct *vma,
80197 struct vm_area_struct *next)
80198diff --git a/include/linux/sched.h b/include/linux/sched.h
80199index 53f97eb..1d90705 100644
80200--- a/include/linux/sched.h
80201+++ b/include/linux/sched.h
80202@@ -63,6 +63,7 @@ struct bio_list;
80203 struct fs_struct;
80204 struct perf_event_context;
80205 struct blk_plug;
80206+struct linux_binprm;
80207
80208 /*
80209 * List of flags we want to share for kernel threads,
80210@@ -304,7 +305,7 @@ extern char __sched_text_start[], __sched_text_end[];
80211 extern int in_sched_functions(unsigned long addr);
80212
80213 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
80214-extern signed long schedule_timeout(signed long timeout);
80215+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
80216 extern signed long schedule_timeout_interruptible(signed long timeout);
80217 extern signed long schedule_timeout_killable(signed long timeout);
80218 extern signed long schedule_timeout_uninterruptible(signed long timeout);
80219@@ -315,6 +316,19 @@ struct nsproxy;
80220 struct user_namespace;
80221
80222 #ifdef CONFIG_MMU
80223+
80224+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
80225+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
80226+#else
80227+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
80228+{
80229+ return 0;
80230+}
80231+#endif
80232+
80233+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
80234+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
80235+
80236 extern void arch_pick_mmap_layout(struct mm_struct *mm);
80237 extern unsigned long
80238 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
80239@@ -600,6 +614,17 @@ struct signal_struct {
80240 #ifdef CONFIG_TASKSTATS
80241 struct taskstats *stats;
80242 #endif
80243+
80244+#ifdef CONFIG_GRKERNSEC
80245+ u32 curr_ip;
80246+ u32 saved_ip;
80247+ u32 gr_saddr;
80248+ u32 gr_daddr;
80249+ u16 gr_sport;
80250+ u16 gr_dport;
80251+ u8 used_accept:1;
80252+#endif
80253+
80254 #ifdef CONFIG_AUDIT
80255 unsigned audit_tty;
80256 unsigned audit_tty_log_passwd;
80257@@ -626,7 +651,7 @@ struct signal_struct {
80258 struct mutex cred_guard_mutex; /* guard against foreign influences on
80259 * credential calculations
80260 * (notably. ptrace) */
80261-};
80262+} __randomize_layout;
80263
80264 /*
80265 * Bits in flags field of signal_struct.
80266@@ -680,6 +705,14 @@ struct user_struct {
80267 struct key *session_keyring; /* UID's default session keyring */
80268 #endif
80269
80270+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
80271+ unsigned char kernel_banned;
80272+#endif
80273+#ifdef CONFIG_GRKERNSEC_BRUTE
80274+ unsigned char suid_banned;
80275+ unsigned long suid_ban_expires;
80276+#endif
80277+
80278 /* Hash table maintenance information */
80279 struct hlist_node uidhash_node;
80280 kuid_t uid;
80281@@ -687,7 +720,7 @@ struct user_struct {
80282 #ifdef CONFIG_PERF_EVENTS
80283 atomic_long_t locked_vm;
80284 #endif
80285-};
80286+} __randomize_layout;
80287
80288 extern int uids_sysfs_init(void);
80289
80290@@ -1162,8 +1195,8 @@ struct task_struct {
80291 struct list_head thread_group;
80292
80293 struct completion *vfork_done; /* for vfork() */
80294- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
80295- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
80296+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
80297+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
80298
80299 cputime_t utime, stime, utimescaled, stimescaled;
80300 cputime_t gtime;
80301@@ -1188,11 +1221,6 @@ struct task_struct {
80302 struct task_cputime cputime_expires;
80303 struct list_head cpu_timers[3];
80304
80305-/* process credentials */
80306- const struct cred __rcu *real_cred; /* objective and real subjective task
80307- * credentials (COW) */
80308- const struct cred __rcu *cred; /* effective (overridable) subjective task
80309- * credentials (COW) */
80310 char comm[TASK_COMM_LEN]; /* executable name excluding path
80311 - access with [gs]et_task_comm (which lock
80312 it with task_lock())
80313@@ -1209,6 +1237,10 @@ struct task_struct {
80314 #endif
80315 /* CPU-specific state of this task */
80316 struct thread_struct thread;
80317+/* thread_info moved to task_struct */
80318+#ifdef CONFIG_X86
80319+ struct thread_info tinfo;
80320+#endif
80321 /* filesystem information */
80322 struct fs_struct *fs;
80323 /* open file information */
80324@@ -1282,6 +1314,10 @@ struct task_struct {
80325 gfp_t lockdep_reclaim_gfp;
80326 #endif
80327
80328+/* process credentials */
80329+ const struct cred __rcu *real_cred; /* objective and real subjective task
80330+ * credentials (COW) */
80331+
80332 /* journalling filesystem info */
80333 void *journal_info;
80334
80335@@ -1320,6 +1356,10 @@ struct task_struct {
80336 /* cg_list protected by css_set_lock and tsk->alloc_lock */
80337 struct list_head cg_list;
80338 #endif
80339+
80340+ const struct cred __rcu *cred; /* effective (overridable) subjective task
80341+ * credentials (COW) */
80342+
80343 #ifdef CONFIG_FUTEX
80344 struct robust_list_head __user *robust_list;
80345 #ifdef CONFIG_COMPAT
80346@@ -1454,7 +1494,78 @@ struct task_struct {
80347 unsigned int sequential_io;
80348 unsigned int sequential_io_avg;
80349 #endif
80350-};
80351+
80352+#ifdef CONFIG_GRKERNSEC
80353+ /* grsecurity */
80354+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
80355+ u64 exec_id;
80356+#endif
80357+#ifdef CONFIG_GRKERNSEC_SETXID
80358+ const struct cred *delayed_cred;
80359+#endif
80360+ struct dentry *gr_chroot_dentry;
80361+ struct acl_subject_label *acl;
80362+ struct acl_subject_label *tmpacl;
80363+ struct acl_role_label *role;
80364+ struct file *exec_file;
80365+ unsigned long brute_expires;
80366+ u16 acl_role_id;
80367+ u8 inherited;
80368+ /* is this the task that authenticated to the special role */
80369+ u8 acl_sp_role;
80370+ u8 is_writable;
80371+ u8 brute;
80372+ u8 gr_is_chrooted;
80373+#endif
80374+
80375+} __randomize_layout;
80376+
80377+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
80378+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
80379+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
80380+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
80381+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
80382+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
80383+
80384+#ifdef CONFIG_PAX_SOFTMODE
80385+extern int pax_softmode;
80386+#endif
80387+
80388+extern int pax_check_flags(unsigned long *);
80389+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
80390+
80391+/* if tsk != current then task_lock must be held on it */
80392+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
80393+static inline unsigned long pax_get_flags(struct task_struct *tsk)
80394+{
80395+ if (likely(tsk->mm))
80396+ return tsk->mm->pax_flags;
80397+ else
80398+ return 0UL;
80399+}
80400+
80401+/* if tsk != current then task_lock must be held on it */
80402+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
80403+{
80404+ if (likely(tsk->mm)) {
80405+ tsk->mm->pax_flags = flags;
80406+ return 0;
80407+ }
80408+ return -EINVAL;
80409+}
80410+#endif
80411+
80412+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
80413+extern void pax_set_initial_flags(struct linux_binprm *bprm);
80414+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
80415+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
80416+#endif
80417+
80418+struct path;
80419+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
80420+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
80421+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
80422+extern void pax_report_refcount_overflow(struct pt_regs *regs);
80423
80424 /* Future-safe accessor for struct task_struct's cpus_allowed. */
80425 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
80426@@ -1531,7 +1642,7 @@ struct pid_namespace;
80427 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
80428 struct pid_namespace *ns);
80429
80430-static inline pid_t task_pid_nr(struct task_struct *tsk)
80431+static inline pid_t task_pid_nr(const struct task_struct *tsk)
80432 {
80433 return tsk->pid;
80434 }
80435@@ -1981,7 +2092,9 @@ void yield(void);
80436 extern struct exec_domain default_exec_domain;
80437
80438 union thread_union {
80439+#ifndef CONFIG_X86
80440 struct thread_info thread_info;
80441+#endif
80442 unsigned long stack[THREAD_SIZE/sizeof(long)];
80443 };
80444
80445@@ -2014,6 +2127,7 @@ extern struct pid_namespace init_pid_ns;
80446 */
80447
80448 extern struct task_struct *find_task_by_vpid(pid_t nr);
80449+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
80450 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
80451 struct pid_namespace *ns);
80452
80453@@ -2178,7 +2292,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
80454 extern void exit_itimers(struct signal_struct *);
80455 extern void flush_itimer_signals(void);
80456
80457-extern void do_group_exit(int);
80458+extern __noreturn void do_group_exit(int);
80459
80460 extern int allow_signal(int);
80461 extern int disallow_signal(int);
80462@@ -2369,9 +2483,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
80463
80464 #endif
80465
80466-static inline int object_is_on_stack(void *obj)
80467+static inline int object_starts_on_stack(void *obj)
80468 {
80469- void *stack = task_stack_page(current);
80470+ const void *stack = task_stack_page(current);
80471
80472 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
80473 }
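
[annotation] The sched.h hunk adds the MF_PAX_* word: one bit per hardening feature in a per-mm flags field, read and written with plain mask operations (see pax_get_flags/pax_set_flags above). A minimal sketch of that flag word; the values mirror the hunk and the mm_struct is faked:

#include <stdio.h>

#define MF_PAX_PAGEEXEC  0x01000000UL
#define MF_PAX_MPROTECT  0x04000000UL
#define MF_PAX_RANDMMAP  0x08000000UL

struct fake_mm {
	unsigned long pax_flags;
};

int main(void)
{
	struct fake_mm mm = { .pax_flags = 0 };
	mm.pax_flags |= MF_PAX_PAGEEXEC | MF_PAX_RANDMMAP;	/* enable two features */
	if (mm.pax_flags & MF_PAX_PAGEEXEC)
		printf("PAGEEXEC on\n");
	if (!(mm.pax_flags & MF_PAX_MPROTECT))
		printf("MPROTECT off\n");
	return 0;
}
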
80474diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
80475index 41467f8..1e4253d 100644
80476--- a/include/linux/sched/sysctl.h
80477+++ b/include/linux/sched/sysctl.h
80478@@ -30,6 +30,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
80479 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
80480
80481 extern int sysctl_max_map_count;
80482+extern unsigned long sysctl_heap_stack_gap;
80483
80484 extern unsigned int sysctl_sched_latency;
80485 extern unsigned int sysctl_sched_min_granularity;
80486diff --git a/include/linux/security.h b/include/linux/security.h
80487index 5623a7f..b352409 100644
80488--- a/include/linux/security.h
80489+++ b/include/linux/security.h
80490@@ -27,6 +27,7 @@
80491 #include <linux/slab.h>
80492 #include <linux/err.h>
80493 #include <linux/string.h>
80494+#include <linux/grsecurity.h>
80495
80496 struct linux_binprm;
80497 struct cred;
80498@@ -116,8 +117,6 @@ struct seq_file;
80499
80500 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
80501
80502-void reset_security_ops(void);
80503-
80504 #ifdef CONFIG_MMU
80505 extern unsigned long mmap_min_addr;
80506 extern unsigned long dac_mmap_min_addr;
80507diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
80508index dc368b8..e895209 100644
80509--- a/include/linux/semaphore.h
80510+++ b/include/linux/semaphore.h
80511@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
80512 }
80513
80514 extern void down(struct semaphore *sem);
80515-extern int __must_check down_interruptible(struct semaphore *sem);
80516+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
80517 extern int __must_check down_killable(struct semaphore *sem);
80518 extern int __must_check down_trylock(struct semaphore *sem);
80519 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
80520diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
80521index 52e0097..09625ef 100644
80522--- a/include/linux/seq_file.h
80523+++ b/include/linux/seq_file.h
80524@@ -27,6 +27,9 @@ struct seq_file {
80525 struct mutex lock;
80526 const struct seq_operations *op;
80527 int poll_event;
80528+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
80529+ u64 exec_id;
80530+#endif
80531 #ifdef CONFIG_USER_NS
80532 struct user_namespace *user_ns;
80533 #endif
80534@@ -39,6 +42,7 @@ struct seq_operations {
80535 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
80536 int (*show) (struct seq_file *m, void *v);
80537 };
80538+typedef struct seq_operations __no_const seq_operations_no_const;
80539
80540 #define SEQ_SKIP 1
80541
80542diff --git a/include/linux/shm.h b/include/linux/shm.h
80543index 429c199..4d42e38 100644
80544--- a/include/linux/shm.h
80545+++ b/include/linux/shm.h
80546@@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
80547
80548 /* The task created the shm object. NULL if the task is dead. */
80549 struct task_struct *shm_creator;
80550+#ifdef CONFIG_GRKERNSEC
80551+ time_t shm_createtime;
80552+ pid_t shm_lapid;
80553+#endif
80554 };
80555
80556 /* shm_mode upper byte flags */
80557diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
80558index 6f69b3f..71ac613 100644
80559--- a/include/linux/skbuff.h
80560+++ b/include/linux/skbuff.h
80561@@ -643,7 +643,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
80562 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
80563 int node);
80564 struct sk_buff *build_skb(void *data, unsigned int frag_size);
80565-static inline struct sk_buff *alloc_skb(unsigned int size,
80566+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
80567 gfp_t priority)
80568 {
80569 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
80570@@ -750,7 +750,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
80571 */
80572 static inline int skb_queue_empty(const struct sk_buff_head *list)
80573 {
80574- return list->next == (struct sk_buff *)list;
80575+ return list->next == (const struct sk_buff *)list;
80576 }
80577
80578 /**
80579@@ -763,7 +763,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
80580 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
80581 const struct sk_buff *skb)
80582 {
80583- return skb->next == (struct sk_buff *)list;
80584+ return skb->next == (const struct sk_buff *)list;
80585 }
80586
80587 /**
80588@@ -776,7 +776,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
80589 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
80590 const struct sk_buff *skb)
80591 {
80592- return skb->prev == (struct sk_buff *)list;
80593+ return skb->prev == (const struct sk_buff *)list;
80594 }
80595
80596 /**
80597@@ -1686,7 +1686,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
80598 return skb->inner_transport_header - skb->inner_network_header;
80599 }
80600
80601-static inline int skb_network_offset(const struct sk_buff *skb)
80602+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
80603 {
80604 return skb_network_header(skb) - skb->data;
80605 }
80606@@ -1746,7 +1746,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
80607 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
80608 */
80609 #ifndef NET_SKB_PAD
80610-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
80611+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
80612 #endif
80613
80614 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
80615@@ -2345,7 +2345,7 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
80616 int *err);
80617 unsigned int datagram_poll(struct file *file, struct socket *sock,
80618 struct poll_table_struct *wait);
80619-int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
80620+int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
80621 struct iovec *to, int size);
80622 int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
80623 struct iovec *iov);
80624@@ -2617,6 +2617,9 @@ static inline void nf_reset(struct sk_buff *skb)
80625 nf_bridge_put(skb->nf_bridge);
80626 skb->nf_bridge = NULL;
80627 #endif
80628+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
80629+ skb->nf_trace = 0;
80630+#endif
80631 }
80632
80633 static inline void nf_reset_trace(struct sk_buff *skb)
80634diff --git a/include/linux/slab.h b/include/linux/slab.h
80635index 1e2f4fe..df49ca6 100644
80636--- a/include/linux/slab.h
80637+++ b/include/linux/slab.h
80638@@ -14,15 +14,29 @@
80639 #include <linux/gfp.h>
80640 #include <linux/types.h>
80641 #include <linux/workqueue.h>
80642-
80643+#include <linux/err.h>
80644
80645 /*
80646 * Flags to pass to kmem_cache_create().
80647 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
80648 */
80649 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
80650+
80651+#ifdef CONFIG_PAX_USERCOPY_SLABS
80652+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
80653+#else
80654+#define SLAB_USERCOPY 0x00000000UL
80655+#endif
80656+
80657 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
80658 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
80659+
80660+#ifdef CONFIG_PAX_MEMORY_SANITIZE
80661+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
80662+#else
80663+#define SLAB_NO_SANITIZE 0x00000000UL
80664+#endif
80665+
80666 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
80667 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
80668 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
80669@@ -98,10 +112,13 @@
80670 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
80671 * Both make kfree a no-op.
80672 */
80673-#define ZERO_SIZE_PTR ((void *)16)
80674+#define ZERO_SIZE_PTR \
80675+({ \
80676+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
80677+ (void *)(-MAX_ERRNO-1L); \
80678+})
80679
80680-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
80681- (unsigned long)ZERO_SIZE_PTR)
80682+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
80683
80684 #include <linux/kmemleak.h>
80685
80686@@ -142,6 +159,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
80687 void kfree(const void *);
80688 void kzfree(const void *);
80689 size_t ksize(const void *);
80690+const char *check_heap_object(const void *ptr, unsigned long n);
80691+bool is_usercopy_object(const void *ptr);
80692
80693 /*
80694 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
80695@@ -174,7 +193,7 @@ struct kmem_cache {
80696 unsigned int align; /* Alignment as calculated */
80697 unsigned long flags; /* Active flags on the slab */
80698 const char *name; /* Slab name for sysfs */
80699- int refcount; /* Use counter */
80700+ atomic_t refcount; /* Use counter */
80701 void (*ctor)(void *); /* Called on object slot creation */
80702 struct list_head list; /* List of all slab caches on the system */
80703 };
80704@@ -248,6 +267,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
80705 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
80706 #endif
80707
80708+#ifdef CONFIG_PAX_USERCOPY_SLABS
80709+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
80710+#endif
80711+
80712 /*
80713 * Figure out which kmalloc slab an allocation of a certain size
80714 * belongs to.
80715@@ -256,7 +279,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
80716 * 2 = 120 .. 192 bytes
80717 * n = 2^(n-1) .. 2^n -1
80718 */
80719-static __always_inline int kmalloc_index(size_t size)
80720+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
80721 {
80722 if (!size)
80723 return 0;
80724@@ -299,11 +322,11 @@ static __always_inline int kmalloc_index(size_t size)
80725 }
80726 #endif /* !CONFIG_SLOB */
80727
80728-void *__kmalloc(size_t size, gfp_t flags);
80729+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
80730 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
80731
80732 #ifdef CONFIG_NUMA
80733-void *__kmalloc_node(size_t size, gfp_t flags, int node);
80734+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
80735 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
80736 #else
80737 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
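
[annotation] The slab.h hunk relocates ZERO_SIZE_PTR to just below the errno range at the top of the address space, and the new ZERO_OR_NULL_PTR() uses unsigned wraparound to test everything with one comparison: subtracting 1 makes NULL wrap to ULONG_MAX, so NULL, ZERO_SIZE_PTR, and the whole range above it all pass, while ordinary heap pointers fall below. A standalone sketch; the values mirror the patch minus its compile-time assert, and MAX_ERRNO is 4095 in the kernel:

#include <stdio.h>

#define MAX_ERRNO	4095
#define ZERO_SIZE_PTR	((void *)(-MAX_ERRNO - 1L))
#define ZERO_OR_NULL_PTR(x) \
	((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)

int main(void)
{
	int obj;
	printf("NULL          -> %d\n", (int)ZERO_OR_NULL_PTR(NULL));		/* 1 */
	printf("ZERO_SIZE_PTR -> %d\n", (int)ZERO_OR_NULL_PTR(ZERO_SIZE_PTR));	/* 1 */
	printf("real pointer  -> %d\n", (int)ZERO_OR_NULL_PTR(&obj));		/* 0 */
	return 0;
}
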
80738diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
80739index 09bfffb..4fc80fb 100644
80740--- a/include/linux/slab_def.h
80741+++ b/include/linux/slab_def.h
80742@@ -36,7 +36,7 @@ struct kmem_cache {
80743 /* 4) cache creation/removal */
80744 const char *name;
80745 struct list_head list;
80746- int refcount;
80747+ atomic_t refcount;
80748 int object_size;
80749 int align;
80750
80751@@ -52,10 +52,14 @@ struct kmem_cache {
80752 unsigned long node_allocs;
80753 unsigned long node_frees;
80754 unsigned long node_overflow;
80755- atomic_t allochit;
80756- atomic_t allocmiss;
80757- atomic_t freehit;
80758- atomic_t freemiss;
80759+ atomic_unchecked_t allochit;
80760+ atomic_unchecked_t allocmiss;
80761+ atomic_unchecked_t freehit;
80762+ atomic_unchecked_t freemiss;
80763+#ifdef CONFIG_PAX_MEMORY_SANITIZE
80764+ atomic_unchecked_t sanitized;
80765+ atomic_unchecked_t not_sanitized;
80766+#endif
80767
80768 /*
80769 * If debugging is enabled, then the allocator can add additional
80770diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
80771index f56bfa9..8378a26 100644
80772--- a/include/linux/slub_def.h
80773+++ b/include/linux/slub_def.h
80774@@ -74,7 +74,7 @@ struct kmem_cache {
80775 struct kmem_cache_order_objects max;
80776 struct kmem_cache_order_objects min;
80777 gfp_t allocflags; /* gfp flags to use on each alloc */
80778- int refcount; /* Refcount for slab cache destroy */
80779+ atomic_t refcount; /* Refcount for slab cache destroy */
80780 void (*ctor)(void *);
80781 int inuse; /* Offset to metadata */
80782 int align; /* Alignment */
80783diff --git a/include/linux/smp.h b/include/linux/smp.h
80784index 5da22ee..71d8a28 100644
80785--- a/include/linux/smp.h
80786+++ b/include/linux/smp.h
80787@@ -176,7 +176,9 @@ static inline void kick_all_cpus_sync(void) { }
80788 #endif
80789
80790 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
80791+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
80792 #define put_cpu() preempt_enable()
80793+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
80794
80795 /*
80796 * Callback to arch code if there's nosmp or maxcpus=0 on the
80797diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
80798index 54f91d3..be2c379 100644
80799--- a/include/linux/sock_diag.h
80800+++ b/include/linux/sock_diag.h
80801@@ -11,7 +11,7 @@ struct sock;
80802 struct sock_diag_handler {
80803 __u8 family;
80804 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
80805-};
80806+} __do_const;
80807
80808 int sock_diag_register(const struct sock_diag_handler *h);
80809 void sock_diag_unregister(const struct sock_diag_handler *h);
80810diff --git a/include/linux/sonet.h b/include/linux/sonet.h
80811index 680f9a3..f13aeb0 100644
80812--- a/include/linux/sonet.h
80813+++ b/include/linux/sonet.h
80814@@ -7,7 +7,7 @@
80815 #include <uapi/linux/sonet.h>
80816
80817 struct k_sonet_stats {
80818-#define __HANDLE_ITEM(i) atomic_t i
80819+#define __HANDLE_ITEM(i) atomic_unchecked_t i
80820 __SONET_ITEMS
80821 #undef __HANDLE_ITEM
80822 };
80823diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
80824index 07d8e53..dc934c9 100644
80825--- a/include/linux/sunrpc/addr.h
80826+++ b/include/linux/sunrpc/addr.h
80827@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
80828 {
80829 switch (sap->sa_family) {
80830 case AF_INET:
80831- return ntohs(((struct sockaddr_in *)sap)->sin_port);
80832+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
80833 case AF_INET6:
80834- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
80835+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
80836 }
80837 return 0;
80838 }
80839@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
80840 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
80841 const struct sockaddr *src)
80842 {
80843- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
80844+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
80845 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
80846
80847 dsin->sin_family = ssin->sin_family;
80848@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
80849 if (sa->sa_family != AF_INET6)
80850 return 0;
80851
80852- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
80853+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
80854 }
80855
80856 #endif /* _LINUX_SUNRPC_ADDR_H */
80857diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
80858index 8af2804..c7414ef 100644
80859--- a/include/linux/sunrpc/clnt.h
80860+++ b/include/linux/sunrpc/clnt.h
80861@@ -97,7 +97,7 @@ struct rpc_procinfo {
80862 unsigned int p_timer; /* Which RTT timer to use */
80863 u32 p_statidx; /* Which procedure to account */
80864 const char * p_name; /* name of procedure */
80865-};
80866+} __do_const;
80867
80868 #ifdef __KERNEL__
80869
80870diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
80871index 6eecfc2..7ada79d 100644
80872--- a/include/linux/sunrpc/svc.h
80873+++ b/include/linux/sunrpc/svc.h
80874@@ -410,7 +410,7 @@ struct svc_procedure {
80875 unsigned int pc_count; /* call count */
80876 unsigned int pc_cachetype; /* cache info (NFS) */
80877 unsigned int pc_xdrressize; /* maximum size of XDR reply */
80878-};
80879+} __do_const;
80880
80881 /*
80882 * Function prototypes.
80883diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
80884index 0b8e3e6..33e0a01 100644
80885--- a/include/linux/sunrpc/svc_rdma.h
80886+++ b/include/linux/sunrpc/svc_rdma.h
80887@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
80888 extern unsigned int svcrdma_max_requests;
80889 extern unsigned int svcrdma_max_req_size;
80890
80891-extern atomic_t rdma_stat_recv;
80892-extern atomic_t rdma_stat_read;
80893-extern atomic_t rdma_stat_write;
80894-extern atomic_t rdma_stat_sq_starve;
80895-extern atomic_t rdma_stat_rq_starve;
80896-extern atomic_t rdma_stat_rq_poll;
80897-extern atomic_t rdma_stat_rq_prod;
80898-extern atomic_t rdma_stat_sq_poll;
80899-extern atomic_t rdma_stat_sq_prod;
80900+extern atomic_unchecked_t rdma_stat_recv;
80901+extern atomic_unchecked_t rdma_stat_read;
80902+extern atomic_unchecked_t rdma_stat_write;
80903+extern atomic_unchecked_t rdma_stat_sq_starve;
80904+extern atomic_unchecked_t rdma_stat_rq_starve;
80905+extern atomic_unchecked_t rdma_stat_rq_poll;
80906+extern atomic_unchecked_t rdma_stat_rq_prod;
80907+extern atomic_unchecked_t rdma_stat_sq_poll;
80908+extern atomic_unchecked_t rdma_stat_sq_prod;
80909
80910 #define RPCRDMA_VERSION 1
80911
80912diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
80913index 8d71d65..f79586e 100644
80914--- a/include/linux/sunrpc/svcauth.h
80915+++ b/include/linux/sunrpc/svcauth.h
80916@@ -120,7 +120,7 @@ struct auth_ops {
80917 int (*release)(struct svc_rqst *rq);
80918 void (*domain_release)(struct auth_domain *);
80919 int (*set_client)(struct svc_rqst *rq);
80920-};
80921+} __do_const;
80922
80923 #define SVC_GARBAGE 1
80924 #define SVC_SYSERR 2
80925diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
80926index a5ffd32..0935dea 100644
80927--- a/include/linux/swiotlb.h
80928+++ b/include/linux/swiotlb.h
80929@@ -60,7 +60,8 @@ extern void
80930
80931 extern void
80932 swiotlb_free_coherent(struct device *hwdev, size_t size,
80933- void *vaddr, dma_addr_t dma_handle);
80934+ void *vaddr, dma_addr_t dma_handle,
80935+ struct dma_attrs *attrs);
80936
80937 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
80938 unsigned long offset, size_t size,
80939diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
80940index 94273bb..c2e05fc 100644
80941--- a/include/linux/syscalls.h
80942+++ b/include/linux/syscalls.h
80943@@ -97,8 +97,14 @@ struct sigaltstack;
80944 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
80945
80946 #define __SC_DECL(t, a) t a
80947+#define __TYPE_IS_U(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
80948 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
80949-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
80950+#define __SC_LONG(t, a) __typeof( \
80951+ __builtin_choose_expr( \
80952+ sizeof(t) > sizeof(int), \
80953+ (t) 0, \
80954+ __builtin_choose_expr(__TYPE_IS_U(t), 0UL, 0L) \
80955+ )) a
80956 #define __SC_CAST(t, a) (t) a
80957 #define __SC_ARGS(t, a) a
80958 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
80959@@ -363,11 +369,11 @@ asmlinkage long sys_sync(void);
80960 asmlinkage long sys_fsync(unsigned int fd);
80961 asmlinkage long sys_fdatasync(unsigned int fd);
80962 asmlinkage long sys_bdflush(int func, long data);
80963-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
80964- char __user *type, unsigned long flags,
80965+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
80966+ const char __user *type, unsigned long flags,
80967 void __user *data);
80968-asmlinkage long sys_umount(char __user *name, int flags);
80969-asmlinkage long sys_oldumount(char __user *name);
80970+asmlinkage long sys_umount(const char __user *name, int flags);
80971+asmlinkage long sys_oldumount(const char __user *name);
80972 asmlinkage long sys_truncate(const char __user *path, long length);
80973 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
80974 asmlinkage long sys_stat(const char __user *filename,
80975@@ -579,7 +585,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
80976 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
80977 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
80978 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
80979- struct sockaddr __user *, int);
80980+ struct sockaddr __user *, int) __intentional_overflow(0);
80981 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
80982 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
80983 unsigned int vlen, unsigned flags);
80984diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
80985index 27b3b0b..e093dd9 100644
80986--- a/include/linux/syscore_ops.h
80987+++ b/include/linux/syscore_ops.h
80988@@ -16,7 +16,7 @@ struct syscore_ops {
80989 int (*suspend)(void);
80990 void (*resume)(void);
80991 void (*shutdown)(void);
80992-};
80993+} __do_const;
80994
80995 extern void register_syscore_ops(struct syscore_ops *ops);
80996 extern void unregister_syscore_ops(struct syscore_ops *ops);
80997diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
80998index 14a8ff2..fa95f3a 100644
80999--- a/include/linux/sysctl.h
81000+++ b/include/linux/sysctl.h
81001@@ -34,13 +34,13 @@ struct ctl_table_root;
81002 struct ctl_table_header;
81003 struct ctl_dir;
81004
81005-typedef struct ctl_table ctl_table;
81006-
81007 typedef int proc_handler (struct ctl_table *ctl, int write,
81008 void __user *buffer, size_t *lenp, loff_t *ppos);
81009
81010 extern int proc_dostring(struct ctl_table *, int,
81011 void __user *, size_t *, loff_t *);
81012+extern int proc_dostring_modpriv(struct ctl_table *, int,
81013+ void __user *, size_t *, loff_t *);
81014 extern int proc_dointvec(struct ctl_table *, int,
81015 void __user *, size_t *, loff_t *);
81016 extern int proc_dointvec_minmax(struct ctl_table *, int,
81017@@ -115,7 +115,9 @@ struct ctl_table
81018 struct ctl_table_poll *poll;
81019 void *extra1;
81020 void *extra2;
81021-};
81022+} __do_const __randomize_layout;
81023+typedef struct ctl_table __no_const ctl_table_no_const;
81024+typedef struct ctl_table ctl_table;
81025
81026 struct ctl_node {
81027 struct rb_node node;
81028diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
81029index 6695040..3d4192d 100644
81030--- a/include/linux/sysfs.h
81031+++ b/include/linux/sysfs.h
81032@@ -33,7 +33,8 @@ struct attribute {
81033 struct lock_class_key *key;
81034 struct lock_class_key skey;
81035 #endif
81036-};
81037+} __do_const;
81038+typedef struct attribute __no_const attribute_no_const;
81039
81040 /**
81041 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
81042@@ -62,7 +63,8 @@ struct attribute_group {
81043 struct attribute *, int);
81044 struct attribute **attrs;
81045 struct bin_attribute **bin_attrs;
81046-};
81047+} __do_const;
81048+typedef struct attribute_group __no_const attribute_group_no_const;
81049
81050 /**
81051 * Use these macros to make defining attributes easier. See include/linux/device.h
81052@@ -126,7 +128,8 @@ struct bin_attribute {
81053 char *, loff_t, size_t);
81054 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
81055 struct vm_area_struct *vma);
81056-};
81057+} __do_const;
81058+typedef struct bin_attribute __no_const bin_attribute_no_const;
81059
81060 /**
81061 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
81062diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
81063index 387fa7d..3fcde6b 100644
81064--- a/include/linux/sysrq.h
81065+++ b/include/linux/sysrq.h
81066@@ -16,6 +16,7 @@
81067
81068 #include <linux/errno.h>
81069 #include <linux/types.h>
81070+#include <linux/compiler.h>
81071
81072 /* Possible values of bitmask for enabling sysrq functions */
81073 /* 0x0001 is reserved for enable everything */
81074@@ -33,7 +34,7 @@ struct sysrq_key_op {
81075 char *help_msg;
81076 char *action_msg;
81077 int enable_mask;
81078-};
81079+} __do_const;
81080
81081 #ifdef CONFIG_MAGIC_SYSRQ
81082
81083diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
81084index fddbe20..0312de8 100644
81085--- a/include/linux/thread_info.h
81086+++ b/include/linux/thread_info.h
81087@@ -161,6 +161,15 @@ static inline bool test_and_clear_restore_sigmask(void)
81088 #error "no set_restore_sigmask() provided and default one won't work"
81089 #endif
81090
81091+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user);
81092+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
81093+{
81094+#ifndef CONFIG_PAX_USERCOPY_DEBUG
81095+ if (!__builtin_constant_p(n))
81096+#endif
81097+ __check_object_size(ptr, n, to_user);
81098+}
81099+
81100 #endif /* __KERNEL__ */
81101
81102 #endif /* _LINUX_THREAD_INFO_H */
81103diff --git a/include/linux/tty.h b/include/linux/tty.h
81104index 97d660e..6356755 100644
81105--- a/include/linux/tty.h
81106+++ b/include/linux/tty.h
81107@@ -196,7 +196,7 @@ struct tty_port {
81108 const struct tty_port_operations *ops; /* Port operations */
81109 spinlock_t lock; /* Lock protecting tty field */
81110 int blocked_open; /* Waiting to open */
81111- int count; /* Usage count */
81112+ atomic_t count; /* Usage count */
81113 wait_queue_head_t open_wait; /* Open waiters */
81114 wait_queue_head_t close_wait; /* Close waiters */
81115 wait_queue_head_t delta_msr_wait; /* Modem status change */
81116@@ -278,7 +278,7 @@ struct tty_struct {
81117 /* If the tty has a pending do_SAK, queue it here - akpm */
81118 struct work_struct SAK_work;
81119 struct tty_port *port;
81120-};
81121+} __randomize_layout;
81122
81123 /* Each of a tty's open files has private_data pointing to tty_file_private */
81124 struct tty_file_private {
81125@@ -545,7 +545,7 @@ extern int tty_port_open(struct tty_port *port,
81126 struct tty_struct *tty, struct file *filp);
81127 static inline int tty_port_users(struct tty_port *port)
81128 {
81129- return port->count + port->blocked_open;
81130+ return atomic_read(&port->count) + port->blocked_open;
81131 }
81132
81133 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
81134diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
81135index 756a609..f61242d 100644
81136--- a/include/linux/tty_driver.h
81137+++ b/include/linux/tty_driver.h
81138@@ -285,7 +285,7 @@ struct tty_operations {
81139 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
81140 #endif
81141 const struct file_operations *proc_fops;
81142-};
81143+} __do_const;
81144
81145 struct tty_driver {
81146 int magic; /* magic number for this structure */
81147@@ -319,7 +319,7 @@ struct tty_driver {
81148
81149 const struct tty_operations *ops;
81150 struct list_head tty_drivers;
81151-};
81152+} __randomize_layout;
81153
81154 extern struct list_head tty_drivers;
81155
81156diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
81157index f15c898..207b7d1 100644
81158--- a/include/linux/tty_ldisc.h
81159+++ b/include/linux/tty_ldisc.h
81160@@ -211,7 +211,7 @@ struct tty_ldisc_ops {
81161
81162 struct module *owner;
81163
81164- int refcount;
81165+ atomic_t refcount;
81166 };
81167
81168 struct tty_ldisc {
81169diff --git a/include/linux/types.h b/include/linux/types.h
81170index 4d118ba..c3ee9bf 100644
81171--- a/include/linux/types.h
81172+++ b/include/linux/types.h
81173@@ -176,10 +176,26 @@ typedef struct {
81174 int counter;
81175 } atomic_t;
81176
81177+#ifdef CONFIG_PAX_REFCOUNT
81178+typedef struct {
81179+ int counter;
81180+} atomic_unchecked_t;
81181+#else
81182+typedef atomic_t atomic_unchecked_t;
81183+#endif
81184+
81185 #ifdef CONFIG_64BIT
81186 typedef struct {
81187 long counter;
81188 } atomic64_t;
81189+
81190+#ifdef CONFIG_PAX_REFCOUNT
81191+typedef struct {
81192+ long counter;
81193+} atomic64_unchecked_t;
81194+#else
81195+typedef atomic64_t atomic64_unchecked_t;
81196+#endif
81197 #endif
81198
81199 struct list_head {
81200diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
81201index 9d8cf05..0ed74dd 100644
81202--- a/include/linux/uaccess.h
81203+++ b/include/linux/uaccess.h
81204@@ -72,11 +72,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
81205 long ret; \
81206 mm_segment_t old_fs = get_fs(); \
81207 \
81208- set_fs(KERNEL_DS); \
81209 pagefault_disable(); \
81210- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
81211- pagefault_enable(); \
81212+ set_fs(KERNEL_DS); \
81213+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
81214 set_fs(old_fs); \
81215+ pagefault_enable(); \
81216 ret; \
81217 })
81218
81219diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
81220index 8e522cbc..aa8572d 100644
81221--- a/include/linux/uidgid.h
81222+++ b/include/linux/uidgid.h
81223@@ -197,4 +197,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
81224
81225 #endif /* CONFIG_USER_NS */
81226
81227+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
81228+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
81229+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
81230+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
81231+
81232 #endif /* _LINUX_UIDGID_H */
81233diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
81234index 99c1b4d..562e6f3 100644
81235--- a/include/linux/unaligned/access_ok.h
81236+++ b/include/linux/unaligned/access_ok.h
81237@@ -4,34 +4,34 @@
81238 #include <linux/kernel.h>
81239 #include <asm/byteorder.h>
81240
81241-static inline u16 get_unaligned_le16(const void *p)
81242+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
81243 {
81244- return le16_to_cpup((__le16 *)p);
81245+ return le16_to_cpup((const __le16 *)p);
81246 }
81247
81248-static inline u32 get_unaligned_le32(const void *p)
81249+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
81250 {
81251- return le32_to_cpup((__le32 *)p);
81252+ return le32_to_cpup((const __le32 *)p);
81253 }
81254
81255-static inline u64 get_unaligned_le64(const void *p)
81256+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
81257 {
81258- return le64_to_cpup((__le64 *)p);
81259+ return le64_to_cpup((const __le64 *)p);
81260 }
81261
81262-static inline u16 get_unaligned_be16(const void *p)
81263+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
81264 {
81265- return be16_to_cpup((__be16 *)p);
81266+ return be16_to_cpup((const __be16 *)p);
81267 }
81268
81269-static inline u32 get_unaligned_be32(const void *p)
81270+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
81271 {
81272- return be32_to_cpup((__be32 *)p);
81273+ return be32_to_cpup((const __be32 *)p);
81274 }
81275
81276-static inline u64 get_unaligned_be64(const void *p)
81277+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
81278 {
81279- return be64_to_cpup((__be64 *)p);
81280+ return be64_to_cpup((const __be64 *)p);
81281 }
81282
81283 static inline void put_unaligned_le16(u16 val, void *p)
81284diff --git a/include/linux/usb.h b/include/linux/usb.h
81285index 512ab16..f53e1bf 100644
81286--- a/include/linux/usb.h
81287+++ b/include/linux/usb.h
81288@@ -563,7 +563,7 @@ struct usb_device {
81289 int maxchild;
81290
81291 u32 quirks;
81292- atomic_t urbnum;
81293+ atomic_unchecked_t urbnum;
81294
81295 unsigned long active_duration;
81296
81297@@ -1643,7 +1643,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
81298
81299 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
81300 __u8 request, __u8 requesttype, __u16 value, __u16 index,
81301- void *data, __u16 size, int timeout);
81302+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
81303 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
81304 void *data, int len, int *actual_length, int timeout);
81305 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
81306diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
81307index e452ba6..78f8e80 100644
81308--- a/include/linux/usb/renesas_usbhs.h
81309+++ b/include/linux/usb/renesas_usbhs.h
81310@@ -39,7 +39,7 @@ enum {
81311 */
81312 struct renesas_usbhs_driver_callback {
81313 int (*notify_hotplug)(struct platform_device *pdev);
81314-};
81315+} __no_const;
81316
81317 /*
81318 * callback functions for platform
81319diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
81320index 4836ba3..603f6ee 100644
81321--- a/include/linux/user_namespace.h
81322+++ b/include/linux/user_namespace.h
81323@@ -33,7 +33,7 @@ struct user_namespace {
81324 struct key *persistent_keyring_register;
81325 struct rw_semaphore persistent_keyring_register_sem;
81326 #endif
81327-};
81328+} __randomize_layout;
81329
81330 extern struct user_namespace init_user_ns;
81331
81332diff --git a/include/linux/utsname.h b/include/linux/utsname.h
81333index 239e277..22a5cf5 100644
81334--- a/include/linux/utsname.h
81335+++ b/include/linux/utsname.h
81336@@ -24,7 +24,7 @@ struct uts_namespace {
81337 struct new_utsname name;
81338 struct user_namespace *user_ns;
81339 unsigned int proc_inum;
81340-};
81341+} __randomize_layout;
81342 extern struct uts_namespace init_uts_ns;
81343
81344 #ifdef CONFIG_UTS_NS
81345diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
81346index 6f8fbcf..e2a0e61 100644
81347--- a/include/linux/vermagic.h
81348+++ b/include/linux/vermagic.h
81349@@ -25,9 +25,41 @@
81350 #define MODULE_ARCH_VERMAGIC ""
81351 #endif
81352
81353+#ifdef CONFIG_PAX_REFCOUNT
81354+#define MODULE_PAX_REFCOUNT "REFCOUNT "
81355+#else
81356+#define MODULE_PAX_REFCOUNT ""
81357+#endif
81358+
81359+#ifdef CONSTIFY_PLUGIN
81360+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
81361+#else
81362+#define MODULE_CONSTIFY_PLUGIN ""
81363+#endif
81364+
81365+#ifdef STACKLEAK_PLUGIN
81366+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
81367+#else
81368+#define MODULE_STACKLEAK_PLUGIN ""
81369+#endif
81370+
81371+#ifdef RANDSTRUCT_PLUGIN
81372+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
81373+#else
81374+#define MODULE_RANDSTRUCT_PLUGIN
81375+#endif
81376+
81377+#ifdef CONFIG_GRKERNSEC
81378+#define MODULE_GRSEC "GRSEC "
81379+#else
81380+#define MODULE_GRSEC ""
81381+#endif
81382+
81383 #define VERMAGIC_STRING \
81384 UTS_RELEASE " " \
81385 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
81386 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
81387- MODULE_ARCH_VERMAGIC
81388+ MODULE_ARCH_VERMAGIC \
81389+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
81390+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
81391
81392diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
81393index 502073a..a7de024 100644
81394--- a/include/linux/vga_switcheroo.h
81395+++ b/include/linux/vga_switcheroo.h
81396@@ -63,8 +63,8 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
81397
81398 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
81399
81400-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
81401-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
81402+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
81403+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
81404 #else
81405
81406 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
81407@@ -81,8 +81,8 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
81408
81409 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
81410
81411-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
81412-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
81413+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
81414+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
81415
81416 #endif
81417 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
81418diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
81419index 4b8a891..cb8df6e 100644
81420--- a/include/linux/vmalloc.h
81421+++ b/include/linux/vmalloc.h
81422@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
81423 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
81424 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
81425 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
81426+
81427+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
81428+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
81429+#endif
81430+
81431 /* bits [20..32] reserved for arch specific ioremap internals */
81432
81433 /*
81434@@ -142,7 +147,7 @@ extern void free_vm_area(struct vm_struct *area);
81435
81436 /* for /dev/kmem */
81437 extern long vread(char *buf, char *addr, unsigned long count);
81438-extern long vwrite(char *buf, char *addr, unsigned long count);
81439+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
81440
81441 /*
81442 * Internals. Dont't use..
81443diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
81444index e4b9480..5a5f65a 100644
81445--- a/include/linux/vmstat.h
81446+++ b/include/linux/vmstat.h
81447@@ -90,18 +90,18 @@ static inline void vm_events_fold_cpu(int cpu)
81448 /*
81449 * Zone based page accounting with per cpu differentials.
81450 */
81451-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
81452+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
81453
81454 static inline void zone_page_state_add(long x, struct zone *zone,
81455 enum zone_stat_item item)
81456 {
81457- atomic_long_add(x, &zone->vm_stat[item]);
81458- atomic_long_add(x, &vm_stat[item]);
81459+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
81460+ atomic_long_add_unchecked(x, &vm_stat[item]);
81461 }
81462
81463-static inline unsigned long global_page_state(enum zone_stat_item item)
81464+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
81465 {
81466- long x = atomic_long_read(&vm_stat[item]);
81467+ long x = atomic_long_read_unchecked(&vm_stat[item]);
81468 #ifdef CONFIG_SMP
81469 if (x < 0)
81470 x = 0;
81471@@ -109,10 +109,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
81472 return x;
81473 }
81474
81475-static inline unsigned long zone_page_state(struct zone *zone,
81476+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
81477 enum zone_stat_item item)
81478 {
81479- long x = atomic_long_read(&zone->vm_stat[item]);
81480+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
81481 #ifdef CONFIG_SMP
81482 if (x < 0)
81483 x = 0;
81484@@ -129,7 +129,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
81485 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
81486 enum zone_stat_item item)
81487 {
81488- long x = atomic_long_read(&zone->vm_stat[item]);
81489+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
81490
81491 #ifdef CONFIG_SMP
81492 int cpu;
81493@@ -220,8 +220,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
81494
81495 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
81496 {
81497- atomic_long_inc(&zone->vm_stat[item]);
81498- atomic_long_inc(&vm_stat[item]);
81499+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
81500+ atomic_long_inc_unchecked(&vm_stat[item]);
81501 }
81502
81503 static inline void __inc_zone_page_state(struct page *page,
81504@@ -232,8 +232,8 @@ static inline void __inc_zone_page_state(struct page *page,
81505
81506 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
81507 {
81508- atomic_long_dec(&zone->vm_stat[item]);
81509- atomic_long_dec(&vm_stat[item]);
81510+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
81511+ atomic_long_dec_unchecked(&vm_stat[item]);
81512 }
81513
81514 static inline void __dec_zone_page_state(struct page *page,
81515diff --git a/include/linux/xattr.h b/include/linux/xattr.h
81516index 91b0a68..0e9adf6 100644
81517--- a/include/linux/xattr.h
81518+++ b/include/linux/xattr.h
81519@@ -28,7 +28,7 @@ struct xattr_handler {
81520 size_t size, int handler_flags);
81521 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
81522 size_t size, int flags, int handler_flags);
81523-};
81524+} __do_const;
81525
81526 struct xattr {
81527 const char *name;
81528@@ -37,6 +37,9 @@ struct xattr {
81529 };
81530
81531 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
81532+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
81533+ssize_t pax_getxattr(struct dentry *, void *, size_t);
81534+#endif
81535 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
81536 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
81537 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
81538diff --git a/include/linux/zlib.h b/include/linux/zlib.h
81539index 9c5a6b4..09c9438 100644
81540--- a/include/linux/zlib.h
81541+++ b/include/linux/zlib.h
81542@@ -31,6 +31,7 @@
81543 #define _ZLIB_H
81544
81545 #include <linux/zconf.h>
81546+#include <linux/compiler.h>
81547
81548 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
81549 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
81550@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
81551
81552 /* basic functions */
81553
81554-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
81555+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
81556 /*
81557 Returns the number of bytes that needs to be allocated for a per-
81558 stream workspace with the specified parameters. A pointer to this
81559diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
81560index c768c9f..bdcaa5a 100644
81561--- a/include/media/v4l2-dev.h
81562+++ b/include/media/v4l2-dev.h
81563@@ -76,7 +76,7 @@ struct v4l2_file_operations {
81564 int (*mmap) (struct file *, struct vm_area_struct *);
81565 int (*open) (struct file *);
81566 int (*release) (struct file *);
81567-};
81568+} __do_const;
81569
81570 /*
81571 * Newer version of video_device, handled by videodev2.c
81572diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
81573index c9b1593..a572459 100644
81574--- a/include/media/v4l2-device.h
81575+++ b/include/media/v4l2-device.h
81576@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
81577 this function returns 0. If the name ends with a digit (e.g. cx18),
81578 then the name will be set to cx18-0 since cx180 looks really odd. */
81579 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
81580- atomic_t *instance);
81581+ atomic_unchecked_t *instance);
81582
81583 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
81584 Since the parent disappears this ensures that v4l2_dev doesn't have an
81585diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
81586index 9a36d92..0aafe2a 100644
81587--- a/include/net/9p/transport.h
81588+++ b/include/net/9p/transport.h
81589@@ -60,7 +60,7 @@ struct p9_trans_module {
81590 int (*cancel) (struct p9_client *, struct p9_req_t *req);
81591 int (*zc_request)(struct p9_client *, struct p9_req_t *,
81592 char *, char *, int , int, int, int);
81593-};
81594+} __do_const;
81595
81596 void v9fs_register_trans(struct p9_trans_module *m);
81597 void v9fs_unregister_trans(struct p9_trans_module *m);
81598diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
81599index c853b16d..37fccb7 100644
81600--- a/include/net/bluetooth/l2cap.h
81601+++ b/include/net/bluetooth/l2cap.h
81602@@ -557,7 +557,7 @@ struct l2cap_ops {
81603 long (*get_sndtimeo) (struct l2cap_chan *chan);
81604 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
81605 unsigned long len, int nb);
81606-};
81607+} __do_const;
81608
81609 struct l2cap_conn {
81610 struct hci_conn *hcon;
81611diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
81612index f2ae33d..c457cf0 100644
81613--- a/include/net/caif/cfctrl.h
81614+++ b/include/net/caif/cfctrl.h
81615@@ -52,7 +52,7 @@ struct cfctrl_rsp {
81616 void (*radioset_rsp)(void);
81617 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
81618 struct cflayer *client_layer);
81619-};
81620+} __no_const;
81621
81622 /* Link Setup Parameters for CAIF-Links. */
81623 struct cfctrl_link_param {
81624@@ -101,8 +101,8 @@ struct cfctrl_request_info {
81625 struct cfctrl {
81626 struct cfsrvl serv;
81627 struct cfctrl_rsp res;
81628- atomic_t req_seq_no;
81629- atomic_t rsp_seq_no;
81630+ atomic_unchecked_t req_seq_no;
81631+ atomic_unchecked_t rsp_seq_no;
81632 struct list_head list;
81633 /* Protects from simultaneous access to first_req list */
81634 spinlock_t info_list_lock;
81635diff --git a/include/net/flow.h b/include/net/flow.h
81636index 65ce471..b7bbe9b 100644
81637--- a/include/net/flow.h
81638+++ b/include/net/flow.h
81639@@ -222,6 +222,6 @@ struct flow_cache_object *flow_cache_lookup(struct net *net,
81640
81641 void flow_cache_flush(void);
81642 void flow_cache_flush_deferred(void);
81643-extern atomic_t flow_cache_genid;
81644+extern atomic_unchecked_t flow_cache_genid;
81645
81646 #endif
81647diff --git a/include/net/genetlink.h b/include/net/genetlink.h
81648index 1b177ed..a24a138 100644
81649--- a/include/net/genetlink.h
81650+++ b/include/net/genetlink.h
81651@@ -118,7 +118,7 @@ struct genl_ops {
81652 u8 cmd;
81653 u8 internal_flags;
81654 u8 flags;
81655-};
81656+} __do_const;
81657
81658 int __genl_register_family(struct genl_family *family);
81659
81660diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
81661index 734d9b5..48a9a4b 100644
81662--- a/include/net/gro_cells.h
81663+++ b/include/net/gro_cells.h
81664@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
81665 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
81666
81667 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
81668- atomic_long_inc(&dev->rx_dropped);
81669+ atomic_long_inc_unchecked(&dev->rx_dropped);
81670 kfree_skb(skb);
81671 return;
81672 }
81673diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
81674index c55aeed..b3393f4 100644
81675--- a/include/net/inet_connection_sock.h
81676+++ b/include/net/inet_connection_sock.h
81677@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
81678 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
81679 int (*bind_conflict)(const struct sock *sk,
81680 const struct inet_bind_bucket *tb, bool relax);
81681-};
81682+} __do_const;
81683
81684 /** inet_connection_sock - INET connection oriented sock
81685 *
81686diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
81687index f4e127a..c3d5e9c 100644
81688--- a/include/net/inetpeer.h
81689+++ b/include/net/inetpeer.h
81690@@ -47,8 +47,8 @@ struct inet_peer {
81691 */
81692 union {
81693 struct {
81694- atomic_t rid; /* Frag reception counter */
81695- atomic_t ip_id_count; /* IP ID for the next packet */
81696+ atomic_unchecked_t rid; /* Frag reception counter */
81697+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
81698 };
81699 struct rcu_head rcu;
81700 struct inet_peer *gc_next;
81701@@ -178,16 +178,13 @@ static inline void inet_peer_refcheck(const struct inet_peer *p)
81702 /* can be called with or without local BH being disabled */
81703 static inline int inet_getid(struct inet_peer *p, int more)
81704 {
81705- int old, new;
81706+ int id;
81707 more++;
81708 inet_peer_refcheck(p);
81709- do {
81710- old = atomic_read(&p->ip_id_count);
81711- new = old + more;
81712- if (!new)
81713- new = 1;
81714- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
81715- return new;
81716+ id = atomic_add_return_unchecked(more, &p->ip_id_count);
81717+ if (!id)
81718+ id = atomic_inc_return_unchecked(&p->ip_id_count);
81719+ return id;
81720 }
81721
81722 #endif /* _NET_INETPEER_H */
81723diff --git a/include/net/ip.h b/include/net/ip.h
81724index 5a25f36..2e73203 100644
81725--- a/include/net/ip.h
81726+++ b/include/net/ip.h
81727@@ -219,7 +219,7 @@ static inline void snmp_mib_free(void __percpu *ptr[SNMP_ARRAY_SZ])
81728
81729 void inet_get_local_port_range(struct net *net, int *low, int *high);
81730
81731-extern unsigned long *sysctl_local_reserved_ports;
81732+extern unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
81733 static inline int inet_is_reserved_local_port(int port)
81734 {
81735 return test_bit(port, sysctl_local_reserved_ports);
81736diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
81737index 9922093..a1755d6 100644
81738--- a/include/net/ip_fib.h
81739+++ b/include/net/ip_fib.h
81740@@ -169,7 +169,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
81741
81742 #define FIB_RES_SADDR(net, res) \
81743 ((FIB_RES_NH(res).nh_saddr_genid == \
81744- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
81745+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
81746 FIB_RES_NH(res).nh_saddr : \
81747 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
81748 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
81749diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
81750index 5679d92..2e7a690 100644
81751--- a/include/net/ip_vs.h
81752+++ b/include/net/ip_vs.h
81753@@ -558,7 +558,7 @@ struct ip_vs_conn {
81754 struct ip_vs_conn *control; /* Master control connection */
81755 atomic_t n_control; /* Number of controlled ones */
81756 struct ip_vs_dest *dest; /* real server */
81757- atomic_t in_pkts; /* incoming packet counter */
81758+ atomic_unchecked_t in_pkts; /* incoming packet counter */
81759
81760 /* packet transmitter for different forwarding methods. If it
81761 mangles the packet, it must return NF_DROP or better NF_STOLEN,
81762@@ -705,7 +705,7 @@ struct ip_vs_dest {
81763 __be16 port; /* port number of the server */
81764 union nf_inet_addr addr; /* IP address of the server */
81765 volatile unsigned int flags; /* dest status flags */
81766- atomic_t conn_flags; /* flags to copy to conn */
81767+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
81768 atomic_t weight; /* server weight */
81769
81770 atomic_t refcnt; /* reference counter */
81771@@ -960,11 +960,11 @@ struct netns_ipvs {
81772 /* ip_vs_lblc */
81773 int sysctl_lblc_expiration;
81774 struct ctl_table_header *lblc_ctl_header;
81775- struct ctl_table *lblc_ctl_table;
81776+ ctl_table_no_const *lblc_ctl_table;
81777 /* ip_vs_lblcr */
81778 int sysctl_lblcr_expiration;
81779 struct ctl_table_header *lblcr_ctl_header;
81780- struct ctl_table *lblcr_ctl_table;
81781+ ctl_table_no_const *lblcr_ctl_table;
81782 /* ip_vs_est */
81783 struct list_head est_list; /* estimator list */
81784 spinlock_t est_lock;
81785diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
81786index 0224402..dafaf94a 100644
81787--- a/include/net/irda/ircomm_tty.h
81788+++ b/include/net/irda/ircomm_tty.h
81789@@ -35,6 +35,7 @@
81790 #include <linux/termios.h>
81791 #include <linux/timer.h>
81792 #include <linux/tty.h> /* struct tty_struct */
81793+#include <asm/local.h>
81794
81795 #include <net/irda/irias_object.h>
81796 #include <net/irda/ircomm_core.h>
81797diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
81798index 714cc9a..ea05f3e 100644
81799--- a/include/net/iucv/af_iucv.h
81800+++ b/include/net/iucv/af_iucv.h
81801@@ -149,7 +149,7 @@ struct iucv_skb_cb {
81802 struct iucv_sock_list {
81803 struct hlist_head head;
81804 rwlock_t lock;
81805- atomic_t autobind_name;
81806+ atomic_unchecked_t autobind_name;
81807 };
81808
81809 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
81810diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
81811index f3be818..bf46196 100644
81812--- a/include/net/llc_c_ac.h
81813+++ b/include/net/llc_c_ac.h
81814@@ -87,7 +87,7 @@
81815 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
81816 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
81817
81818-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
81819+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
81820
81821 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
81822 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
81823diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
81824index 3948cf1..83b28c4 100644
81825--- a/include/net/llc_c_ev.h
81826+++ b/include/net/llc_c_ev.h
81827@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
81828 return (struct llc_conn_state_ev *)skb->cb;
81829 }
81830
81831-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
81832-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
81833+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
81834+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
81835
81836 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
81837 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
81838diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
81839index 0e79cfb..f46db31 100644
81840--- a/include/net/llc_c_st.h
81841+++ b/include/net/llc_c_st.h
81842@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
81843 u8 next_state;
81844 llc_conn_ev_qfyr_t *ev_qualifiers;
81845 llc_conn_action_t *ev_actions;
81846-};
81847+} __do_const;
81848
81849 struct llc_conn_state {
81850 u8 current_state;
81851diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
81852index a61b98c..aade1eb 100644
81853--- a/include/net/llc_s_ac.h
81854+++ b/include/net/llc_s_ac.h
81855@@ -23,7 +23,7 @@
81856 #define SAP_ACT_TEST_IND 9
81857
81858 /* All action functions must look like this */
81859-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
81860+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
81861
81862 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
81863 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
81864diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
81865index 567c681..cd73ac0 100644
81866--- a/include/net/llc_s_st.h
81867+++ b/include/net/llc_s_st.h
81868@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
81869 llc_sap_ev_t ev;
81870 u8 next_state;
81871 llc_sap_action_t *ev_actions;
81872-};
81873+} __do_const;
81874
81875 struct llc_sap_state {
81876 u8 curr_state;
81877diff --git a/include/net/mac80211.h b/include/net/mac80211.h
81878index 7ceed99..d3ffaa2 100644
81879--- a/include/net/mac80211.h
81880+++ b/include/net/mac80211.h
81881@@ -4407,7 +4407,7 @@ struct rate_control_ops {
81882 void (*add_sta_debugfs)(void *priv, void *priv_sta,
81883 struct dentry *dir);
81884 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
81885-};
81886+} __do_const;
81887
81888 static inline int rate_supported(struct ieee80211_sta *sta,
81889 enum ieee80211_band band,
81890diff --git a/include/net/neighbour.h b/include/net/neighbour.h
81891index 536501a..74ad02bc 100644
81892--- a/include/net/neighbour.h
81893+++ b/include/net/neighbour.h
81894@@ -123,7 +123,7 @@ struct neigh_ops {
81895 void (*error_report)(struct neighbour *, struct sk_buff *);
81896 int (*output)(struct neighbour *, struct sk_buff *);
81897 int (*connected_output)(struct neighbour *, struct sk_buff *);
81898-};
81899+} __do_const;
81900
81901 struct pneigh_entry {
81902 struct pneigh_entry *next;
81903@@ -178,7 +178,7 @@ struct neigh_table {
81904 struct neigh_statistics __percpu *stats;
81905 struct neigh_hash_table __rcu *nht;
81906 struct pneigh_entry **phash_buckets;
81907-};
81908+} __randomize_layout;
81909
81910 #define NEIGH_PRIV_ALIGN sizeof(long long)
81911 #define NEIGH_ENTRY_SIZE(size) ALIGN((size), NEIGH_PRIV_ALIGN)
81912diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
81913index da68c9a..c4a0720 100644
81914--- a/include/net/net_namespace.h
81915+++ b/include/net/net_namespace.h
81916@@ -124,8 +124,8 @@ struct net {
81917 struct netns_ipvs *ipvs;
81918 #endif
81919 struct sock *diag_nlsk;
81920- atomic_t fnhe_genid;
81921-};
81922+ atomic_unchecked_t fnhe_genid;
81923+} __randomize_layout;
81924
81925 /*
81926 * ifindex generation is per-net namespace, and loopback is
81927@@ -281,7 +281,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
81928 #define __net_init __init
81929 #define __net_exit __exit_refok
81930 #define __net_initdata __initdata
81931+#ifdef CONSTIFY_PLUGIN
81932 #define __net_initconst __initconst
81933+#else
81934+#define __net_initconst __initdata
81935+#endif
81936 #endif
81937
81938 struct pernet_operations {
81939@@ -291,7 +295,7 @@ struct pernet_operations {
81940 void (*exit_batch)(struct list_head *net_exit_list);
81941 int *id;
81942 size_t size;
81943-};
81944+} __do_const;
81945
81946 /*
81947 * Use these carefully. If you implement a network device and it
81948@@ -339,23 +343,23 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
81949
81950 static inline int rt_genid_ipv4(struct net *net)
81951 {
81952- return atomic_read(&net->ipv4.rt_genid);
81953+ return atomic_read_unchecked(&net->ipv4.rt_genid);
81954 }
81955
81956 static inline void rt_genid_bump_ipv4(struct net *net)
81957 {
81958- atomic_inc(&net->ipv4.rt_genid);
81959+ atomic_inc_unchecked(&net->ipv4.rt_genid);
81960 }
81961
81962 #if IS_ENABLED(CONFIG_IPV6)
81963 static inline int rt_genid_ipv6(struct net *net)
81964 {
81965- return atomic_read(&net->ipv6.rt_genid);
81966+ return atomic_read_unchecked(&net->ipv6.rt_genid);
81967 }
81968
81969 static inline void rt_genid_bump_ipv6(struct net *net)
81970 {
81971- atomic_inc(&net->ipv6.rt_genid);
81972+ atomic_inc_unchecked(&net->ipv6.rt_genid);
81973 }
81974 #else
81975 static inline int rt_genid_ipv6(struct net *net)
81976@@ -377,12 +381,12 @@ static inline void rt_genid_bump_all(struct net *net)
81977
81978 static inline int fnhe_genid(struct net *net)
81979 {
81980- return atomic_read(&net->fnhe_genid);
81981+ return atomic_read_unchecked(&net->fnhe_genid);
81982 }
81983
81984 static inline void fnhe_genid_bump(struct net *net)
81985 {
81986- atomic_inc(&net->fnhe_genid);
81987+ atomic_inc_unchecked(&net->fnhe_genid);
81988 }
81989
81990 #endif /* __NET_NET_NAMESPACE_H */
81991diff --git a/include/net/netdma.h b/include/net/netdma.h
81992index 8ba8ce2..99b7fff 100644
81993--- a/include/net/netdma.h
81994+++ b/include/net/netdma.h
81995@@ -24,7 +24,7 @@
81996 #include <linux/dmaengine.h>
81997 #include <linux/skbuff.h>
81998
81999-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
82000+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
82001 struct sk_buff *skb, int offset, struct iovec *to,
82002 size_t len, struct dma_pinned_list *pinned_list);
82003
82004diff --git a/include/net/netlink.h b/include/net/netlink.h
82005index 2b47eaa..6d5bcc2 100644
82006--- a/include/net/netlink.h
82007+++ b/include/net/netlink.h
82008@@ -521,7 +521,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
82009 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
82010 {
82011 if (mark)
82012- skb_trim(skb, (unsigned char *) mark - skb->data);
82013+ skb_trim(skb, (const unsigned char *) mark - skb->data);
82014 }
82015
82016 /**
82017diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
82018index c9c0c53..53f24c3 100644
82019--- a/include/net/netns/conntrack.h
82020+++ b/include/net/netns/conntrack.h
82021@@ -12,10 +12,10 @@ struct nf_conntrack_ecache;
82022 struct nf_proto_net {
82023 #ifdef CONFIG_SYSCTL
82024 struct ctl_table_header *ctl_table_header;
82025- struct ctl_table *ctl_table;
82026+ ctl_table_no_const *ctl_table;
82027 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
82028 struct ctl_table_header *ctl_compat_header;
82029- struct ctl_table *ctl_compat_table;
82030+ ctl_table_no_const *ctl_compat_table;
82031 #endif
82032 #endif
82033 unsigned int users;
82034@@ -58,7 +58,7 @@ struct nf_ip_net {
82035 struct nf_icmp_net icmpv6;
82036 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
82037 struct ctl_table_header *ctl_table_header;
82038- struct ctl_table *ctl_table;
82039+ ctl_table_no_const *ctl_table;
82040 #endif
82041 };
82042
82043diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
82044index ee520cb..9a0fd88 100644
82045--- a/include/net/netns/ipv4.h
82046+++ b/include/net/netns/ipv4.h
82047@@ -72,7 +72,7 @@ struct netns_ipv4 {
82048
82049 kgid_t sysctl_ping_group_range[2];
82050
82051- atomic_t dev_addr_genid;
82052+ atomic_unchecked_t dev_addr_genid;
82053
82054 #ifdef CONFIG_IP_MROUTE
82055 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
82056@@ -82,6 +82,6 @@ struct netns_ipv4 {
82057 struct fib_rules_ops *mr_rules_ops;
82058 #endif
82059 #endif
82060- atomic_t rt_genid;
82061+ atomic_unchecked_t rt_genid;
82062 };
82063 #endif
82064diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
82065index 0fb2401..477d81c 100644
82066--- a/include/net/netns/ipv6.h
82067+++ b/include/net/netns/ipv6.h
82068@@ -71,8 +71,8 @@ struct netns_ipv6 {
82069 struct fib_rules_ops *mr6_rules_ops;
82070 #endif
82071 #endif
82072- atomic_t dev_addr_genid;
82073- atomic_t rt_genid;
82074+ atomic_unchecked_t dev_addr_genid;
82075+ atomic_unchecked_t rt_genid;
82076 };
82077
82078 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
82079diff --git a/include/net/ping.h b/include/net/ping.h
82080index 90f4841..74446a8 100644
82081--- a/include/net/ping.h
82082+++ b/include/net/ping.h
82083@@ -56,7 +56,7 @@ struct ping_iter_state {
82084 extern struct proto ping_prot;
82085 extern struct ping_table ping_table;
82086 #if IS_ENABLED(CONFIG_IPV6)
82087-extern struct pingv6_ops pingv6_ops;
82088+extern struct pingv6_ops *pingv6_ops;
82089 #endif
82090
82091 struct pingfakehdr {
82092diff --git a/include/net/protocol.h b/include/net/protocol.h
82093index fbf7676..a5e21c3 100644
82094--- a/include/net/protocol.h
82095+++ b/include/net/protocol.h
82096@@ -44,7 +44,7 @@ struct net_protocol {
82097 void (*err_handler)(struct sk_buff *skb, u32 info);
82098 unsigned int no_policy:1,
82099 netns_ok:1;
82100-};
82101+} __do_const;
82102
82103 #if IS_ENABLED(CONFIG_IPV6)
82104 struct inet6_protocol {
82105@@ -57,7 +57,7 @@ struct inet6_protocol {
82106 u8 type, u8 code, int offset,
82107 __be32 info);
82108 unsigned int flags; /* INET6_PROTO_xxx */
82109-};
82110+} __do_const;
82111
82112 #define INET6_PROTO_NOPOLICY 0x1
82113 #define INET6_PROTO_FINAL 0x2
82114diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
82115index bb13a18..e734116 100644
82116--- a/include/net/rtnetlink.h
82117+++ b/include/net/rtnetlink.h
82118@@ -79,7 +79,7 @@ struct rtnl_link_ops {
82119 const struct net_device *dev);
82120 unsigned int (*get_num_tx_queues)(void);
82121 unsigned int (*get_num_rx_queues)(void);
82122-};
82123+} __do_const;
82124
82125 int __rtnl_link_register(struct rtnl_link_ops *ops);
82126 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
82127diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
82128index 6bd44fe..96f364e 100644
82129--- a/include/net/sctp/checksum.h
82130+++ b/include/net/sctp/checksum.h
82131@@ -62,8 +62,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
82132 unsigned int offset)
82133 {
82134 struct sctphdr *sh = sctp_hdr(skb);
82135- __le32 ret, old = sh->checksum;
82136- const struct skb_checksum_ops ops = {
82137+ __le32 ret, old = sh->checksum;
82138+ static const struct skb_checksum_ops ops = {
82139 .update = sctp_csum_update,
82140 .combine = sctp_csum_combine,
82141 };
82142diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
82143index 4ef75af..5aa073a 100644
82144--- a/include/net/sctp/sm.h
82145+++ b/include/net/sctp/sm.h
82146@@ -81,7 +81,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
82147 typedef struct {
82148 sctp_state_fn_t *fn;
82149 const char *name;
82150-} sctp_sm_table_entry_t;
82151+} __do_const sctp_sm_table_entry_t;
82152
82153 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
82154 * currently in use.
82155@@ -293,7 +293,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
82156 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
82157
82158 /* Extern declarations for major data structures. */
82159-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
82160+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
82161
82162
82163 /* Get the size of a DATA chunk payload. */
82164diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
82165index 0a248b3..4dcbe5c 100644
82166--- a/include/net/sctp/structs.h
82167+++ b/include/net/sctp/structs.h
82168@@ -508,7 +508,7 @@ struct sctp_pf {
82169 struct sctp_association *asoc);
82170 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
82171 struct sctp_af *af;
82172-};
82173+} __do_const;
82174
82175
82176 /* Structure to track chunk fragments that have been acked, but peer
82177diff --git a/include/net/sock.h b/include/net/sock.h
82178index 2ef3c3e..e02013e 100644
82179--- a/include/net/sock.h
82180+++ b/include/net/sock.h
82181@@ -348,7 +348,7 @@ struct sock {
82182 unsigned int sk_napi_id;
82183 unsigned int sk_ll_usec;
82184 #endif
82185- atomic_t sk_drops;
82186+ atomic_unchecked_t sk_drops;
82187 int sk_rcvbuf;
82188
82189 struct sk_filter __rcu *sk_filter;
82190@@ -1209,7 +1209,7 @@ static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
82191 return ret >> PAGE_SHIFT;
82192 }
82193
82194-static inline long
82195+static inline long __intentional_overflow(-1)
82196 sk_memory_allocated(const struct sock *sk)
82197 {
82198 struct proto *prot = sk->sk_prot;
82199@@ -1813,7 +1813,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
82200 }
82201
82202 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
82203- char __user *from, char *to,
82204+ char __user *from, unsigned char *to,
82205 int copy, int offset)
82206 {
82207 if (skb->ip_summed == CHECKSUM_NONE) {
82208@@ -2075,7 +2075,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
82209 }
82210 }
82211
82212-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
82213+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
82214
82215 /**
82216 * sk_page_frag - return an appropriate page_frag
82217diff --git a/include/net/tcp.h b/include/net/tcp.h
82218index 70e55d2..c5d8d53 100644
82219--- a/include/net/tcp.h
82220+++ b/include/net/tcp.h
82221@@ -540,7 +540,7 @@ void tcp_retransmit_timer(struct sock *sk);
82222 void tcp_xmit_retransmit_queue(struct sock *);
82223 void tcp_simple_retransmit(struct sock *);
82224 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
82225-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
82226+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
82227
82228 void tcp_send_probe0(struct sock *);
82229 void tcp_send_partial(struct sock *);
82230@@ -711,8 +711,8 @@ struct tcp_skb_cb {
82231 struct inet6_skb_parm h6;
82232 #endif
82233 } header; /* For incoming frames */
82234- __u32 seq; /* Starting sequence number */
82235- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
82236+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
82237+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
82238 __u32 when; /* used to compute rtt's */
82239 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
82240
82241@@ -726,7 +726,7 @@ struct tcp_skb_cb {
82242
82243 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
82244 /* 1 byte hole */
82245- __u32 ack_seq; /* Sequence number ACK'd */
82246+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
82247 };
82248
82249 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
82250diff --git a/include/net/xfrm.h b/include/net/xfrm.h
82251index 6b82fdf..14d74d2 100644
82252--- a/include/net/xfrm.h
82253+++ b/include/net/xfrm.h
82254@@ -287,7 +287,6 @@ struct xfrm_dst;
82255 struct xfrm_policy_afinfo {
82256 unsigned short family;
82257 struct dst_ops *dst_ops;
82258- void (*garbage_collect)(struct net *net);
82259 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
82260 const xfrm_address_t *saddr,
82261 const xfrm_address_t *daddr);
82262@@ -305,7 +304,7 @@ struct xfrm_policy_afinfo {
82263 struct net_device *dev,
82264 const struct flowi *fl);
82265 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
82266-};
82267+} __do_const;
82268
82269 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
82270 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
82271@@ -344,7 +343,7 @@ struct xfrm_state_afinfo {
82272 int (*transport_finish)(struct sk_buff *skb,
82273 int async);
82274 void (*local_error)(struct sk_buff *skb, u32 mtu);
82275-};
82276+} __do_const;
82277
82278 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
82279 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
82280@@ -429,7 +428,7 @@ struct xfrm_mode {
82281 struct module *owner;
82282 unsigned int encap;
82283 int flags;
82284-};
82285+} __do_const;
82286
82287 /* Flags for xfrm_mode. */
82288 enum {
82289@@ -526,7 +525,7 @@ struct xfrm_policy {
82290 struct timer_list timer;
82291
82292 struct flow_cache_object flo;
82293- atomic_t genid;
82294+ atomic_unchecked_t genid;
82295 u32 priority;
82296 u32 index;
82297 struct xfrm_mark mark;
82298@@ -1166,6 +1165,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
82299 }
82300
82301 void xfrm_garbage_collect(struct net *net);
82302+void xfrm_garbage_collect_deferred(struct net *net);
82303
82304 #else
82305
82306@@ -1204,6 +1204,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
82307 static inline void xfrm_garbage_collect(struct net *net)
82308 {
82309 }
82310+static inline void xfrm_garbage_collect_deferred(struct net *net)
82311+{
82312+}
82313 #endif
82314
82315 static __inline__
82316diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
82317index 1017e0b..227aa4d 100644
82318--- a/include/rdma/iw_cm.h
82319+++ b/include/rdma/iw_cm.h
82320@@ -122,7 +122,7 @@ struct iw_cm_verbs {
82321 int backlog);
82322
82323 int (*destroy_listen)(struct iw_cm_id *cm_id);
82324-};
82325+} __no_const;
82326
82327 /**
82328 * iw_create_cm_id - Create an IW CM identifier.
82329diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
82330index 52beadf..598734c 100644
82331--- a/include/scsi/libfc.h
82332+++ b/include/scsi/libfc.h
82333@@ -771,6 +771,7 @@ struct libfc_function_template {
82334 */
82335 void (*disc_stop_final) (struct fc_lport *);
82336 };
82337+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
82338
82339 /**
82340 * struct fc_disc - Discovery context
82341@@ -875,7 +876,7 @@ struct fc_lport {
82342 struct fc_vport *vport;
82343
82344 /* Operational Information */
82345- struct libfc_function_template tt;
82346+ libfc_function_template_no_const tt;
82347 u8 link_up;
82348 u8 qfull;
82349 enum fc_lport_state state;
82350diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
82351index d65fbec..f80fef2 100644
82352--- a/include/scsi/scsi_device.h
82353+++ b/include/scsi/scsi_device.h
82354@@ -180,9 +180,9 @@ struct scsi_device {
82355 unsigned int max_device_blocked; /* what device_blocked counts down from */
82356 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
82357
82358- atomic_t iorequest_cnt;
82359- atomic_t iodone_cnt;
82360- atomic_t ioerr_cnt;
82361+ atomic_unchecked_t iorequest_cnt;
82362+ atomic_unchecked_t iodone_cnt;
82363+ atomic_unchecked_t ioerr_cnt;
82364
82365 struct device sdev_gendev,
82366 sdev_dev;
82367diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
82368index b797e8f..8e2c3aa 100644
82369--- a/include/scsi/scsi_transport_fc.h
82370+++ b/include/scsi/scsi_transport_fc.h
82371@@ -751,7 +751,8 @@ struct fc_function_template {
82372 unsigned long show_host_system_hostname:1;
82373
82374 unsigned long disable_target_scan:1;
82375-};
82376+} __do_const;
82377+typedef struct fc_function_template __no_const fc_function_template_no_const;
82378
82379
82380 /**
82381diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
82382index ae6c3b8..fd748ac 100644
82383--- a/include/sound/compress_driver.h
82384+++ b/include/sound/compress_driver.h
82385@@ -128,7 +128,7 @@ struct snd_compr_ops {
82386 struct snd_compr_caps *caps);
82387 int (*get_codec_caps) (struct snd_compr_stream *stream,
82388 struct snd_compr_codec_caps *codec);
82389-};
82390+} __no_const;
82391
82392 /**
82393 * struct snd_compr: Compressed device
82394diff --git a/include/sound/soc.h b/include/sound/soc.h
82395index 1f741cb..8cefc08 100644
82396--- a/include/sound/soc.h
82397+++ b/include/sound/soc.h
82398@@ -763,7 +763,7 @@ struct snd_soc_codec_driver {
82399 /* probe ordering - for components with runtime dependencies */
82400 int probe_order;
82401 int remove_order;
82402-};
82403+} __do_const;
82404
82405 /* SoC platform interface */
82406 struct snd_soc_platform_driver {
82407@@ -809,7 +809,7 @@ struct snd_soc_platform_driver {
82408 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
82409 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
82410 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
82411-};
82412+} __do_const;
82413
82414 struct snd_soc_platform {
82415 const char *name;
82416diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
82417index 321301c..2ae5cb0 100644
82418--- a/include/target/target_core_base.h
82419+++ b/include/target/target_core_base.h
82420@@ -687,7 +687,7 @@ struct se_device {
82421 atomic_long_t write_bytes;
82422 /* Active commands on this virtual SE device */
82423 atomic_t simple_cmds;
82424- atomic_t dev_ordered_id;
82425+ atomic_unchecked_t dev_ordered_id;
82426 atomic_t dev_ordered_sync;
82427 atomic_t dev_qf_count;
82428 int export_count;
82429diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
82430new file mode 100644
82431index 0000000..fb634b7
82432--- /dev/null
82433+++ b/include/trace/events/fs.h
82434@@ -0,0 +1,53 @@
82435+#undef TRACE_SYSTEM
82436+#define TRACE_SYSTEM fs
82437+
82438+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
82439+#define _TRACE_FS_H
82440+
82441+#include <linux/fs.h>
82442+#include <linux/tracepoint.h>
82443+
82444+TRACE_EVENT(do_sys_open,
82445+
82446+ TP_PROTO(const char *filename, int flags, int mode),
82447+
82448+ TP_ARGS(filename, flags, mode),
82449+
82450+ TP_STRUCT__entry(
82451+ __string( filename, filename )
82452+ __field( int, flags )
82453+ __field( int, mode )
82454+ ),
82455+
82456+ TP_fast_assign(
82457+ __assign_str(filename, filename);
82458+ __entry->flags = flags;
82459+ __entry->mode = mode;
82460+ ),
82461+
82462+ TP_printk("\"%s\" %x %o",
82463+ __get_str(filename), __entry->flags, __entry->mode)
82464+);
82465+
82466+TRACE_EVENT(open_exec,
82467+
82468+ TP_PROTO(const char *filename),
82469+
82470+ TP_ARGS(filename),
82471+
82472+ TP_STRUCT__entry(
82473+ __string( filename, filename )
82474+ ),
82475+
82476+ TP_fast_assign(
82477+ __assign_str(filename, filename);
82478+ ),
82479+
82480+ TP_printk("\"%s\"",
82481+ __get_str(filename))
82482+);
82483+
82484+#endif /* _TRACE_FS_H */
82485+
82486+/* This part must be outside protection */
82487+#include <trace/define_trace.h>
82488diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
82489index 1c09820..7f5ec79 100644
82490--- a/include/trace/events/irq.h
82491+++ b/include/trace/events/irq.h
82492@@ -36,7 +36,7 @@ struct softirq_action;
82493 */
82494 TRACE_EVENT(irq_handler_entry,
82495
82496- TP_PROTO(int irq, struct irqaction *action),
82497+ TP_PROTO(int irq, const struct irqaction *action),
82498
82499 TP_ARGS(irq, action),
82500
82501@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
82502 */
82503 TRACE_EVENT(irq_handler_exit,
82504
82505- TP_PROTO(int irq, struct irqaction *action, int ret),
82506+ TP_PROTO(int irq, const struct irqaction *action, int ret),
82507
82508 TP_ARGS(irq, action, ret),
82509
82510diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
82511index 7caf44c..23c6f27 100644
82512--- a/include/uapi/linux/a.out.h
82513+++ b/include/uapi/linux/a.out.h
82514@@ -39,6 +39,14 @@ enum machine_type {
82515 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
82516 };
82517
82518+/* Constants for the N_FLAGS field */
82519+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
82520+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
82521+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
82522+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
82523+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
82524+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
82525+
82526 #if !defined (N_MAGIC)
82527 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
82528 #endif
82529diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
82530index d876736..ccce5c0 100644
82531--- a/include/uapi/linux/byteorder/little_endian.h
82532+++ b/include/uapi/linux/byteorder/little_endian.h
82533@@ -42,51 +42,51 @@
82534
82535 static inline __le64 __cpu_to_le64p(const __u64 *p)
82536 {
82537- return (__force __le64)*p;
82538+ return (__force const __le64)*p;
82539 }
82540-static inline __u64 __le64_to_cpup(const __le64 *p)
82541+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
82542 {
82543- return (__force __u64)*p;
82544+ return (__force const __u64)*p;
82545 }
82546 static inline __le32 __cpu_to_le32p(const __u32 *p)
82547 {
82548- return (__force __le32)*p;
82549+ return (__force const __le32)*p;
82550 }
82551 static inline __u32 __le32_to_cpup(const __le32 *p)
82552 {
82553- return (__force __u32)*p;
82554+ return (__force const __u32)*p;
82555 }
82556 static inline __le16 __cpu_to_le16p(const __u16 *p)
82557 {
82558- return (__force __le16)*p;
82559+ return (__force const __le16)*p;
82560 }
82561 static inline __u16 __le16_to_cpup(const __le16 *p)
82562 {
82563- return (__force __u16)*p;
82564+ return (__force const __u16)*p;
82565 }
82566 static inline __be64 __cpu_to_be64p(const __u64 *p)
82567 {
82568- return (__force __be64)__swab64p(p);
82569+ return (__force const __be64)__swab64p(p);
82570 }
82571 static inline __u64 __be64_to_cpup(const __be64 *p)
82572 {
82573- return __swab64p((__u64 *)p);
82574+ return __swab64p((const __u64 *)p);
82575 }
82576 static inline __be32 __cpu_to_be32p(const __u32 *p)
82577 {
82578- return (__force __be32)__swab32p(p);
82579+ return (__force const __be32)__swab32p(p);
82580 }
82581-static inline __u32 __be32_to_cpup(const __be32 *p)
82582+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
82583 {
82584- return __swab32p((__u32 *)p);
82585+ return __swab32p((const __u32 *)p);
82586 }
82587 static inline __be16 __cpu_to_be16p(const __u16 *p)
82588 {
82589- return (__force __be16)__swab16p(p);
82590+ return (__force const __be16)__swab16p(p);
82591 }
82592 static inline __u16 __be16_to_cpup(const __be16 *p)
82593 {
82594- return __swab16p((__u16 *)p);
82595+ return __swab16p((const __u16 *)p);
82596 }
82597 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
82598 #define __le64_to_cpus(x) do { (void)(x); } while (0)
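
__intentional_overflow(-1) is consumed by the PaX size_overflow GCC plugin: these byte-order loads deliberately reinterpret values in ways the plugin's integer-overflow model would otherwise instrument, so the functions are whitelisted at their definition. Outside a plugin build the attribute must expand to nothing; a sketch of that fallback (the empty expansion is an assumption, in the style of the kernel's other conditional attributes):

    /* Sketch: the annotation vanishes when the plugin is absent. */
    #ifndef __intentional_overflow
    #define __intentional_overflow(...)
    #endif

    static inline unsigned short __intentional_overflow(-1)
    swab16_sketch(const unsigned short *p)
    {
            /* the wrap/reinterpretation here is by design, not a bug */
            return (unsigned short)((*p << 8) | (*p >> 8));
    }
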
82599diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
82600index ef6103b..d4e65dd 100644
82601--- a/include/uapi/linux/elf.h
82602+++ b/include/uapi/linux/elf.h
82603@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
82604 #define PT_GNU_EH_FRAME 0x6474e550
82605
82606 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
82607+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
82608+
82609+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
82610+
82611+/* Constants for the e_flags field */
82612+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
82613+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
82614+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
82615+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
82616+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
82617+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
82618
82619 /*
82620 * Extended Numbering
82621@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
82622 #define DT_DEBUG 21
82623 #define DT_TEXTREL 22
82624 #define DT_JMPREL 23
82625+#define DT_FLAGS 30
82626+ #define DF_TEXTREL 0x00000004
82627 #define DT_ENCODING 32
82628 #define OLD_DT_LOOS 0x60000000
82629 #define DT_LOOS 0x6000000d
82630@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
82631 #define PF_W 0x2
82632 #define PF_X 0x1
82633
82634+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
82635+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
82636+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
82637+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
82638+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
82639+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
82640+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
82641+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
82642+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
82643+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
82644+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
82645+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
82646+
82647 typedef struct elf32_phdr{
82648 Elf32_Word p_type;
82649 Elf32_Off p_offset;
82650@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
82651 #define EI_OSABI 7
82652 #define EI_PAD 8
82653
82654+#define EI_PAX 14
82655+
82656 #define ELFMAG0 0x7f /* EI_MAG */
82657 #define ELFMAG1 'E'
82658 #define ELFMAG2 'L'
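
With PT_PAX_FLAGS, the per-binary policy rides in a dedicated program header, and every feature gets an explicit enable and disable bit so that "neither set" can fall through to the system default. A small userspace sketch that locates the header in a 64-bit ELF and reports one flag (minimal error handling; the constants repeat the definitions from the hunk above):

    /* Sketch: read PT_PAX_FLAGS from a 64-bit ELF file. */
    #include <elf.h>
    #include <stdio.h>

    #define PT_PAX_FLAGS  (PT_LOOS + 0x5041580)
    #define PF_MPROTECT   (1U << 8)
    #define PF_NOMPROTECT (1U << 9)

    int main(int argc, char **argv)
    {
            Elf64_Ehdr eh;
            Elf64_Phdr ph;
            FILE *f;
            int i;

            if (argc < 2 || !(f = fopen(argv[1], "rb")))
                    return 1;
            if (fread(&eh, sizeof(eh), 1, f) != 1)
                    return 1;
            for (i = 0; i < eh.e_phnum; i++) {
                    fseek(f, eh.e_phoff + i * sizeof(ph), SEEK_SET);
                    if (fread(&ph, sizeof(ph), 1, f) != 1)
                            break;
                    if (ph.p_type != PT_PAX_FLAGS)
                            continue;
                    printf("MPROTECT: %s\n",
                           ph.p_flags & PF_MPROTECT   ? "forced on" :
                           ph.p_flags & PF_NOMPROTECT ? "forced off" :
                                                        "default");
            }
            fclose(f);
            return 0;
    }
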
82659diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
82660index aa169c4..6a2771d 100644
82661--- a/include/uapi/linux/personality.h
82662+++ b/include/uapi/linux/personality.h
82663@@ -30,6 +30,7 @@ enum {
82664 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
82665 ADDR_NO_RANDOMIZE | \
82666 ADDR_COMPAT_LAYOUT | \
82667+ ADDR_LIMIT_3GB | \
82668 MMAP_PAGE_ZERO)
82669
82670 /*
82671diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
82672index 7530e74..e714828 100644
82673--- a/include/uapi/linux/screen_info.h
82674+++ b/include/uapi/linux/screen_info.h
82675@@ -43,7 +43,8 @@ struct screen_info {
82676 __u16 pages; /* 0x32 */
82677 __u16 vesa_attributes; /* 0x34 */
82678 __u32 capabilities; /* 0x36 */
82679- __u8 _reserved[6]; /* 0x3a */
82680+ __u16 vesapm_size; /* 0x3a */
82681+ __u8 _reserved[4]; /* 0x3c */
82682 } __attribute__((packed));
82683
82684 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
82685diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
82686index 0e011eb..82681b1 100644
82687--- a/include/uapi/linux/swab.h
82688+++ b/include/uapi/linux/swab.h
82689@@ -43,7 +43,7 @@
82690 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
82691 */
82692
82693-static inline __attribute_const__ __u16 __fswab16(__u16 val)
82694+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
82695 {
82696 #ifdef __HAVE_BUILTIN_BSWAP16__
82697 return __builtin_bswap16(val);
82698@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
82699 #endif
82700 }
82701
82702-static inline __attribute_const__ __u32 __fswab32(__u32 val)
82703+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
82704 {
82705 #ifdef __HAVE_BUILTIN_BSWAP32__
82706 return __builtin_bswap32(val);
82707@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
82708 #endif
82709 }
82710
82711-static inline __attribute_const__ __u64 __fswab64(__u64 val)
82712+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
82713 {
82714 #ifdef __HAVE_BUILTIN_BSWAP64__
82715 return __builtin_bswap64(val);
82716diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
82717index 6d67213..552fdd9 100644
82718--- a/include/uapi/linux/sysctl.h
82719+++ b/include/uapi/linux/sysctl.h
82720@@ -155,8 +155,6 @@ enum
82721 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
82722 };
82723
82724-
82725-
82726 /* CTL_VM names: */
82727 enum
82728 {
82729diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
82730index 437f1b0..0eeb38d 100644
82731--- a/include/uapi/linux/videodev2.h
82732+++ b/include/uapi/linux/videodev2.h
82733@@ -1227,7 +1227,7 @@ struct v4l2_ext_control {
82734 union {
82735 __s32 value;
82736 __s64 value64;
82737- char *string;
82738+ char __user *string;
82739 };
82740 } __attribute__ ((packed));
82741
82742diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
82743index e4629b9..6958086 100644
82744--- a/include/uapi/linux/xattr.h
82745+++ b/include/uapi/linux/xattr.h
82746@@ -63,5 +63,9 @@
82747 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
82748 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
82749
82750+/* User namespace */
82751+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
82752+#define XATTR_PAX_FLAGS_SUFFIX "flags"
82753+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
82754
82755 #endif /* _UAPI_LINUX_XATTR_H */
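
Because user.pax.flags lives in the user xattr namespace, the marking works on any xattr-capable filesystem and survives tools that strip ELF program headers. A minimal userspace sketch of writing the attribute with setxattr(2); the one-letter flag string shown follows the usual PaX tooling convention and should be treated as an assumption here:

    /* Sketch: set the PaX flags xattr on a binary from userspace. */
    #include <stdio.h>
    #include <string.h>
    #include <sys/xattr.h>

    int main(int argc, char **argv)
    {
            const char *flags = "m";    /* e.g. disable MPROTECT */

            if (argc < 2)
                    return 1;
            if (setxattr(argv[1], "user.pax.flags",
                         flags, strlen(flags), 0) != 0) {
                    perror("setxattr");
                    return 1;
            }
            return 0;
    }
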
82756diff --git a/include/video/udlfb.h b/include/video/udlfb.h
82757index f9466fa..f4e2b81 100644
82758--- a/include/video/udlfb.h
82759+++ b/include/video/udlfb.h
82760@@ -53,10 +53,10 @@ struct dlfb_data {
82761 u32 pseudo_palette[256];
82762 int blank_mode; /*one of FB_BLANK_ */
82763 /* blit-only rendering path metrics, exposed through sysfs */
82764- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
82765- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
82766- atomic_t bytes_sent; /* to usb, after compression including overhead */
82767- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
82768+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
82769+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
82770+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
82771+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
82772 };
82773
82774 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
82775diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
82776index 30f5362..8ed8ac9 100644
82777--- a/include/video/uvesafb.h
82778+++ b/include/video/uvesafb.h
82779@@ -122,6 +122,7 @@ struct uvesafb_par {
82780 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
82781 u8 pmi_setpal; /* PMI for palette changes */
82782 u16 *pmi_base; /* protected mode interface location */
82783+ u8 *pmi_code; /* protected mode code location */
82784 void *pmi_start;
82785 void *pmi_pal;
82786 u8 *vbe_state_orig; /*
82787diff --git a/init/Kconfig b/init/Kconfig
82788index 4e5d96a..93cd8a1 100644
82789--- a/init/Kconfig
82790+++ b/init/Kconfig
82791@@ -1079,6 +1079,7 @@ endif # CGROUPS
82792
82793 config CHECKPOINT_RESTORE
82794 bool "Checkpoint/restore support" if EXPERT
82795+ depends on !GRKERNSEC
82796 default n
82797 help
82798 Enables additional kernel features in a sake of checkpoint/restore.
82799@@ -1550,7 +1551,7 @@ config SLUB_DEBUG
82800
82801 config COMPAT_BRK
82802 bool "Disable heap randomization"
82803- default y
82804+ default n
82805 help
82806 Randomizing heap placement makes heap exploits harder, but it
82807 also breaks ancient binaries (including anything libc5 based).
82808@@ -1838,7 +1839,7 @@ config INIT_ALL_POSSIBLE
82809 config STOP_MACHINE
82810 bool
82811 default y
82812- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
82813+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
82814 help
82815 Need stop_machine() primitive.
82816
82817diff --git a/init/Makefile b/init/Makefile
82818index 7bc47ee..6da2dc7 100644
82819--- a/init/Makefile
82820+++ b/init/Makefile
82821@@ -2,6 +2,9 @@
82822 # Makefile for the linux kernel.
82823 #
82824
82825+ccflags-y := $(GCC_PLUGINS_CFLAGS)
82826+asflags-y := $(GCC_PLUGINS_AFLAGS)
82827+
82828 obj-y := main.o version.o mounts.o
82829 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
82830 obj-y += noinitramfs.o
82831diff --git a/init/do_mounts.c b/init/do_mounts.c
82832index 8e5addc..c96ea61 100644
82833--- a/init/do_mounts.c
82834+++ b/init/do_mounts.c
82835@@ -359,11 +359,11 @@ static void __init get_fs_names(char *page)
82836 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
82837 {
82838 struct super_block *s;
82839- int err = sys_mount(name, "/root", fs, flags, data);
82840+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
82841 if (err)
82842 return err;
82843
82844- sys_chdir("/root");
82845+ sys_chdir((const char __force_user *)"/root");
82846 s = current->fs->pwd.dentry->d_sb;
82847 ROOT_DEV = s->s_dev;
82848 printk(KERN_INFO
82849@@ -484,18 +484,18 @@ void __init change_floppy(char *fmt, ...)
82850 va_start(args, fmt);
82851 vsprintf(buf, fmt, args);
82852 va_end(args);
82853- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
82854+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
82855 if (fd >= 0) {
82856 sys_ioctl(fd, FDEJECT, 0);
82857 sys_close(fd);
82858 }
82859 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
82860- fd = sys_open("/dev/console", O_RDWR, 0);
82861+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
82862 if (fd >= 0) {
82863 sys_ioctl(fd, TCGETS, (long)&termios);
82864 termios.c_lflag &= ~ICANON;
82865 sys_ioctl(fd, TCSETSF, (long)&termios);
82866- sys_read(fd, &c, 1);
82867+ sys_read(fd, (char __user *)&c, 1);
82868 termios.c_lflag |= ICANON;
82869 sys_ioctl(fd, TCSETSF, (long)&termios);
82870 sys_close(fd);
82871@@ -589,8 +589,8 @@ void __init prepare_namespace(void)
82872 mount_root();
82873 out:
82874 devtmpfs_mount("dev");
82875- sys_mount(".", "/", NULL, MS_MOVE, NULL);
82876- sys_chroot(".");
82877+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
82878+ sys_chroot((const char __force_user *)".");
82879 }
82880
82881 static bool is_tmpfs;
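
Every init-time sys_* call above hands a kernel-space string to an interface declared with __user pointers; under UDEREF the two address spaces are genuinely distinct, so each crossing is spelled out with a __force_user cast instead of being silently permitted. A sketch of the sparse machinery behind the annotation (the __user/__force definitions mirror compiler.h; __force_user itself is the grsecurity addition):

    /* Sketch: sparse address-space checking behind __force_user. */
    #ifdef __CHECKER__
    # define __user   __attribute__((noderef, address_space(1)))
    # define __force  __attribute__((force))
    #else
    # define __user
    # define __force
    #endif
    #define __force_user  __force __user

    long sys_unlink(const char __user *pathname);

    static void early_cleanup(void)
    {
            /* kernel string into a user-pointer slot: cast on purpose */
            sys_unlink((const char __force_user *)"/initrd.image");
    }
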
82882diff --git a/init/do_mounts.h b/init/do_mounts.h
82883index f5b978a..69dbfe8 100644
82884--- a/init/do_mounts.h
82885+++ b/init/do_mounts.h
82886@@ -15,15 +15,15 @@ extern int root_mountflags;
82887
82888 static inline int create_dev(char *name, dev_t dev)
82889 {
82890- sys_unlink(name);
82891- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
82892+ sys_unlink((char __force_user *)name);
82893+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
82894 }
82895
82896 #if BITS_PER_LONG == 32
82897 static inline u32 bstat(char *name)
82898 {
82899 struct stat64 stat;
82900- if (sys_stat64(name, &stat) != 0)
82901+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
82902 return 0;
82903 if (!S_ISBLK(stat.st_mode))
82904 return 0;
82905@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
82906 static inline u32 bstat(char *name)
82907 {
82908 struct stat stat;
82909- if (sys_newstat(name, &stat) != 0)
82910+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
82911 return 0;
82912 if (!S_ISBLK(stat.st_mode))
82913 return 0;
82914diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
82915index 3e0878e..8a9d7a0 100644
82916--- a/init/do_mounts_initrd.c
82917+++ b/init/do_mounts_initrd.c
82918@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
82919 {
82920 sys_unshare(CLONE_FS | CLONE_FILES);
82921 /* stdin/stdout/stderr for /linuxrc */
82922- sys_open("/dev/console", O_RDWR, 0);
82923+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
82924 sys_dup(0);
82925 sys_dup(0);
82926 /* move initrd over / and chdir/chroot in initrd root */
82927- sys_chdir("/root");
82928- sys_mount(".", "/", NULL, MS_MOVE, NULL);
82929- sys_chroot(".");
82930+ sys_chdir((const char __force_user *)"/root");
82931+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
82932+ sys_chroot((const char __force_user *)".");
82933 sys_setsid();
82934 return 0;
82935 }
82936@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
82937 create_dev("/dev/root.old", Root_RAM0);
82938 /* mount initrd on rootfs' /root */
82939 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
82940- sys_mkdir("/old", 0700);
82941- sys_chdir("/old");
82942+ sys_mkdir((const char __force_user *)"/old", 0700);
82943+ sys_chdir((const char __force_user *)"/old");
82944
82945 /* try loading default modules from initrd */
82946 load_default_modules();
82947@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
82948 current->flags &= ~PF_FREEZER_SKIP;
82949
82950 /* move initrd to rootfs' /old */
82951- sys_mount("..", ".", NULL, MS_MOVE, NULL);
82952+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
82953 /* switch root and cwd back to / of rootfs */
82954- sys_chroot("..");
82955+ sys_chroot((const char __force_user *)"..");
82956
82957 if (new_decode_dev(real_root_dev) == Root_RAM0) {
82958- sys_chdir("/old");
82959+ sys_chdir((const char __force_user *)"/old");
82960 return;
82961 }
82962
82963- sys_chdir("/");
82964+ sys_chdir((const char __force_user *)"/");
82965 ROOT_DEV = new_decode_dev(real_root_dev);
82966 mount_root();
82967
82968 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
82969- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
82970+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
82971 if (!error)
82972 printk("okay\n");
82973 else {
82974- int fd = sys_open("/dev/root.old", O_RDWR, 0);
82975+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
82976 if (error == -ENOENT)
82977 printk("/initrd does not exist. Ignored.\n");
82978 else
82979 printk("failed\n");
82980 printk(KERN_NOTICE "Unmounting old root\n");
82981- sys_umount("/old", MNT_DETACH);
82982+ sys_umount((char __force_user *)"/old", MNT_DETACH);
82983 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
82984 if (fd < 0) {
82985 error = fd;
82986@@ -127,11 +127,11 @@ int __init initrd_load(void)
82987 * mounted in the normal path.
82988 */
82989 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
82990- sys_unlink("/initrd.image");
82991+ sys_unlink((const char __force_user *)"/initrd.image");
82992 handle_initrd();
82993 return 1;
82994 }
82995 }
82996- sys_unlink("/initrd.image");
82997+ sys_unlink((const char __force_user *)"/initrd.image");
82998 return 0;
82999 }
83000diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
83001index 8cb6db5..d729f50 100644
83002--- a/init/do_mounts_md.c
83003+++ b/init/do_mounts_md.c
83004@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
83005 partitioned ? "_d" : "", minor,
83006 md_setup_args[ent].device_names);
83007
83008- fd = sys_open(name, 0, 0);
83009+ fd = sys_open((char __force_user *)name, 0, 0);
83010 if (fd < 0) {
83011 printk(KERN_ERR "md: open failed - cannot start "
83012 "array %s\n", name);
83013@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
83014 * array without it
83015 */
83016 sys_close(fd);
83017- fd = sys_open(name, 0, 0);
83018+ fd = sys_open((char __force_user *)name, 0, 0);
83019 sys_ioctl(fd, BLKRRPART, 0);
83020 }
83021 sys_close(fd);
83022@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
83023
83024 wait_for_device_probe();
83025
83026- fd = sys_open("/dev/md0", 0, 0);
83027+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
83028 if (fd >= 0) {
83029 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
83030 sys_close(fd);
83031diff --git a/init/init_task.c b/init/init_task.c
83032index ba0a7f36..2bcf1d5 100644
83033--- a/init/init_task.c
83034+++ b/init/init_task.c
83035@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
83036 * Initial thread structure. Alignment of this is handled by a special
83037 * linker map entry.
83038 */
83039+#ifdef CONFIG_X86
83040+union thread_union init_thread_union __init_task_data;
83041+#else
83042 union thread_union init_thread_union __init_task_data =
83043 { INIT_THREAD_INFO(init_task) };
83044+#endif
83045diff --git a/init/initramfs.c b/init/initramfs.c
83046index a67ef9d..2d17ed9 100644
83047--- a/init/initramfs.c
83048+++ b/init/initramfs.c
83049@@ -84,7 +84,7 @@ static void __init free_hash(void)
83050 }
83051 }
83052
83053-static long __init do_utime(char *filename, time_t mtime)
83054+static long __init do_utime(char __force_user *filename, time_t mtime)
83055 {
83056 struct timespec t[2];
83057
83058@@ -119,7 +119,7 @@ static void __init dir_utime(void)
83059 struct dir_entry *de, *tmp;
83060 list_for_each_entry_safe(de, tmp, &dir_list, list) {
83061 list_del(&de->list);
83062- do_utime(de->name, de->mtime);
83063+ do_utime((char __force_user *)de->name, de->mtime);
83064 kfree(de->name);
83065 kfree(de);
83066 }
83067@@ -281,7 +281,7 @@ static int __init maybe_link(void)
83068 if (nlink >= 2) {
83069 char *old = find_link(major, minor, ino, mode, collected);
83070 if (old)
83071- return (sys_link(old, collected) < 0) ? -1 : 1;
83072+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
83073 }
83074 return 0;
83075 }
83076@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
83077 {
83078 struct stat st;
83079
83080- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
83081+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
83082 if (S_ISDIR(st.st_mode))
83083- sys_rmdir(path);
83084+ sys_rmdir((char __force_user *)path);
83085 else
83086- sys_unlink(path);
83087+ sys_unlink((char __force_user *)path);
83088 }
83089 }
83090
83091@@ -315,7 +315,7 @@ static int __init do_name(void)
83092 int openflags = O_WRONLY|O_CREAT;
83093 if (ml != 1)
83094 openflags |= O_TRUNC;
83095- wfd = sys_open(collected, openflags, mode);
83096+ wfd = sys_open((char __force_user *)collected, openflags, mode);
83097
83098 if (wfd >= 0) {
83099 sys_fchown(wfd, uid, gid);
83100@@ -327,17 +327,17 @@ static int __init do_name(void)
83101 }
83102 }
83103 } else if (S_ISDIR(mode)) {
83104- sys_mkdir(collected, mode);
83105- sys_chown(collected, uid, gid);
83106- sys_chmod(collected, mode);
83107+ sys_mkdir((char __force_user *)collected, mode);
83108+ sys_chown((char __force_user *)collected, uid, gid);
83109+ sys_chmod((char __force_user *)collected, mode);
83110 dir_add(collected, mtime);
83111 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
83112 S_ISFIFO(mode) || S_ISSOCK(mode)) {
83113 if (maybe_link() == 0) {
83114- sys_mknod(collected, mode, rdev);
83115- sys_chown(collected, uid, gid);
83116- sys_chmod(collected, mode);
83117- do_utime(collected, mtime);
83118+ sys_mknod((char __force_user *)collected, mode, rdev);
83119+ sys_chown((char __force_user *)collected, uid, gid);
83120+ sys_chmod((char __force_user *)collected, mode);
83121+ do_utime((char __force_user *)collected, mtime);
83122 }
83123 }
83124 return 0;
83125@@ -346,15 +346,15 @@ static int __init do_name(void)
83126 static int __init do_copy(void)
83127 {
83128 if (count >= body_len) {
83129- sys_write(wfd, victim, body_len);
83130+ sys_write(wfd, (char __force_user *)victim, body_len);
83131 sys_close(wfd);
83132- do_utime(vcollected, mtime);
83133+ do_utime((char __force_user *)vcollected, mtime);
83134 kfree(vcollected);
83135 eat(body_len);
83136 state = SkipIt;
83137 return 0;
83138 } else {
83139- sys_write(wfd, victim, count);
83140+ sys_write(wfd, (char __force_user *)victim, count);
83141 body_len -= count;
83142 eat(count);
83143 return 1;
83144@@ -365,9 +365,9 @@ static int __init do_symlink(void)
83145 {
83146 collected[N_ALIGN(name_len) + body_len] = '\0';
83147 clean_path(collected, 0);
83148- sys_symlink(collected + N_ALIGN(name_len), collected);
83149- sys_lchown(collected, uid, gid);
83150- do_utime(collected, mtime);
83151+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
83152+ sys_lchown((char __force_user *)collected, uid, gid);
83153+ do_utime((char __force_user *)collected, mtime);
83154 state = SkipIt;
83155 next_state = Reset;
83156 return 0;
83157@@ -583,7 +583,7 @@ static int __init populate_rootfs(void)
83158 {
83159 char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
83160 if (err)
83161- panic(err); /* Failed to decompress INTERNAL initramfs */
83162+ panic("%s", err); /* Failed to decompress INTERNAL initramfs */
83163 if (initrd_start) {
83164 #ifdef CONFIG_BLK_DEV_RAM
83165 int fd;
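
The change from panic(err) to panic("%s", err) closes a format-string hole: panic() takes a printf-style format, so any '%' conversions inside the error string would be interpreted and would read garbage variadic arguments. Routing the string through a constant "%s" format makes it inert data. The same rule in a tiny userspace example:

    /* Sketch: never pass externally derived text as the format. */
    #include <stdio.h>

    static void report(const char *msg_from_data)
    {
            printf(msg_from_data);        /* BAD: "%x %x" in msg leaks */
            printf("%s", msg_from_data);  /* GOOD: msg is plain data */
    }
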
83166diff --git a/init/main.c b/init/main.c
83167index febc511..f0851763 100644
83168--- a/init/main.c
83169+++ b/init/main.c
83170@@ -103,6 +103,8 @@ static inline void mark_rodata_ro(void) { }
83171 extern void tc_init(void);
83172 #endif
83173
83174+extern void grsecurity_init(void);
83175+
83176 /*
83177 * Debug helper: via this flag we know that we are in 'early bootup code'
83178 * where only the boot processor is running with IRQ disabled. This means
83179@@ -164,6 +166,75 @@ static int __init set_reset_devices(char *str)
83180
83181 __setup("reset_devices", set_reset_devices);
83182
83183+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
83184+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
83185+static int __init setup_grsec_proc_gid(char *str)
83186+{
83187+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
83188+ return 1;
83189+}
83190+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
83191+#endif
83192+
83193+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
83194+unsigned long pax_user_shadow_base __read_only;
83195+EXPORT_SYMBOL(pax_user_shadow_base);
83196+extern char pax_enter_kernel_user[];
83197+extern char pax_exit_kernel_user[];
83198+#endif
83199+
83200+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
83201+static int __init setup_pax_nouderef(char *str)
83202+{
83203+#ifdef CONFIG_X86_32
83204+ unsigned int cpu;
83205+ struct desc_struct *gdt;
83206+
83207+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
83208+ gdt = get_cpu_gdt_table(cpu);
83209+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
83210+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
83211+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
83212+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
83213+ }
83214+ loadsegment(ds, __KERNEL_DS);
83215+ loadsegment(es, __KERNEL_DS);
83216+ loadsegment(ss, __KERNEL_DS);
83217+#else
83218+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
83219+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
83220+ clone_pgd_mask = ~(pgdval_t)0UL;
83221+ pax_user_shadow_base = 0UL;
83222+ setup_clear_cpu_cap(X86_FEATURE_PCID);
83223+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
83224+#endif
83225+
83226+ return 0;
83227+}
83228+early_param("pax_nouderef", setup_pax_nouderef);
83229+
83230+#ifdef CONFIG_X86_64
83231+static int __init setup_pax_weakuderef(char *str)
83232+{
83233+ if (clone_pgd_mask != ~(pgdval_t)0UL)
83234+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
83235+ return 1;
83236+}
83237+__setup("pax_weakuderef", setup_pax_weakuderef);
83238+#endif
83239+#endif
83240+
83241+#ifdef CONFIG_PAX_SOFTMODE
83242+int pax_softmode;
83243+
83244+static int __init setup_pax_softmode(char *str)
83245+{
83246+ get_option(&str, &pax_softmode);
83247+ return 1;
83248+}
83249+__setup("pax_softmode=", setup_pax_softmode);
83250+#endif
83251+
83252 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
83253 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
83254 static const char *panic_later, *panic_param;
83255@@ -691,25 +762,24 @@ int __init_or_module do_one_initcall(initcall_t fn)
83256 {
83257 int count = preempt_count();
83258 int ret;
83259- char msgbuf[64];
83260+ const char *msg1 = "", *msg2 = "";
83261
83262 if (initcall_debug)
83263 ret = do_one_initcall_debug(fn);
83264 else
83265 ret = fn();
83266
83267- msgbuf[0] = 0;
83268-
83269 if (preempt_count() != count) {
83270- sprintf(msgbuf, "preemption imbalance ");
83271+ msg1 = " preemption imbalance";
83272 preempt_count_set(count);
83273 }
83274 if (irqs_disabled()) {
83275- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
83276+ msg2 = " disabled interrupts";
83277 local_irq_enable();
83278 }
83279- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
83280+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
83281
83282+ add_latent_entropy();
83283 return ret;
83284 }
83285
83286@@ -816,8 +886,8 @@ static int run_init_process(const char *init_filename)
83287 {
83288 argv_init[0] = init_filename;
83289 return do_execve(init_filename,
83290- (const char __user *const __user *)argv_init,
83291- (const char __user *const __user *)envp_init);
83292+ (const char __user *const __force_user *)argv_init,
83293+ (const char __user *const __force_user *)envp_init);
83294 }
83295
83296 static int try_to_run_init_process(const char *init_filename)
83297@@ -834,6 +904,10 @@ static int try_to_run_init_process(const char *init_filename)
83298 return ret;
83299 }
83300
83301+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
83302+extern int gr_init_ran;
83303+#endif
83304+
83305 static noinline void __init kernel_init_freeable(void);
83306
83307 static int __ref kernel_init(void *unused)
83308@@ -858,6 +932,11 @@ static int __ref kernel_init(void *unused)
83309 ramdisk_execute_command, ret);
83310 }
83311
83312+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
83313+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
83314+ gr_init_ran = 1;
83315+#endif
83316+
83317 /*
83318 * We try each of these until one succeeds.
83319 *
83320@@ -913,7 +992,7 @@ static noinline void __init kernel_init_freeable(void)
83321 do_basic_setup();
83322
83323 /* Open the /dev/console on the rootfs, this should never fail */
83324- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
83325+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
83326 pr_err("Warning: unable to open an initial console.\n");
83327
83328 (void) sys_dup(0);
83329@@ -926,11 +1005,13 @@ static noinline void __init kernel_init_freeable(void)
83330 if (!ramdisk_execute_command)
83331 ramdisk_execute_command = "/init";
83332
83333- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
83334+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
83335 ramdisk_execute_command = NULL;
83336 prepare_namespace();
83337 }
83338
83339+ grsecurity_init();
83340+
83341 /*
83342 * Ok, we have completed the initial bootup, and
83343 * we're essentially up and running. Get rid of the
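
The new boot parameters use the kernel's two registration points: early_param() handlers run from parse_early_param(), long before initcalls (which pax_nouderef needs, since it rewrites per-CPU GDT entries during early setup), while __setup() handlers run later in the boot sequence. A minimal sketch of each form, with a hypothetical parameter name:

    /* Sketch: registering kernel command-line parameters. */
    #include <linux/init.h>
    #include <linux/kernel.h>

    static int example_mode;

    static int __init setup_example_mode(char *str)
    {
            get_option(&str, &example_mode);  /* parses "example_mode=N" */
            return 1;                         /* 1: consumed, no warning */
    }
    __setup("example_mode=", setup_example_mode);

    static int __init setup_example_early(char *str)
    {
            /* runs from parse_early_param(), before most of boot */
            return 0;
    }
    early_param("example_early", setup_example_early);
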
83344diff --git a/ipc/compat.c b/ipc/compat.c
83345index 892f658..e7c6320 100644
83346--- a/ipc/compat.c
83347+++ b/ipc/compat.c
83348@@ -399,7 +399,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
83349 COMPAT_SHMLBA);
83350 if (err < 0)
83351 return err;
83352- return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
83353+ return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
83354 }
83355 case SHMDT:
83356 return sys_shmdt(compat_ptr(ptr));
83357diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
83358index b0e99de..09f385c 100644
83359--- a/ipc/ipc_sysctl.c
83360+++ b/ipc/ipc_sysctl.c
83361@@ -30,7 +30,7 @@ static void *get_ipc(ctl_table *table)
83362 static int proc_ipc_dointvec(ctl_table *table, int write,
83363 void __user *buffer, size_t *lenp, loff_t *ppos)
83364 {
83365- struct ctl_table ipc_table;
83366+ ctl_table_no_const ipc_table;
83367
83368 memcpy(&ipc_table, table, sizeof(ipc_table));
83369 ipc_table.data = get_ipc(table);
83370@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write,
83371 static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
83372 void __user *buffer, size_t *lenp, loff_t *ppos)
83373 {
83374- struct ctl_table ipc_table;
83375+ ctl_table_no_const ipc_table;
83376
83377 memcpy(&ipc_table, table, sizeof(ipc_table));
83378 ipc_table.data = get_ipc(table);
83379@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
83380 static int proc_ipc_callback_dointvec_minmax(ctl_table *table, int write,
83381 void __user *buffer, size_t *lenp, loff_t *ppos)
83382 {
83383- struct ctl_table ipc_table;
83384+ ctl_table_no_const ipc_table;
83385 size_t lenp_bef = *lenp;
83386 int rc;
83387
83388@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec_minmax(ctl_table *table, int write,
83389 static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
83390 void __user *buffer, size_t *lenp, loff_t *ppos)
83391 {
83392- struct ctl_table ipc_table;
83393+ ctl_table_no_const ipc_table;
83394 memcpy(&ipc_table, table, sizeof(ipc_table));
83395 ipc_table.data = get_ipc(table);
83396
83397@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
83398 static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
83399 void __user *buffer, size_t *lenp, loff_t *ppos)
83400 {
83401- struct ctl_table ipc_table;
83402+ ctl_table_no_const ipc_table;
83403 size_t lenp_bef = *lenp;
83404 int oldval;
83405 int rc;
83406diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
83407index 383d638..943fdbb 100644
83408--- a/ipc/mq_sysctl.c
83409+++ b/ipc/mq_sysctl.c
83410@@ -25,7 +25,7 @@ static void *get_mq(ctl_table *table)
83411 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
83412 void __user *buffer, size_t *lenp, loff_t *ppos)
83413 {
83414- struct ctl_table mq_table;
83415+ ctl_table_no_const mq_table;
83416 memcpy(&mq_table, table, sizeof(mq_table));
83417 mq_table.data = get_mq(table);
83418
83419diff --git a/ipc/mqueue.c b/ipc/mqueue.c
83420index 95827ce..09e6d38 100644
83421--- a/ipc/mqueue.c
83422+++ b/ipc/mqueue.c
83423@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
83424 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
83425 info->attr.mq_msgsize);
83426
83427+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
83428 spin_lock(&mq_lock);
83429 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
83430 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
83431diff --git a/ipc/msg.c b/ipc/msg.c
83432index 558aa91..359e718 100644
83433--- a/ipc/msg.c
83434+++ b/ipc/msg.c
83435@@ -297,18 +297,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
83436 return security_msg_queue_associate(msq, msgflg);
83437 }
83438
83439+static struct ipc_ops msg_ops = {
83440+ .getnew = newque,
83441+ .associate = msg_security,
83442+ .more_checks = NULL
83443+};
83444+
83445 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
83446 {
83447 struct ipc_namespace *ns;
83448- struct ipc_ops msg_ops;
83449 struct ipc_params msg_params;
83450
83451 ns = current->nsproxy->ipc_ns;
83452
83453- msg_ops.getnew = newque;
83454- msg_ops.associate = msg_security;
83455- msg_ops.more_checks = NULL;
83456-
83457 msg_params.key = key;
83458 msg_params.flg = msgflg;
83459
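
This hunk, like the matching ones in sem.c and shm.c below, hoists the ops table out of the syscall's stack frame into a single file-scope object: a per-call stack copy can never be write-protected, while one static instance can be placed in read-only memory by the constify plugin. The shape of the transformation in isolation (the struct here is illustrative):

    /* Sketch: stack-built ops table vs. one constifiable instance. */
    struct ops_example {
            int (*getnew)(void);
            int (*associate)(int flg);
    };

    /* before: writable, rebuilt on every syscall entry */
    static long syscall_before(void)
    {
            struct ops_example ops;

            ops.getnew = NULL;
            ops.associate = NULL;
            return 0;
    }

    /* after: one instance for the kernel's lifetime, protectable */
    static const struct ops_example syscall_ops = {
            .getnew = NULL,
            .associate = NULL,
    };
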
83460diff --git a/ipc/sem.c b/ipc/sem.c
83461index db9d241..bc8427c 100644
83462--- a/ipc/sem.c
83463+++ b/ipc/sem.c
83464@@ -562,10 +562,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
83465 return 0;
83466 }
83467
83468+static struct ipc_ops sem_ops = {
83469+ .getnew = newary,
83470+ .associate = sem_security,
83471+ .more_checks = sem_more_checks
83472+};
83473+
83474 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
83475 {
83476 struct ipc_namespace *ns;
83477- struct ipc_ops sem_ops;
83478 struct ipc_params sem_params;
83479
83480 ns = current->nsproxy->ipc_ns;
83481@@ -573,10 +578,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
83482 if (nsems < 0 || nsems > ns->sc_semmsl)
83483 return -EINVAL;
83484
83485- sem_ops.getnew = newary;
83486- sem_ops.associate = sem_security;
83487- sem_ops.more_checks = sem_more_checks;
83488-
83489 sem_params.key = key;
83490 sem_params.flg = semflg;
83491 sem_params.u.nsems = nsems;
83492diff --git a/ipc/shm.c b/ipc/shm.c
83493index 7a51443..3a257d8 100644
83494--- a/ipc/shm.c
83495+++ b/ipc/shm.c
83496@@ -72,6 +72,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
83497 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
83498 #endif
83499
83500+#ifdef CONFIG_GRKERNSEC
83501+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
83502+ const time_t shm_createtime, const kuid_t cuid,
83503+ const int shmid);
83504+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
83505+ const time_t shm_createtime);
83506+#endif
83507+
83508 void shm_init_ns(struct ipc_namespace *ns)
83509 {
83510 ns->shm_ctlmax = SHMMAX;
83511@@ -554,6 +562,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
83512 shp->shm_lprid = 0;
83513 shp->shm_atim = shp->shm_dtim = 0;
83514 shp->shm_ctim = get_seconds();
83515+#ifdef CONFIG_GRKERNSEC
83516+ {
83517+ struct timespec timeval;
83518+ do_posix_clock_monotonic_gettime(&timeval);
83519+
83520+ shp->shm_createtime = timeval.tv_sec;
83521+ }
83522+#endif
83523 shp->shm_segsz = size;
83524 shp->shm_nattch = 0;
83525 shp->shm_file = file;
83526@@ -607,18 +623,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
83527 return 0;
83528 }
83529
83530+static struct ipc_ops shm_ops = {
83531+ .getnew = newseg,
83532+ .associate = shm_security,
83533+ .more_checks = shm_more_checks
83534+};
83535+
83536 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
83537 {
83538 struct ipc_namespace *ns;
83539- struct ipc_ops shm_ops;
83540 struct ipc_params shm_params;
83541
83542 ns = current->nsproxy->ipc_ns;
83543
83544- shm_ops.getnew = newseg;
83545- shm_ops.associate = shm_security;
83546- shm_ops.more_checks = shm_more_checks;
83547-
83548 shm_params.key = key;
83549 shm_params.flg = shmflg;
83550 shm_params.u.size = size;
83551@@ -1089,6 +1106,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
83552 f_mode = FMODE_READ | FMODE_WRITE;
83553 }
83554 if (shmflg & SHM_EXEC) {
83555+
83556+#ifdef CONFIG_PAX_MPROTECT
83557+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
83558+ goto out;
83559+#endif
83560+
83561 prot |= PROT_EXEC;
83562 acc_mode |= S_IXUGO;
83563 }
83564@@ -1113,6 +1136,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
83565 if (err)
83566 goto out_unlock;
83567
83568+#ifdef CONFIG_GRKERNSEC
83569+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
83570+ shp->shm_perm.cuid, shmid) ||
83571+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
83572+ err = -EACCES;
83573+ goto out_unlock;
83574+ }
83575+#endif
83576+
83577 ipc_lock_object(&shp->shm_perm);
83578
83579 /* check if shm_destroy() is tearing down shp */
83580@@ -1125,6 +1157,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
83581 path = shp->shm_file->f_path;
83582 path_get(&path);
83583 shp->shm_nattch++;
83584+#ifdef CONFIG_GRKERNSEC
83585+ shp->shm_lapid = current->pid;
83586+#endif
83587 size = i_size_read(path.dentry->d_inode);
83588 ipc_unlock_object(&shp->shm_perm);
83589 rcu_read_unlock();
83590diff --git a/ipc/util.c b/ipc/util.c
83591index 3ae17a4..d67c32f 100644
83592--- a/ipc/util.c
83593+++ b/ipc/util.c
83594@@ -71,6 +71,8 @@ struct ipc_proc_iface {
83595 int (*show)(struct seq_file *, void *);
83596 };
83597
83598+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
83599+
83600 static void ipc_memory_notifier(struct work_struct *work)
83601 {
83602 ipcns_notify(IPCNS_MEMCHANGED);
83603@@ -558,6 +560,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
83604 granted_mode >>= 6;
83605 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
83606 granted_mode >>= 3;
83607+
83608+ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
83609+ return -1;
83610+
83611 /* is there some bit set in requested_mode but not in granted_mode? */
83612 if ((requested_mode & ~granted_mode & 0007) &&
83613 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
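
The new gr_ipc_permitted() hook slots into the classic SysV IPC mode check: the caller's class (owner, group, other) selects one rwx triad by shifting the mode, and access is denied when any requested bit is missing from the granted triad. The arithmetic, worked through in a runnable example:

    /* Sketch: the SysV IPC permission arithmetic in miniature. */
    #include <stdio.h>

    static int ipc_allowed(unsigned mode, int is_owner, int is_group,
                           unsigned requested)  /* rwx bits, 0..7 */
    {
            unsigned granted = mode;

            if (is_owner)
                    granted >>= 6;      /* owner triad */
            else if (is_group)
                    granted >>= 3;      /* group triad */
            return (requested & ~granted & 0007) ? 0 : 1;
    }

    int main(void)
    {
            printf("%d\n", ipc_allowed(0640, 0, 1, 04)); /* group read: 1 */
            printf("%d\n", ipc_allowed(0640, 0, 0, 04)); /* other read: 0 */
            return 0;
    }
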
83614diff --git a/kernel/acct.c b/kernel/acct.c
83615index 8d6e145..33e0b1e 100644
83616--- a/kernel/acct.c
83617+++ b/kernel/acct.c
83618@@ -556,7 +556,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
83619 */
83620 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
83621 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
83622- file->f_op->write(file, (char *)&ac,
83623+ file->f_op->write(file, (char __force_user *)&ac,
83624 sizeof(acct_t), &file->f_pos);
83625 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
83626 set_fs(fs);
83627diff --git a/kernel/audit.c b/kernel/audit.c
83628index 906ae5a0..a7ad0b4 100644
83629--- a/kernel/audit.c
83630+++ b/kernel/audit.c
83631@@ -117,7 +117,7 @@ u32 audit_sig_sid = 0;
83632 3) suppressed due to audit_rate_limit
83633 4) suppressed due to audit_backlog_limit
83634 */
83635-static atomic_t audit_lost = ATOMIC_INIT(0);
83636+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
83637
83638 /* The netlink socket. */
83639 static struct sock *audit_sock;
83640@@ -250,7 +250,7 @@ void audit_log_lost(const char *message)
83641 unsigned long now;
83642 int print;
83643
83644- atomic_inc(&audit_lost);
83645+ atomic_inc_unchecked(&audit_lost);
83646
83647 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
83648
83649@@ -269,7 +269,7 @@ void audit_log_lost(const char *message)
83650 printk(KERN_WARNING
83651 "audit: audit_lost=%d audit_rate_limit=%d "
83652 "audit_backlog_limit=%d\n",
83653- atomic_read(&audit_lost),
83654+ atomic_read_unchecked(&audit_lost),
83655 audit_rate_limit,
83656 audit_backlog_limit);
83657 audit_panic(message);
83658@@ -765,7 +765,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
83659 status_set.pid = audit_pid;
83660 status_set.rate_limit = audit_rate_limit;
83661 status_set.backlog_limit = audit_backlog_limit;
83662- status_set.lost = atomic_read(&audit_lost);
83663+ status_set.lost = atomic_read_unchecked(&audit_lost);
83664 status_set.backlog = skb_queue_len(&audit_skb_queue);
83665 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
83666 &status_set, sizeof(status_set));
83667@@ -1356,7 +1356,7 @@ void audit_log_n_hex(struct audit_buffer *ab, const unsigned char *buf,
83668 int i, avail, new_len;
83669 unsigned char *ptr;
83670 struct sk_buff *skb;
83671- static const unsigned char *hex = "0123456789ABCDEF";
83672+ static const unsigned char hex[] = "0123456789ABCDEF";
83673
83674 if (!ab)
83675 return;
83676diff --git a/kernel/auditsc.c b/kernel/auditsc.c
83677index 90594c9..abbeed7 100644
83678--- a/kernel/auditsc.c
83679+++ b/kernel/auditsc.c
83680@@ -1945,7 +1945,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
83681 }
83682
83683 /* global counter which is incremented every time something logs in */
83684-static atomic_t session_id = ATOMIC_INIT(0);
83685+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
83686
83687 static int audit_set_loginuid_perm(kuid_t loginuid)
83688 {
83689@@ -2008,7 +2008,7 @@ int audit_set_loginuid(kuid_t loginuid)
83690
83691 /* are we setting or clearing? */
83692 if (uid_valid(loginuid))
83693- sessionid = atomic_inc_return(&session_id);
83694+ sessionid = atomic_inc_return_unchecked(&session_id);
83695
83696 task->sessionid = sessionid;
83697 task->loginuid = loginuid;
83698diff --git a/kernel/capability.c b/kernel/capability.c
83699index 4e66bf9..cdccecf 100644
83700--- a/kernel/capability.c
83701+++ b/kernel/capability.c
83702@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
83703 * before modification is attempted and the application
83704 * fails.
83705 */
83706+ if (tocopy > ARRAY_SIZE(kdata))
83707+ return -EFAULT;
83708+
83709 if (copy_to_user(dataptr, kdata, tocopy
83710 * sizeof(struct __user_cap_data_struct))) {
83711 return -EFAULT;
83712@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
83713 int ret;
83714
83715 rcu_read_lock();
83716- ret = security_capable(__task_cred(t), ns, cap);
83717+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
83718+ gr_task_is_capable(t, __task_cred(t), cap);
83719 rcu_read_unlock();
83720
83721- return (ret == 0);
83722+ return ret;
83723 }
83724
83725 /**
83726@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
83727 int ret;
83728
83729 rcu_read_lock();
83730- ret = security_capable_noaudit(__task_cred(t), ns, cap);
83731+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
83732 rcu_read_unlock();
83733
83734- return (ret == 0);
83735+ return ret;
83736 }
83737
83738 /**
83739@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
83740 BUG();
83741 }
83742
83743- if (security_capable(current_cred(), ns, cap) == 0) {
83744+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
83745 current->flags |= PF_SUPERPRIV;
83746 return true;
83747 }
83748@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
83749 }
83750 EXPORT_SYMBOL(ns_capable);
83751
83752+bool ns_capable_nolog(struct user_namespace *ns, int cap)
83753+{
83754+ if (unlikely(!cap_valid(cap))) {
83755+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
83756+ BUG();
83757+ }
83758+
83759+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
83760+ current->flags |= PF_SUPERPRIV;
83761+ return true;
83762+ }
83763+ return false;
83764+}
83765+EXPORT_SYMBOL(ns_capable_nolog);
83766+
83767 /**
83768 * file_ns_capable - Determine if the file's opener had a capability in effect
83769 * @file: The file we want to check
83770@@ -432,6 +451,12 @@ bool capable(int cap)
83771 }
83772 EXPORT_SYMBOL(capable);
83773
83774+bool capable_nolog(int cap)
83775+{
83776+ return ns_capable_nolog(&init_user_ns, cap);
83777+}
83778+EXPORT_SYMBOL(capable_nolog);
83779+
83780 /**
83781 * inode_capable - Check superior capability over inode
83782 * @inode: The inode in question
83783@@ -453,3 +478,11 @@ bool inode_capable(const struct inode *inode, int cap)
83784 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
83785 }
83786 EXPORT_SYMBOL(inode_capable);
83787+
83788+bool inode_capable_nolog(const struct inode *inode, int cap)
83789+{
83790+ struct user_namespace *ns = current_user_ns();
83791+
83792+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
83793+}
83794+EXPORT_SYMBOL(inode_capable_nolog);
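
The _nolog family added above mirrors capable()/ns_capable()/inode_capable() but goes through the noaudit LSM hook plus the grsecurity nolog checks, so speculative capability probes, where denial is the expected and harmless outcome, do not flood the audit log. A sketch of the intended call-site split (both callers are illustrative):

    /* Sketch: logged vs. unlogged capability checks. */
    #include <linux/capability.h>

    static int privileged_op(void)
    {
            if (!capable(CAP_SYS_ADMIN))    /* real denial: log it */
                    return -EPERM;
            return 0;
    }

    static int pick_default_mode(void)
    {
            /* opportunistic probe; denial is normal, keep it quiet */
            return capable_nolog(CAP_SYS_ADMIN) ? 0600 : 0644;
    }
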
83795diff --git a/kernel/cgroup.c b/kernel/cgroup.c
83796index bc1dcab..f3a6b42 100644
83797--- a/kernel/cgroup.c
83798+++ b/kernel/cgroup.c
83799@@ -5607,7 +5607,7 @@ static int cgroup_css_links_read(struct cgroup_subsys_state *css,
83800 struct css_set *cset = link->cset;
83801 struct task_struct *task;
83802 int count = 0;
83803- seq_printf(seq, "css_set %p\n", cset);
83804+ seq_printf(seq, "css_set %pK\n", cset);
83805 list_for_each_entry(task, &cset->tasks, cg_list) {
83806 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
83807 seq_puts(seq, " ...\n");
83808diff --git a/kernel/compat.c b/kernel/compat.c
83809index 0a09e48..b46b3d78 100644
83810--- a/kernel/compat.c
83811+++ b/kernel/compat.c
83812@@ -13,6 +13,7 @@
83813
83814 #include <linux/linkage.h>
83815 #include <linux/compat.h>
83816+#include <linux/module.h>
83817 #include <linux/errno.h>
83818 #include <linux/time.h>
83819 #include <linux/signal.h>
83820@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
83821 mm_segment_t oldfs;
83822 long ret;
83823
83824- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
83825+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
83826 oldfs = get_fs();
83827 set_fs(KERNEL_DS);
83828 ret = hrtimer_nanosleep_restart(restart);
83829@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
83830 oldfs = get_fs();
83831 set_fs(KERNEL_DS);
83832 ret = hrtimer_nanosleep(&tu,
83833- rmtp ? (struct timespec __user *)&rmt : NULL,
83834+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
83835 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
83836 set_fs(oldfs);
83837
83838@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
83839 mm_segment_t old_fs = get_fs();
83840
83841 set_fs(KERNEL_DS);
83842- ret = sys_sigpending((old_sigset_t __user *) &s);
83843+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
83844 set_fs(old_fs);
83845 if (ret == 0)
83846 ret = put_user(s, set);
83847@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
83848 mm_segment_t old_fs = get_fs();
83849
83850 set_fs(KERNEL_DS);
83851- ret = sys_old_getrlimit(resource, &r);
83852+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
83853 set_fs(old_fs);
83854
83855 if (!ret) {
83856@@ -533,8 +534,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
83857 set_fs (KERNEL_DS);
83858 ret = sys_wait4(pid,
83859 (stat_addr ?
83860- (unsigned int __user *) &status : NULL),
83861- options, (struct rusage __user *) &r);
83862+ (unsigned int __force_user *) &status : NULL),
83863+ options, (struct rusage __force_user *) &r);
83864 set_fs (old_fs);
83865
83866 if (ret > 0) {
83867@@ -560,8 +561,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
83868 memset(&info, 0, sizeof(info));
83869
83870 set_fs(KERNEL_DS);
83871- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
83872- uru ? (struct rusage __user *)&ru : NULL);
83873+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
83874+ uru ? (struct rusage __force_user *)&ru : NULL);
83875 set_fs(old_fs);
83876
83877 if ((ret < 0) || (info.si_signo == 0))
83878@@ -695,8 +696,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
83879 oldfs = get_fs();
83880 set_fs(KERNEL_DS);
83881 err = sys_timer_settime(timer_id, flags,
83882- (struct itimerspec __user *) &newts,
83883- (struct itimerspec __user *) &oldts);
83884+ (struct itimerspec __force_user *) &newts,
83885+ (struct itimerspec __force_user *) &oldts);
83886 set_fs(oldfs);
83887 if (!err && old && put_compat_itimerspec(old, &oldts))
83888 return -EFAULT;
83889@@ -713,7 +714,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
83890 oldfs = get_fs();
83891 set_fs(KERNEL_DS);
83892 err = sys_timer_gettime(timer_id,
83893- (struct itimerspec __user *) &ts);
83894+ (struct itimerspec __force_user *) &ts);
83895 set_fs(oldfs);
83896 if (!err && put_compat_itimerspec(setting, &ts))
83897 return -EFAULT;
83898@@ -732,7 +733,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
83899 oldfs = get_fs();
83900 set_fs(KERNEL_DS);
83901 err = sys_clock_settime(which_clock,
83902- (struct timespec __user *) &ts);
83903+ (struct timespec __force_user *) &ts);
83904 set_fs(oldfs);
83905 return err;
83906 }
83907@@ -747,7 +748,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
83908 oldfs = get_fs();
83909 set_fs(KERNEL_DS);
83910 err = sys_clock_gettime(which_clock,
83911- (struct timespec __user *) &ts);
83912+ (struct timespec __force_user *) &ts);
83913 set_fs(oldfs);
83914 if (!err && put_compat_timespec(&ts, tp))
83915 return -EFAULT;
83916@@ -767,7 +768,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
83917
83918 oldfs = get_fs();
83919 set_fs(KERNEL_DS);
83920- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
83921+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
83922 set_fs(oldfs);
83923
83924 err = compat_put_timex(utp, &txc);
83925@@ -787,7 +788,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
83926 oldfs = get_fs();
83927 set_fs(KERNEL_DS);
83928 err = sys_clock_getres(which_clock,
83929- (struct timespec __user *) &ts);
83930+ (struct timespec __force_user *) &ts);
83931 set_fs(oldfs);
83932 if (!err && tp && put_compat_timespec(&ts, tp))
83933 return -EFAULT;
83934@@ -799,9 +800,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
83935 long err;
83936 mm_segment_t oldfs;
83937 struct timespec tu;
83938- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
83939+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
83940
83941- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
83942+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
83943 oldfs = get_fs();
83944 set_fs(KERNEL_DS);
83945 err = clock_nanosleep_restart(restart);
83946@@ -833,8 +834,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
83947 oldfs = get_fs();
83948 set_fs(KERNEL_DS);
83949 err = sys_clock_nanosleep(which_clock, flags,
83950- (struct timespec __user *) &in,
83951- (struct timespec __user *) &out);
83952+ (struct timespec __force_user *) &in,
83953+ (struct timespec __force_user *) &out);
83954 set_fs(oldfs);
83955
83956 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
83957@@ -1128,7 +1129,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
83958 mm_segment_t old_fs = get_fs();
83959
83960 set_fs(KERNEL_DS);
83961- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
83962+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
83963 set_fs(old_fs);
83964 if (put_compat_timespec(&t, interval))
83965 return -EFAULT;
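
Every shim in this file follows the same bracket: save the current address limit, widen it with set_fs(KERNEL_DS) so the uaccess checks accept kernel addresses, call the native syscall with __force_user-cast pointers, then restore the saved limit. The pattern reduced to its skeleton (the wrapper name is illustrative):

    /* Sketch: the get_fs()/set_fs() bracket used throughout compat.c. */
    #include <linux/syscalls.h>
    #include <linux/uaccess.h>

    static long native_call_with_kernel_buf(struct timespec *ts)
    {
            mm_segment_t old_fs = get_fs();
            long err;

            set_fs(KERNEL_DS);      /* kernel pointers now pass checks */
            err = sys_clock_gettime(CLOCK_MONOTONIC,
                                    (struct timespec __force_user *)ts);
            set_fs(old_fs);         /* always restore the saved limit */
            return err;
    }
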
83966diff --git a/kernel/configs.c b/kernel/configs.c
83967index c18b1f1..b9a0132 100644
83968--- a/kernel/configs.c
83969+++ b/kernel/configs.c
83970@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
83971 struct proc_dir_entry *entry;
83972
83973 /* create the current config file */
83974+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
83975+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
83976+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
83977+ &ikconfig_file_ops);
83978+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
83979+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
83980+ &ikconfig_file_ops);
83981+#endif
83982+#else
83983 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
83984 &ikconfig_file_ops);
83985+#endif
83986+
83987 if (!entry)
83988 return -ENOMEM;
83989
83990diff --git a/kernel/cred.c b/kernel/cred.c
83991index e0573a4..3874e41 100644
83992--- a/kernel/cred.c
83993+++ b/kernel/cred.c
83994@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
83995 validate_creds(cred);
83996 alter_cred_subscribers(cred, -1);
83997 put_cred(cred);
83998+
83999+#ifdef CONFIG_GRKERNSEC_SETXID
84000+ cred = (struct cred *) tsk->delayed_cred;
84001+ if (cred != NULL) {
84002+ tsk->delayed_cred = NULL;
84003+ validate_creds(cred);
84004+ alter_cred_subscribers(cred, -1);
84005+ put_cred(cred);
84006+ }
84007+#endif
84008 }
84009
84010 /**
84011@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
84012 * Always returns 0 thus allowing this function to be tail-called at the end
84013 * of, say, sys_setgid().
84014 */
84015-int commit_creds(struct cred *new)
84016+static int __commit_creds(struct cred *new)
84017 {
84018 struct task_struct *task = current;
84019 const struct cred *old = task->real_cred;
84020@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
84021
84022 get_cred(new); /* we will require a ref for the subj creds too */
84023
84024+ gr_set_role_label(task, new->uid, new->gid);
84025+
84026 /* dumpability changes */
84027 if (!uid_eq(old->euid, new->euid) ||
84028 !gid_eq(old->egid, new->egid) ||
84029@@ -479,6 +491,102 @@ int commit_creds(struct cred *new)
84030 put_cred(old);
84031 return 0;
84032 }
84033+#ifdef CONFIG_GRKERNSEC_SETXID
84034+extern int set_user(struct cred *new);
84035+
84036+void gr_delayed_cred_worker(void)
84037+{
84038+ const struct cred *new = current->delayed_cred;
84039+ struct cred *ncred;
84040+
84041+ current->delayed_cred = NULL;
84042+
84043+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
84044+ // from doing get_cred on it when queueing this
84045+ put_cred(new);
84046+ return;
84047+ } else if (new == NULL)
84048+ return;
84049+
84050+ ncred = prepare_creds();
84051+ if (!ncred)
84052+ goto die;
84053+ // uids
84054+ ncred->uid = new->uid;
84055+ ncred->euid = new->euid;
84056+ ncred->suid = new->suid;
84057+ ncred->fsuid = new->fsuid;
84058+ // gids
84059+ ncred->gid = new->gid;
84060+ ncred->egid = new->egid;
84061+ ncred->sgid = new->sgid;
84062+ ncred->fsgid = new->fsgid;
84063+ // groups
84064+ if (set_groups(ncred, new->group_info) < 0) {
84065+ abort_creds(ncred);
84066+ goto die;
84067+ }
84068+ // caps
84069+ ncred->securebits = new->securebits;
84070+ ncred->cap_inheritable = new->cap_inheritable;
84071+ ncred->cap_permitted = new->cap_permitted;
84072+ ncred->cap_effective = new->cap_effective;
84073+ ncred->cap_bset = new->cap_bset;
84074+
84075+ if (set_user(ncred)) {
84076+ abort_creds(ncred);
84077+ goto die;
84078+ }
84079+
84080 + // drop the reference taken via get_cred when this cred was queued
84081+ put_cred(new);
84082+
84083+ __commit_creds(ncred);
84084+ return;
84085+die:
84086 + // drop the reference taken via get_cred when this cred was queued
84087+ put_cred(new);
84088+ do_group_exit(SIGKILL);
84089+}
84090+#endif
84091+
84092+int commit_creds(struct cred *new)
84093+{
84094+#ifdef CONFIG_GRKERNSEC_SETXID
84095+ int ret;
84096+ int schedule_it = 0;
84097+ struct task_struct *t;
84098+
84099 + /* we won't be called with tasklist_lock held for writing
84100 + and interrupts disabled, since in that case the cred struct
84101 + is init_cred
84102+ */
84103+ if (grsec_enable_setxid && !current_is_single_threaded() &&
84104+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
84105+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
84106+ schedule_it = 1;
84107+ }
84108+ ret = __commit_creds(new);
84109+ if (schedule_it) {
84110+ rcu_read_lock();
84111+ read_lock(&tasklist_lock);
84112+ for (t = next_thread(current); t != current;
84113+ t = next_thread(t)) {
84114+ if (t->delayed_cred == NULL) {
84115+ t->delayed_cred = get_cred(new);
84116+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
84117+ set_tsk_need_resched(t);
84118+ }
84119+ }
84120+ read_unlock(&tasklist_lock);
84121+ rcu_read_unlock();
84122+ }
84123+ return ret;
84124+#else
84125+ return __commit_creds(new);
84126+#endif
84127+}
84128+
84129 EXPORT_SYMBOL(commit_creds);
84130
84131 /**
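
The kernel/cred.c block implements the GRKERNSEC_SETXID scheme: when a multithreaded root process drops to a non-root uid, commit_creds() applies the change to the calling thread immediately, then queues a reference-counted copy of the new cred on every sibling thread (delayed_cred plus TIF_GRSEC_SETXID), so no thread can linger with root credentials after another has dropped them. Each sibling applies the pending cred from its own context via gr_delayed_cred_worker(). A condensed, illustrative sketch of the consumption side (in the real patch the flag is tested on the arch syscall path, not by a helper with this name):

    /* called by a thread on its own kernel entry path */
    static void grsec_check_delayed_cred(void)
    {
    	if (test_and_clear_tsk_thread_flag(current, TIF_GRSEC_SETXID))
    		gr_delayed_cred_worker();	/* applies current->delayed_cred */
    }
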
84132diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
84133index 7d2f35e..1bafcd0 100644
84134--- a/kernel/debug/debug_core.c
84135+++ b/kernel/debug/debug_core.c
84136@@ -123,7 +123,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
84137 */
84138 static atomic_t masters_in_kgdb;
84139 static atomic_t slaves_in_kgdb;
84140-static atomic_t kgdb_break_tasklet_var;
84141+static atomic_unchecked_t kgdb_break_tasklet_var;
84142 atomic_t kgdb_setting_breakpoint;
84143
84144 struct task_struct *kgdb_usethread;
84145@@ -133,7 +133,7 @@ int kgdb_single_step;
84146 static pid_t kgdb_sstep_pid;
84147
84148 /* to keep track of the CPU which is doing the single stepping*/
84149-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
84150+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
84151
84152 /*
84153 * If you are debugging a problem where roundup (the collection of
84154@@ -541,7 +541,7 @@ return_normal:
84155 * kernel will only try for the value of sstep_tries before
84156 * giving up and continuing on.
84157 */
84158- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
84159+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
84160 (kgdb_info[cpu].task &&
84161 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
84162 atomic_set(&kgdb_active, -1);
84163@@ -639,8 +639,8 @@ cpu_master_loop:
84164 }
84165
84166 kgdb_restore:
84167- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
84168- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
84169+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
84170+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
84171 if (kgdb_info[sstep_cpu].task)
84172 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
84173 else
84174@@ -916,18 +916,18 @@ static void kgdb_unregister_callbacks(void)
84175 static void kgdb_tasklet_bpt(unsigned long ing)
84176 {
84177 kgdb_breakpoint();
84178- atomic_set(&kgdb_break_tasklet_var, 0);
84179+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
84180 }
84181
84182 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
84183
84184 void kgdb_schedule_breakpoint(void)
84185 {
84186- if (atomic_read(&kgdb_break_tasklet_var) ||
84187+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
84188 atomic_read(&kgdb_active) != -1 ||
84189 atomic_read(&kgdb_setting_breakpoint))
84190 return;
84191- atomic_inc(&kgdb_break_tasklet_var);
84192+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
84193 tasklet_schedule(&kgdb_tasklet_breakpoint);
84194 }
84195 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
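
The debug_core.c conversions are part of the PAX_REFCOUNT scheme: ordinary atomic_t arithmetic is instrumented to trap on overflow, so counters where wrapping is harmless (a tasklet guard, a single-stepping CPU id) are moved to the _unchecked variants, which keep plain semantics. A simplified sketch of the opt-out type (the real definitions are per-arch and mirror the full atomic API):

    typedef struct {
    	int counter;
    } atomic_unchecked_t;

    static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
    {
    	return (*(volatile int *)&v->counter);
    }

    static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
    {
    	v->counter = i;
    }
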
84196diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
84197index 0b097c8..11dd5c5 100644
84198--- a/kernel/debug/kdb/kdb_main.c
84199+++ b/kernel/debug/kdb/kdb_main.c
84200@@ -1977,7 +1977,7 @@ static int kdb_lsmod(int argc, const char **argv)
84201 continue;
84202
84203 kdb_printf("%-20s%8u 0x%p ", mod->name,
84204- mod->core_size, (void *)mod);
84205+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
84206 #ifdef CONFIG_MODULE_UNLOAD
84207 kdb_printf("%4ld ", module_refcount(mod));
84208 #endif
84209@@ -1987,7 +1987,7 @@ static int kdb_lsmod(int argc, const char **argv)
84210 kdb_printf(" (Loading)");
84211 else
84212 kdb_printf(" (Live)");
84213- kdb_printf(" 0x%p", mod->module_core);
84214+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
84215
84216 #ifdef CONFIG_MODULE_UNLOAD
84217 {
84218diff --git a/kernel/events/core.c b/kernel/events/core.c
84219index f574401..11b21f0 100644
84220--- a/kernel/events/core.c
84221+++ b/kernel/events/core.c
84222@@ -157,8 +157,15 @@ static struct srcu_struct pmus_srcu;
84223 * 0 - disallow raw tracepoint access for unpriv
84224 * 1 - disallow cpu events for unpriv
84225 * 2 - disallow kernel profiling for unpriv
84226+ * 3 - disallow all unpriv perf event use
84227 */
84228-int sysctl_perf_event_paranoid __read_mostly = 1;
84229+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
84230+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
84231+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
84232+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
84233+#else
84234+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
84235+#endif
84236
84237 /* Minimum for 512 kiB + 1 user control page */
84238 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
84239@@ -184,7 +191,7 @@ void update_perf_cpu_limits(void)
84240
84241 tmp *= sysctl_perf_cpu_time_max_percent;
84242 do_div(tmp, 100);
84243- ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
84244+ ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
84245 }
84246
84247 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
84248@@ -271,7 +278,7 @@ void perf_sample_event_took(u64 sample_len_ns)
84249 update_perf_cpu_limits();
84250 }
84251
84252-static atomic64_t perf_event_id;
84253+static atomic64_unchecked_t perf_event_id;
84254
84255 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
84256 enum event_type_t event_type);
84257@@ -2985,7 +2992,7 @@ static void __perf_event_read(void *info)
84258
84259 static inline u64 perf_event_count(struct perf_event *event)
84260 {
84261- return local64_read(&event->count) + atomic64_read(&event->child_count);
84262+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
84263 }
84264
84265 static u64 perf_event_read(struct perf_event *event)
84266@@ -3353,9 +3360,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
84267 mutex_lock(&event->child_mutex);
84268 total += perf_event_read(event);
84269 *enabled += event->total_time_enabled +
84270- atomic64_read(&event->child_total_time_enabled);
84271+ atomic64_read_unchecked(&event->child_total_time_enabled);
84272 *running += event->total_time_running +
84273- atomic64_read(&event->child_total_time_running);
84274+ atomic64_read_unchecked(&event->child_total_time_running);
84275
84276 list_for_each_entry(child, &event->child_list, child_list) {
84277 total += perf_event_read(child);
84278@@ -3770,10 +3777,10 @@ void perf_event_update_userpage(struct perf_event *event)
84279 userpg->offset -= local64_read(&event->hw.prev_count);
84280
84281 userpg->time_enabled = enabled +
84282- atomic64_read(&event->child_total_time_enabled);
84283+ atomic64_read_unchecked(&event->child_total_time_enabled);
84284
84285 userpg->time_running = running +
84286- atomic64_read(&event->child_total_time_running);
84287+ atomic64_read_unchecked(&event->child_total_time_running);
84288
84289 arch_perf_update_userpage(userpg, now);
84290
84291@@ -4324,7 +4331,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
84292
84293 /* Data. */
84294 sp = perf_user_stack_pointer(regs);
84295- rem = __output_copy_user(handle, (void *) sp, dump_size);
84296+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
84297 dyn_size = dump_size - rem;
84298
84299 perf_output_skip(handle, rem);
84300@@ -4415,11 +4422,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
84301 values[n++] = perf_event_count(event);
84302 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
84303 values[n++] = enabled +
84304- atomic64_read(&event->child_total_time_enabled);
84305+ atomic64_read_unchecked(&event->child_total_time_enabled);
84306 }
84307 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
84308 values[n++] = running +
84309- atomic64_read(&event->child_total_time_running);
84310+ atomic64_read_unchecked(&event->child_total_time_running);
84311 }
84312 if (read_format & PERF_FORMAT_ID)
84313 values[n++] = primary_event_id(event);
84314@@ -6686,7 +6693,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
84315 event->parent = parent_event;
84316
84317 event->ns = get_pid_ns(task_active_pid_ns(current));
84318- event->id = atomic64_inc_return(&perf_event_id);
84319+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
84320
84321 event->state = PERF_EVENT_STATE_INACTIVE;
84322
84323@@ -6985,6 +6992,11 @@ SYSCALL_DEFINE5(perf_event_open,
84324 if (flags & ~PERF_FLAG_ALL)
84325 return -EINVAL;
84326
84327+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
84328+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
84329+ return -EACCES;
84330+#endif
84331+
84332 err = perf_copy_attr(attr_uptr, &attr);
84333 if (err)
84334 return err;
84335@@ -7316,10 +7328,10 @@ static void sync_child_event(struct perf_event *child_event,
84336 /*
84337 * Add back the child's count to the parent's count:
84338 */
84339- atomic64_add(child_val, &parent_event->child_count);
84340- atomic64_add(child_event->total_time_enabled,
84341+ atomic64_add_unchecked(child_val, &parent_event->child_count);
84342+ atomic64_add_unchecked(child_event->total_time_enabled,
84343 &parent_event->child_total_time_enabled);
84344- atomic64_add(child_event->total_time_running,
84345+ atomic64_add_unchecked(child_event->total_time_running,
84346 &parent_event->child_total_time_running);
84347
84348 /*
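
The events/core.c hunk introduces paranoia level 3, which disables perf_event_open() entirely for unprivileged users, and picks the default level from the hardening configuration. The gate uses perf_paranoid_any(), added elsewhere in this patch alongside the existing perf_paranoid_cpu()/perf_paranoid_kernel() helpers; it presumably reduces to a sketch like:

    static inline bool perf_paranoid_any(void)
    {
    	/* level 3 and above: no unprivileged perf at all */
    	return sysctl_perf_event_legitimately_concerned > 2;
    }
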
84349diff --git a/kernel/events/internal.h b/kernel/events/internal.h
84350index 569b2187..19940d9 100644
84351--- a/kernel/events/internal.h
84352+++ b/kernel/events/internal.h
84353@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
84354 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
84355 }
84356
84357-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
84358+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
84359 static inline unsigned long \
84360 func_name(struct perf_output_handle *handle, \
84361- const void *buf, unsigned long len) \
84362+ const void user *buf, unsigned long len) \
84363 { \
84364 unsigned long size, written; \
84365 \
84366@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
84367 return 0;
84368 }
84369
84370-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
84371+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
84372
84373 static inline unsigned long
84374 memcpy_skip(void *dst, const void *src, unsigned long n)
84375@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
84376 return 0;
84377 }
84378
84379-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
84380+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
84381
84382 #ifndef arch_perf_out_copy_user
84383 #define arch_perf_out_copy_user arch_perf_out_copy_user
84384@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
84385 }
84386 #endif
84387
84388-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
84389+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
84390
84391 /* Callchain handling */
84392 extern struct perf_callchain_entry *
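
The internal.h change threads a sparse address-space annotation through DEFINE_OUTPUT_COPY: an empty third argument yields the kernel-to-kernel copiers, while __user yields a correctly typed user-space copier, so __output_copy_user() can no longer be handed a kernel pointer without a warning. Roughly, the two instantiations expand to:

    /*
     *   DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
     *     -> unsigned long __output_copy(struct perf_output_handle *h,
     *                                    const void *buf, unsigned long len);
     *
     *   DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
     *     -> unsigned long __output_copy_user(struct perf_output_handle *h,
     *                                         const void __user *buf,
     *                                         unsigned long len);
     */
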
84393diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
84394index 24b7d6c..40cf797 100644
84395--- a/kernel/events/uprobes.c
84396+++ b/kernel/events/uprobes.c
84397@@ -1640,7 +1640,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
84398 {
84399 struct page *page;
84400 uprobe_opcode_t opcode;
84401- int result;
84402+ long result;
84403
84404 pagefault_disable();
84405 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
84406diff --git a/kernel/exit.c b/kernel/exit.c
84407index a949819..a5f127d 100644
84408--- a/kernel/exit.c
84409+++ b/kernel/exit.c
84410@@ -172,6 +172,10 @@ void release_task(struct task_struct * p)
84411 struct task_struct *leader;
84412 int zap_leader;
84413 repeat:
84414+#ifdef CONFIG_NET
84415+ gr_del_task_from_ip_table(p);
84416+#endif
84417+
84418 /* don't need to get the RCU readlock here - the process is dead and
84419 * can't be modifying its own credentials. But shut RCU-lockdep up */
84420 rcu_read_lock();
84421@@ -329,7 +333,7 @@ int allow_signal(int sig)
84422 * know it'll be handled, so that they don't get converted to
84423 * SIGKILL or just silently dropped.
84424 */
84425- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
84426+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
84427 recalc_sigpending();
84428 spin_unlock_irq(&current->sighand->siglock);
84429 return 0;
84430@@ -698,6 +702,8 @@ void do_exit(long code)
84431 struct task_struct *tsk = current;
84432 int group_dead;
84433
84434+ set_fs(USER_DS);
84435+
84436 profile_task_exit(tsk);
84437
84438 WARN_ON(blk_needs_flush_plug(tsk));
84439@@ -714,7 +720,6 @@ void do_exit(long code)
84440 * mm_release()->clear_child_tid() from writing to a user-controlled
84441 * kernel address.
84442 */
84443- set_fs(USER_DS);
84444
84445 ptrace_event(PTRACE_EVENT_EXIT, code);
84446
84447@@ -773,6 +778,9 @@ void do_exit(long code)
84448 tsk->exit_code = code;
84449 taskstats_exit(tsk, group_dead);
84450
84451+ gr_acl_handle_psacct(tsk, code);
84452+ gr_acl_handle_exit();
84453+
84454 exit_mm(tsk);
84455
84456 if (group_dead)
84457@@ -894,7 +902,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
84458 * Take down every thread in the group. This is called by fatal signals
84459 * as well as by sys_exit_group (below).
84460 */
84461-void
84462+__noreturn void
84463 do_group_exit(int exit_code)
84464 {
84465 struct signal_struct *sig = current->signal;
84466diff --git a/kernel/fork.c b/kernel/fork.c
84467index dfa736c..d170f9b 100644
84468--- a/kernel/fork.c
84469+++ b/kernel/fork.c
84470@@ -319,7 +319,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
84471 *stackend = STACK_END_MAGIC; /* for overflow detection */
84472
84473 #ifdef CONFIG_CC_STACKPROTECTOR
84474- tsk->stack_canary = get_random_int();
84475+ tsk->stack_canary = pax_get_random_long();
84476 #endif
84477
84478 /*
84479@@ -345,12 +345,80 @@ free_tsk:
84480 }
84481
84482 #ifdef CONFIG_MMU
84483-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
84484+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
84485+{
84486+ struct vm_area_struct *tmp;
84487+ unsigned long charge;
84488+ struct file *file;
84489+ int retval;
84490+
84491+ charge = 0;
84492+ if (mpnt->vm_flags & VM_ACCOUNT) {
84493+ unsigned long len = vma_pages(mpnt);
84494+
84495+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
84496+ goto fail_nomem;
84497+ charge = len;
84498+ }
84499+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
84500+ if (!tmp)
84501+ goto fail_nomem;
84502+ *tmp = *mpnt;
84503+ tmp->vm_mm = mm;
84504+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
84505+ retval = vma_dup_policy(mpnt, tmp);
84506+ if (retval)
84507+ goto fail_nomem_policy;
84508+ if (anon_vma_fork(tmp, mpnt))
84509+ goto fail_nomem_anon_vma_fork;
84510+ tmp->vm_flags &= ~VM_LOCKED;
84511+ tmp->vm_next = tmp->vm_prev = NULL;
84512+ tmp->vm_mirror = NULL;
84513+ file = tmp->vm_file;
84514+ if (file) {
84515+ struct inode *inode = file_inode(file);
84516+ struct address_space *mapping = file->f_mapping;
84517+
84518+ get_file(file);
84519+ if (tmp->vm_flags & VM_DENYWRITE)
84520+ atomic_dec(&inode->i_writecount);
84521+ mutex_lock(&mapping->i_mmap_mutex);
84522+ if (tmp->vm_flags & VM_SHARED)
84523+ mapping->i_mmap_writable++;
84524+ flush_dcache_mmap_lock(mapping);
84525+ /* insert tmp into the share list, just after mpnt */
84526+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
84527+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
84528+ else
84529+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
84530+ flush_dcache_mmap_unlock(mapping);
84531+ mutex_unlock(&mapping->i_mmap_mutex);
84532+ }
84533+
84534+ /*
84535+ * Clear hugetlb-related page reserves for children. This only
84536+ * affects MAP_PRIVATE mappings. Faults generated by the child
84537+ * are not guaranteed to succeed, even if read-only
84538+ */
84539+ if (is_vm_hugetlb_page(tmp))
84540+ reset_vma_resv_huge_pages(tmp);
84541+
84542+ return tmp;
84543+
84544+fail_nomem_anon_vma_fork:
84545+ mpol_put(vma_policy(tmp));
84546+fail_nomem_policy:
84547+ kmem_cache_free(vm_area_cachep, tmp);
84548+fail_nomem:
84549+ vm_unacct_memory(charge);
84550+ return NULL;
84551+}
84552+
84553+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
84554 {
84555 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
84556 struct rb_node **rb_link, *rb_parent;
84557 int retval;
84558- unsigned long charge;
84559
84560 uprobe_start_dup_mmap();
84561 down_write(&oldmm->mmap_sem);
84562@@ -379,55 +447,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
84563
84564 prev = NULL;
84565 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
84566- struct file *file;
84567-
84568 if (mpnt->vm_flags & VM_DONTCOPY) {
84569 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
84570 -vma_pages(mpnt));
84571 continue;
84572 }
84573- charge = 0;
84574- if (mpnt->vm_flags & VM_ACCOUNT) {
84575- unsigned long len = vma_pages(mpnt);
84576-
84577- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
84578- goto fail_nomem;
84579- charge = len;
84580- }
84581- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
84582- if (!tmp)
84583- goto fail_nomem;
84584- *tmp = *mpnt;
84585- INIT_LIST_HEAD(&tmp->anon_vma_chain);
84586- retval = vma_dup_policy(mpnt, tmp);
84587- if (retval)
84588- goto fail_nomem_policy;
84589- tmp->vm_mm = mm;
84590- if (anon_vma_fork(tmp, mpnt))
84591- goto fail_nomem_anon_vma_fork;
84592- tmp->vm_flags &= ~VM_LOCKED;
84593- tmp->vm_next = tmp->vm_prev = NULL;
84594- file = tmp->vm_file;
84595- if (file) {
84596- struct inode *inode = file_inode(file);
84597- struct address_space *mapping = file->f_mapping;
84598-
84599- get_file(file);
84600- if (tmp->vm_flags & VM_DENYWRITE)
84601- atomic_dec(&inode->i_writecount);
84602- mutex_lock(&mapping->i_mmap_mutex);
84603- if (tmp->vm_flags & VM_SHARED)
84604- mapping->i_mmap_writable++;
84605- flush_dcache_mmap_lock(mapping);
84606- /* insert tmp into the share list, just after mpnt */
84607- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
84608- vma_nonlinear_insert(tmp,
84609- &mapping->i_mmap_nonlinear);
84610- else
84611- vma_interval_tree_insert_after(tmp, mpnt,
84612- &mapping->i_mmap);
84613- flush_dcache_mmap_unlock(mapping);
84614- mutex_unlock(&mapping->i_mmap_mutex);
84615+ tmp = dup_vma(mm, oldmm, mpnt);
84616+ if (!tmp) {
84617+ retval = -ENOMEM;
84618+ goto out;
84619 }
84620
84621 /*
84622@@ -459,6 +487,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
84623 if (retval)
84624 goto out;
84625 }
84626+
84627+#ifdef CONFIG_PAX_SEGMEXEC
84628+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
84629+ struct vm_area_struct *mpnt_m;
84630+
84631+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
84632+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
84633+
84634+ if (!mpnt->vm_mirror)
84635+ continue;
84636+
84637+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
84638+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
84639+ mpnt->vm_mirror = mpnt_m;
84640+ } else {
84641+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
84642+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
84643+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
84644+ mpnt->vm_mirror->vm_mirror = mpnt;
84645+ }
84646+ }
84647+ BUG_ON(mpnt_m);
84648+ }
84649+#endif
84650+
84651 /* a new mm has just been created */
84652 arch_dup_mmap(oldmm, mm);
84653 retval = 0;
84654@@ -468,14 +521,6 @@ out:
84655 up_write(&oldmm->mmap_sem);
84656 uprobe_end_dup_mmap();
84657 return retval;
84658-fail_nomem_anon_vma_fork:
84659- mpol_put(vma_policy(tmp));
84660-fail_nomem_policy:
84661- kmem_cache_free(vm_area_cachep, tmp);
84662-fail_nomem:
84663- retval = -ENOMEM;
84664- vm_unacct_memory(charge);
84665- goto out;
84666 }
84667
84668 static inline int mm_alloc_pgd(struct mm_struct *mm)
84669@@ -689,8 +734,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
84670 return ERR_PTR(err);
84671
84672 mm = get_task_mm(task);
84673- if (mm && mm != current->mm &&
84674- !ptrace_may_access(task, mode)) {
84675+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
84676+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
84677 mmput(mm);
84678 mm = ERR_PTR(-EACCES);
84679 }
84680@@ -909,13 +954,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
84681 spin_unlock(&fs->lock);
84682 return -EAGAIN;
84683 }
84684- fs->users++;
84685+ atomic_inc(&fs->users);
84686 spin_unlock(&fs->lock);
84687 return 0;
84688 }
84689 tsk->fs = copy_fs_struct(fs);
84690 if (!tsk->fs)
84691 return -ENOMEM;
84692 + /* Carry through gr_chroot_dentry and is_chrooted instead
84693 + of recomputing them here. They were already copied when the
84694 + task struct was duplicated. This allows pivot_root not to be
84695 + treated as a chroot.
84696 + */
84697+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
84698+
84699 return 0;
84700 }
84701
84702@@ -1126,7 +1178,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
84703 * parts of the process environment (as per the clone
84704 * flags). The actual kick-off is left to the caller.
84705 */
84706-static struct task_struct *copy_process(unsigned long clone_flags,
84707+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
84708 unsigned long stack_start,
84709 unsigned long stack_size,
84710 int __user *child_tidptr,
84711@@ -1198,6 +1250,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
84712 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
84713 #endif
84714 retval = -EAGAIN;
84715+
84716+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
84717+
84718 if (atomic_read(&p->real_cred->user->processes) >=
84719 task_rlimit(p, RLIMIT_NPROC)) {
84720 if (p->real_cred->user != INIT_USER &&
84721@@ -1446,6 +1501,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
84722 goto bad_fork_free_pid;
84723 }
84724
84725+ /* synchronizes with gr_set_acls()
84726+ we need to call this past the point of no return for fork()
84727+ */
84728+ gr_copy_label(p);
84729+
84730 if (likely(p->pid)) {
84731 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
84732
84733@@ -1532,6 +1592,8 @@ bad_fork_cleanup_count:
84734 bad_fork_free:
84735 free_task(p);
84736 fork_out:
84737+ gr_log_forkfail(retval);
84738+
84739 return ERR_PTR(retval);
84740 }
84741
84742@@ -1593,6 +1655,7 @@ long do_fork(unsigned long clone_flags,
84743
84744 p = copy_process(clone_flags, stack_start, stack_size,
84745 child_tidptr, NULL, trace);
84746+ add_latent_entropy();
84747 /*
84748 * Do this prior waking up the new thread - the thread pointer
84749 * might get invalid after that point, if the thread exits quickly.
84750@@ -1607,6 +1670,8 @@ long do_fork(unsigned long clone_flags,
84751 if (clone_flags & CLONE_PARENT_SETTID)
84752 put_user(nr, parent_tidptr);
84753
84754+ gr_handle_brute_check();
84755+
84756 if (clone_flags & CLONE_VFORK) {
84757 p->vfork_done = &vfork;
84758 init_completion(&vfork);
84759@@ -1723,7 +1788,7 @@ void __init proc_caches_init(void)
84760 mm_cachep = kmem_cache_create("mm_struct",
84761 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
84762 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
84763- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
84764+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
84765 mmap_init();
84766 nsproxy_cache_init();
84767 }
84768@@ -1763,7 +1828,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
84769 return 0;
84770
84771 /* don't need lock here; in the worst case we'll do useless copy */
84772- if (fs->users == 1)
84773+ if (atomic_read(&fs->users) == 1)
84774 return 0;
84775
84776 *new_fsp = copy_fs_struct(fs);
84777@@ -1870,7 +1935,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
84778 fs = current->fs;
84779 spin_lock(&fs->lock);
84780 current->fs = new_fs;
84781- if (--fs->users)
84782+ gr_set_chroot_entries(current, &current->fs->root);
84783+ if (atomic_dec_return(&fs->users))
84784 new_fs = NULL;
84785 else
84786 new_fs = fs;
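
Besides splitting dup_vma() out of dup_mmap() (which also lets SEGMEXEC mirror VMAs be re-linked in a second pass after the copy loop), the kernel/fork.c hunk converts fs_struct's users count to atomic_t so grsecurity's chroot bookkeeping can take and drop references without extending fs->lock coverage. The conversion follows a mechanical mapping, roughly:

    /*
     *   fs->users++;           ->  atomic_inc(&fs->users);
     *   if (--fs->users)       ->  if (atomic_dec_return(&fs->users))
     *   if (fs->users == 1)    ->  if (atomic_read(&fs->users) == 1)
     */
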
84787diff --git a/kernel/futex.c b/kernel/futex.c
84788index f6ff019..ac53307 100644
84789--- a/kernel/futex.c
84790+++ b/kernel/futex.c
84791@@ -54,6 +54,7 @@
84792 #include <linux/mount.h>
84793 #include <linux/pagemap.h>
84794 #include <linux/syscalls.h>
84795+#include <linux/ptrace.h>
84796 #include <linux/signal.h>
84797 #include <linux/export.h>
84798 #include <linux/magic.h>
84799@@ -243,6 +244,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
84800 struct page *page, *page_head;
84801 int err, ro = 0;
84802
84803+#ifdef CONFIG_PAX_SEGMEXEC
84804+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
84805+ return -EFAULT;
84806+#endif
84807+
84808 /*
84809 * The futex address must be "naturally" aligned.
84810 */
84811@@ -442,7 +448,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
84812
84813 static int get_futex_value_locked(u32 *dest, u32 __user *from)
84814 {
84815- int ret;
84816+ unsigned long ret;
84817
84818 pagefault_disable();
84819 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
84820@@ -2735,6 +2741,7 @@ static int __init futex_init(void)
84821 {
84822 u32 curval;
84823 int i;
84824+ mm_segment_t oldfs;
84825
84826 /*
84827 * This will fail and we want it. Some arch implementations do
84828@@ -2746,8 +2753,11 @@ static int __init futex_init(void)
84829 * implementation, the non-functional ones will return
84830 * -ENOSYS.
84831 */
84832+ oldfs = get_fs();
84833+ set_fs(USER_DS);
84834 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
84835 futex_cmpxchg_enabled = 1;
84836+ set_fs(oldfs);
84837
84838 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
84839 plist_head_init(&futex_queues[i].chain);
84840diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
84841index f9f44fd..29885e4 100644
84842--- a/kernel/futex_compat.c
84843+++ b/kernel/futex_compat.c
84844@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
84845 return 0;
84846 }
84847
84848-static void __user *futex_uaddr(struct robust_list __user *entry,
84849+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
84850 compat_long_t futex_offset)
84851 {
84852 compat_uptr_t base = ptr_to_compat(entry);
84853diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
84854index f45b75b..bfac6d5 100644
84855--- a/kernel/gcov/base.c
84856+++ b/kernel/gcov/base.c
84857@@ -108,11 +108,6 @@ void gcov_enable_events(void)
84858 }
84859
84860 #ifdef CONFIG_MODULES
84861-static inline int within(void *addr, void *start, unsigned long size)
84862-{
84863- return ((addr >= start) && (addr < start + size));
84864-}
84865-
84866 /* Update list and generate events when modules are unloaded. */
84867 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
84868 void *data)
84869@@ -127,7 +122,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
84870
84871 /* Remove entries located in module from linked list. */
84872 while ((info = gcov_info_next(info))) {
84873- if (within(info, mod->module_core, mod->core_size)) {
84874+ if (within_module_core_rw((unsigned long)info, mod)) {
84875 gcov_info_unlink(prev, info);
84876 if (gcov_events_enabled)
84877 gcov_event(GCOV_REMOVE, info);
84878diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
84879index 383319b..56ebb13 100644
84880--- a/kernel/hrtimer.c
84881+++ b/kernel/hrtimer.c
84882@@ -1438,7 +1438,7 @@ void hrtimer_peek_ahead_timers(void)
84883 local_irq_restore(flags);
84884 }
84885
84886-static void run_hrtimer_softirq(struct softirq_action *h)
84887+static __latent_entropy void run_hrtimer_softirq(void)
84888 {
84889 hrtimer_peek_ahead_timers();
84890 }
84891diff --git a/kernel/irq_work.c b/kernel/irq_work.c
84892index 55fcce6..0e4cf34 100644
84893--- a/kernel/irq_work.c
84894+++ b/kernel/irq_work.c
84895@@ -189,12 +189,13 @@ static int irq_work_cpu_notify(struct notifier_block *self,
84896 return NOTIFY_OK;
84897 }
84898
84899-static struct notifier_block cpu_notify;
84900+static struct notifier_block cpu_notify = {
84901+ .notifier_call = irq_work_cpu_notify,
84902+ .priority = 0,
84903+};
84904
84905 static __init int irq_work_init_cpu_notifier(void)
84906 {
84907- cpu_notify.notifier_call = irq_work_cpu_notify;
84908- cpu_notify.priority = 0;
84909 register_cpu_notifier(&cpu_notify);
84910 return 0;
84911 }
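
The irq_work.c change is a small constification enabler: filling the notifier_block with a designated initializer removes the runtime field writes, so the object can later be placed in read-only memory like the other no_const/__read_only structures in this patch. The pattern in general form (example_cpu_nb and example_init are illustrative names):

    static struct notifier_block example_cpu_nb = {
    	.notifier_call	= irq_work_cpu_notify,
    	.priority	= 0,
    };

    static __init int example_init(void)
    {
    	/* registration needs no field assignments at all */
    	return register_cpu_notifier(&example_cpu_nb);
    }
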
84912diff --git a/kernel/jump_label.c b/kernel/jump_label.c
84913index 9019f15..9a3c42e 100644
84914--- a/kernel/jump_label.c
84915+++ b/kernel/jump_label.c
84916@@ -14,6 +14,7 @@
84917 #include <linux/err.h>
84918 #include <linux/static_key.h>
84919 #include <linux/jump_label_ratelimit.h>
84920+#include <linux/mm.h>
84921
84922 #ifdef HAVE_JUMP_LABEL
84923
84924@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
84925
84926 size = (((unsigned long)stop - (unsigned long)start)
84927 / sizeof(struct jump_entry));
84928+ pax_open_kernel();
84929 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
84930+ pax_close_kernel();
84931 }
84932
84933 static void jump_label_update(struct static_key *key, int enable);
84934@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
84935 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
84936 struct jump_entry *iter;
84937
84938+ pax_open_kernel();
84939 for (iter = iter_start; iter < iter_stop; iter++) {
84940 if (within_module_init(iter->code, mod))
84941 iter->code = 0;
84942 }
84943+ pax_close_kernel();
84944 }
84945
84946 static int
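
Both jump_label.c hunks wrap writes to the jump-entry table in pax_open_kernel()/pax_close_kernel(), the PAX_KERNEXEC window for modifying otherwise read-only kernel data (on x86 this amounts to toggling CR0.WP around the write). A hypothetical wrapper showing the shape of the pattern:

    static void sort_protected_table(struct jump_entry *start, size_t n)
    {
    	pax_open_kernel();	/* lift kernel write protection */
    	sort(start, n, sizeof(struct jump_entry), jump_label_cmp, NULL);
    	pax_close_kernel();	/* restore it immediately after */
    }
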
84947diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
84948index 3127ad5..159d880 100644
84949--- a/kernel/kallsyms.c
84950+++ b/kernel/kallsyms.c
84951@@ -11,6 +11,9 @@
84952 * Changed the compression method from stem compression to "table lookup"
84953 * compression (see scripts/kallsyms.c for a more complete description)
84954 */
84955+#ifdef CONFIG_GRKERNSEC_HIDESYM
84956+#define __INCLUDED_BY_HIDESYM 1
84957+#endif
84958 #include <linux/kallsyms.h>
84959 #include <linux/module.h>
84960 #include <linux/init.h>
84961@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
84962
84963 static inline int is_kernel_inittext(unsigned long addr)
84964 {
84965+ if (system_state != SYSTEM_BOOTING)
84966+ return 0;
84967+
84968 if (addr >= (unsigned long)_sinittext
84969 && addr <= (unsigned long)_einittext)
84970 return 1;
84971 return 0;
84972 }
84973
84974+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
84975+#ifdef CONFIG_MODULES
84976+static inline int is_module_text(unsigned long addr)
84977+{
84978+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
84979+ return 1;
84980+
84981+ addr = ktla_ktva(addr);
84982+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
84983+}
84984+#else
84985+static inline int is_module_text(unsigned long addr)
84986+{
84987+ return 0;
84988+}
84989+#endif
84990+#endif
84991+
84992 static inline int is_kernel_text(unsigned long addr)
84993 {
84994 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
84995@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
84996
84997 static inline int is_kernel(unsigned long addr)
84998 {
84999+
85000+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
85001+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
85002+ return 1;
85003+
85004+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
85005+#else
85006 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
85007+#endif
85008+
85009 return 1;
85010 return in_gate_area_no_mm(addr);
85011 }
85012
85013 static int is_ksym_addr(unsigned long addr)
85014 {
85015+
85016+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
85017+ if (is_module_text(addr))
85018+ return 0;
85019+#endif
85020+
85021 if (all_var)
85022 return is_kernel(addr);
85023
85024@@ -480,7 +519,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
85025
85026 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
85027 {
85028- iter->name[0] = '\0';
85029 iter->nameoff = get_symbol_offset(new_pos);
85030 iter->pos = new_pos;
85031 }
85032@@ -528,6 +566,11 @@ static int s_show(struct seq_file *m, void *p)
85033 {
85034 struct kallsym_iter *iter = m->private;
85035
85036+#ifdef CONFIG_GRKERNSEC_HIDESYM
85037+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
85038+ return 0;
85039+#endif
85040+
85041 /* Some debugging symbols have no name. Ignore them. */
85042 if (!iter->name[0])
85043 return 0;
85044@@ -541,6 +584,7 @@ static int s_show(struct seq_file *m, void *p)
85045 */
85046 type = iter->exported ? toupper(iter->type) :
85047 tolower(iter->type);
85048+
85049 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
85050 type, iter->name, iter->module_name);
85051 } else
85052@@ -566,7 +610,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
85053 struct kallsym_iter *iter;
85054 int ret;
85055
85056- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
85057+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
85058 if (!iter)
85059 return -ENOMEM;
85060 reset_iter(iter, 0);
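
The final kallsyms.c hunk pairs with the reset_iter() change above: since iter->name[0] is no longer cleared on reset, a freshly kmalloc()ed iterator could expose stale heap bytes through the name buffer to a /proc/kallsyms reader. Switching to kzalloc() applies the usual rule that any structure whose contents can reach userspace should be zero-initialized unless every field is provably written first:

    iter = kzalloc(sizeof(*iter), GFP_KERNEL);	/* zeroed, not just allocated */
    if (!iter)
    	return -ENOMEM;
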
85061diff --git a/kernel/kcmp.c b/kernel/kcmp.c
85062index e30ac0f..3528cac 100644
85063--- a/kernel/kcmp.c
85064+++ b/kernel/kcmp.c
85065@@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
85066 struct task_struct *task1, *task2;
85067 int ret;
85068
85069+#ifdef CONFIG_GRKERNSEC
85070+ return -ENOSYS;
85071+#endif
85072+
85073 rcu_read_lock();
85074
85075 /*
85076diff --git a/kernel/kexec.c b/kernel/kexec.c
85077index 9c97016..df438f8 100644
85078--- a/kernel/kexec.c
85079+++ b/kernel/kexec.c
85080@@ -1044,7 +1044,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
85081 unsigned long flags)
85082 {
85083 struct compat_kexec_segment in;
85084- struct kexec_segment out, __user *ksegments;
85085+ struct kexec_segment out;
85086+ struct kexec_segment __user *ksegments;
85087 unsigned long i, result;
85088
85089 /* Don't allow clients that don't understand the native
85090diff --git a/kernel/kmod.c b/kernel/kmod.c
85091index b086006..655e2aa 100644
85092--- a/kernel/kmod.c
85093+++ b/kernel/kmod.c
85094@@ -75,7 +75,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
85095 kfree(info->argv);
85096 }
85097
85098-static int call_modprobe(char *module_name, int wait)
85099+static int call_modprobe(char *module_name, char *module_param, int wait)
85100 {
85101 struct subprocess_info *info;
85102 static char *envp[] = {
85103@@ -85,7 +85,7 @@ static int call_modprobe(char *module_name, int wait)
85104 NULL
85105 };
85106
85107- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
85108+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
85109 if (!argv)
85110 goto out;
85111
85112@@ -97,7 +97,8 @@ static int call_modprobe(char *module_name, int wait)
85113 argv[1] = "-q";
85114 argv[2] = "--";
85115 argv[3] = module_name; /* check free_modprobe_argv() */
85116- argv[4] = NULL;
85117+ argv[4] = module_param;
85118+ argv[5] = NULL;
85119
85120 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
85121 NULL, free_modprobe_argv, NULL);
85122@@ -129,9 +130,8 @@ out:
85123 * If module auto-loading support is disabled then this function
85124 * becomes a no-operation.
85125 */
85126-int __request_module(bool wait, const char *fmt, ...)
85127+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
85128 {
85129- va_list args;
85130 char module_name[MODULE_NAME_LEN];
85131 unsigned int max_modprobes;
85132 int ret;
85133@@ -150,9 +150,7 @@ int __request_module(bool wait, const char *fmt, ...)
85134 if (!modprobe_path[0])
85135 return 0;
85136
85137- va_start(args, fmt);
85138- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
85139- va_end(args);
85140+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
85141 if (ret >= MODULE_NAME_LEN)
85142 return -ENAMETOOLONG;
85143
85144@@ -160,6 +158,20 @@ int __request_module(bool wait, const char *fmt, ...)
85145 if (ret)
85146 return ret;
85147
85148+#ifdef CONFIG_GRKERNSEC_MODHARDEN
85149+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
85150 + /* hack to work around consolekit/udisks stupidity */
85151+ read_lock(&tasklist_lock);
85152+ if (!strcmp(current->comm, "mount") &&
85153+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
85154+ read_unlock(&tasklist_lock);
85155+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
85156+ return -EPERM;
85157+ }
85158+ read_unlock(&tasklist_lock);
85159+ }
85160+#endif
85161+
85162 /* If modprobe needs a service that is in a module, we get a recursive
85163 * loop. Limit the number of running kmod threads to max_threads/2 or
85164 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
85165@@ -188,11 +200,52 @@ int __request_module(bool wait, const char *fmt, ...)
85166
85167 trace_module_request(module_name, wait, _RET_IP_);
85168
85169- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
85170+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
85171
85172 atomic_dec(&kmod_concurrent);
85173 return ret;
85174 }
85175+
85176+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
85177+{
85178+ va_list args;
85179+ int ret;
85180+
85181+ va_start(args, fmt);
85182+ ret = ____request_module(wait, module_param, fmt, args);
85183+ va_end(args);
85184+
85185+ return ret;
85186+}
85187+
85188+int __request_module(bool wait, const char *fmt, ...)
85189+{
85190+ va_list args;
85191+ int ret;
85192+
85193+#ifdef CONFIG_GRKERNSEC_MODHARDEN
85194+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
85195+ char module_param[MODULE_NAME_LEN];
85196+
85197+ memset(module_param, 0, sizeof(module_param));
85198+
85199+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
85200+
85201+ va_start(args, fmt);
85202+ ret = ____request_module(wait, module_param, fmt, args);
85203+ va_end(args);
85204+
85205+ return ret;
85206+ }
85207+#endif
85208+
85209+ va_start(args, fmt);
85210+ ret = ____request_module(wait, NULL, fmt, args);
85211+ va_end(args);
85212+
85213+ return ret;
85214+}
85215+
85216 EXPORT_SYMBOL(__request_module);
85217 #endif /* CONFIG_MODULES */
85218
85219@@ -218,6 +271,19 @@ static int ____call_usermodehelper(void *data)
85220 */
85221 set_user_nice(current, 0);
85222
85223+#ifdef CONFIG_GRKERNSEC
85224 + /* this is race-free as far as userland is concerned: we copied
85225 + out the path to be used before this point and are now operating
85226 + on that copy
85227+ */
85228+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
85229+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7)) || strstr(sub_info->path, "..")) {
85230+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of /sbin and system library paths\n", sub_info->path);
85231+ retval = -EPERM;
85232+ goto fail;
85233+ }
85234+#endif
85235+
85236 retval = -ENOMEM;
85237 new = prepare_kernel_cred(current);
85238 if (!new)
85239@@ -240,8 +306,8 @@ static int ____call_usermodehelper(void *data)
85240 commit_creds(new);
85241
85242 retval = do_execve(sub_info->path,
85243- (const char __user *const __user *)sub_info->argv,
85244- (const char __user *const __user *)sub_info->envp);
85245+ (const char __user *const __force_user *)sub_info->argv,
85246+ (const char __user *const __force_user *)sub_info->envp);
85247 if (!retval)
85248 return 0;
85249
85250@@ -260,6 +326,10 @@ static int call_helper(void *data)
85251
85252 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
85253 {
85254+#ifdef CONFIG_GRKERNSEC
85255+ kfree(info->path);
85256+ info->path = info->origpath;
85257+#endif
85258 if (info->cleanup)
85259 (*info->cleanup)(info);
85260 kfree(info);
85261@@ -303,7 +373,7 @@ static int wait_for_helper(void *data)
85262 *
85263 * Thus the __user pointer cast is valid here.
85264 */
85265- sys_wait4(pid, (int __user *)&ret, 0, NULL);
85266+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
85267
85268 /*
85269 * If ret is 0, either ____call_usermodehelper failed and the
85270@@ -542,7 +612,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
85271 goto out;
85272
85273 INIT_WORK(&sub_info->work, __call_usermodehelper);
85274+#ifdef CONFIG_GRKERNSEC
85275+ sub_info->origpath = path;
85276+ sub_info->path = kstrdup(path, gfp_mask);
85277+#else
85278 sub_info->path = path;
85279+#endif
85280 sub_info->argv = argv;
85281 sub_info->envp = envp;
85282
85283@@ -650,7 +725,7 @@ EXPORT_SYMBOL(call_usermodehelper);
85284 static int proc_cap_handler(struct ctl_table *table, int write,
85285 void __user *buffer, size_t *lenp, loff_t *ppos)
85286 {
85287- struct ctl_table t;
85288+ ctl_table_no_const t;
85289 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
85290 kernel_cap_t new_cap;
85291 int err, i;
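
The kmod.c rework lets grsecurity pass an extra argument to modprobe: ____request_module() takes the va_list plus an optional module_param, and under MODHARDEN an unprivileged caller's request is tagged with a marker carrying the requesting uid. The resulting invocation looks roughly like:

    /*
     *   /sbin/modprobe -q -- <module> grsec_modharden_normal<uid>_
     *
     * so module code and logs can distinguish root-initiated loads
     * from ones triggered by unprivileged users.
     */
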
85292diff --git a/kernel/kprobes.c b/kernel/kprobes.c
85293index ceeadfc..11c18b6 100644
85294--- a/kernel/kprobes.c
85295+++ b/kernel/kprobes.c
85296@@ -31,6 +31,9 @@
85297 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
85298 * <prasanna@in.ibm.com> added function-return probes.
85299 */
85300+#ifdef CONFIG_GRKERNSEC_HIDESYM
85301+#define __INCLUDED_BY_HIDESYM 1
85302+#endif
85303 #include <linux/kprobes.h>
85304 #include <linux/hash.h>
85305 #include <linux/init.h>
85306@@ -135,12 +138,12 @@ enum kprobe_slot_state {
85307
85308 static void *alloc_insn_page(void)
85309 {
85310- return module_alloc(PAGE_SIZE);
85311+ return module_alloc_exec(PAGE_SIZE);
85312 }
85313
85314 static void free_insn_page(void *page)
85315 {
85316- module_free(NULL, page);
85317+ module_free_exec(NULL, page);
85318 }
85319
85320 struct kprobe_insn_cache kprobe_insn_slots = {
85321@@ -2151,11 +2154,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
85322 kprobe_type = "k";
85323
85324 if (sym)
85325- seq_printf(pi, "%p %s %s+0x%x %s ",
85326+ seq_printf(pi, "%pK %s %s+0x%x %s ",
85327 p->addr, kprobe_type, sym, offset,
85328 (modname ? modname : " "));
85329 else
85330- seq_printf(pi, "%p %s %p ",
85331+ seq_printf(pi, "%pK %s %pK ",
85332 p->addr, kprobe_type, p->addr);
85333
85334 if (!pp)
85335diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
85336index 9659d38..bffd520 100644
85337--- a/kernel/ksysfs.c
85338+++ b/kernel/ksysfs.c
85339@@ -46,6 +46,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
85340 {
85341 if (count+1 > UEVENT_HELPER_PATH_LEN)
85342 return -ENOENT;
85343+ if (!capable(CAP_SYS_ADMIN))
85344+ return -EPERM;
85345 memcpy(uevent_helper, buf, count);
85346 uevent_helper[count] = '\0';
85347 if (count && uevent_helper[count-1] == '\n')
85348@@ -172,7 +174,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
85349 return count;
85350 }
85351
85352-static struct bin_attribute notes_attr = {
85353+static bin_attribute_no_const notes_attr __read_only = {
85354 .attr = {
85355 .name = "notes",
85356 .mode = S_IRUGO,
85357diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
85358index 576ba75..7c256e4 100644
85359--- a/kernel/locking/lockdep.c
85360+++ b/kernel/locking/lockdep.c
85361@@ -596,6 +596,10 @@ static int static_obj(void *obj)
85362 end = (unsigned long) &_end,
85363 addr = (unsigned long) obj;
85364
85365+#ifdef CONFIG_PAX_KERNEXEC
85366+ start = ktla_ktva(start);
85367+#endif
85368+
85369 /*
85370 * static variable?
85371 */
85372@@ -736,6 +740,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
85373 if (!static_obj(lock->key)) {
85374 debug_locks_off();
85375 printk("INFO: trying to register non-static key.\n");
85376+ printk("lock:%pS key:%pS.\n", lock, lock->key);
85377 printk("the code is fine but needs lockdep annotation.\n");
85378 printk("turning off the locking correctness validator.\n");
85379 dump_stack();
85380@@ -3080,7 +3085,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
85381 if (!class)
85382 return 0;
85383 }
85384- atomic_inc((atomic_t *)&class->ops);
85385+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
85386 if (very_verbose(class)) {
85387 printk("\nacquire class [%p] %s", class->key, class->name);
85388 if (class->name_version > 1)
85389diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
85390index ef43ac4..2720dfa 100644
85391--- a/kernel/locking/lockdep_proc.c
85392+++ b/kernel/locking/lockdep_proc.c
85393@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
85394 return 0;
85395 }
85396
85397- seq_printf(m, "%p", class->key);
85398+ seq_printf(m, "%pK", class->key);
85399 #ifdef CONFIG_DEBUG_LOCKDEP
85400 seq_printf(m, " OPS:%8ld", class->ops);
85401 #endif
85402@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
85403
85404 list_for_each_entry(entry, &class->locks_after, entry) {
85405 if (entry->distance == 1) {
85406- seq_printf(m, " -> [%p] ", entry->class->key);
85407+ seq_printf(m, " -> [%pK] ", entry->class->key);
85408 print_name(m, entry->class);
85409 seq_puts(m, "\n");
85410 }
85411@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
85412 if (!class->key)
85413 continue;
85414
85415- seq_printf(m, "[%p] ", class->key);
85416+ seq_printf(m, "[%pK] ", class->key);
85417 print_name(m, class);
85418 seq_puts(m, "\n");
85419 }
85420@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
85421 if (!i)
85422 seq_line(m, '-', 40-namelen, namelen);
85423
85424- snprintf(ip, sizeof(ip), "[<%p>]",
85425+ snprintf(ip, sizeof(ip), "[<%pK>]",
85426 (void *)class->contention_point[i]);
85427 seq_printf(m, "%40s %14lu %29s %pS\n",
85428 name, stats->contention_point[i],
85429@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
85430 if (!i)
85431 seq_line(m, '-', 40-namelen, namelen);
85432
85433- snprintf(ip, sizeof(ip), "[<%p>]",
85434+ snprintf(ip, sizeof(ip), "[<%pK>]",
85435 (void *)class->contending_point[i]);
85436 seq_printf(m, "%40s %14lu %29s %pS\n",
85437 name, stats->contending_point[i],
85438diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
85439index 7e3443f..b2a1e6b 100644
85440--- a/kernel/locking/mutex-debug.c
85441+++ b/kernel/locking/mutex-debug.c
85442@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
85443 }
85444
85445 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
85446- struct thread_info *ti)
85447+ struct task_struct *task)
85448 {
85449 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
85450
85451 /* Mark the current thread as blocked on the lock: */
85452- ti->task->blocked_on = waiter;
85453+ task->blocked_on = waiter;
85454 }
85455
85456 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
85457- struct thread_info *ti)
85458+ struct task_struct *task)
85459 {
85460 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
85461- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
85462- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
85463- ti->task->blocked_on = NULL;
85464+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
85465+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
85466+ task->blocked_on = NULL;
85467
85468 list_del_init(&waiter->list);
85469 waiter->task = NULL;
85470diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
85471index 0799fd3..d06ae3b 100644
85472--- a/kernel/locking/mutex-debug.h
85473+++ b/kernel/locking/mutex-debug.h
85474@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
85475 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
85476 extern void debug_mutex_add_waiter(struct mutex *lock,
85477 struct mutex_waiter *waiter,
85478- struct thread_info *ti);
85479+ struct task_struct *task);
85480 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
85481- struct thread_info *ti);
85482+ struct task_struct *task);
85483 extern void debug_mutex_unlock(struct mutex *lock);
85484 extern void debug_mutex_init(struct mutex *lock, const char *name,
85485 struct lock_class_key *key);
85486diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
85487index 4dd6e4c..df52693 100644
85488--- a/kernel/locking/mutex.c
85489+++ b/kernel/locking/mutex.c
85490@@ -135,7 +135,7 @@ void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
85491 node->locked = 1;
85492 return;
85493 }
85494- ACCESS_ONCE(prev->next) = node;
85495+ ACCESS_ONCE_RW(prev->next) = node;
85496 smp_wmb();
85497 /* Wait until the lock holder passes the lock down */
85498 while (!ACCESS_ONCE(node->locked))
85499@@ -156,7 +156,7 @@ static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
85500 while (!(next = ACCESS_ONCE(node->next)))
85501 arch_mutex_cpu_relax();
85502 }
85503- ACCESS_ONCE(next->locked) = 1;
85504+ ACCESS_ONCE_RW(next->locked) = 1;
85505 smp_wmb();
85506 }
85507
85508@@ -520,7 +520,7 @@ slowpath:
85509 goto skip_wait;
85510
85511 debug_mutex_lock_common(lock, &waiter);
85512- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
85513+ debug_mutex_add_waiter(lock, &waiter, task);
85514
85515 /* add waiting tasks to the end of the waitqueue (FIFO): */
85516 list_add_tail(&waiter.list, &lock->wait_list);
85517@@ -564,7 +564,7 @@ slowpath:
85518 schedule_preempt_disabled();
85519 spin_lock_mutex(&lock->wait_lock, flags);
85520 }
85521- mutex_remove_waiter(lock, &waiter, current_thread_info());
85522+ mutex_remove_waiter(lock, &waiter, task);
85523 /* set it to 0 if there are no waiters left: */
85524 if (likely(list_empty(&lock->wait_list)))
85525 atomic_set(&lock->count, 0);
85526@@ -601,7 +601,7 @@ skip_wait:
85527 return 0;
85528
85529 err:
85530- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
85531+ mutex_remove_waiter(lock, &waiter, task);
85532 spin_unlock_mutex(&lock->wait_lock, flags);
85533 debug_mutex_free_waiter(&waiter);
85534 mutex_release(&lock->dep_map, 1, ip);
85535diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
85536index 1d96dd0..994ff19 100644
85537--- a/kernel/locking/rtmutex-tester.c
85538+++ b/kernel/locking/rtmutex-tester.c
85539@@ -22,7 +22,7 @@
85540 #define MAX_RT_TEST_MUTEXES 8
85541
85542 static spinlock_t rttest_lock;
85543-static atomic_t rttest_event;
85544+static atomic_unchecked_t rttest_event;
85545
85546 struct test_thread_data {
85547 int opcode;
85548@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
85549
85550 case RTTEST_LOCKCONT:
85551 td->mutexes[td->opdata] = 1;
85552- td->event = atomic_add_return(1, &rttest_event);
85553+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85554 return 0;
85555
85556 case RTTEST_RESET:
85557@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
85558 return 0;
85559
85560 case RTTEST_RESETEVENT:
85561- atomic_set(&rttest_event, 0);
85562+ atomic_set_unchecked(&rttest_event, 0);
85563 return 0;
85564
85565 default:
85566@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
85567 return ret;
85568
85569 td->mutexes[id] = 1;
85570- td->event = atomic_add_return(1, &rttest_event);
85571+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85572 rt_mutex_lock(&mutexes[id]);
85573- td->event = atomic_add_return(1, &rttest_event);
85574+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85575 td->mutexes[id] = 4;
85576 return 0;
85577
85578@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
85579 return ret;
85580
85581 td->mutexes[id] = 1;
85582- td->event = atomic_add_return(1, &rttest_event);
85583+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85584 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
85585- td->event = atomic_add_return(1, &rttest_event);
85586+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85587 td->mutexes[id] = ret ? 0 : 4;
85588 return ret ? -EINTR : 0;
85589
85590@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
85591 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
85592 return ret;
85593
85594- td->event = atomic_add_return(1, &rttest_event);
85595+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85596 rt_mutex_unlock(&mutexes[id]);
85597- td->event = atomic_add_return(1, &rttest_event);
85598+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85599 td->mutexes[id] = 0;
85600 return 0;
85601
85602@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
85603 break;
85604
85605 td->mutexes[dat] = 2;
85606- td->event = atomic_add_return(1, &rttest_event);
85607+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85608 break;
85609
85610 default:
85611@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
85612 return;
85613
85614 td->mutexes[dat] = 3;
85615- td->event = atomic_add_return(1, &rttest_event);
85616+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85617 break;
85618
85619 case RTTEST_LOCKNOWAIT:
85620@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
85621 return;
85622
85623 td->mutexes[dat] = 1;
85624- td->event = atomic_add_return(1, &rttest_event);
85625+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85626 return;
85627
85628 default:
85629diff --git a/kernel/module.c b/kernel/module.c
85630index f5a3b1e..97ebb15 100644
85631--- a/kernel/module.c
85632+++ b/kernel/module.c
85633@@ -61,6 +61,7 @@
85634 #include <linux/pfn.h>
85635 #include <linux/bsearch.h>
85636 #include <linux/fips.h>
85637+#include <linux/grsecurity.h>
85638 #include <uapi/linux/module.h>
85639 #include "module-internal.h"
85640
85641@@ -157,7 +158,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
85642
85643 /* Bounds of module allocation, for speeding __module_address.
85644 * Protected by module_mutex. */
85645-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
85646+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
85647+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
85648
85649 int register_module_notifier(struct notifier_block * nb)
85650 {
85651@@ -324,7 +326,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
85652 return true;
85653
85654 list_for_each_entry_rcu(mod, &modules, list) {
85655- struct symsearch arr[] = {
85656+ struct symsearch modarr[] = {
85657 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
85658 NOT_GPL_ONLY, false },
85659 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
85660@@ -349,7 +351,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
85661 if (mod->state == MODULE_STATE_UNFORMED)
85662 continue;
85663
85664- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
85665+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
85666 return true;
85667 }
85668 return false;
85669@@ -489,7 +491,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
85670 if (!pcpusec->sh_size)
85671 return 0;
85672
85673- if (align > PAGE_SIZE) {
85674+ if (align-1 >= PAGE_SIZE) {
85675 pr_warn("%s: per-cpu alignment %li > %li\n",
85676 mod->name, align, PAGE_SIZE);
85677 align = PAGE_SIZE;
85678@@ -1064,7 +1066,7 @@ struct module_attribute module_uevent =
85679 static ssize_t show_coresize(struct module_attribute *mattr,
85680 struct module_kobject *mk, char *buffer)
85681 {
85682- return sprintf(buffer, "%u\n", mk->mod->core_size);
85683+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
85684 }
85685
85686 static struct module_attribute modinfo_coresize =
85687@@ -1073,7 +1075,7 @@ static struct module_attribute modinfo_coresize =
85688 static ssize_t show_initsize(struct module_attribute *mattr,
85689 struct module_kobject *mk, char *buffer)
85690 {
85691- return sprintf(buffer, "%u\n", mk->mod->init_size);
85692+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
85693 }
85694
85695 static struct module_attribute modinfo_initsize =
85696@@ -1165,12 +1167,29 @@ static int check_version(Elf_Shdr *sechdrs,
85697 goto bad_version;
85698 }
85699
85700+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
85701+ /*
85702+	 * avoid potentially printing gibberish on attempted load
85703+ * of a module randomized with a different seed
85704+ */
85705+ pr_warn("no symbol version for %s\n", symname);
85706+#else
85707 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
85708+#endif
85709 return 0;
85710
85711 bad_version:
85712+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
85713+ /*
85714+	 * avoid potentially printing gibberish on attempted load
85715+ * of a module randomized with a different seed
85716+ */
85717+ printk("attempted module disagrees about version of symbol %s\n",
85718+ symname);
85719+#else
85720 printk("%s: disagrees about version of symbol %s\n",
85721 mod->name, symname);
85722+#endif
85723 return 0;
85724 }
85725
85726@@ -1286,7 +1305,7 @@ resolve_symbol_wait(struct module *mod,
85727 */
85728 #ifdef CONFIG_SYSFS
85729
85730-#ifdef CONFIG_KALLSYMS
85731+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
85732 static inline bool sect_empty(const Elf_Shdr *sect)
85733 {
85734 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
85735@@ -1426,7 +1445,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
85736 {
85737 unsigned int notes, loaded, i;
85738 struct module_notes_attrs *notes_attrs;
85739- struct bin_attribute *nattr;
85740+ bin_attribute_no_const *nattr;
85741
85742 /* failed to create section attributes, so can't create notes */
85743 if (!mod->sect_attrs)
85744@@ -1538,7 +1557,7 @@ static void del_usage_links(struct module *mod)
85745 static int module_add_modinfo_attrs(struct module *mod)
85746 {
85747 struct module_attribute *attr;
85748- struct module_attribute *temp_attr;
85749+ module_attribute_no_const *temp_attr;
85750 int error = 0;
85751 int i;
85752
85753@@ -1759,21 +1778,21 @@ static void set_section_ro_nx(void *base,
85754
85755 static void unset_module_core_ro_nx(struct module *mod)
85756 {
85757- set_page_attributes(mod->module_core + mod->core_text_size,
85758- mod->module_core + mod->core_size,
85759+ set_page_attributes(mod->module_core_rw,
85760+ mod->module_core_rw + mod->core_size_rw,
85761 set_memory_x);
85762- set_page_attributes(mod->module_core,
85763- mod->module_core + mod->core_ro_size,
85764+ set_page_attributes(mod->module_core_rx,
85765+ mod->module_core_rx + mod->core_size_rx,
85766 set_memory_rw);
85767 }
85768
85769 static void unset_module_init_ro_nx(struct module *mod)
85770 {
85771- set_page_attributes(mod->module_init + mod->init_text_size,
85772- mod->module_init + mod->init_size,
85773+ set_page_attributes(mod->module_init_rw,
85774+ mod->module_init_rw + mod->init_size_rw,
85775 set_memory_x);
85776- set_page_attributes(mod->module_init,
85777- mod->module_init + mod->init_ro_size,
85778+ set_page_attributes(mod->module_init_rx,
85779+ mod->module_init_rx + mod->init_size_rx,
85780 set_memory_rw);
85781 }
85782
85783@@ -1786,14 +1805,14 @@ void set_all_modules_text_rw(void)
85784 list_for_each_entry_rcu(mod, &modules, list) {
85785 if (mod->state == MODULE_STATE_UNFORMED)
85786 continue;
85787- if ((mod->module_core) && (mod->core_text_size)) {
85788- set_page_attributes(mod->module_core,
85789- mod->module_core + mod->core_text_size,
85790+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
85791+ set_page_attributes(mod->module_core_rx,
85792+ mod->module_core_rx + mod->core_size_rx,
85793 set_memory_rw);
85794 }
85795- if ((mod->module_init) && (mod->init_text_size)) {
85796- set_page_attributes(mod->module_init,
85797- mod->module_init + mod->init_text_size,
85798+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
85799+ set_page_attributes(mod->module_init_rx,
85800+ mod->module_init_rx + mod->init_size_rx,
85801 set_memory_rw);
85802 }
85803 }
85804@@ -1809,14 +1828,14 @@ void set_all_modules_text_ro(void)
85805 list_for_each_entry_rcu(mod, &modules, list) {
85806 if (mod->state == MODULE_STATE_UNFORMED)
85807 continue;
85808- if ((mod->module_core) && (mod->core_text_size)) {
85809- set_page_attributes(mod->module_core,
85810- mod->module_core + mod->core_text_size,
85811+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
85812+ set_page_attributes(mod->module_core_rx,
85813+ mod->module_core_rx + mod->core_size_rx,
85814 set_memory_ro);
85815 }
85816- if ((mod->module_init) && (mod->init_text_size)) {
85817- set_page_attributes(mod->module_init,
85818- mod->module_init + mod->init_text_size,
85819+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
85820+ set_page_attributes(mod->module_init_rx,
85821+ mod->module_init_rx + mod->init_size_rx,
85822 set_memory_ro);
85823 }
85824 }
85825@@ -1867,16 +1886,19 @@ static void free_module(struct module *mod)
85826
85827 /* This may be NULL, but that's OK */
85828 unset_module_init_ro_nx(mod);
85829- module_free(mod, mod->module_init);
85830+ module_free(mod, mod->module_init_rw);
85831+ module_free_exec(mod, mod->module_init_rx);
85832 kfree(mod->args);
85833 percpu_modfree(mod);
85834
85835 /* Free lock-classes: */
85836- lockdep_free_key_range(mod->module_core, mod->core_size);
85837+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
85838+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
85839
85840 /* Finally, free the core (containing the module structure) */
85841 unset_module_core_ro_nx(mod);
85842- module_free(mod, mod->module_core);
85843+ module_free_exec(mod, mod->module_core_rx);
85844+ module_free(mod, mod->module_core_rw);
85845
85846 #ifdef CONFIG_MPU
85847 update_protections(current->mm);
85848@@ -1945,9 +1967,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
85849 int ret = 0;
85850 const struct kernel_symbol *ksym;
85851
85852+#ifdef CONFIG_GRKERNSEC_MODHARDEN
85853+ int is_fs_load = 0;
85854+ int register_filesystem_found = 0;
85855+ char *p;
85856+
85857+ p = strstr(mod->args, "grsec_modharden_fs");
85858+ if (p) {
85859+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
85860+ /* copy \0 as well */
85861+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
85862+ is_fs_load = 1;
85863+ }
85864+#endif
85865+
85866 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
85867 const char *name = info->strtab + sym[i].st_name;
85868
85869+#ifdef CONFIG_GRKERNSEC_MODHARDEN
85870+ /* it's a real shame this will never get ripped and copied
85871+ upstream! ;(
85872+ */
85873+ if (is_fs_load && !strcmp(name, "register_filesystem"))
85874+ register_filesystem_found = 1;
85875+#endif
85876+
85877 switch (sym[i].st_shndx) {
85878 case SHN_COMMON:
85879 /* We compiled with -fno-common. These are not
85880@@ -1968,7 +2012,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
85881 ksym = resolve_symbol_wait(mod, info, name);
85882 /* Ok if resolved. */
85883 if (ksym && !IS_ERR(ksym)) {
85884+ pax_open_kernel();
85885 sym[i].st_value = ksym->value;
85886+ pax_close_kernel();
85887 break;
85888 }
85889
85890@@ -1987,11 +2033,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
85891 secbase = (unsigned long)mod_percpu(mod);
85892 else
85893 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
85894+ pax_open_kernel();
85895 sym[i].st_value += secbase;
85896+ pax_close_kernel();
85897 break;
85898 }
85899 }
85900
85901+#ifdef CONFIG_GRKERNSEC_MODHARDEN
85902+ if (is_fs_load && !register_filesystem_found) {
85903+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
85904+ ret = -EPERM;
85905+ }
85906+#endif
85907+
85908 return ret;
85909 }
85910
85911@@ -2075,22 +2130,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
85912 || s->sh_entsize != ~0UL
85913 || strstarts(sname, ".init"))
85914 continue;
85915- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
85916+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
85917+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
85918+ else
85919+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
85920 pr_debug("\t%s\n", sname);
85921 }
85922- switch (m) {
85923- case 0: /* executable */
85924- mod->core_size = debug_align(mod->core_size);
85925- mod->core_text_size = mod->core_size;
85926- break;
85927- case 1: /* RO: text and ro-data */
85928- mod->core_size = debug_align(mod->core_size);
85929- mod->core_ro_size = mod->core_size;
85930- break;
85931- case 3: /* whole core */
85932- mod->core_size = debug_align(mod->core_size);
85933- break;
85934- }
85935 }
85936
85937 pr_debug("Init section allocation order:\n");
85938@@ -2104,23 +2149,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
85939 || s->sh_entsize != ~0UL
85940 || !strstarts(sname, ".init"))
85941 continue;
85942- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
85943- | INIT_OFFSET_MASK);
85944+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
85945+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
85946+ else
85947+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
85948+ s->sh_entsize |= INIT_OFFSET_MASK;
85949 pr_debug("\t%s\n", sname);
85950 }
85951- switch (m) {
85952- case 0: /* executable */
85953- mod->init_size = debug_align(mod->init_size);
85954- mod->init_text_size = mod->init_size;
85955- break;
85956- case 1: /* RO: text and ro-data */
85957- mod->init_size = debug_align(mod->init_size);
85958- mod->init_ro_size = mod->init_size;
85959- break;
85960- case 3: /* whole init */
85961- mod->init_size = debug_align(mod->init_size);
85962- break;
85963- }
85964 }
85965 }
85966
85967@@ -2293,7 +2328,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
85968
85969 /* Put symbol section at end of init part of module. */
85970 symsect->sh_flags |= SHF_ALLOC;
85971- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
85972+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
85973 info->index.sym) | INIT_OFFSET_MASK;
85974 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
85975
85976@@ -2310,13 +2345,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
85977 }
85978
85979 /* Append room for core symbols at end of core part. */
85980- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
85981- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
85982- mod->core_size += strtab_size;
85983+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
85984+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
85985+ mod->core_size_rx += strtab_size;
85986
85987 /* Put string table section at end of init part of module. */
85988 strsect->sh_flags |= SHF_ALLOC;
85989- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
85990+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
85991 info->index.str) | INIT_OFFSET_MASK;
85992 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
85993 }
85994@@ -2334,12 +2369,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
85995 /* Make sure we get permanent strtab: don't use info->strtab. */
85996 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
85997
85998+ pax_open_kernel();
85999+
86000 /* Set types up while we still have access to sections. */
86001 for (i = 0; i < mod->num_symtab; i++)
86002 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
86003
86004- mod->core_symtab = dst = mod->module_core + info->symoffs;
86005- mod->core_strtab = s = mod->module_core + info->stroffs;
86006+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
86007+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
86008 src = mod->symtab;
86009 for (ndst = i = 0; i < mod->num_symtab; i++) {
86010 if (i == 0 ||
86011@@ -2351,6 +2388,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
86012 }
86013 }
86014 mod->core_num_syms = ndst;
86015+
86016+ pax_close_kernel();
86017 }
86018 #else
86019 static inline void layout_symtab(struct module *mod, struct load_info *info)
86020@@ -2384,17 +2423,33 @@ void * __weak module_alloc(unsigned long size)
86021 return vmalloc_exec(size);
86022 }
86023
86024-static void *module_alloc_update_bounds(unsigned long size)
86025+static void *module_alloc_update_bounds_rw(unsigned long size)
86026 {
86027 void *ret = module_alloc(size);
86028
86029 if (ret) {
86030 mutex_lock(&module_mutex);
86031 /* Update module bounds. */
86032- if ((unsigned long)ret < module_addr_min)
86033- module_addr_min = (unsigned long)ret;
86034- if ((unsigned long)ret + size > module_addr_max)
86035- module_addr_max = (unsigned long)ret + size;
86036+ if ((unsigned long)ret < module_addr_min_rw)
86037+ module_addr_min_rw = (unsigned long)ret;
86038+ if ((unsigned long)ret + size > module_addr_max_rw)
86039+ module_addr_max_rw = (unsigned long)ret + size;
86040+ mutex_unlock(&module_mutex);
86041+ }
86042+ return ret;
86043+}
86044+
86045+static void *module_alloc_update_bounds_rx(unsigned long size)
86046+{
86047+ void *ret = module_alloc_exec(size);
86048+
86049+ if (ret) {
86050+ mutex_lock(&module_mutex);
86051+ /* Update module bounds. */
86052+ if ((unsigned long)ret < module_addr_min_rx)
86053+ module_addr_min_rx = (unsigned long)ret;
86054+ if ((unsigned long)ret + size > module_addr_max_rx)
86055+ module_addr_max_rx = (unsigned long)ret + size;
86056 mutex_unlock(&module_mutex);
86057 }
86058 return ret;
86059@@ -2651,7 +2706,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
86060 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
86061
86062 if (info->index.sym == 0) {
86063+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
86064+ /*
86065+	 * avoid potentially printing gibberish on attempted load
86066+ * of a module randomized with a different seed
86067+ */
86068+ pr_warn("module has no symbols (stripped?)\n");
86069+#else
86070 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
86071+#endif
86072 return ERR_PTR(-ENOEXEC);
86073 }
86074
86075@@ -2667,8 +2730,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
86076 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
86077 {
86078 const char *modmagic = get_modinfo(info, "vermagic");
86079+ const char *license = get_modinfo(info, "license");
86080 int err;
86081
86082+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
86083+ if (!license || !license_is_gpl_compatible(license))
86084+ return -ENOEXEC;
86085+#endif
86086+
86087 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
86088 modmagic = NULL;
86089
86090@@ -2693,7 +2762,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
86091 }
86092
86093 /* Set up license info based on the info section */
86094- set_license(mod, get_modinfo(info, "license"));
86095+ set_license(mod, license);
86096
86097 return 0;
86098 }
86099@@ -2787,7 +2856,7 @@ static int move_module(struct module *mod, struct load_info *info)
86100 void *ptr;
86101
86102 /* Do the allocs. */
86103- ptr = module_alloc_update_bounds(mod->core_size);
86104+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
86105 /*
86106 * The pointer to this block is stored in the module structure
86107 * which is inside the block. Just mark it as not being a
86108@@ -2797,11 +2866,11 @@ static int move_module(struct module *mod, struct load_info *info)
86109 if (!ptr)
86110 return -ENOMEM;
86111
86112- memset(ptr, 0, mod->core_size);
86113- mod->module_core = ptr;
86114+ memset(ptr, 0, mod->core_size_rw);
86115+ mod->module_core_rw = ptr;
86116
86117- if (mod->init_size) {
86118- ptr = module_alloc_update_bounds(mod->init_size);
86119+ if (mod->init_size_rw) {
86120+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
86121 /*
86122 * The pointer to this block is stored in the module structure
86123 * which is inside the block. This block doesn't need to be
86124@@ -2810,13 +2879,45 @@ static int move_module(struct module *mod, struct load_info *info)
86125 */
86126 kmemleak_ignore(ptr);
86127 if (!ptr) {
86128- module_free(mod, mod->module_core);
86129+ module_free(mod, mod->module_core_rw);
86130 return -ENOMEM;
86131 }
86132- memset(ptr, 0, mod->init_size);
86133- mod->module_init = ptr;
86134+ memset(ptr, 0, mod->init_size_rw);
86135+ mod->module_init_rw = ptr;
86136 } else
86137- mod->module_init = NULL;
86138+ mod->module_init_rw = NULL;
86139+
86140+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
86141+ kmemleak_not_leak(ptr);
86142+ if (!ptr) {
86143+ if (mod->module_init_rw)
86144+ module_free(mod, mod->module_init_rw);
86145+ module_free(mod, mod->module_core_rw);
86146+ return -ENOMEM;
86147+ }
86148+
86149+ pax_open_kernel();
86150+ memset(ptr, 0, mod->core_size_rx);
86151+ pax_close_kernel();
86152+ mod->module_core_rx = ptr;
86153+
86154+ if (mod->init_size_rx) {
86155+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
86156+ kmemleak_ignore(ptr);
86157+		if (!ptr) {
86158+ module_free_exec(mod, mod->module_core_rx);
86159+ if (mod->module_init_rw)
86160+ module_free(mod, mod->module_init_rw);
86161+ module_free(mod, mod->module_core_rw);
86162+ return -ENOMEM;
86163+ }
86164+
86165+ pax_open_kernel();
86166+ memset(ptr, 0, mod->init_size_rx);
86167+ pax_close_kernel();
86168+ mod->module_init_rx = ptr;
86169+ } else
86170+ mod->module_init_rx = NULL;
86171
86172 /* Transfer each section which specifies SHF_ALLOC */
86173 pr_debug("final section addresses:\n");
86174@@ -2827,16 +2928,45 @@ static int move_module(struct module *mod, struct load_info *info)
86175 if (!(shdr->sh_flags & SHF_ALLOC))
86176 continue;
86177
86178- if (shdr->sh_entsize & INIT_OFFSET_MASK)
86179- dest = mod->module_init
86180- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
86181- else
86182- dest = mod->module_core + shdr->sh_entsize;
86183+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
86184+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
86185+ dest = mod->module_init_rw
86186+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
86187+ else
86188+ dest = mod->module_init_rx
86189+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
86190+ } else {
86191+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
86192+ dest = mod->module_core_rw + shdr->sh_entsize;
86193+ else
86194+ dest = mod->module_core_rx + shdr->sh_entsize;
86195+ }
86196+
86197+ if (shdr->sh_type != SHT_NOBITS) {
86198+
86199+#ifdef CONFIG_PAX_KERNEXEC
86200+#ifdef CONFIG_X86_64
86201+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
86202+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
86203+#endif
86204+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
86205+ pax_open_kernel();
86206+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
86207+ pax_close_kernel();
86208+ } else
86209+#endif
86210
86211- if (shdr->sh_type != SHT_NOBITS)
86212 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
86213+ }
86214 /* Update sh_addr to point to copy in image. */
86215- shdr->sh_addr = (unsigned long)dest;
86216+
86217+#ifdef CONFIG_PAX_KERNEXEC
86218+ if (shdr->sh_flags & SHF_EXECINSTR)
86219+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
86220+ else
86221+#endif
86222+
86223+ shdr->sh_addr = (unsigned long)dest;
86224 pr_debug("\t0x%lx %s\n",
86225 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
86226 }
86227@@ -2893,12 +3023,12 @@ static void flush_module_icache(const struct module *mod)
86228 * Do it before processing of module parameters, so the module
86229 * can provide parameter accessor functions of its own.
86230 */
86231- if (mod->module_init)
86232- flush_icache_range((unsigned long)mod->module_init,
86233- (unsigned long)mod->module_init
86234- + mod->init_size);
86235- flush_icache_range((unsigned long)mod->module_core,
86236- (unsigned long)mod->module_core + mod->core_size);
86237+ if (mod->module_init_rx)
86238+ flush_icache_range((unsigned long)mod->module_init_rx,
86239+ (unsigned long)mod->module_init_rx
86240+ + mod->init_size_rx);
86241+ flush_icache_range((unsigned long)mod->module_core_rx,
86242+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
86243
86244 set_fs(old_fs);
86245 }
86246@@ -2955,8 +3085,10 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
86247 static void module_deallocate(struct module *mod, struct load_info *info)
86248 {
86249 percpu_modfree(mod);
86250- module_free(mod, mod->module_init);
86251- module_free(mod, mod->module_core);
86252+ module_free_exec(mod, mod->module_init_rx);
86253+ module_free_exec(mod, mod->module_core_rx);
86254+ module_free(mod, mod->module_init_rw);
86255+ module_free(mod, mod->module_core_rw);
86256 }
86257
86258 int __weak module_finalize(const Elf_Ehdr *hdr,
86259@@ -2969,7 +3101,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
86260 static int post_relocation(struct module *mod, const struct load_info *info)
86261 {
86262 /* Sort exception table now relocations are done. */
86263+ pax_open_kernel();
86264 sort_extable(mod->extable, mod->extable + mod->num_exentries);
86265+ pax_close_kernel();
86266
86267 /* Copy relocated percpu area over. */
86268 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
86269@@ -3023,16 +3157,16 @@ static int do_init_module(struct module *mod)
86270 MODULE_STATE_COMING, mod);
86271
86272 /* Set RO and NX regions for core */
86273- set_section_ro_nx(mod->module_core,
86274- mod->core_text_size,
86275- mod->core_ro_size,
86276- mod->core_size);
86277+ set_section_ro_nx(mod->module_core_rx,
86278+ mod->core_size_rx,
86279+ mod->core_size_rx,
86280+ mod->core_size_rx);
86281
86282 /* Set RO and NX regions for init */
86283- set_section_ro_nx(mod->module_init,
86284- mod->init_text_size,
86285- mod->init_ro_size,
86286- mod->init_size);
86287+ set_section_ro_nx(mod->module_init_rx,
86288+ mod->init_size_rx,
86289+ mod->init_size_rx,
86290+ mod->init_size_rx);
86291
86292 do_mod_ctors(mod);
86293 /* Start the module */
86294@@ -3093,11 +3227,12 @@ static int do_init_module(struct module *mod)
86295 mod->strtab = mod->core_strtab;
86296 #endif
86297 unset_module_init_ro_nx(mod);
86298- module_free(mod, mod->module_init);
86299- mod->module_init = NULL;
86300- mod->init_size = 0;
86301- mod->init_ro_size = 0;
86302- mod->init_text_size = 0;
86303+ module_free(mod, mod->module_init_rw);
86304+ module_free_exec(mod, mod->module_init_rx);
86305+ mod->module_init_rw = NULL;
86306+ mod->module_init_rx = NULL;
86307+ mod->init_size_rw = 0;
86308+ mod->init_size_rx = 0;
86309 mutex_unlock(&module_mutex);
86310 wake_up_all(&module_wq);
86311
86312@@ -3240,9 +3375,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
86313 if (err)
86314 goto free_unload;
86315
86316+ /* Now copy in args */
86317+ mod->args = strndup_user(uargs, ~0UL >> 1);
86318+ if (IS_ERR(mod->args)) {
86319+ err = PTR_ERR(mod->args);
86320+ goto free_unload;
86321+ }
86322+
86323 /* Set up MODINFO_ATTR fields */
86324 setup_modinfo(mod, info);
86325
86326+#ifdef CONFIG_GRKERNSEC_MODHARDEN
86327+ {
86328+ char *p, *p2;
86329+
86330+ if (strstr(mod->args, "grsec_modharden_netdev")) {
86331+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
86332+ err = -EPERM;
86333+ goto free_modinfo;
86334+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
86335+ p += sizeof("grsec_modharden_normal") - 1;
86336+ p2 = strstr(p, "_");
86337+ if (p2) {
86338+ *p2 = '\0';
86339+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
86340+ *p2 = '_';
86341+ }
86342+ err = -EPERM;
86343+ goto free_modinfo;
86344+ }
86345+ }
86346+#endif
86347+
86348 /* Fix up syms, so that st_value is a pointer to location. */
86349 err = simplify_symbols(mod, info);
86350 if (err < 0)
86351@@ -3258,13 +3422,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
86352
86353 flush_module_icache(mod);
86354
86355- /* Now copy in args */
86356- mod->args = strndup_user(uargs, ~0UL >> 1);
86357- if (IS_ERR(mod->args)) {
86358- err = PTR_ERR(mod->args);
86359- goto free_arch_cleanup;
86360- }
86361-
86362 dynamic_debug_setup(info->debug, info->num_debug);
86363
86364 /* Finally it's fully formed, ready to start executing. */
86365@@ -3299,11 +3456,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
86366 ddebug_cleanup:
86367 dynamic_debug_remove(info->debug);
86368 synchronize_sched();
86369- kfree(mod->args);
86370- free_arch_cleanup:
86371 module_arch_cleanup(mod);
86372 free_modinfo:
86373 free_modinfo(mod);
86374+ kfree(mod->args);
86375 free_unload:
86376 module_unload_free(mod);
86377 unlink_mod:
86378@@ -3386,10 +3542,16 @@ static const char *get_ksymbol(struct module *mod,
86379 unsigned long nextval;
86380
86381 	/* At worst, next value is at end of module */
86382- if (within_module_init(addr, mod))
86383- nextval = (unsigned long)mod->module_init+mod->init_text_size;
86384+ if (within_module_init_rx(addr, mod))
86385+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
86386+ else if (within_module_init_rw(addr, mod))
86387+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
86388+ else if (within_module_core_rx(addr, mod))
86389+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
86390+ else if (within_module_core_rw(addr, mod))
86391+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
86392 else
86393- nextval = (unsigned long)mod->module_core+mod->core_text_size;
86394+ return NULL;
86395
86396 /* Scan for closest preceding symbol, and next symbol. (ELF
86397 starts real symbols at 1). */
86398@@ -3640,7 +3802,7 @@ static int m_show(struct seq_file *m, void *p)
86399 return 0;
86400
86401 seq_printf(m, "%s %u",
86402- mod->name, mod->init_size + mod->core_size);
86403+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
86404 print_unload_info(m, mod);
86405
86406 /* Informative for users. */
86407@@ -3649,7 +3811,7 @@ static int m_show(struct seq_file *m, void *p)
86408 mod->state == MODULE_STATE_COMING ? "Loading":
86409 "Live");
86410 /* Used by oprofile and other similar tools. */
86411- seq_printf(m, " 0x%pK", mod->module_core);
86412+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
86413
86414 /* Taints info */
86415 if (mod->taints)
86416@@ -3685,7 +3847,17 @@ static const struct file_operations proc_modules_operations = {
86417
86418 static int __init proc_modules_init(void)
86419 {
86420+#ifndef CONFIG_GRKERNSEC_HIDESYM
86421+#ifdef CONFIG_GRKERNSEC_PROC_USER
86422+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
86423+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
86424+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
86425+#else
86426 proc_create("modules", 0, NULL, &proc_modules_operations);
86427+#endif
86428+#else
86429+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
86430+#endif
86431 return 0;
86432 }
86433 module_init(proc_modules_init);
86434@@ -3746,14 +3918,14 @@ struct module *__module_address(unsigned long addr)
86435 {
86436 struct module *mod;
86437
86438- if (addr < module_addr_min || addr > module_addr_max)
86439+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
86440+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
86441 return NULL;
86442
86443 list_for_each_entry_rcu(mod, &modules, list) {
86444 if (mod->state == MODULE_STATE_UNFORMED)
86445 continue;
86446- if (within_module_core(addr, mod)
86447- || within_module_init(addr, mod))
86448+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
86449 return mod;
86450 }
86451 return NULL;
86452@@ -3788,11 +3960,20 @@ bool is_module_text_address(unsigned long addr)
86453 */
86454 struct module *__module_text_address(unsigned long addr)
86455 {
86456- struct module *mod = __module_address(addr);
86457+ struct module *mod;
86458+
86459+#ifdef CONFIG_X86_32
86460+ addr = ktla_ktva(addr);
86461+#endif
86462+
86463+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
86464+ return NULL;
86465+
86466+ mod = __module_address(addr);
86467+
86468 if (mod) {
86469 /* Make sure it's within the text section. */
86470- if (!within(addr, mod->module_init, mod->init_text_size)
86471- && !within(addr, mod->module_core, mod->core_text_size))
86472+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
86473 mod = NULL;
86474 }
86475 return mod;
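The kernel/module.c changes above replace the single module mapping (module_core/module_init plus text/ro watermarks) with two mappings per module: an RX region for text and read-only data and an RW region for writable data, each with its own base pointer, size, and min/max bounds. The deciding test, applied in both layout_sections() and move_module(), is the section's ELF flags. A small sketch of that classification rule; the enum and helper names are illustrative, while the SHF_* constants come from the ELF spec via <elf.h>:

    /* Illustrative classification helper mirroring the patch's test. */
    #include <elf.h>
    #include <stdio.h>

    enum region { REGION_RW, REGION_RX };

    static enum region classify_section(Elf64_Xword sh_flags)
    {
        if ((sh_flags & SHF_WRITE) || !(sh_flags & SHF_ALLOC))
            return REGION_RW;
        return REGION_RX;
    }

    int main(void)
    {
        printf(".text   -> %s\n",
               classify_section(SHF_ALLOC | SHF_EXECINSTR) == REGION_RX ? "rx" : "rw");
        printf(".rodata -> %s\n",
               classify_section(SHF_ALLOC) == REGION_RX ? "rx" : "rw");
        printf(".data   -> %s\n",
               classify_section(SHF_ALLOC | SHF_WRITE) == REGION_RX ? "rx" : "rw");
        return 0;
    }

Everything that must stay writable at runtime lands in the RW region, so the rest can be made read-only (and, for text, executable) as a block rather than relying on per-module watermarks.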
86476diff --git a/kernel/notifier.c b/kernel/notifier.c
86477index 2d5cc4c..d9ea600 100644
86478--- a/kernel/notifier.c
86479+++ b/kernel/notifier.c
86480@@ -5,6 +5,7 @@
86481 #include <linux/rcupdate.h>
86482 #include <linux/vmalloc.h>
86483 #include <linux/reboot.h>
86484+#include <linux/mm.h>
86485
86486 /*
86487 * Notifier list for kernel code which wants to be called
86488@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
86489 while ((*nl) != NULL) {
86490 if (n->priority > (*nl)->priority)
86491 break;
86492- nl = &((*nl)->next);
86493+ nl = (struct notifier_block **)&((*nl)->next);
86494 }
86495- n->next = *nl;
86496+ pax_open_kernel();
86497+ *(const void **)&n->next = *nl;
86498 rcu_assign_pointer(*nl, n);
86499+ pax_close_kernel();
86500 return 0;
86501 }
86502
86503@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
86504 return 0;
86505 if (n->priority > (*nl)->priority)
86506 break;
86507- nl = &((*nl)->next);
86508+ nl = (struct notifier_block **)&((*nl)->next);
86509 }
86510- n->next = *nl;
86511+ pax_open_kernel();
86512+ *(const void **)&n->next = *nl;
86513 rcu_assign_pointer(*nl, n);
86514+ pax_close_kernel();
86515 return 0;
86516 }
86517
86518@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
86519 {
86520 while ((*nl) != NULL) {
86521 if ((*nl) == n) {
86522+ pax_open_kernel();
86523 rcu_assign_pointer(*nl, n->next);
86524+ pax_close_kernel();
86525 return 0;
86526 }
86527- nl = &((*nl)->next);
86528+ nl = (struct notifier_block **)&((*nl)->next);
86529 }
86530 return -ENOENT;
86531 }
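In kernel/notifier.c the chain heads may now live in read-only memory (PaX CONSTIFY/KERNEXEC), so the pointer splices are bracketed with pax_open_kernel()/pax_close_kernel(). Below is a userspace analogue of the bracket pattern built on mprotect() over a single page; the kernel's mechanism is per-architecture (e.g. toggling CR0.WP on x86) and quite different, so this only shows the shape of the idiom:

    /* Userspace analogue of the pax_open_kernel()/pax_close_kernel()
     * bracket: writes to the protected data are legal only inside it. */
    #include <sys/mman.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static char *chain;
    static long  pagesz;

    static void open_kernel(void)  { mprotect(chain, pagesz, PROT_READ | PROT_WRITE); }
    static void close_kernel(void) { mprotect(chain, pagesz, PROT_READ); }

    int main(void)
    {
        pagesz = sysconf(_SC_PAGESIZE);
        chain = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (chain == MAP_FAILED)
            return 1;

        strcpy(chain, "head -> a");
        close_kernel();                   /* chain now lives in read-only memory */

        open_kernel();                    /* analogous to pax_open_kernel() */
        strcpy(chain, "head -> b -> a"); /* the splice is legal only here */
        close_kernel();                   /* analogous to pax_close_kernel() */

        puts(chain);
        return 0;
    }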
86532diff --git a/kernel/padata.c b/kernel/padata.c
86533index 2abd25d..02c4faa 100644
86534--- a/kernel/padata.c
86535+++ b/kernel/padata.c
86536@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
86537 * seq_nr mod. number of cpus in use.
86538 */
86539
86540- seq_nr = atomic_inc_return(&pd->seq_nr);
86541+ seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
86542 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
86543
86544 return padata_index_to_cpu(pd, cpu_index);
86545@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
86546 padata_init_pqueues(pd);
86547 padata_init_squeues(pd);
86548 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
86549- atomic_set(&pd->seq_nr, -1);
86550+ atomic_set_unchecked(&pd->seq_nr, -1);
86551 atomic_set(&pd->reorder_objects, 0);
86552 atomic_set(&pd->refcnt, 0);
86553 pd->pinst = pinst;
86554diff --git a/kernel/panic.c b/kernel/panic.c
86555index c00b4ce..a846117 100644
86556--- a/kernel/panic.c
86557+++ b/kernel/panic.c
86558@@ -407,7 +407,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
86559 disable_trace_on_warning();
86560
86561 pr_warn("------------[ cut here ]------------\n");
86562- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
86563+ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
86564 raw_smp_processor_id(), current->pid, file, line, caller);
86565
86566 if (args)
86567@@ -461,7 +461,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
86568 */
86569 void __stack_chk_fail(void)
86570 {
86571- panic("stack-protector: Kernel stack is corrupted in: %p\n",
86572+ dump_stack();
86573+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
86574 __builtin_return_address(0));
86575 }
86576 EXPORT_SYMBOL(__stack_chk_fail);
86577diff --git a/kernel/pid.c b/kernel/pid.c
86578index 9b9a266..c20ef80 100644
86579--- a/kernel/pid.c
86580+++ b/kernel/pid.c
86581@@ -33,6 +33,7 @@
86582 #include <linux/rculist.h>
86583 #include <linux/bootmem.h>
86584 #include <linux/hash.h>
86585+#include <linux/security.h>
86586 #include <linux/pid_namespace.h>
86587 #include <linux/init_task.h>
86588 #include <linux/syscalls.h>
86589@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
86590
86591 int pid_max = PID_MAX_DEFAULT;
86592
86593-#define RESERVED_PIDS 300
86594+#define RESERVED_PIDS 500
86595
86596 int pid_max_min = RESERVED_PIDS + 1;
86597 int pid_max_max = PID_MAX_LIMIT;
86598@@ -445,10 +446,18 @@ EXPORT_SYMBOL(pid_task);
86599 */
86600 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
86601 {
86602+ struct task_struct *task;
86603+
86604 rcu_lockdep_assert(rcu_read_lock_held(),
86605 "find_task_by_pid_ns() needs rcu_read_lock()"
86606 " protection");
86607- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
86608+
86609+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
86610+
86611+ if (gr_pid_is_chrooted(task))
86612+ return NULL;
86613+
86614+ return task;
86615 }
86616
86617 struct task_struct *find_task_by_vpid(pid_t vnr)
86618@@ -456,6 +465,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
86619 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
86620 }
86621
86622+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
86623+{
86624+ rcu_lockdep_assert(rcu_read_lock_held(),
86625+ "find_task_by_pid_ns() needs rcu_read_lock()"
86626+ " protection");
86627+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
86628+}
86629+
86630 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
86631 {
86632 struct pid *pid;
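The kernel/pid.c hunk keeps the PID lookup itself unchanged but filters the result: find_task_by_pid_ns() now returns NULL when gr_pid_is_chrooted() says the task lies outside the caller's chroot, while the new find_task_by_vpid_unrestricted() preserves the unfiltered lookup for callers that need it. A sketch of this lookup-then-filter pattern; the struct and policy function are simplified stand-ins, not kernel names:

    /* 'struct task' and policy_hides() model pid_task() plus
     * gr_pid_is_chrooted(); none of these names are from the kernel. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct task { int pid; bool outside_callers_chroot; };

    static struct task tasks[] = { { 1, true }, { 42, false } };

    static struct task *raw_lookup(int pid)
    {
        for (size_t i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++)
            if (tasks[i].pid == pid)
                return &tasks[i];
        return NULL;
    }

    static bool policy_hides(const struct task *t)
    {
        return t && t->outside_callers_chroot;
    }

    static struct task *find_task(int pid)
    {
        struct task *t = raw_lookup(pid);

        return policy_hides(t) ? NULL : t;   /* exists, but invisible */
    }

    int main(void)
    {
        printf("pid 42: %s\n", find_task(42) ? "visible" : "hidden");
        printf("pid  1: %s\n", find_task(1)  ? "visible" : "hidden");
        return 0;
    }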
86633diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
86634index 06c62de..b08cc6c 100644
86635--- a/kernel/pid_namespace.c
86636+++ b/kernel/pid_namespace.c
86637@@ -253,7 +253,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
86638 void __user *buffer, size_t *lenp, loff_t *ppos)
86639 {
86640 struct pid_namespace *pid_ns = task_active_pid_ns(current);
86641- struct ctl_table tmp = *table;
86642+ ctl_table_no_const tmp = *table;
86643
86644 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
86645 return -EPERM;
86646diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
86647index c7f31aa..2b44977 100644
86648--- a/kernel/posix-cpu-timers.c
86649+++ b/kernel/posix-cpu-timers.c
86650@@ -1521,14 +1521,14 @@ struct k_clock clock_posix_cpu = {
86651
86652 static __init int init_posix_cpu_timers(void)
86653 {
86654- struct k_clock process = {
86655+ static struct k_clock process = {
86656 .clock_getres = process_cpu_clock_getres,
86657 .clock_get = process_cpu_clock_get,
86658 .timer_create = process_cpu_timer_create,
86659 .nsleep = process_cpu_nsleep,
86660 .nsleep_restart = process_cpu_nsleep_restart,
86661 };
86662- struct k_clock thread = {
86663+ static struct k_clock thread = {
86664 .clock_getres = thread_cpu_clock_getres,
86665 .clock_get = thread_cpu_clock_get,
86666 .timer_create = thread_cpu_timer_create,
86667diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
86668index 424c2d4..679242f 100644
86669--- a/kernel/posix-timers.c
86670+++ b/kernel/posix-timers.c
86671@@ -43,6 +43,7 @@
86672 #include <linux/hash.h>
86673 #include <linux/posix-clock.h>
86674 #include <linux/posix-timers.h>
86675+#include <linux/grsecurity.h>
86676 #include <linux/syscalls.h>
86677 #include <linux/wait.h>
86678 #include <linux/workqueue.h>
86679@@ -122,7 +123,7 @@ static DEFINE_SPINLOCK(hash_lock);
86680 * which we beg off on and pass to do_sys_settimeofday().
86681 */
86682
86683-static struct k_clock posix_clocks[MAX_CLOCKS];
86684+static struct k_clock *posix_clocks[MAX_CLOCKS];
86685
86686 /*
86687 * These ones are defined below.
86688@@ -275,7 +276,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
86689 */
86690 static __init int init_posix_timers(void)
86691 {
86692- struct k_clock clock_realtime = {
86693+ static struct k_clock clock_realtime = {
86694 .clock_getres = hrtimer_get_res,
86695 .clock_get = posix_clock_realtime_get,
86696 .clock_set = posix_clock_realtime_set,
86697@@ -287,7 +288,7 @@ static __init int init_posix_timers(void)
86698 .timer_get = common_timer_get,
86699 .timer_del = common_timer_del,
86700 };
86701- struct k_clock clock_monotonic = {
86702+ static struct k_clock clock_monotonic = {
86703 .clock_getres = hrtimer_get_res,
86704 .clock_get = posix_ktime_get_ts,
86705 .nsleep = common_nsleep,
86706@@ -297,19 +298,19 @@ static __init int init_posix_timers(void)
86707 .timer_get = common_timer_get,
86708 .timer_del = common_timer_del,
86709 };
86710- struct k_clock clock_monotonic_raw = {
86711+ static struct k_clock clock_monotonic_raw = {
86712 .clock_getres = hrtimer_get_res,
86713 .clock_get = posix_get_monotonic_raw,
86714 };
86715- struct k_clock clock_realtime_coarse = {
86716+ static struct k_clock clock_realtime_coarse = {
86717 .clock_getres = posix_get_coarse_res,
86718 .clock_get = posix_get_realtime_coarse,
86719 };
86720- struct k_clock clock_monotonic_coarse = {
86721+ static struct k_clock clock_monotonic_coarse = {
86722 .clock_getres = posix_get_coarse_res,
86723 .clock_get = posix_get_monotonic_coarse,
86724 };
86725- struct k_clock clock_tai = {
86726+ static struct k_clock clock_tai = {
86727 .clock_getres = hrtimer_get_res,
86728 .clock_get = posix_get_tai,
86729 .nsleep = common_nsleep,
86730@@ -319,7 +320,7 @@ static __init int init_posix_timers(void)
86731 .timer_get = common_timer_get,
86732 .timer_del = common_timer_del,
86733 };
86734- struct k_clock clock_boottime = {
86735+ static struct k_clock clock_boottime = {
86736 .clock_getres = hrtimer_get_res,
86737 .clock_get = posix_get_boottime,
86738 .nsleep = common_nsleep,
86739@@ -531,7 +532,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
86740 return;
86741 }
86742
86743- posix_clocks[clock_id] = *new_clock;
86744+ posix_clocks[clock_id] = new_clock;
86745 }
86746 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
86747
86748@@ -577,9 +578,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
86749 return (id & CLOCKFD_MASK) == CLOCKFD ?
86750 &clock_posix_dynamic : &clock_posix_cpu;
86751
86752- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
86753+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
86754 return NULL;
86755- return &posix_clocks[id];
86756+ return posix_clocks[id];
86757 }
86758
86759 static int common_timer_create(struct k_itimer *new_timer)
86760@@ -597,7 +598,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
86761 struct k_clock *kc = clockid_to_kclock(which_clock);
86762 struct k_itimer *new_timer;
86763 int error, new_timer_id;
86764- sigevent_t event;
86765+ sigevent_t event = { };
86766 int it_id_set = IT_ID_NOT_SET;
86767
86768 if (!kc)
86769@@ -1011,6 +1012,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
86770 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
86771 return -EFAULT;
86772
86773+	/* only the CLOCK_REALTIME clock can be set; all other clocks
86774+	   have their clock_set fptr set to a nosettime dummy function.
86775+	   CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
86776+	   call common_clock_set, which calls do_sys_settimeofday, which
86777+	   we hook.
86778+	 */
86779+
86780 return kc->clock_set(which_clock, &new_tp);
86781 }
86782
86783diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
86784index 2fac9cc..56fef29 100644
86785--- a/kernel/power/Kconfig
86786+++ b/kernel/power/Kconfig
86787@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
86788 config HIBERNATION
86789 bool "Hibernation (aka 'suspend to disk')"
86790 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
86791+ depends on !GRKERNSEC_KMEM
86792+ depends on !PAX_MEMORY_SANITIZE
86793 select HIBERNATE_CALLBACKS
86794 select LZO_COMPRESS
86795 select LZO_DECOMPRESS
86796diff --git a/kernel/power/process.c b/kernel/power/process.c
86797index 06ec886..9dba35e 100644
86798--- a/kernel/power/process.c
86799+++ b/kernel/power/process.c
86800@@ -34,6 +34,7 @@ static int try_to_freeze_tasks(bool user_only)
86801 unsigned int elapsed_msecs;
86802 bool wakeup = false;
86803 int sleep_usecs = USEC_PER_MSEC;
86804+ bool timedout = false;
86805
86806 do_gettimeofday(&start);
86807
86808@@ -44,13 +45,20 @@ static int try_to_freeze_tasks(bool user_only)
86809
86810 while (true) {
86811 todo = 0;
86812+ if (time_after(jiffies, end_time))
86813+ timedout = true;
86814 read_lock(&tasklist_lock);
86815 do_each_thread(g, p) {
86816 if (p == current || !freeze_task(p))
86817 continue;
86818
86819- if (!freezer_should_skip(p))
86820+ if (!freezer_should_skip(p)) {
86821 todo++;
86822+ if (timedout) {
86823+ printk(KERN_ERR "Task refusing to freeze:\n");
86824+ sched_show_task(p);
86825+ }
86826+ }
86827 } while_each_thread(g, p);
86828 read_unlock(&tasklist_lock);
86829
86830@@ -59,7 +67,7 @@ static int try_to_freeze_tasks(bool user_only)
86831 todo += wq_busy;
86832 }
86833
86834- if (!todo || time_after(jiffies, end_time))
86835+ if (!todo || timedout)
86836 break;
86837
86838 if (pm_wakeup_pending()) {
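The try_to_freeze_tasks() rework latches the timeout into a local flag at the top of each pass, so the new diagnostic (naming each task that refuses to freeze) and the loop-exit test observe the same condition. A compressed, purely illustrative sketch of that control flow, with ticks standing in for jiffies:

    /* One permanently stuck task forces the timeout path; report and
     * exit both read the latched 'timedout' flag. */
    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        int now = 0, end_time = 3;   /* illustrative time budget */
        int stuck_tasks = 1;         /* this one never freezes */
        bool timedout = false;
        int todo;

        while (true) {
            todo = 0;
            if (now > end_time)
                timedout = true;
            if (stuck_tasks) {
                todo += stuck_tasks;
                if (timedout)
                    printf("Task refusing to freeze: <task>\n");
            }
            if (!todo || timedout)   /* report and exit agree */
                break;
            now++;
        }
        return 0;
    }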
86839diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
86840index be7c86b..c741464 100644
86841--- a/kernel/printk/printk.c
86842+++ b/kernel/printk/printk.c
86843@@ -385,6 +385,11 @@ static int check_syslog_permissions(int type, bool from_file)
86844 if (from_file && type != SYSLOG_ACTION_OPEN)
86845 return 0;
86846
86847+#ifdef CONFIG_GRKERNSEC_DMESG
86848+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
86849+ return -EPERM;
86850+#endif
86851+
86852 if (syslog_action_restricted(type)) {
86853 if (capable(CAP_SYSLOG))
86854 return 0;
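check_syslog_permissions() gains an early gate: with the grsec dmesg restriction enabled, callers lacking both CAP_SYSLOG and CAP_SYS_ADMIN are refused before the stock policy runs. A sketch of the gate's shape; the capability helpers are illustrative stand-ins that simply model an unprivileged caller:

    /* Stand-in capability helpers; EPERM comes from <errno.h>. */
    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool grsec_enable_dmesg = true;

    static bool capable_syslog(void)    { return false; }
    static bool capable_sys_admin(void) { return false; }

    static int check_syslog_permissions(void)
    {
        if (grsec_enable_dmesg && !capable_syslog() && !capable_sys_admin())
            return -EPERM;
        /* ... the stock syslog_action_restricted() policy follows ... */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_syslog_permissions());   /* -1, i.e. -EPERM */
        return 0;
    }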
86855diff --git a/kernel/profile.c b/kernel/profile.c
86856index 6631e1e..310c266 100644
86857--- a/kernel/profile.c
86858+++ b/kernel/profile.c
86859@@ -37,7 +37,7 @@ struct profile_hit {
86860 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
86861 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
86862
86863-static atomic_t *prof_buffer;
86864+static atomic_unchecked_t *prof_buffer;
86865 static unsigned long prof_len, prof_shift;
86866
86867 int prof_on __read_mostly;
86868@@ -260,7 +260,7 @@ static void profile_flip_buffers(void)
86869 hits[i].pc = 0;
86870 continue;
86871 }
86872- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
86873+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
86874 hits[i].hits = hits[i].pc = 0;
86875 }
86876 }
86877@@ -321,9 +321,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
86878 * Add the current hit(s) and flush the write-queue out
86879 * to the global buffer:
86880 */
86881- atomic_add(nr_hits, &prof_buffer[pc]);
86882+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
86883 for (i = 0; i < NR_PROFILE_HIT; ++i) {
86884- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
86885+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
86886 hits[i].pc = hits[i].hits = 0;
86887 }
86888 out:
86889@@ -398,7 +398,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
86890 {
86891 unsigned long pc;
86892 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
86893- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
86894+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
86895 }
86896 #endif /* !CONFIG_SMP */
86897
86898@@ -494,7 +494,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
86899 return -EFAULT;
86900 buf++; p++; count--; read++;
86901 }
86902- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
86903+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
86904 if (copy_to_user(buf, (void *)pnt, count))
86905 return -EFAULT;
86906 read += count;
86907@@ -525,7 +525,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
86908 }
86909 #endif
86910 profile_discard_flip_buffers();
86911- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
86912+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
86913 return count;
86914 }
86915
86916diff --git a/kernel/ptrace.c b/kernel/ptrace.c
86917index 1f4bcb3..99cf7ab 100644
86918--- a/kernel/ptrace.c
86919+++ b/kernel/ptrace.c
86920@@ -327,7 +327,7 @@ static int ptrace_attach(struct task_struct *task, long request,
86921 if (seize)
86922 flags |= PT_SEIZED;
86923 rcu_read_lock();
86924- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
86925+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
86926 flags |= PT_PTRACE_CAP;
86927 rcu_read_unlock();
86928 task->ptrace = flags;
86929@@ -538,7 +538,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
86930 break;
86931 return -EIO;
86932 }
86933- if (copy_to_user(dst, buf, retval))
86934+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
86935 return -EFAULT;
86936 copied += retval;
86937 src += retval;
86938@@ -806,7 +806,7 @@ int ptrace_request(struct task_struct *child, long request,
86939 bool seized = child->ptrace & PT_SEIZED;
86940 int ret = -EIO;
86941 siginfo_t siginfo, *si;
86942- void __user *datavp = (void __user *) data;
86943+ void __user *datavp = (__force void __user *) data;
86944 unsigned long __user *datalp = datavp;
86945 unsigned long flags;
86946
86947@@ -1052,14 +1052,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
86948 goto out;
86949 }
86950
86951+ if (gr_handle_ptrace(child, request)) {
86952+ ret = -EPERM;
86953+ goto out_put_task_struct;
86954+ }
86955+
86956 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
86957 ret = ptrace_attach(child, request, addr, data);
86958 /*
86959 * Some architectures need to do book-keeping after
86960 * a ptrace attach.
86961 */
86962- if (!ret)
86963+ if (!ret) {
86964 arch_ptrace_attach(child);
86965+ gr_audit_ptrace(child);
86966+ }
86967 goto out_put_task_struct;
86968 }
86969
86970@@ -1087,7 +1094,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
86971 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
86972 if (copied != sizeof(tmp))
86973 return -EIO;
86974- return put_user(tmp, (unsigned long __user *)data);
86975+ return put_user(tmp, (__force unsigned long __user *)data);
86976 }
86977
86978 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
86979@@ -1181,7 +1188,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
86980 }
86981
86982 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
86983- compat_long_t addr, compat_long_t data)
86984+ compat_ulong_t addr, compat_ulong_t data)
86985 {
86986 struct task_struct *child;
86987 long ret;
86988@@ -1197,14 +1204,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
86989 goto out;
86990 }
86991
86992+ if (gr_handle_ptrace(child, request)) {
86993+ ret = -EPERM;
86994+ goto out_put_task_struct;
86995+ }
86996+
86997 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
86998 ret = ptrace_attach(child, request, addr, data);
86999 /*
87000 * Some architectures need to do book-keeping after
87001 * a ptrace attach.
87002 */
87003- if (!ret)
87004+ if (!ret) {
87005 arch_ptrace_attach(child);
87006+ gr_audit_ptrace(child);
87007+ }
87008 goto out_put_task_struct;
87009 }
87010
87011diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
87012index 01d5ccb..cdcbee6 100644
87013--- a/kernel/rcu/srcu.c
87014+++ b/kernel/rcu/srcu.c
87015@@ -300,9 +300,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
87016
87017 idx = ACCESS_ONCE(sp->completed) & 0x1;
87018 preempt_disable();
87019- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
87020+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
87021 smp_mb(); /* B */ /* Avoid leaking the critical section. */
87022- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
87023+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
87024 preempt_enable();
87025 return idx;
87026 }
87027diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
87028index 1254f31..16258dc 100644
87029--- a/kernel/rcu/tiny.c
87030+++ b/kernel/rcu/tiny.c
87031@@ -46,7 +46,7 @@
87032 /* Forward declarations for tiny_plugin.h. */
87033 struct rcu_ctrlblk;
87034 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
87035-static void rcu_process_callbacks(struct softirq_action *unused);
87036+static void rcu_process_callbacks(void);
87037 static void __call_rcu(struct rcu_head *head,
87038 void (*func)(struct rcu_head *rcu),
87039 struct rcu_ctrlblk *rcp);
87040@@ -312,7 +312,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
87041 false));
87042 }
87043
87044-static void rcu_process_callbacks(struct softirq_action *unused)
87045+static __latent_entropy void rcu_process_callbacks(void)
87046 {
87047 __rcu_process_callbacks(&rcu_sched_ctrlblk);
87048 __rcu_process_callbacks(&rcu_bh_ctrlblk);
87049diff --git a/kernel/rcu/torture.c b/kernel/rcu/torture.c
87050index 3929cd4..421624d 100644
87051--- a/kernel/rcu/torture.c
87052+++ b/kernel/rcu/torture.c
87053@@ -176,12 +176,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
87054 { 0 };
87055 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
87056 { 0 };
87057-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
87058-static atomic_t n_rcu_torture_alloc;
87059-static atomic_t n_rcu_torture_alloc_fail;
87060-static atomic_t n_rcu_torture_free;
87061-static atomic_t n_rcu_torture_mberror;
87062-static atomic_t n_rcu_torture_error;
87063+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
87064+static atomic_unchecked_t n_rcu_torture_alloc;
87065+static atomic_unchecked_t n_rcu_torture_alloc_fail;
87066+static atomic_unchecked_t n_rcu_torture_free;
87067+static atomic_unchecked_t n_rcu_torture_mberror;
87068+static atomic_unchecked_t n_rcu_torture_error;
87069 static long n_rcu_torture_barrier_error;
87070 static long n_rcu_torture_boost_ktrerror;
87071 static long n_rcu_torture_boost_rterror;
87072@@ -299,11 +299,11 @@ rcu_torture_alloc(void)
87073
87074 spin_lock_bh(&rcu_torture_lock);
87075 if (list_empty(&rcu_torture_freelist)) {
87076- atomic_inc(&n_rcu_torture_alloc_fail);
87077+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
87078 spin_unlock_bh(&rcu_torture_lock);
87079 return NULL;
87080 }
87081- atomic_inc(&n_rcu_torture_alloc);
87082+ atomic_inc_unchecked(&n_rcu_torture_alloc);
87083 p = rcu_torture_freelist.next;
87084 list_del_init(p);
87085 spin_unlock_bh(&rcu_torture_lock);
87086@@ -316,7 +316,7 @@ rcu_torture_alloc(void)
87087 static void
87088 rcu_torture_free(struct rcu_torture *p)
87089 {
87090- atomic_inc(&n_rcu_torture_free);
87091+ atomic_inc_unchecked(&n_rcu_torture_free);
87092 spin_lock_bh(&rcu_torture_lock);
87093 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
87094 spin_unlock_bh(&rcu_torture_lock);
87095@@ -437,7 +437,7 @@ rcu_torture_cb(struct rcu_head *p)
87096 i = rp->rtort_pipe_count;
87097 if (i > RCU_TORTURE_PIPE_LEN)
87098 i = RCU_TORTURE_PIPE_LEN;
87099- atomic_inc(&rcu_torture_wcount[i]);
87100+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
87101 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
87102 rp->rtort_mbtest = 0;
87103 rcu_torture_free(rp);
87104@@ -827,7 +827,7 @@ rcu_torture_writer(void *arg)
87105 i = old_rp->rtort_pipe_count;
87106 if (i > RCU_TORTURE_PIPE_LEN)
87107 i = RCU_TORTURE_PIPE_LEN;
87108- atomic_inc(&rcu_torture_wcount[i]);
87109+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
87110 old_rp->rtort_pipe_count++;
87111 if (gp_normal == gp_exp)
87112 exp = !!(rcu_random(&rand) & 0x80);
87113@@ -845,7 +845,7 @@ rcu_torture_writer(void *arg)
87114 i = rp->rtort_pipe_count;
87115 if (i > RCU_TORTURE_PIPE_LEN)
87116 i = RCU_TORTURE_PIPE_LEN;
87117- atomic_inc(&rcu_torture_wcount[i]);
87118+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
87119 if (++rp->rtort_pipe_count >=
87120 RCU_TORTURE_PIPE_LEN) {
87121 rp->rtort_mbtest = 0;
87122@@ -944,7 +944,7 @@ static void rcu_torture_timer(unsigned long unused)
87123 return;
87124 }
87125 if (p->rtort_mbtest == 0)
87126- atomic_inc(&n_rcu_torture_mberror);
87127+ atomic_inc_unchecked(&n_rcu_torture_mberror);
87128 spin_lock(&rand_lock);
87129 cur_ops->read_delay(&rand);
87130 n_rcu_torture_timers++;
87131@@ -1014,7 +1014,7 @@ rcu_torture_reader(void *arg)
87132 continue;
87133 }
87134 if (p->rtort_mbtest == 0)
87135- atomic_inc(&n_rcu_torture_mberror);
87136+ atomic_inc_unchecked(&n_rcu_torture_mberror);
87137 cur_ops->read_delay(&rand);
87138 preempt_disable();
87139 pipe_count = p->rtort_pipe_count;
87140@@ -1077,11 +1077,11 @@ rcu_torture_printk(char *page)
87141 rcu_torture_current,
87142 rcu_torture_current_version,
87143 list_empty(&rcu_torture_freelist),
87144- atomic_read(&n_rcu_torture_alloc),
87145- atomic_read(&n_rcu_torture_alloc_fail),
87146- atomic_read(&n_rcu_torture_free));
87147+ atomic_read_unchecked(&n_rcu_torture_alloc),
87148+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
87149+ atomic_read_unchecked(&n_rcu_torture_free));
87150 cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
87151- atomic_read(&n_rcu_torture_mberror),
87152+ atomic_read_unchecked(&n_rcu_torture_mberror),
87153 n_rcu_torture_boost_ktrerror,
87154 n_rcu_torture_boost_rterror);
87155 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
87156@@ -1100,14 +1100,14 @@ rcu_torture_printk(char *page)
87157 n_barrier_attempts,
87158 n_rcu_torture_barrier_error);
87159 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
87160- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
87161+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
87162 n_rcu_torture_barrier_error != 0 ||
87163 n_rcu_torture_boost_ktrerror != 0 ||
87164 n_rcu_torture_boost_rterror != 0 ||
87165 n_rcu_torture_boost_failure != 0 ||
87166 i > 1) {
87167 cnt += sprintf(&page[cnt], "!!! ");
87168- atomic_inc(&n_rcu_torture_error);
87169+ atomic_inc_unchecked(&n_rcu_torture_error);
87170 WARN_ON_ONCE(1);
87171 }
87172 cnt += sprintf(&page[cnt], "Reader Pipe: ");
87173@@ -1121,7 +1121,7 @@ rcu_torture_printk(char *page)
87174 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
87175 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
87176 cnt += sprintf(&page[cnt], " %d",
87177- atomic_read(&rcu_torture_wcount[i]));
87178+ atomic_read_unchecked(&rcu_torture_wcount[i]));
87179 }
87180 cnt += sprintf(&page[cnt], "\n");
87181 if (cur_ops->stats)
87182@@ -1836,7 +1836,7 @@ rcu_torture_cleanup(void)
87183
87184 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
87185
87186- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
87187+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
87188 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
87189 else if (n_online_successes != n_online_attempts ||
87190 n_offline_successes != n_offline_attempts)
87191@@ -1958,18 +1958,18 @@ rcu_torture_init(void)
87192
87193 rcu_torture_current = NULL;
87194 rcu_torture_current_version = 0;
87195- atomic_set(&n_rcu_torture_alloc, 0);
87196- atomic_set(&n_rcu_torture_alloc_fail, 0);
87197- atomic_set(&n_rcu_torture_free, 0);
87198- atomic_set(&n_rcu_torture_mberror, 0);
87199- atomic_set(&n_rcu_torture_error, 0);
87200+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
87201+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
87202+ atomic_set_unchecked(&n_rcu_torture_free, 0);
87203+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
87204+ atomic_set_unchecked(&n_rcu_torture_error, 0);
87205 n_rcu_torture_barrier_error = 0;
87206 n_rcu_torture_boost_ktrerror = 0;
87207 n_rcu_torture_boost_rterror = 0;
87208 n_rcu_torture_boost_failure = 0;
87209 n_rcu_torture_boosts = 0;
87210 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
87211- atomic_set(&rcu_torture_wcount[i], 0);
87212+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
87213 for_each_possible_cpu(cpu) {
87214 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
87215 per_cpu(rcu_torture_count, cpu)[i] = 0;
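
The rcutorture hunks above are the canonical example of PaX's REFCOUNT split: under that hardening, atomic_t increments trap on overflow to catch reference-count leaks, so counters that only gather statistics (and may legitimately wrap) are moved to atomic_unchecked_t, which keeps plain wrapping semantics. A minimal userspace model of the distinction (the checked/unchecked names mirror the patch; abort() stands in for the kernel's overflow trap):

	#include <limits.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	typedef struct { atomic_int v; } atomic_checked_t;   /* models atomic_t under REFCOUNT */
	typedef struct { atomic_int v; } atomic_unchecked_t; /* models atomic_unchecked_t */

	static void atomic_inc_checked(atomic_checked_t *a)
	{
		/* REFCOUNT semantics: an increment that would wrap is a bug. */
		if (atomic_load(&a->v) == INT_MAX)
			abort();
		atomic_fetch_add(&a->v, 1);
	}

	static void atomic_inc_unchecked_model(atomic_unchecked_t *a)
	{
		atomic_fetch_add(&a->v, 1); /* C11 atomic arithmetic wraps silently */
	}

	int main(void)
	{
		atomic_checked_t refs;
		atomic_unchecked_t wcount;

		atomic_init(&refs.v, 0);
		atomic_init(&wcount.v, INT_MAX);

		atomic_inc_checked(&refs);           /* would abort() at INT_MAX */
		atomic_inc_unchecked_model(&wcount); /* fine: a statistic may wrap */
		printf("wrapped to %d\n", atomic_load(&wcount.v));
		return 0;
	}
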
87216diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
87217index dd08198..5ccccbe 100644
87218--- a/kernel/rcu/tree.c
87219+++ b/kernel/rcu/tree.c
87220@@ -383,9 +383,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
87221 rcu_prepare_for_idle(smp_processor_id());
87222 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
87223 smp_mb__before_atomic_inc(); /* See above. */
87224- atomic_inc(&rdtp->dynticks);
87225+ atomic_inc_unchecked(&rdtp->dynticks);
87226 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
87227- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
87228+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
87229
87230 /*
87231 * It is illegal to enter an extended quiescent state while
87232@@ -502,10 +502,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
87233 int user)
87234 {
87235 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
87236- atomic_inc(&rdtp->dynticks);
87237+ atomic_inc_unchecked(&rdtp->dynticks);
87238 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
87239 smp_mb__after_atomic_inc(); /* See above. */
87240- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
87241+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
87242 rcu_cleanup_after_idle(smp_processor_id());
87243 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
87244 if (!user && !is_idle_task(current)) {
87245@@ -625,14 +625,14 @@ void rcu_nmi_enter(void)
87246 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
87247
87248 if (rdtp->dynticks_nmi_nesting == 0 &&
87249- (atomic_read(&rdtp->dynticks) & 0x1))
87250+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
87251 return;
87252 rdtp->dynticks_nmi_nesting++;
87253 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
87254- atomic_inc(&rdtp->dynticks);
87255+ atomic_inc_unchecked(&rdtp->dynticks);
87256 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
87257 smp_mb__after_atomic_inc(); /* See above. */
87258- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
87259+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
87260 }
87261
87262 /**
87263@@ -651,9 +651,9 @@ void rcu_nmi_exit(void)
87264 return;
87265 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
87266 smp_mb__before_atomic_inc(); /* See above. */
87267- atomic_inc(&rdtp->dynticks);
87268+ atomic_inc_unchecked(&rdtp->dynticks);
87269 smp_mb__after_atomic_inc(); /* Force delay to next write. */
87270- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
87271+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
87272 }
87273
87274 /**
87275@@ -666,7 +666,7 @@ void rcu_nmi_exit(void)
87276 */
87277 bool notrace __rcu_is_watching(void)
87278 {
87279- return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
87280+ return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
87281 }
87282
87283 /**
87284@@ -749,7 +749,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
87285 static int dyntick_save_progress_counter(struct rcu_data *rdp,
87286 bool *isidle, unsigned long *maxj)
87287 {
87288- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
87289+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
87290 rcu_sysidle_check_cpu(rdp, isidle, maxj);
87291 return (rdp->dynticks_snap & 0x1) == 0;
87292 }
87293@@ -766,7 +766,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
87294 unsigned int curr;
87295 unsigned int snap;
87296
87297- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
87298+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
87299 snap = (unsigned int)rdp->dynticks_snap;
87300
87301 /*
87302@@ -1412,9 +1412,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
87303 rdp = this_cpu_ptr(rsp->rda);
87304 rcu_preempt_check_blocked_tasks(rnp);
87305 rnp->qsmask = rnp->qsmaskinit;
87306- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
87307+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
87308 WARN_ON_ONCE(rnp->completed != rsp->completed);
87309- ACCESS_ONCE(rnp->completed) = rsp->completed;
87310+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
87311 if (rnp == rdp->mynode)
87312 __note_gp_changes(rsp, rnp, rdp);
87313 rcu_preempt_boost_start_gp(rnp);
87314@@ -1505,7 +1505,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
87315 */
87316 rcu_for_each_node_breadth_first(rsp, rnp) {
87317 raw_spin_lock_irq(&rnp->lock);
87318- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
87319+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
87320 rdp = this_cpu_ptr(rsp->rda);
87321 if (rnp == rdp->mynode)
87322 __note_gp_changes(rsp, rnp, rdp);
87323@@ -1865,7 +1865,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
87324 rsp->qlen += rdp->qlen;
87325 rdp->n_cbs_orphaned += rdp->qlen;
87326 rdp->qlen_lazy = 0;
87327- ACCESS_ONCE(rdp->qlen) = 0;
87328+ ACCESS_ONCE_RW(rdp->qlen) = 0;
87329 }
87330
87331 /*
87332@@ -2111,7 +2111,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
87333 }
87334 smp_mb(); /* List handling before counting for rcu_barrier(). */
87335 rdp->qlen_lazy -= count_lazy;
87336- ACCESS_ONCE(rdp->qlen) -= count;
87337+ ACCESS_ONCE_RW(rdp->qlen) -= count;
87338 rdp->n_cbs_invoked += count;
87339
87340 /* Reinstate batch limit if we have worked down the excess. */
87341@@ -2308,7 +2308,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
87342 /*
87343 * Do RCU core processing for the current CPU.
87344 */
87345-static void rcu_process_callbacks(struct softirq_action *unused)
87346+static void rcu_process_callbacks(void)
87347 {
87348 struct rcu_state *rsp;
87349
87350@@ -2415,7 +2415,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
87351 WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
87352 if (debug_rcu_head_queue(head)) {
87353 /* Probable double call_rcu(), so leak the callback. */
87354- ACCESS_ONCE(head->func) = rcu_leak_callback;
87355+ ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
87356 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
87357 return;
87358 }
87359@@ -2443,7 +2443,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
87360 local_irq_restore(flags);
87361 return;
87362 }
87363- ACCESS_ONCE(rdp->qlen)++;
87364+ ACCESS_ONCE_RW(rdp->qlen)++;
87365 if (lazy)
87366 rdp->qlen_lazy++;
87367 else
87368@@ -2652,11 +2652,11 @@ void synchronize_sched_expedited(void)
87369 * counter wrap on a 32-bit system. Quite a few more CPUs would of
87370 * course be required on a 64-bit system.
87371 */
87372- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
87373+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
87374 (ulong)atomic_long_read(&rsp->expedited_done) +
87375 ULONG_MAX / 8)) {
87376 synchronize_sched();
87377- atomic_long_inc(&rsp->expedited_wrap);
87378+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
87379 return;
87380 }
87381
87382@@ -2664,7 +2664,7 @@ void synchronize_sched_expedited(void)
87383 * Take a ticket. Note that atomic_inc_return() implies a
87384 * full memory barrier.
87385 */
87386- snap = atomic_long_inc_return(&rsp->expedited_start);
87387+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
87388 firstsnap = snap;
87389 get_online_cpus();
87390 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
87391@@ -2677,14 +2677,14 @@ void synchronize_sched_expedited(void)
87392 synchronize_sched_expedited_cpu_stop,
87393 NULL) == -EAGAIN) {
87394 put_online_cpus();
87395- atomic_long_inc(&rsp->expedited_tryfail);
87396+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
87397
87398 /* Check to see if someone else did our work for us. */
87399 s = atomic_long_read(&rsp->expedited_done);
87400 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
87401 /* ensure test happens before caller kfree */
87402 smp_mb__before_atomic_inc(); /* ^^^ */
87403- atomic_long_inc(&rsp->expedited_workdone1);
87404+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
87405 return;
87406 }
87407
87408@@ -2693,7 +2693,7 @@ void synchronize_sched_expedited(void)
87409 udelay(trycount * num_online_cpus());
87410 } else {
87411 wait_rcu_gp(call_rcu_sched);
87412- atomic_long_inc(&rsp->expedited_normal);
87413+ atomic_long_inc_unchecked(&rsp->expedited_normal);
87414 return;
87415 }
87416
87417@@ -2702,7 +2702,7 @@ void synchronize_sched_expedited(void)
87418 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
87419 /* ensure test happens before caller kfree */
87420 smp_mb__before_atomic_inc(); /* ^^^ */
87421- atomic_long_inc(&rsp->expedited_workdone2);
87422+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
87423 return;
87424 }
87425
87426@@ -2714,10 +2714,10 @@ void synchronize_sched_expedited(void)
87427 * period works for us.
87428 */
87429 get_online_cpus();
87430- snap = atomic_long_read(&rsp->expedited_start);
87431+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
87432 smp_mb(); /* ensure read is before try_stop_cpus(). */
87433 }
87434- atomic_long_inc(&rsp->expedited_stoppedcpus);
87435+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
87436
87437 /*
87438 * Everyone up to our most recent fetch is covered by our grace
87439@@ -2726,16 +2726,16 @@ void synchronize_sched_expedited(void)
87440 * than we did already did their update.
87441 */
87442 do {
87443- atomic_long_inc(&rsp->expedited_done_tries);
87444+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
87445 s = atomic_long_read(&rsp->expedited_done);
87446 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
87447 /* ensure test happens before caller kfree */
87448 smp_mb__before_atomic_inc(); /* ^^^ */
87449- atomic_long_inc(&rsp->expedited_done_lost);
87450+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
87451 break;
87452 }
87453 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
87454- atomic_long_inc(&rsp->expedited_done_exit);
87455+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
87456
87457 put_online_cpus();
87458 }
87459@@ -2931,7 +2931,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
87460 * ACCESS_ONCE() to prevent the compiler from speculating
87461 * the increment to precede the early-exit check.
87462 */
87463- ACCESS_ONCE(rsp->n_barrier_done)++;
87464+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
87465 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
87466 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
87467 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
87468@@ -2981,7 +2981,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
87469
87470 /* Increment ->n_barrier_done to prevent duplicate work. */
87471 smp_mb(); /* Keep increment after above mechanism. */
87472- ACCESS_ONCE(rsp->n_barrier_done)++;
87473+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
87474 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
87475 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
87476 smp_mb(); /* Keep increment before caller's subsequent code. */
87477@@ -3026,10 +3026,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
87478 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
87479 init_callback_list(rdp);
87480 rdp->qlen_lazy = 0;
87481- ACCESS_ONCE(rdp->qlen) = 0;
87482+ ACCESS_ONCE_RW(rdp->qlen) = 0;
87483 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
87484 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
87485- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
87486+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
87487 rdp->cpu = cpu;
87488 rdp->rsp = rsp;
87489 rcu_boot_init_nocb_percpu_data(rdp);
87490@@ -3063,8 +3063,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
87491 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
87492 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
87493 rcu_sysidle_init_percpu_data(rdp->dynticks);
87494- atomic_set(&rdp->dynticks->dynticks,
87495- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
87496+ atomic_set_unchecked(&rdp->dynticks->dynticks,
87497+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
87498 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
87499
87500 /* Add CPU to rcu_node bitmasks. */
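
The ->dynticks conversions in tree.c are safe for the same reason: the counter's absolute value is never meaningful. It is incremented on every transition into and out of dyntick-idle, so readers only test its parity (odd = CPU is non-idle) or compare it against a snapshot, and wrap-around cannot change either answer. A standalone sketch of the two tests that the WARN_ON_ONCE and dyntick_save_progress_counter() lines rely on (simplified; the real snapshot comparison is more careful):

	#include <stdbool.h>

	/* Even value: the CPU is in an extended quiescent state (idle). */
	static bool cpu_in_eqs(unsigned int dynticks)
	{
		return (dynticks & 0x1) == 0;
	}

	/*
	 * A grace period may treat the CPU as quiesced if it was idle when
	 * sampled, or if the counter has moved since the snapshot (it passed
	 * through idle at least once in between).
	 */
	static bool qs_since_snapshot(unsigned int curr, unsigned int snap)
	{
		return cpu_in_eqs(snap) || curr != snap;
	}
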
87501diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
87502index 52be957..365ded3 100644
87503--- a/kernel/rcu/tree.h
87504+++ b/kernel/rcu/tree.h
87505@@ -87,11 +87,11 @@ struct rcu_dynticks {
87506 long long dynticks_nesting; /* Track irq/process nesting level. */
87507 /* Process level is worth LLONG_MAX/2. */
87508 int dynticks_nmi_nesting; /* Track NMI nesting level. */
87509- atomic_t dynticks; /* Even value for idle, else odd. */
87510+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
87511 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
87512 long long dynticks_idle_nesting;
87513 /* irq/process nesting level from idle. */
87514- atomic_t dynticks_idle; /* Even value for idle, else odd. */
87515+ atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
87516 /* "Idle" excludes userspace execution. */
87517 unsigned long dynticks_idle_jiffies;
87518 /* End of last non-NMI non-idle period. */
87519@@ -429,17 +429,17 @@ struct rcu_state {
87520 /* _rcu_barrier(). */
87521 /* End of fields guarded by barrier_mutex. */
87522
87523- atomic_long_t expedited_start; /* Starting ticket. */
87524- atomic_long_t expedited_done; /* Done ticket. */
87525- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
87526- atomic_long_t expedited_tryfail; /* # acquisition failures. */
87527- atomic_long_t expedited_workdone1; /* # done by others #1. */
87528- atomic_long_t expedited_workdone2; /* # done by others #2. */
87529- atomic_long_t expedited_normal; /* # fallbacks to normal. */
87530- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
87531- atomic_long_t expedited_done_tries; /* # tries to update _done. */
87532- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
87533- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
87534+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
87535+ atomic_long_t expedited_done; /* Done ticket. */
87536+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
87537+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
87538+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
87539+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
87540+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
87541+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
87542+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
87543+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
87544+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
87545
87546 unsigned long jiffies_force_qs; /* Time at which to invoke */
87547 /* force_quiescent_state(). */
87548diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
87549index 08a7652..3598c7e 100644
87550--- a/kernel/rcu/tree_plugin.h
87551+++ b/kernel/rcu/tree_plugin.h
87552@@ -749,7 +749,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
87553 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
87554 {
87555 return !rcu_preempted_readers_exp(rnp) &&
87556- ACCESS_ONCE(rnp->expmask) == 0;
87557+ ACCESS_ONCE_RW(rnp->expmask) == 0;
87558 }
87559
87560 /*
87561@@ -905,7 +905,7 @@ void synchronize_rcu_expedited(void)
87562
87563 /* Clean up and exit. */
87564 smp_mb(); /* ensure expedited GP seen before counter increment. */
87565- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
87566+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
87567 unlock_mb_ret:
87568 mutex_unlock(&sync_rcu_preempt_exp_mutex);
87569 mb_ret:
87570@@ -1479,7 +1479,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
87571 free_cpumask_var(cm);
87572 }
87573
87574-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
87575+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
87576 .store = &rcu_cpu_kthread_task,
87577 .thread_should_run = rcu_cpu_kthread_should_run,
87578 .thread_fn = rcu_cpu_kthread,
87579@@ -1946,7 +1946,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
87580 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
87581 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
87582 cpu, ticks_value, ticks_title,
87583- atomic_read(&rdtp->dynticks) & 0xfff,
87584+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
87585 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
87586 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
87587 fast_no_hz);
87588@@ -2109,7 +2109,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
87589
87590 /* Enqueue the callback on the nocb list and update counts. */
87591 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
87592- ACCESS_ONCE(*old_rhpp) = rhp;
87593+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
87594 atomic_long_add(rhcount, &rdp->nocb_q_count);
87595 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
87596
87597@@ -2272,12 +2272,12 @@ static int rcu_nocb_kthread(void *arg)
87598 * Extract queued callbacks, update counts, and wait
87599 * for a grace period to elapse.
87600 */
87601- ACCESS_ONCE(rdp->nocb_head) = NULL;
87602+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
87603 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
87604 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
87605 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
87606- ACCESS_ONCE(rdp->nocb_p_count) += c;
87607- ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
87608+ ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
87609+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
87610 rcu_nocb_wait_gp(rdp);
87611
87612 /* Each pass through the following loop invokes a callback. */
87613@@ -2303,8 +2303,8 @@ static int rcu_nocb_kthread(void *arg)
87614 list = next;
87615 }
87616 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
87617- ACCESS_ONCE(rdp->nocb_p_count) -= c;
87618- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
87619+ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
87620+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
87621 rdp->n_nocbs_invoked += c;
87622 }
87623 return 0;
87624@@ -2331,7 +2331,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
87625 t = kthread_run(rcu_nocb_kthread, rdp,
87626 "rcuo%c/%d", rsp->abbr, cpu);
87627 BUG_ON(IS_ERR(t));
87628- ACCESS_ONCE(rdp->nocb_kthread) = t;
87629+ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
87630 }
87631 }
87632
87633@@ -2457,11 +2457,11 @@ static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
87634
87635 /* Record start of fully idle period. */
87636 j = jiffies;
87637- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
87638+ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
87639 smp_mb__before_atomic_inc();
87640- atomic_inc(&rdtp->dynticks_idle);
87641+ atomic_inc_unchecked(&rdtp->dynticks_idle);
87642 smp_mb__after_atomic_inc();
87643- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
87644+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
87645 }
87646
87647 /*
87648@@ -2526,9 +2526,9 @@ static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
87649
87650 /* Record end of idle period. */
87651 smp_mb__before_atomic_inc();
87652- atomic_inc(&rdtp->dynticks_idle);
87653+ atomic_inc_unchecked(&rdtp->dynticks_idle);
87654 smp_mb__after_atomic_inc();
87655- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
87656+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
87657
87658 /*
87659 * If we are the timekeeping CPU, we are permitted to be non-idle
87660@@ -2569,7 +2569,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
87661 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
87662
87663 /* Pick up current idle and NMI-nesting counter and check. */
87664- cur = atomic_read(&rdtp->dynticks_idle);
87665+ cur = atomic_read_unchecked(&rdtp->dynticks_idle);
87666 if (cur & 0x1) {
87667 *isidle = false; /* We are not idle! */
87668 return;
87669@@ -2632,7 +2632,7 @@ static void rcu_sysidle(unsigned long j)
87670 case RCU_SYSIDLE_NOT:
87671
87672 /* First time all are idle, so note a short idle period. */
87673- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
87674+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
87675 break;
87676
87677 case RCU_SYSIDLE_SHORT:
87678@@ -2669,7 +2669,7 @@ static void rcu_sysidle(unsigned long j)
87679 static void rcu_sysidle_cancel(void)
87680 {
87681 smp_mb();
87682- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
87683+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
87684 }
87685
87686 /*
87687@@ -2717,7 +2717,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
87688 smp_mb(); /* grace period precedes setting inuse. */
87689
87690 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
87691- ACCESS_ONCE(rshp->inuse) = 0;
87692+ ACCESS_ONCE_RW(rshp->inuse) = 0;
87693 }
87694
87695 /*
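
The ACCESS_ONCE() to ACCESS_ONCE_RW() churn throughout these RCU files follows from a single change elsewhere in the patch: ACCESS_ONCE() is redefined to dereference a const-qualified volatile pointer, so any write through it becomes a compile error, and genuine write sites must opt in explicitly. Roughly (reproduced from memory, so treat the exact spelling as an assumption):

	/* Reads only: writing through this now fails to compile. */
	#define ACCESS_ONCE(x)		(*(const volatile typeof(x) *)&(x))

	/* Explicit opt-in for the sites that really do write. */
	#define ACCESS_ONCE_RW(x)	(*(volatile typeof(x) *)&(x))

That is why even statements like ACCESS_ONCE_RW(rdp->qlen)++ change: the increment both reads and writes, and the const-qualified form would reject it.
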
87696diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
87697index 3596797..f78391c 100644
87698--- a/kernel/rcu/tree_trace.c
87699+++ b/kernel/rcu/tree_trace.c
87700@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
87701 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
87702 rdp->passed_quiesce, rdp->qs_pending);
87703 seq_printf(m, " dt=%d/%llx/%d df=%lu",
87704- atomic_read(&rdp->dynticks->dynticks),
87705+ atomic_read_unchecked(&rdp->dynticks->dynticks),
87706 rdp->dynticks->dynticks_nesting,
87707 rdp->dynticks->dynticks_nmi_nesting,
87708 rdp->dynticks_fqs);
87709@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
87710 struct rcu_state *rsp = (struct rcu_state *)m->private;
87711
87712 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
87713- atomic_long_read(&rsp->expedited_start),
87714+ atomic_long_read_unchecked(&rsp->expedited_start),
87715 atomic_long_read(&rsp->expedited_done),
87716- atomic_long_read(&rsp->expedited_wrap),
87717- atomic_long_read(&rsp->expedited_tryfail),
87718- atomic_long_read(&rsp->expedited_workdone1),
87719- atomic_long_read(&rsp->expedited_workdone2),
87720- atomic_long_read(&rsp->expedited_normal),
87721- atomic_long_read(&rsp->expedited_stoppedcpus),
87722- atomic_long_read(&rsp->expedited_done_tries),
87723- atomic_long_read(&rsp->expedited_done_lost),
87724- atomic_long_read(&rsp->expedited_done_exit));
87725+ atomic_long_read_unchecked(&rsp->expedited_wrap),
87726+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
87727+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
87728+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
87729+ atomic_long_read_unchecked(&rsp->expedited_normal),
87730+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
87731+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
87732+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
87733+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
87734 return 0;
87735 }
87736
87737diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
87738index 6cb3dff..dc5710f 100644
87739--- a/kernel/rcu/update.c
87740+++ b/kernel/rcu/update.c
87741@@ -318,10 +318,10 @@ int rcu_jiffies_till_stall_check(void)
87742 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
87743 */
87744 if (till_stall_check < 3) {
87745- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
87746+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
87747 till_stall_check = 3;
87748 } else if (till_stall_check > 300) {
87749- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
87750+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
87751 till_stall_check = 300;
87752 }
87753 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
87754diff --git a/kernel/resource.c b/kernel/resource.c
87755index 3f285dc..5755f62 100644
87756--- a/kernel/resource.c
87757+++ b/kernel/resource.c
87758@@ -152,8 +152,18 @@ static const struct file_operations proc_iomem_operations = {
87759
87760 static int __init ioresources_init(void)
87761 {
87762+#ifdef CONFIG_GRKERNSEC_PROC_ADD
87763+#ifdef CONFIG_GRKERNSEC_PROC_USER
87764+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
87765+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
87766+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
87767+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
87768+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
87769+#endif
87770+#else
87771 proc_create("ioports", 0, NULL, &proc_ioports_operations);
87772 proc_create("iomem", 0, NULL, &proc_iomem_operations);
87773+#endif
87774 return 0;
87775 }
87776 __initcall(ioresources_init);
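
The ioresources_init() change narrows /proc/ioports and /proc/iomem, which leak physical-address layout useful to exploit writers. proc_create() promotes a mode of 0 to the usual world-readable default, so the grsec options substitute explicit root-only (0400) or root-plus-group (0440) modes. The selection logic, restated as a plain helper (hypothetical name, userspace mode_t for illustration):

	#include <sys/stat.h>
	#include <sys/types.h>

	static mode_t proc_iomem_mode(int grkernsec_proc_user,
				      int grkernsec_proc_usergroup)
	{
		if (grkernsec_proc_user)
			return S_IRUSR;			/* 0400: root only */
		if (grkernsec_proc_usergroup)
			return S_IRUSR | S_IRGRP;	/* 0440: root + proc group */
		return 0;				/* stock kernel: promoted to 0444 */
	}
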
87777diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
87778index 4a07353..66b5291 100644
87779--- a/kernel/sched/auto_group.c
87780+++ b/kernel/sched/auto_group.c
87781@@ -11,7 +11,7 @@
87782
87783 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
87784 static struct autogroup autogroup_default;
87785-static atomic_t autogroup_seq_nr;
87786+static atomic_unchecked_t autogroup_seq_nr;
87787
87788 void __init autogroup_init(struct task_struct *init_task)
87789 {
87790@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
87791
87792 kref_init(&ag->kref);
87793 init_rwsem(&ag->lock);
87794- ag->id = atomic_inc_return(&autogroup_seq_nr);
87795+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
87796 ag->tg = tg;
87797 #ifdef CONFIG_RT_GROUP_SCHED
87798 /*
87799diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
87800index a63f4dc..349bbb0 100644
87801--- a/kernel/sched/completion.c
87802+++ b/kernel/sched/completion.c
87803@@ -204,7 +204,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
87804 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
87805 * or number of jiffies left till timeout) if completed.
87806 */
87807-long __sched
87808+long __sched __intentional_overflow(-1)
87809 wait_for_completion_interruptible_timeout(struct completion *x,
87810 unsigned long timeout)
87811 {
87812@@ -221,7 +221,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
87813 *
87814 * Return: -ERESTARTSYS if interrupted, 0 if completed.
87815 */
87816-int __sched wait_for_completion_killable(struct completion *x)
87817+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
87818 {
87819 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
87820 if (t == -ERESTARTSYS)
87821@@ -242,7 +242,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
87822 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
87823 * or number of jiffies left till timeout) if completed.
87824 */
87825-long __sched
87826+long __sched __intentional_overflow(-1)
87827 wait_for_completion_killable_timeout(struct completion *x,
87828 unsigned long timeout)
87829 {
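
The completion.c annotations feed the size_overflow GCC plugin, which instruments integer arithmetic and return paths. These wait primitives legitimately return -ERESTARTSYS on interruption, so a return value that goes negative is expected, and __intentional_overflow(-1) tells the plugin not to flag it. The annotation reduces to an attribute that only exists when the plugin is loaded; a sketch of how such a definition typically looks (the preprocessor guard name here is illustrative, not the patch's own):

	#ifdef SIZE_OVERFLOW_PLUGIN
	#define __intentional_overflow(...)	__attribute__((intentional_overflow(__VA_ARGS__)))
	#else
	#define __intentional_overflow(...)
	#endif

	/* Usage shape, as in the hunks above: */
	long __intentional_overflow(-1)
	wait_sketch(unsigned long timeout);	/* may return -ERESTARTSYS */
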
87830diff --git a/kernel/sched/core.c b/kernel/sched/core.c
87831index a88f4a4..9d57ac9 100644
87832--- a/kernel/sched/core.c
87833+++ b/kernel/sched/core.c
87834@@ -2871,6 +2871,8 @@ int can_nice(const struct task_struct *p, const int nice)
87835 /* convert nice value [19,-20] to rlimit style value [1,40] */
87836 int nice_rlim = 20 - nice;
87837
87838+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
87839+
87840 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
87841 capable(CAP_SYS_NICE));
87842 }
87843@@ -2904,7 +2906,8 @@ SYSCALL_DEFINE1(nice, int, increment)
87844 if (nice > 19)
87845 nice = 19;
87846
87847- if (increment < 0 && !can_nice(current, nice))
87848+ if (increment < 0 && (!can_nice(current, nice) ||
87849+ gr_handle_chroot_nice()))
87850 return -EPERM;
87851
87852 retval = security_task_setnice(current, nice);
87853@@ -3066,6 +3069,7 @@ recheck:
87854 unsigned long rlim_rtprio =
87855 task_rlimit(p, RLIMIT_RTPRIO);
87856
87857+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
87858 /* can't set/change the rt policy */
87859 if (policy != p->policy && !rlim_rtprio)
87860 return -EPERM;
87861@@ -4232,7 +4236,7 @@ static void migrate_tasks(unsigned int dead_cpu)
87862
87863 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
87864
87865-static struct ctl_table sd_ctl_dir[] = {
87866+static ctl_table_no_const sd_ctl_dir[] __read_only = {
87867 {
87868 .procname = "sched_domain",
87869 .mode = 0555,
87870@@ -4249,17 +4253,17 @@ static struct ctl_table sd_ctl_root[] = {
87871 {}
87872 };
87873
87874-static struct ctl_table *sd_alloc_ctl_entry(int n)
87875+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
87876 {
87877- struct ctl_table *entry =
87878+ ctl_table_no_const *entry =
87879 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
87880
87881 return entry;
87882 }
87883
87884-static void sd_free_ctl_entry(struct ctl_table **tablep)
87885+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
87886 {
87887- struct ctl_table *entry;
87888+ ctl_table_no_const *entry;
87889
87890 /*
87891 * In the intermediate directories, both the child directory and
87892@@ -4267,22 +4271,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
87893 * will always be set. In the lowest directory the names are
87894 * static strings and all have proc handlers.
87895 */
87896- for (entry = *tablep; entry->mode; entry++) {
87897- if (entry->child)
87898- sd_free_ctl_entry(&entry->child);
87899+ for (entry = tablep; entry->mode; entry++) {
87900+ if (entry->child) {
87901+ sd_free_ctl_entry(entry->child);
87902+ pax_open_kernel();
87903+ entry->child = NULL;
87904+ pax_close_kernel();
87905+ }
87906 if (entry->proc_handler == NULL)
87907 kfree(entry->procname);
87908 }
87909
87910- kfree(*tablep);
87911- *tablep = NULL;
87912+ kfree(tablep);
87913 }
87914
87915 static int min_load_idx = 0;
87916 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
87917
87918 static void
87919-set_table_entry(struct ctl_table *entry,
87920+set_table_entry(ctl_table_no_const *entry,
87921 const char *procname, void *data, int maxlen,
87922 umode_t mode, proc_handler *proc_handler,
87923 bool load_idx)
87924@@ -4302,7 +4309,7 @@ set_table_entry(struct ctl_table *entry,
87925 static struct ctl_table *
87926 sd_alloc_ctl_domain_table(struct sched_domain *sd)
87927 {
87928- struct ctl_table *table = sd_alloc_ctl_entry(13);
87929+ ctl_table_no_const *table = sd_alloc_ctl_entry(13);
87930
87931 if (table == NULL)
87932 return NULL;
87933@@ -4337,9 +4344,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
87934 return table;
87935 }
87936
87937-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
87938+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
87939 {
87940- struct ctl_table *entry, *table;
87941+ ctl_table_no_const *entry, *table;
87942 struct sched_domain *sd;
87943 int domain_num = 0, i;
87944 char buf[32];
87945@@ -4366,11 +4373,13 @@ static struct ctl_table_header *sd_sysctl_header;
87946 static void register_sched_domain_sysctl(void)
87947 {
87948 int i, cpu_num = num_possible_cpus();
87949- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
87950+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
87951 char buf[32];
87952
87953 WARN_ON(sd_ctl_dir[0].child);
87954+ pax_open_kernel();
87955 sd_ctl_dir[0].child = entry;
87956+ pax_close_kernel();
87957
87958 if (entry == NULL)
87959 return;
87960@@ -4393,8 +4402,12 @@ static void unregister_sched_domain_sysctl(void)
87961 if (sd_sysctl_header)
87962 unregister_sysctl_table(sd_sysctl_header);
87963 sd_sysctl_header = NULL;
87964- if (sd_ctl_dir[0].child)
87965- sd_free_ctl_entry(&sd_ctl_dir[0].child);
87966+ if (sd_ctl_dir[0].child) {
87967+ sd_free_ctl_entry(sd_ctl_dir[0].child);
87968+ pax_open_kernel();
87969+ sd_ctl_dir[0].child = NULL;
87970+ pax_close_kernel();
87971+ }
87972 }
87973 #else
87974 static void register_sched_domain_sysctl(void)
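
In core.c, sd_ctl_dir moves into read-only memory, and the heap-built sysctl tables switch to ctl_table_no_const so they alone stay writable. The two unavoidable writes to sd_ctl_dir[0].child are therefore bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift the write protection on kernel read-only data (on x86, by toggling CR0.WP). A userspace analogy with mprotect() conveys the idea, assuming a 4 KiB page size; the kernel primitive is per-CPU and far cheaper:

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>

	/* Stand-in for a __read_only kernel object (page-sized and aligned
	 * so mprotect() can cover exactly this object). */
	static char table[4096] __attribute__((aligned(4096))) = "initial";

	int main(void)
	{
		mprotect(table, sizeof(table), PROT_READ);		/* normally read-only */

		mprotect(table, sizeof(table), PROT_READ | PROT_WRITE);	/* pax_open_kernel() */
		strcpy(table, "patched");				/* the one sanctioned write */
		mprotect(table, sizeof(table), PROT_READ);		/* pax_close_kernel() */

		puts(table);
		return 0;
	}
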
87975diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
87976index e64b079..a46bd34 100644
87977--- a/kernel/sched/fair.c
87978+++ b/kernel/sched/fair.c
87979@@ -1652,7 +1652,7 @@ void task_numa_fault(int last_cpupid, int node, int pages, int flags)
87980
87981 static void reset_ptenuma_scan(struct task_struct *p)
87982 {
87983- ACCESS_ONCE(p->mm->numa_scan_seq)++;
87984+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
87985 p->mm->numa_scan_offset = 0;
87986 }
87987
87988@@ -6863,7 +6863,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
87989 * run_rebalance_domains is triggered when needed from the scheduler tick.
87990 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
87991 */
87992-static void run_rebalance_domains(struct softirq_action *h)
87993+static __latent_entropy void run_rebalance_domains(void)
87994 {
87995 int this_cpu = smp_processor_id();
87996 struct rq *this_rq = cpu_rq(this_cpu);
87997diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
87998index 88c85b2..a1dec86 100644
87999--- a/kernel/sched/sched.h
88000+++ b/kernel/sched/sched.h
88001@@ -1035,7 +1035,7 @@ struct sched_class {
88002 #ifdef CONFIG_FAIR_GROUP_SCHED
88003 void (*task_move_group) (struct task_struct *p, int on_rq);
88004 #endif
88005-};
88006+} __do_const;
88007
88008 #define sched_class_highest (&stop_sched_class)
88009 #define for_each_class(class) \
88010diff --git a/kernel/signal.c b/kernel/signal.c
88011index 940b30e..7fd6041 100644
88012--- a/kernel/signal.c
88013+++ b/kernel/signal.c
88014@@ -51,12 +51,12 @@ static struct kmem_cache *sigqueue_cachep;
88015
88016 int print_fatal_signals __read_mostly;
88017
88018-static void __user *sig_handler(struct task_struct *t, int sig)
88019+static __sighandler_t sig_handler(struct task_struct *t, int sig)
88020 {
88021 return t->sighand->action[sig - 1].sa.sa_handler;
88022 }
88023
88024-static int sig_handler_ignored(void __user *handler, int sig)
88025+static int sig_handler_ignored(__sighandler_t handler, int sig)
88026 {
88027 /* Is it explicitly or implicitly ignored? */
88028 return handler == SIG_IGN ||
88029@@ -65,7 +65,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
88030
88031 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
88032 {
88033- void __user *handler;
88034+ __sighandler_t handler;
88035
88036 handler = sig_handler(t, sig);
88037
88038@@ -369,6 +369,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
88039 atomic_inc(&user->sigpending);
88040 rcu_read_unlock();
88041
88042+ if (!override_rlimit)
88043+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
88044+
88045 if (override_rlimit ||
88046 atomic_read(&user->sigpending) <=
88047 task_rlimit(t, RLIMIT_SIGPENDING)) {
88048@@ -496,7 +499,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
88049
88050 int unhandled_signal(struct task_struct *tsk, int sig)
88051 {
88052- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
88053+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
88054 if (is_global_init(tsk))
88055 return 1;
88056 if (handler != SIG_IGN && handler != SIG_DFL)
88057@@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
88058 }
88059 }
88060
88061+ /* allow glibc communication via tgkill to other threads in our
88062+ thread group */
88063+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
88064+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
88065+ && gr_handle_signal(t, sig))
88066+ return -EPERM;
88067+
88068 return security_task_kill(t, info, sig, 0);
88069 }
88070
88071@@ -1199,7 +1209,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
88072 return send_signal(sig, info, p, 1);
88073 }
88074
88075-static int
88076+int
88077 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
88078 {
88079 return send_signal(sig, info, t, 0);
88080@@ -1236,6 +1246,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
88081 unsigned long int flags;
88082 int ret, blocked, ignored;
88083 struct k_sigaction *action;
88084+ int is_unhandled = 0;
88085
88086 spin_lock_irqsave(&t->sighand->siglock, flags);
88087 action = &t->sighand->action[sig-1];
88088@@ -1250,9 +1261,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
88089 }
88090 if (action->sa.sa_handler == SIG_DFL)
88091 t->signal->flags &= ~SIGNAL_UNKILLABLE;
88092+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
88093+ is_unhandled = 1;
88094 ret = specific_send_sig_info(sig, info, t);
88095 spin_unlock_irqrestore(&t->sighand->siglock, flags);
88096
88097+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
88098+ normal operation */
88099+ if (is_unhandled) {
88100+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
88101+ gr_handle_crash(t, sig);
88102+ }
88103+
88104 return ret;
88105 }
88106
88107@@ -1319,8 +1339,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
88108 ret = check_kill_permission(sig, info, p);
88109 rcu_read_unlock();
88110
88111- if (!ret && sig)
88112+ if (!ret && sig) {
88113 ret = do_send_sig_info(sig, info, p, true);
88114+ if (!ret)
88115+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
88116+ }
88117
88118 return ret;
88119 }
88120@@ -2926,7 +2949,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
88121 int error = -ESRCH;
88122
88123 rcu_read_lock();
88124- p = find_task_by_vpid(pid);
88125+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
88126+ /* allow glibc communication via tgkill to other threads in our
88127+ thread group */
88128+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
88129+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
88130+ p = find_task_by_vpid_unrestricted(pid);
88131+ else
88132+#endif
88133+ p = find_task_by_vpid(pid);
88134 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
88135 error = check_kill_permission(sig, info, p);
88136 /*
88137@@ -3240,8 +3271,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
88138 }
88139 seg = get_fs();
88140 set_fs(KERNEL_DS);
88141- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
88142- (stack_t __force __user *) &uoss,
88143+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
88144+ (stack_t __force_user *) &uoss,
88145 compat_user_stack_pointer());
88146 set_fs(seg);
88147 if (ret >= 0 && uoss_ptr) {
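
check_kill_permission() gains a gr_handle_signal() policy hook, but with a deliberate carve-out: glibc's NPTL implements setuid()-style calls by signalling every sibling thread with a reserved realtime signal (SIGRTMIN+1 here) via tgkill(), and blocking that would break ordinary processes inside hardened chroots. The same exemption reappears in do_send_specific() for CHROOT_FINDTASK. The userspace pattern being whitelisted looks like this (hypothetical demo, not glibc source):

	#define _GNU_SOURCE
	#include <signal.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Direct a realtime signal at one specific thread in our own thread
	 * group -- the syscall shape glibc uses for setxid broadcasts. */
	static int signal_sibling(pid_t tid, int sig)
	{
		return syscall(SYS_tgkill, getpid(), tid, sig);
	}

Note the hook is only skipped when si_code is SI_TKILL, the signal is SIGRTMIN+1, and the target's tgid matches the sender's, so the exemption cannot be used to signal arbitrary processes.
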
88148diff --git a/kernel/smpboot.c b/kernel/smpboot.c
88149index eb89e18..a4e6792 100644
88150--- a/kernel/smpboot.c
88151+++ b/kernel/smpboot.c
88152@@ -288,7 +288,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
88153 }
88154 smpboot_unpark_thread(plug_thread, cpu);
88155 }
88156- list_add(&plug_thread->list, &hotplug_threads);
88157+ pax_list_add(&plug_thread->list, &hotplug_threads);
88158 out:
88159 mutex_unlock(&smpboot_threads_lock);
88160 return ret;
88161@@ -305,7 +305,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
88162 {
88163 get_online_cpus();
88164 mutex_lock(&smpboot_threads_lock);
88165- list_del(&plug_thread->list);
88166+ pax_list_del(&plug_thread->list);
88167 smpboot_destroy_threads(plug_thread);
88168 mutex_unlock(&smpboot_threads_lock);
88169 put_online_cpus();
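
The smpboot hotplug-thread descriptors are another __read_only conversion (see softirq_threads just below), so linking them into the global hotplug_threads list can no longer use list_add()/list_del() directly: the list_head being spliced lives in write-protected memory. pax_list_add()/pax_list_del() wrap the ordinary primitives in an open/close pair, roughly as sketched here (the real versions also keep the list-debug sanity checks):

	static inline void pax_list_add_sketch(struct list_head *new,
					       struct list_head *head)
	{
		pax_open_kernel();
		list_add(new, head);	/* writes new->next/prev in RO memory */
		pax_close_kernel();
	}
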
88170diff --git a/kernel/softirq.c b/kernel/softirq.c
88171index 11025cc..bc0e4dc 100644
88172--- a/kernel/softirq.c
88173+++ b/kernel/softirq.c
88174@@ -50,11 +50,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
88175 EXPORT_SYMBOL(irq_stat);
88176 #endif
88177
88178-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
88179+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
88180
88181 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
88182
88183-char *softirq_to_name[NR_SOFTIRQS] = {
88184+const char * const softirq_to_name[NR_SOFTIRQS] = {
88185 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
88186 "TASKLET", "SCHED", "HRTIMER", "RCU"
88187 };
88188@@ -250,7 +250,7 @@ restart:
88189 kstat_incr_softirqs_this_cpu(vec_nr);
88190
88191 trace_softirq_entry(vec_nr);
88192- h->action(h);
88193+ h->action();
88194 trace_softirq_exit(vec_nr);
88195 if (unlikely(prev_count != preempt_count())) {
88196 printk(KERN_ERR "huh, entered softirq %u %s %p"
88197@@ -419,7 +419,7 @@ void __raise_softirq_irqoff(unsigned int nr)
88198 or_softirq_pending(1UL << nr);
88199 }
88200
88201-void open_softirq(int nr, void (*action)(struct softirq_action *))
88202+void __init open_softirq(int nr, void (*action)(void))
88203 {
88204 softirq_vec[nr].action = action;
88205 }
88206@@ -475,7 +475,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
88207
88208 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
88209
88210-static void tasklet_action(struct softirq_action *a)
88211+static __latent_entropy void tasklet_action(void)
88212 {
88213 struct tasklet_struct *list;
88214
88215@@ -510,7 +510,7 @@ static void tasklet_action(struct softirq_action *a)
88216 }
88217 }
88218
88219-static void tasklet_hi_action(struct softirq_action *a)
88220+static __latent_entropy void tasklet_hi_action(void)
88221 {
88222 struct tasklet_struct *list;
88223
88224@@ -740,7 +740,7 @@ static struct notifier_block cpu_nfb = {
88225 .notifier_call = cpu_callback
88226 };
88227
88228-static struct smp_hotplug_thread softirq_threads = {
88229+static struct smp_hotplug_thread softirq_threads __read_only = {
88230 .store = &ksoftirqd,
88231 .thread_should_run = ksoftirqd_should_run,
88232 .thread_fn = run_ksoftirqd,
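
Three related moves harden the softirq machinery: softirq_vec becomes read-only and page-aligned, open_softirq() becomes __init so handlers can only be registered during boot (a later write to the RO vector would fault anyway), and the never-used struct softirq_action * parameter is dropped from every handler, which is what forced the earlier prototype changes to rcu_process_callbacks() and run_rebalance_domains() and the tasklet actions just above. __latent_entropy additionally asks a GCC plugin to mix some entropy into the pool each time those handlers run. The handler-table shape, before and after:

	/* Before: every action received a pointer it ignored. */
	struct softirq_action_old { void (*action)(struct softirq_action_old *); };

	/* After: a bare function pointer, registered once at boot into a
	 * table the kernel then write-protects. */
	struct softirq_action_new { void (*action)(void); };

	static void tasklet_action_sketch(void) { /* ... */ }

	static const struct softirq_action_new entry = {
		.action = tasklet_action_sketch,
	};

The same idea drives the __do_const on struct sched_class in sched.h above: the constify plugin places every instance of such ops structures in .rodata so their function pointers cannot be redirected at runtime.
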
88233diff --git a/kernel/sys.c b/kernel/sys.c
88234index c723113..46bf922 100644
88235--- a/kernel/sys.c
88236+++ b/kernel/sys.c
88237@@ -148,6 +148,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
88238 error = -EACCES;
88239 goto out;
88240 }
88241+
88242+ if (gr_handle_chroot_setpriority(p, niceval)) {
88243+ error = -EACCES;
88244+ goto out;
88245+ }
88246+
88247 no_nice = security_task_setnice(p, niceval);
88248 if (no_nice) {
88249 error = no_nice;
88250@@ -351,6 +357,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
88251 goto error;
88252 }
88253
88254+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
88255+ goto error;
88256+
88257 if (rgid != (gid_t) -1 ||
88258 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
88259 new->sgid = new->egid;
88260@@ -386,6 +395,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
88261 old = current_cred();
88262
88263 retval = -EPERM;
88264+
88265+ if (gr_check_group_change(kgid, kgid, kgid))
88266+ goto error;
88267+
88268 if (ns_capable(old->user_ns, CAP_SETGID))
88269 new->gid = new->egid = new->sgid = new->fsgid = kgid;
88270 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
88271@@ -403,7 +416,7 @@ error:
88272 /*
88273 * change the user struct in a credentials set to match the new UID
88274 */
88275-static int set_user(struct cred *new)
88276+int set_user(struct cred *new)
88277 {
88278 struct user_struct *new_user;
88279
88280@@ -483,6 +496,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
88281 goto error;
88282 }
88283
88284+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
88285+ goto error;
88286+
88287 if (!uid_eq(new->uid, old->uid)) {
88288 retval = set_user(new);
88289 if (retval < 0)
88290@@ -533,6 +549,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
88291 old = current_cred();
88292
88293 retval = -EPERM;
88294+
88295+ if (gr_check_crash_uid(kuid))
88296+ goto error;
88297+ if (gr_check_user_change(kuid, kuid, kuid))
88298+ goto error;
88299+
88300 if (ns_capable(old->user_ns, CAP_SETUID)) {
88301 new->suid = new->uid = kuid;
88302 if (!uid_eq(kuid, old->uid)) {
88303@@ -602,6 +624,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
88304 goto error;
88305 }
88306
88307+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
88308+ goto error;
88309+
88310 if (ruid != (uid_t) -1) {
88311 new->uid = kruid;
88312 if (!uid_eq(kruid, old->uid)) {
88313@@ -684,6 +709,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
88314 goto error;
88315 }
88316
88317+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
88318+ goto error;
88319+
88320 if (rgid != (gid_t) -1)
88321 new->gid = krgid;
88322 if (egid != (gid_t) -1)
88323@@ -745,12 +773,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
88324 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
88325 ns_capable(old->user_ns, CAP_SETUID)) {
88326 if (!uid_eq(kuid, old->fsuid)) {
88327+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
88328+ goto error;
88329+
88330 new->fsuid = kuid;
88331 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
88332 goto change_okay;
88333 }
88334 }
88335
88336+error:
88337 abort_creds(new);
88338 return old_fsuid;
88339
88340@@ -783,12 +815,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
88341 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
88342 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
88343 ns_capable(old->user_ns, CAP_SETGID)) {
88344+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
88345+ goto error;
88346+
88347 if (!gid_eq(kgid, old->fsgid)) {
88348 new->fsgid = kgid;
88349 goto change_okay;
88350 }
88351 }
88352
88353+error:
88354 abort_creds(new);
88355 return old_fsgid;
88356
88357@@ -1168,19 +1204,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
88358 return -EFAULT;
88359
88360 down_read(&uts_sem);
88361- error = __copy_to_user(&name->sysname, &utsname()->sysname,
88362+ error = __copy_to_user(name->sysname, &utsname()->sysname,
88363 __OLD_UTS_LEN);
88364 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
88365- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
88366+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
88367 __OLD_UTS_LEN);
88368 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
88369- error |= __copy_to_user(&name->release, &utsname()->release,
88370+ error |= __copy_to_user(name->release, &utsname()->release,
88371 __OLD_UTS_LEN);
88372 error |= __put_user(0, name->release + __OLD_UTS_LEN);
88373- error |= __copy_to_user(&name->version, &utsname()->version,
88374+ error |= __copy_to_user(name->version, &utsname()->version,
88375 __OLD_UTS_LEN);
88376 error |= __put_user(0, name->version + __OLD_UTS_LEN);
88377- error |= __copy_to_user(&name->machine, &utsname()->machine,
88378+ error |= __copy_to_user(name->machine, &utsname()->machine,
88379 __OLD_UTS_LEN);
88380 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
88381 up_read(&uts_sem);
88382@@ -1382,6 +1418,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
88383 */
88384 new_rlim->rlim_cur = 1;
88385 }
88386+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
88387+ is changed to a lower value. Since tasks can be created by the same
88388+ user in between this limit change and an execve by this task, force
88389+ a recheck only for this task by setting PF_NPROC_EXCEEDED
88390+ */
88391+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
88392+ tsk->flags |= PF_NPROC_EXCEEDED;
88393 }
88394 if (!retval) {
88395 if (old_rlim)
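
The do_prlimit() hunk closes an RLIMIT_NPROC race: tasks created between a fork/setuid and a later lowering of the limit would otherwise never be re-checked. Setting PF_NPROC_EXCEEDED defers the check to the task's next execve(), where (paraphrasing the consumer side in fs/exec.c, which mainline also carries) it fails with -EAGAIN if the user is still over the lowered limit:

	/* In the execve path, sketched: the one-shot recheck. */
	if ((current->flags & PF_NPROC_EXCEEDED) &&
	    atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) {
		retval = -EAGAIN;
		goto out_ret;
	}
	/* The limit was respected (or we are exempt); don't check again. */
	current->flags &= ~PF_NPROC_EXCEEDED;
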
88396diff --git a/kernel/sysctl.c b/kernel/sysctl.c
88397index 34a6047..5665aa7 100644
88398--- a/kernel/sysctl.c
88399+++ b/kernel/sysctl.c
88400@@ -93,7 +93,6 @@
88401
88402
88403 #if defined(CONFIG_SYSCTL)
88404-
88405 /* External variables not in a header file. */
88406 extern int sysctl_overcommit_memory;
88407 extern int sysctl_overcommit_ratio;
88408@@ -119,17 +118,18 @@ extern int blk_iopoll_enabled;
88409
88410 /* Constants used for minimum and maximum */
88411 #ifdef CONFIG_LOCKUP_DETECTOR
88412-static int sixty = 60;
88413+static int sixty __read_only = 60;
88414 #endif
88415
88416-static int zero;
88417-static int __maybe_unused one = 1;
88418-static int __maybe_unused two = 2;
88419-static int __maybe_unused three = 3;
88420-static unsigned long one_ul = 1;
88421-static int one_hundred = 100;
88422+static int neg_one __read_only = -1;
88423+static int zero __read_only = 0;
88424+static int __maybe_unused one __read_only = 1;
88425+static int __maybe_unused two __read_only = 2;
88426+static int __maybe_unused three __read_only = 3;
88427+static unsigned long one_ul __read_only = 1;
88428+static int one_hundred __read_only = 100;
88429 #ifdef CONFIG_PRINTK
88430-static int ten_thousand = 10000;
88431+static int ten_thousand __read_only = 10000;
88432 #endif
88433
88434 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
88435@@ -176,10 +176,8 @@ static int proc_taint(struct ctl_table *table, int write,
88436 void __user *buffer, size_t *lenp, loff_t *ppos);
88437 #endif
88438
88439-#ifdef CONFIG_PRINTK
88440 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
88441 void __user *buffer, size_t *lenp, loff_t *ppos);
88442-#endif
88443
88444 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
88445 void __user *buffer, size_t *lenp, loff_t *ppos);
88446@@ -210,6 +208,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
88447
88448 #endif
88449
88450+extern struct ctl_table grsecurity_table[];
88451+
88452 static struct ctl_table kern_table[];
88453 static struct ctl_table vm_table[];
88454 static struct ctl_table fs_table[];
88455@@ -224,6 +224,20 @@ extern struct ctl_table epoll_table[];
88456 int sysctl_legacy_va_layout;
88457 #endif
88458
88459+#ifdef CONFIG_PAX_SOFTMODE
88460+static ctl_table pax_table[] = {
88461+ {
88462+ .procname = "softmode",
88463+ .data = &pax_softmode,
88464+ .maxlen = sizeof(unsigned int),
88465+ .mode = 0600,
88466+ .proc_handler = &proc_dointvec,
88467+ },
88468+
88469+ { }
88470+};
88471+#endif
88472+
88473 /* The default sysctl tables: */
88474
88475 static struct ctl_table sysctl_base_table[] = {
88476@@ -272,6 +286,22 @@ static int max_extfrag_threshold = 1000;
88477 #endif
88478
88479 static struct ctl_table kern_table[] = {
88480+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
88481+ {
88482+ .procname = "grsecurity",
88483+ .mode = 0500,
88484+ .child = grsecurity_table,
88485+ },
88486+#endif
88487+
88488+#ifdef CONFIG_PAX_SOFTMODE
88489+ {
88490+ .procname = "pax",
88491+ .mode = 0500,
88492+ .child = pax_table,
88493+ },
88494+#endif
88495+
88496 {
88497 .procname = "sched_child_runs_first",
88498 .data = &sysctl_sched_child_runs_first,
88499@@ -620,7 +650,7 @@ static struct ctl_table kern_table[] = {
88500 .data = &modprobe_path,
88501 .maxlen = KMOD_PATH_LEN,
88502 .mode = 0644,
88503- .proc_handler = proc_dostring,
88504+ .proc_handler = proc_dostring_modpriv,
88505 },
88506 {
88507 .procname = "modules_disabled",
88508@@ -787,16 +817,20 @@ static struct ctl_table kern_table[] = {
88509 .extra1 = &zero,
88510 .extra2 = &one,
88511 },
88512+#endif
88513 {
88514 .procname = "kptr_restrict",
88515 .data = &kptr_restrict,
88516 .maxlen = sizeof(int),
88517 .mode = 0644,
88518 .proc_handler = proc_dointvec_minmax_sysadmin,
88519+#ifdef CONFIG_GRKERNSEC_HIDESYM
88520+ .extra1 = &two,
88521+#else
88522 .extra1 = &zero,
88523+#endif
88524 .extra2 = &two,
88525 },
88526-#endif
88527 {
88528 .procname = "ngroups_max",
88529 .data = &ngroups_max,
88530@@ -1039,10 +1073,17 @@ static struct ctl_table kern_table[] = {
88531 */
88532 {
88533 .procname = "perf_event_paranoid",
88534- .data = &sysctl_perf_event_paranoid,
88535- .maxlen = sizeof(sysctl_perf_event_paranoid),
88536+ .data = &sysctl_perf_event_legitimately_concerned,
88537+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
88538 .mode = 0644,
88539- .proc_handler = proc_dointvec,
88540+ /* go ahead, be a hero */
88541+ .proc_handler = proc_dointvec_minmax_sysadmin,
88542+ .extra1 = &neg_one,
88543+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
88544+ .extra2 = &three,
88545+#else
88546+ .extra2 = &two,
88547+#endif
88548 },
88549 {
88550 .procname = "perf_event_mlock_kb",
88551@@ -1306,6 +1347,13 @@ static struct ctl_table vm_table[] = {
88552 .proc_handler = proc_dointvec_minmax,
88553 .extra1 = &zero,
88554 },
88555+ {
88556+ .procname = "heap_stack_gap",
88557+ .data = &sysctl_heap_stack_gap,
88558+ .maxlen = sizeof(sysctl_heap_stack_gap),
88559+ .mode = 0644,
88560+ .proc_handler = proc_doulongvec_minmax,
88561+ },
88562 #else
88563 {
88564 .procname = "nr_trim_pages",
88565@@ -1770,6 +1818,16 @@ int proc_dostring(struct ctl_table *table, int write,
88566 buffer, lenp, ppos);
88567 }
88568
88569+int proc_dostring_modpriv(struct ctl_table *table, int write,
88570+ void __user *buffer, size_t *lenp, loff_t *ppos)
88571+{
88572+ if (write && !capable(CAP_SYS_MODULE))
88573+ return -EPERM;
88574+
88575+ return _proc_do_string(table->data, table->maxlen, write,
88576+ buffer, lenp, ppos);
88577+}
88578+
88579 static size_t proc_skip_spaces(char **buf)
88580 {
88581 size_t ret;
88582@@ -1875,6 +1933,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
88583 len = strlen(tmp);
88584 if (len > *size)
88585 len = *size;
88586+ if (len > sizeof(tmp))
88587+ len = sizeof(tmp);
88588 if (copy_to_user(*buf, tmp, len))
88589 return -EFAULT;
88590 *size -= len;
88591@@ -2039,7 +2099,7 @@ int proc_dointvec(struct ctl_table *table, int write,
88592 static int proc_taint(struct ctl_table *table, int write,
88593 void __user *buffer, size_t *lenp, loff_t *ppos)
88594 {
88595- struct ctl_table t;
88596+ ctl_table_no_const t;
88597 unsigned long tmptaint = get_taint();
88598 int err;
88599
88600@@ -2067,7 +2127,6 @@ static int proc_taint(struct ctl_table *table, int write,
88601 return err;
88602 }
88603
88604-#ifdef CONFIG_PRINTK
88605 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
88606 void __user *buffer, size_t *lenp, loff_t *ppos)
88607 {
88608@@ -2076,7 +2135,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
88609
88610 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
88611 }
88612-#endif
88613
88614 struct do_proc_dointvec_minmax_conv_param {
88615 int *min;
88616@@ -2623,6 +2681,12 @@ int proc_dostring(struct ctl_table *table, int write,
88617 return -ENOSYS;
88618 }
88619
88620+int proc_dostring_modpriv(struct ctl_table *table, int write,
88621+ void __user *buffer, size_t *lenp, loff_t *ppos)
88622+{
88623+ return -ENOSYS;
88624+}
88625+
88626 int proc_dointvec(struct ctl_table *table, int write,
88627 void __user *buffer, size_t *lenp, loff_t *ppos)
88628 {
88629@@ -2679,5 +2743,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
88630 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
88631 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
88632 EXPORT_SYMBOL(proc_dostring);
88633+EXPORT_SYMBOL(proc_dostring_modpriv);
88634 EXPORT_SYMBOL(proc_doulongvec_minmax);
88635 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
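
The sysctl table grows two root-only (mode 0500) directories, kernel/grsecurity and kernel/pax, and tightens several stock knobs: modprobe_path becomes writable only with CAP_SYS_MODULE through the new proc_dostring_modpriv() handler; perf_event_paranoid is renamed, gated behind proc_dointvec_minmax_sysadmin (which is accordingly pulled out of its CONFIG_PRINTK guard), and gains level 3 -- no perf at all for unprivileged users -- under PERF_HARDEN; the min/max constants feeding the handlers move to __read_only so they cannot be patched at runtime; proc_put_long() picks up a defensive clamp on its temporary buffer; and under HIDESYM the floor of kptr_restrict rises from 0 to 2, the level at which %pK prints zeros for everyone. A model of the %pK decision that floor pins (simplified from the vsprintf logic of this era):

	static unsigned long kptr_filter(unsigned long ptr, int kptr_restrict,
					 int has_cap_syslog)
	{
		if (kptr_restrict >= 2)
			return 0;	/* hidden from everyone, root included */
		if (kptr_restrict == 1 && !has_cap_syslog)
			return 0;	/* hidden from unprivileged readers */
		return ptr;		/* level 0: printed as-is */
	}
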
88636diff --git a/kernel/taskstats.c b/kernel/taskstats.c
88637index 13d2f7c..c93d0b0 100644
88638--- a/kernel/taskstats.c
88639+++ b/kernel/taskstats.c
88640@@ -28,9 +28,12 @@
88641 #include <linux/fs.h>
88642 #include <linux/file.h>
88643 #include <linux/pid_namespace.h>
88644+#include <linux/grsecurity.h>
88645 #include <net/genetlink.h>
88646 #include <linux/atomic.h>
88647
88648+extern int gr_is_taskstats_denied(int pid);
88649+
88650 /*
88651 * Maximum length of a cpumask that can be specified in
88652 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
88653@@ -576,6 +579,9 @@ err:
88654
88655 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
88656 {
88657+ if (gr_is_taskstats_denied(current->pid))
88658+ return -EACCES;
88659+
88660 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
88661 return cmd_attr_register_cpumask(info);
88662 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
88663diff --git a/kernel/time.c b/kernel/time.c
88664index 7c7964c..2a0d412 100644
88665--- a/kernel/time.c
88666+++ b/kernel/time.c
88667@@ -172,6 +172,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
88668 return error;
88669
88670 if (tz) {
 88671+ /* we log in do_settimeofday(), called below,
 88672+ * so don't log twice */
88673+ if (!tv)
88674+ gr_log_timechange();
88675+
88676 sys_tz = *tz;
88677 update_vsyscall_tz();
88678 if (firsttime) {
88679diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
88680index 88c9c65..7497ebc 100644
88681--- a/kernel/time/alarmtimer.c
88682+++ b/kernel/time/alarmtimer.c
88683@@ -795,7 +795,7 @@ static int __init alarmtimer_init(void)
88684 struct platform_device *pdev;
88685 int error = 0;
88686 int i;
88687- struct k_clock alarm_clock = {
88688+ static struct k_clock alarm_clock = {
88689 .clock_getres = alarm_clock_getres,
88690 .clock_get = alarm_clock_get,
88691 .timer_create = alarm_timer_create,
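
/*
 * Editor's note: the hunk above gives alarm_clock static storage
 * duration instead of a stack slot. A userspace sketch of why that
 * matters whenever a registration API may retain the pointer it is
 * handed: register_ops() here is hypothetical, and a stack-allocated
 * ops struct would dangle as soon as setup() returned.
 */
#include <stdio.h>

struct ops { int (*get)(void); };

static const struct ops *registered;	/* the registry keeps the pointer */

static void register_ops(const struct ops *o) { registered = o; }

static int answer(void) { return 42; }

static void setup(void)
{
	static const struct ops good = { .get = answer };	/* lives forever */
	register_ops(&good);
	/* "struct ops bad = { ... };" here would die with setup()'s frame */
}

int main(void)
{
	setup();
	printf("%d\n", registered->get());	/* safe only because 'good' is static */
	return 0;
}
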
88692diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
88693index 87b4f00..b7f77a7 100644
88694--- a/kernel/time/timekeeping.c
88695+++ b/kernel/time/timekeeping.c
88696@@ -15,6 +15,7 @@
88697 #include <linux/init.h>
88698 #include <linux/mm.h>
88699 #include <linux/sched.h>
88700+#include <linux/grsecurity.h>
88701 #include <linux/syscore_ops.h>
88702 #include <linux/clocksource.h>
88703 #include <linux/jiffies.h>
88704@@ -500,6 +501,8 @@ int do_settimeofday(const struct timespec *tv)
88705 if (!timespec_valid_strict(tv))
88706 return -EINVAL;
88707
88708+ gr_log_timechange();
88709+
88710 raw_spin_lock_irqsave(&timekeeper_lock, flags);
88711 write_seqcount_begin(&timekeeper_seq);
88712
88713diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
88714index 61ed862..3b52c65 100644
88715--- a/kernel/time/timer_list.c
88716+++ b/kernel/time/timer_list.c
88717@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
88718
88719 static void print_name_offset(struct seq_file *m, void *sym)
88720 {
88721+#ifdef CONFIG_GRKERNSEC_HIDESYM
88722+ SEQ_printf(m, "<%p>", NULL);
88723+#else
88724 char symname[KSYM_NAME_LEN];
88725
88726 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
88727 SEQ_printf(m, "<%pK>", sym);
88728 else
88729 SEQ_printf(m, "%s", symname);
88730+#endif
88731 }
88732
88733 static void
88734@@ -119,7 +123,11 @@ next_one:
88735 static void
88736 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
88737 {
88738+#ifdef CONFIG_GRKERNSEC_HIDESYM
88739+ SEQ_printf(m, " .base: %p\n", NULL);
88740+#else
88741 SEQ_printf(m, " .base: %pK\n", base);
88742+#endif
88743 SEQ_printf(m, " .index: %d\n",
88744 base->index);
88745 SEQ_printf(m, " .resolution: %Lu nsecs\n",
88746@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
88747 {
88748 struct proc_dir_entry *pe;
88749
88750+#ifdef CONFIG_GRKERNSEC_PROC_ADD
88751+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
88752+#else
88753 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
88754+#endif
88755 if (!pe)
88756 return -ENOMEM;
88757 return 0;
88758diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
88759index 1fb08f2..ca4bb1e 100644
88760--- a/kernel/time/timer_stats.c
88761+++ b/kernel/time/timer_stats.c
88762@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
88763 static unsigned long nr_entries;
88764 static struct entry entries[MAX_ENTRIES];
88765
88766-static atomic_t overflow_count;
88767+static atomic_unchecked_t overflow_count;
88768
88769 /*
88770 * The entries are in a hash-table, for fast lookup:
88771@@ -140,7 +140,7 @@ static void reset_entries(void)
88772 nr_entries = 0;
88773 memset(entries, 0, sizeof(entries));
88774 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
88775- atomic_set(&overflow_count, 0);
88776+ atomic_set_unchecked(&overflow_count, 0);
88777 }
88778
88779 static struct entry *alloc_entry(void)
88780@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
88781 if (likely(entry))
88782 entry->count++;
88783 else
88784- atomic_inc(&overflow_count);
88785+ atomic_inc_unchecked(&overflow_count);
88786
88787 out_unlock:
88788 raw_spin_unlock_irqrestore(lock, flags);
88789@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
88790
88791 static void print_name_offset(struct seq_file *m, unsigned long addr)
88792 {
88793+#ifdef CONFIG_GRKERNSEC_HIDESYM
88794+ seq_printf(m, "<%p>", NULL);
88795+#else
88796 char symname[KSYM_NAME_LEN];
88797
88798 if (lookup_symbol_name(addr, symname) < 0)
88799- seq_printf(m, "<%p>", (void *)addr);
88800+ seq_printf(m, "<%pK>", (void *)addr);
88801 else
88802 seq_printf(m, "%s", symname);
88803+#endif
88804 }
88805
88806 static int tstats_show(struct seq_file *m, void *v)
88807@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
88808
88809 seq_puts(m, "Timer Stats Version: v0.3\n");
88810 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
88811- if (atomic_read(&overflow_count))
88812- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
88813+ if (atomic_read_unchecked(&overflow_count))
88814+ seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
88815 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
88816
88817 for (i = 0; i < nr_entries; i++) {
88818@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
88819 {
88820 struct proc_dir_entry *pe;
88821
88822+#ifdef CONFIG_GRKERNSEC_PROC_ADD
88823+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
88824+#else
88825 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
88826+#endif
88827 if (!pe)
88828 return -ENOMEM;
88829 return 0;
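
/*
 * Editor's note: the _unchecked conversions above reflect the split
 * that PaX's REFCOUNT hardening assumes: reference counts must never
 * wrap (a wrap is a use-after-free primitive), while pure statistics
 * such as overflow_count may wrap harmlessly and are opted out of the
 * instrumentation. A C11 sketch of the "statistic" side, with
 * stdatomic standing in for the kernel's atomic_unchecked_t:
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint stats_dropped;	/* wraparound here is tolerable */

static void record_drop(void)
{
	/* unsigned overflow is well defined: the counter simply wraps */
	atomic_fetch_add_explicit(&stats_dropped, 1, memory_order_relaxed);
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		record_drop();
	printf("dropped: %u\n",
	       atomic_load_explicit(&stats_dropped, memory_order_relaxed));
	return 0;
}
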
88830diff --git a/kernel/timer.c b/kernel/timer.c
88831index accfd24..e00f0c0 100644
88832--- a/kernel/timer.c
88833+++ b/kernel/timer.c
88834@@ -1366,7 +1366,7 @@ void update_process_times(int user_tick)
88835 /*
88836 * This function runs timers and the timer-tq in bottom half context.
88837 */
88838-static void run_timer_softirq(struct softirq_action *h)
88839+static __latent_entropy void run_timer_softirq(void)
88840 {
88841 struct tvec_base *base = __this_cpu_read(tvec_bases);
88842
88843@@ -1429,7 +1429,7 @@ static void process_timeout(unsigned long __data)
88844 *
88845 * In all cases the return value is guaranteed to be non-negative.
88846 */
88847-signed long __sched schedule_timeout(signed long timeout)
88848+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
88849 {
88850 struct timer_list timer;
88851 unsigned long expire;
88852diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
88853index f785aef..59f1b18 100644
88854--- a/kernel/trace/blktrace.c
88855+++ b/kernel/trace/blktrace.c
88856@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
88857 struct blk_trace *bt = filp->private_data;
88858 char buf[16];
88859
88860- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
88861+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
88862
88863 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
88864 }
88865@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
88866 return 1;
88867
88868 bt = buf->chan->private_data;
88869- atomic_inc(&bt->dropped);
88870+ atomic_inc_unchecked(&bt->dropped);
88871 return 0;
88872 }
88873
88874@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
88875
88876 bt->dir = dir;
88877 bt->dev = dev;
88878- atomic_set(&bt->dropped, 0);
88879+ atomic_set_unchecked(&bt->dropped, 0);
88880 INIT_LIST_HEAD(&bt->running_list);
88881
88882 ret = -EIO;
88883diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
88884index 72a0f81..0bbfd090 100644
88885--- a/kernel/trace/ftrace.c
88886+++ b/kernel/trace/ftrace.c
88887@@ -1944,12 +1944,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
88888 if (unlikely(ftrace_disabled))
88889 return 0;
88890
88891+ ret = ftrace_arch_code_modify_prepare();
88892+ FTRACE_WARN_ON(ret);
88893+ if (ret)
88894+ return 0;
88895+
88896 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
88897+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
88898 if (ret) {
88899 ftrace_bug(ret, ip);
88900- return 0;
88901 }
88902- return 1;
88903+ return ret ? 0 : 1;
88904 }
88905
88906 /*
88907@@ -4119,8 +4124,10 @@ static int ftrace_process_locs(struct module *mod,
88908 if (!count)
88909 return 0;
88910
88911+ pax_open_kernel();
88912 sort(start, count, sizeof(*start),
88913 ftrace_cmp_ips, ftrace_swap_ips);
88914+ pax_close_kernel();
88915
88916 start_pg = ftrace_allocate_pages(count);
88917 if (!start_pg)
88918@@ -4851,8 +4858,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
88919 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
88920
88921 static int ftrace_graph_active;
88922-static struct notifier_block ftrace_suspend_notifier;
88923-
88924 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
88925 {
88926 return 0;
88927@@ -5003,6 +5008,10 @@ static struct ftrace_ops fgraph_ops __read_mostly = {
88928 FTRACE_OPS_FL_RECURSION_SAFE,
88929 };
88930
88931+static struct notifier_block ftrace_suspend_notifier = {
88932+ .notifier_call = ftrace_suspend_notifier_call
88933+};
88934+
88935 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
88936 trace_func_graph_ent_t entryfunc)
88937 {
88938@@ -5016,7 +5025,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
88939 goto out;
88940 }
88941
88942- ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
88943 register_pm_notifier(&ftrace_suspend_notifier);
88944
88945 ftrace_graph_active++;
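
/*
 * Editor's note: the ftrace_code_disable() hunk above follows a
 * general "prepare / modify / always post-process" shape: the
 * post-processing step must run even when the modification fails,
 * and the failure must still be propagated. A self-contained sketch
 * with stubbed-out helpers (all names hypothetical):
 */
#include <stdio.h>

static int text_prepare(void)       { return 0; } /* stub: make text writable */
static int do_modification(void)    { return 0; } /* stub: patch the code */
static void text_post_process(void) { }           /* stub: restore protections */

static int modify_code_safely(void)
{
	int ret = text_prepare();

	if (ret)
		return ret;		/* nothing to undo yet */

	ret = do_modification();	/* may fail ... */
	text_post_process();		/* ... but cleanup always runs */
	return ret;			/* propagate, never swallow */
}

int main(void) { return modify_code_safely(); }
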
88946diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
88947index cc2f66f..05edd54 100644
88948--- a/kernel/trace/ring_buffer.c
88949+++ b/kernel/trace/ring_buffer.c
88950@@ -352,9 +352,9 @@ struct buffer_data_page {
88951 */
88952 struct buffer_page {
88953 struct list_head list; /* list of buffer pages */
88954- local_t write; /* index for next write */
88955+ local_unchecked_t write; /* index for next write */
88956 unsigned read; /* index for next read */
88957- local_t entries; /* entries on this page */
88958+ local_unchecked_t entries; /* entries on this page */
88959 unsigned long real_end; /* real end of data */
88960 struct buffer_data_page *page; /* Actual data page */
88961 };
88962@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
88963 unsigned long last_overrun;
88964 local_t entries_bytes;
88965 local_t entries;
88966- local_t overrun;
88967- local_t commit_overrun;
88968+ local_unchecked_t overrun;
88969+ local_unchecked_t commit_overrun;
88970 local_t dropped_events;
88971 local_t committing;
88972 local_t commits;
88973@@ -992,8 +992,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
88974 *
88975 * We add a counter to the write field to denote this.
88976 */
88977- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
88978- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
88979+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
88980+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
88981
88982 /*
88983 * Just make sure we have seen our old_write and synchronize
88984@@ -1021,8 +1021,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
88985 * cmpxchg to only update if an interrupt did not already
88986 * do it for us. If the cmpxchg fails, we don't care.
88987 */
88988- (void)local_cmpxchg(&next_page->write, old_write, val);
88989- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
88990+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
88991+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
88992
88993 /*
88994 * No need to worry about races with clearing out the commit.
88995@@ -1386,12 +1386,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
88996
88997 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
88998 {
88999- return local_read(&bpage->entries) & RB_WRITE_MASK;
89000+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
89001 }
89002
89003 static inline unsigned long rb_page_write(struct buffer_page *bpage)
89004 {
89005- return local_read(&bpage->write) & RB_WRITE_MASK;
89006+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
89007 }
89008
89009 static int
89010@@ -1486,7 +1486,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
89011 * bytes consumed in ring buffer from here.
89012 * Increment overrun to account for the lost events.
89013 */
89014- local_add(page_entries, &cpu_buffer->overrun);
89015+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
89016 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
89017 }
89018
89019@@ -2064,7 +2064,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
89020 * it is our responsibility to update
89021 * the counters.
89022 */
89023- local_add(entries, &cpu_buffer->overrun);
89024+ local_add_unchecked(entries, &cpu_buffer->overrun);
89025 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
89026
89027 /*
89028@@ -2214,7 +2214,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
89029 if (tail == BUF_PAGE_SIZE)
89030 tail_page->real_end = 0;
89031
89032- local_sub(length, &tail_page->write);
89033+ local_sub_unchecked(length, &tail_page->write);
89034 return;
89035 }
89036
89037@@ -2249,7 +2249,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
89038 rb_event_set_padding(event);
89039
89040 /* Set the write back to the previous setting */
89041- local_sub(length, &tail_page->write);
89042+ local_sub_unchecked(length, &tail_page->write);
89043 return;
89044 }
89045
89046@@ -2261,7 +2261,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
89047
89048 /* Set write to end of buffer */
89049 length = (tail + length) - BUF_PAGE_SIZE;
89050- local_sub(length, &tail_page->write);
89051+ local_sub_unchecked(length, &tail_page->write);
89052 }
89053
89054 /*
89055@@ -2287,7 +2287,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
89056 * about it.
89057 */
89058 if (unlikely(next_page == commit_page)) {
89059- local_inc(&cpu_buffer->commit_overrun);
89060+ local_inc_unchecked(&cpu_buffer->commit_overrun);
89061 goto out_reset;
89062 }
89063
89064@@ -2343,7 +2343,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
89065 cpu_buffer->tail_page) &&
89066 (cpu_buffer->commit_page ==
89067 cpu_buffer->reader_page))) {
89068- local_inc(&cpu_buffer->commit_overrun);
89069+ local_inc_unchecked(&cpu_buffer->commit_overrun);
89070 goto out_reset;
89071 }
89072 }
89073@@ -2391,7 +2391,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
89074 length += RB_LEN_TIME_EXTEND;
89075
89076 tail_page = cpu_buffer->tail_page;
89077- write = local_add_return(length, &tail_page->write);
89078+ write = local_add_return_unchecked(length, &tail_page->write);
89079
89080 /* set write to only the index of the write */
89081 write &= RB_WRITE_MASK;
89082@@ -2408,7 +2408,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
89083 kmemcheck_annotate_bitfield(event, bitfield);
89084 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
89085
89086- local_inc(&tail_page->entries);
89087+ local_inc_unchecked(&tail_page->entries);
89088
89089 /*
89090 * If this is the first commit on the page, then update
89091@@ -2441,7 +2441,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
89092
89093 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
89094 unsigned long write_mask =
89095- local_read(&bpage->write) & ~RB_WRITE_MASK;
89096+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
89097 unsigned long event_length = rb_event_length(event);
89098 /*
89099 * This is on the tail page. It is possible that
89100@@ -2451,7 +2451,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
89101 */
89102 old_index += write_mask;
89103 new_index += write_mask;
89104- index = local_cmpxchg(&bpage->write, old_index, new_index);
89105+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
89106 if (index == old_index) {
89107 /* update counters */
89108 local_sub(event_length, &cpu_buffer->entries_bytes);
89109@@ -2843,7 +2843,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
89110
89111 /* Do the likely case first */
89112 if (likely(bpage->page == (void *)addr)) {
89113- local_dec(&bpage->entries);
89114+ local_dec_unchecked(&bpage->entries);
89115 return;
89116 }
89117
89118@@ -2855,7 +2855,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
89119 start = bpage;
89120 do {
89121 if (bpage->page == (void *)addr) {
89122- local_dec(&bpage->entries);
89123+ local_dec_unchecked(&bpage->entries);
89124 return;
89125 }
89126 rb_inc_page(cpu_buffer, &bpage);
89127@@ -3139,7 +3139,7 @@ static inline unsigned long
89128 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
89129 {
89130 return local_read(&cpu_buffer->entries) -
89131- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
89132+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
89133 }
89134
89135 /**
89136@@ -3228,7 +3228,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
89137 return 0;
89138
89139 cpu_buffer = buffer->buffers[cpu];
89140- ret = local_read(&cpu_buffer->overrun);
89141+ ret = local_read_unchecked(&cpu_buffer->overrun);
89142
89143 return ret;
89144 }
89145@@ -3251,7 +3251,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
89146 return 0;
89147
89148 cpu_buffer = buffer->buffers[cpu];
89149- ret = local_read(&cpu_buffer->commit_overrun);
89150+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
89151
89152 return ret;
89153 }
89154@@ -3336,7 +3336,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
89155 /* if you care about this being correct, lock the buffer */
89156 for_each_buffer_cpu(buffer, cpu) {
89157 cpu_buffer = buffer->buffers[cpu];
89158- overruns += local_read(&cpu_buffer->overrun);
89159+ overruns += local_read_unchecked(&cpu_buffer->overrun);
89160 }
89161
89162 return overruns;
89163@@ -3512,8 +3512,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
89164 /*
89165 * Reset the reader page to size zero.
89166 */
89167- local_set(&cpu_buffer->reader_page->write, 0);
89168- local_set(&cpu_buffer->reader_page->entries, 0);
89169+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
89170+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
89171 local_set(&cpu_buffer->reader_page->page->commit, 0);
89172 cpu_buffer->reader_page->real_end = 0;
89173
89174@@ -3547,7 +3547,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
89175 * want to compare with the last_overrun.
89176 */
89177 smp_mb();
89178- overwrite = local_read(&(cpu_buffer->overrun));
89179+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
89180
89181 /*
89182 * Here's the tricky part.
89183@@ -4117,8 +4117,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
89184
89185 cpu_buffer->head_page
89186 = list_entry(cpu_buffer->pages, struct buffer_page, list);
89187- local_set(&cpu_buffer->head_page->write, 0);
89188- local_set(&cpu_buffer->head_page->entries, 0);
89189+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
89190+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
89191 local_set(&cpu_buffer->head_page->page->commit, 0);
89192
89193 cpu_buffer->head_page->read = 0;
89194@@ -4128,14 +4128,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
89195
89196 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
89197 INIT_LIST_HEAD(&cpu_buffer->new_pages);
89198- local_set(&cpu_buffer->reader_page->write, 0);
89199- local_set(&cpu_buffer->reader_page->entries, 0);
89200+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
89201+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
89202 local_set(&cpu_buffer->reader_page->page->commit, 0);
89203 cpu_buffer->reader_page->read = 0;
89204
89205 local_set(&cpu_buffer->entries_bytes, 0);
89206- local_set(&cpu_buffer->overrun, 0);
89207- local_set(&cpu_buffer->commit_overrun, 0);
89208+ local_set_unchecked(&cpu_buffer->overrun, 0);
89209+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
89210 local_set(&cpu_buffer->dropped_events, 0);
89211 local_set(&cpu_buffer->entries, 0);
89212 local_set(&cpu_buffer->committing, 0);
89213@@ -4540,8 +4540,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
89214 rb_init_page(bpage);
89215 bpage = reader->page;
89216 reader->page = *data_page;
89217- local_set(&reader->write, 0);
89218- local_set(&reader->entries, 0);
89219+ local_set_unchecked(&reader->write, 0);
89220+ local_set_unchecked(&reader->entries, 0);
89221 reader->read = 0;
89222 *data_page = bpage;
89223
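
/*
 * Editor's note: why the ring buffer's write/entries fields tolerate
 * wraparound. Only the low 20 bits form the byte index (RB_WRITE_MASK);
 * the bits above count nested writers (RB_WRITE_INTCNT), so the raw
 * value grows without bound and eventually wraps the underlying word,
 * which is exactly what the _unchecked conversions above allow for.
 */
#include <stdio.h>

#define RB_WRITE_MASK	0x000fffffu		/* low 20 bits: byte index */
#define RB_WRITE_INTCNT	(RB_WRITE_MASK + 1)	/* one unit of nesting count */

int main(void)
{
	unsigned int write = 0;

	write += RB_WRITE_INTCNT;	/* a nested writer entered */
	write += 0x40;			/* it reserved 0x40 bytes */

	printf("index=%#x nesting=%u\n",
	       write & RB_WRITE_MASK, write >> 20);
	return 0;
}
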
89224diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
89225index 9d20cd9..221d816 100644
89226--- a/kernel/trace/trace.c
89227+++ b/kernel/trace/trace.c
89228@@ -3346,7 +3346,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
89229 return 0;
89230 }
89231
89232-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
89233+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
89234 {
89235 /* do nothing if flag is already set */
89236 if (!!(trace_flags & mask) == !!enabled)
89237diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
89238index ea189e0..a5b48c4 100644
89239--- a/kernel/trace/trace.h
89240+++ b/kernel/trace/trace.h
89241@@ -1040,7 +1040,7 @@ extern const char *__stop___tracepoint_str[];
89242 void trace_printk_init_buffers(void);
89243 void trace_printk_start_comm(void);
89244 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
89245-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
89246+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
89247
89248 /*
89249 * Normal trace_printk() and friends allocates special buffers
89250diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
89251index 26dc348..8708ca7 100644
89252--- a/kernel/trace/trace_clock.c
89253+++ b/kernel/trace/trace_clock.c
89254@@ -123,7 +123,7 @@ u64 notrace trace_clock_global(void)
89255 return now;
89256 }
89257
89258-static atomic64_t trace_counter;
89259+static atomic64_unchecked_t trace_counter;
89260
89261 /*
89262 * trace_clock_counter(): simply an atomic counter.
89263@@ -132,5 +132,5 @@ static atomic64_t trace_counter;
89264 */
89265 u64 notrace trace_clock_counter(void)
89266 {
89267- return atomic64_add_return(1, &trace_counter);
89268+ return atomic64_inc_return_unchecked(&trace_counter);
89269 }
89270diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
89271index a11800a..3dafde5 100644
89272--- a/kernel/trace/trace_events.c
89273+++ b/kernel/trace/trace_events.c
89274@@ -1681,7 +1681,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
89275 return 0;
89276 }
89277
89278-struct ftrace_module_file_ops;
89279 static void __add_event_to_tracers(struct ftrace_event_call *call);
89280
89281 /* Add an additional event_call dynamically */
89282diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
89283index 0abd9b8..6a663a2 100644
89284--- a/kernel/trace/trace_mmiotrace.c
89285+++ b/kernel/trace/trace_mmiotrace.c
89286@@ -24,7 +24,7 @@ struct header_iter {
89287 static struct trace_array *mmio_trace_array;
89288 static bool overrun_detected;
89289 static unsigned long prev_overruns;
89290-static atomic_t dropped_count;
89291+static atomic_unchecked_t dropped_count;
89292
89293 static void mmio_reset_data(struct trace_array *tr)
89294 {
89295@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
89296
89297 static unsigned long count_overruns(struct trace_iterator *iter)
89298 {
89299- unsigned long cnt = atomic_xchg(&dropped_count, 0);
89300+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
89301 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
89302
89303 if (over > prev_overruns)
89304@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
89305 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
89306 sizeof(*entry), 0, pc);
89307 if (!event) {
89308- atomic_inc(&dropped_count);
89309+ atomic_inc_unchecked(&dropped_count);
89310 return;
89311 }
89312 entry = ring_buffer_event_data(event);
89313@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
89314 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
89315 sizeof(*entry), 0, pc);
89316 if (!event) {
89317- atomic_inc(&dropped_count);
89318+ atomic_inc_unchecked(&dropped_count);
89319 return;
89320 }
89321 entry = ring_buffer_event_data(event);
89322diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
89323index ed32284..884d6c3 100644
89324--- a/kernel/trace/trace_output.c
89325+++ b/kernel/trace/trace_output.c
89326@@ -294,7 +294,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
89327
89328 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
89329 if (!IS_ERR(p)) {
89330- p = mangle_path(s->buffer + s->len, p, "\n");
89331+ p = mangle_path(s->buffer + s->len, p, "\n\\");
89332 if (p) {
89333 s->len = p - s->buffer;
89334 return 1;
89335@@ -908,14 +908,16 @@ int register_ftrace_event(struct trace_event *event)
89336 goto out;
89337 }
89338
89339+ pax_open_kernel();
89340 if (event->funcs->trace == NULL)
89341- event->funcs->trace = trace_nop_print;
89342+ *(void **)&event->funcs->trace = trace_nop_print;
89343 if (event->funcs->raw == NULL)
89344- event->funcs->raw = trace_nop_print;
89345+ *(void **)&event->funcs->raw = trace_nop_print;
89346 if (event->funcs->hex == NULL)
89347- event->funcs->hex = trace_nop_print;
89348+ *(void **)&event->funcs->hex = trace_nop_print;
89349 if (event->funcs->binary == NULL)
89350- event->funcs->binary = trace_nop_print;
89351+ *(void **)&event->funcs->binary = trace_nop_print;
89352+ pax_close_kernel();
89353
89354 key = event->type & (EVENT_HASHSIZE - 1);
89355
89356diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
89357index b20428c..4845a10 100644
89358--- a/kernel/trace/trace_stack.c
89359+++ b/kernel/trace/trace_stack.c
89360@@ -68,7 +68,7 @@ check_stack(unsigned long ip, unsigned long *stack)
89361 return;
89362
89363 /* we do not handle interrupt stacks yet */
89364- if (!object_is_on_stack(stack))
89365+ if (!object_starts_on_stack(stack))
89366 return;
89367
89368 local_irq_save(flags);
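
/*
 * Editor's note: the rename above is about honesty, not behavior.
 * The helper only proves that the *first byte* of an object lies on
 * the current task's stack; the object may still extend past the
 * stack's end. A sketch mirroring the mainline check:
 */
#include <linux/sched.h>

static inline int object_starts_on_stack(const void *obj)
{
	const void *stack = task_stack_page(current);	/* base of stack area */

	/* start address within [stack, stack + THREAD_SIZE) */
	return obj >= stack && obj < stack + THREAD_SIZE;
}
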
89369diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
89370index 240fb62..583473e 100644
89371--- a/kernel/user_namespace.c
89372+++ b/kernel/user_namespace.c
89373@@ -82,6 +82,21 @@ int create_user_ns(struct cred *new)
89374 !kgid_has_mapping(parent_ns, group))
89375 return -EPERM;
89376
89377+#ifdef CONFIG_GRKERNSEC
89378+ /*
89379+ * This doesn't really inspire confidence:
89380+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
89381+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
 89382+ * Increases kernel attack surface in areas developers
 89383+ * previously cared little about ("low importance due
 89384+ * to requiring 'root' capability")
89385+ * To be removed when this code receives *proper* review
89386+ */
89387+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
89388+ !capable(CAP_SETGID))
89389+ return -EPERM;
89390+#endif
89391+
89392 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
89393 if (!ns)
89394 return -ENOMEM;
89395@@ -866,7 +881,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
89396 if (atomic_read(&current->mm->mm_users) > 1)
89397 return -EINVAL;
89398
89399- if (current->fs->users != 1)
89400+ if (atomic_read(&current->fs->users) != 1)
89401 return -EINVAL;
89402
89403 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
89404diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
89405index 4f69f9a..7c6f8f8 100644
89406--- a/kernel/utsname_sysctl.c
89407+++ b/kernel/utsname_sysctl.c
89408@@ -47,7 +47,7 @@ static void put_uts(ctl_table *table, int write, void *which)
89409 static int proc_do_uts_string(ctl_table *table, int write,
89410 void __user *buffer, size_t *lenp, loff_t *ppos)
89411 {
89412- struct ctl_table uts_table;
89413+ ctl_table_no_const uts_table;
89414 int r;
89415 memcpy(&uts_table, table, sizeof(uts_table));
89416 uts_table.data = get_uts(table, write);
89417diff --git a/kernel/watchdog.c b/kernel/watchdog.c
89418index 4431610..4265616 100644
89419--- a/kernel/watchdog.c
89420+++ b/kernel/watchdog.c
89421@@ -475,7 +475,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
89422 static void watchdog_nmi_disable(unsigned int cpu) { return; }
89423 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
89424
89425-static struct smp_hotplug_thread watchdog_threads = {
89426+static struct smp_hotplug_thread watchdog_threads __read_only = {
89427 .store = &softlockup_watchdog,
89428 .thread_should_run = watchdog_should_run,
89429 .thread_fn = watchdog,
89430diff --git a/kernel/workqueue.c b/kernel/workqueue.c
89431index b010eac..e4bda78 100644
89432--- a/kernel/workqueue.c
89433+++ b/kernel/workqueue.c
89434@@ -4671,7 +4671,7 @@ static void rebind_workers(struct worker_pool *pool)
89435 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
89436 worker_flags |= WORKER_REBOUND;
89437 worker_flags &= ~WORKER_UNBOUND;
89438- ACCESS_ONCE(worker->flags) = worker_flags;
89439+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
89440 }
89441
89442 spin_unlock_irq(&pool->lock);
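
/*
 * Editor's note: grsecurity splits the classic ACCESS_ONCE() into a
 * read-only and a read-write flavor so that stray writes through the
 * const-qualified form fail to compile; on this editor's reading the
 * definitions are roughly:
 */
#define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))	/* loads only */
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))		/* stores allowed */
/*
 * The store to worker->flags in the hunk above is a deliberate write,
 * hence the _RW form.
 */
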
89443diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
89444index db25707..8b16430 100644
89445--- a/lib/Kconfig.debug
89446+++ b/lib/Kconfig.debug
89447@@ -845,7 +845,7 @@ config DEBUG_MUTEXES
89448
89449 config DEBUG_WW_MUTEX_SLOWPATH
89450 bool "Wait/wound mutex debugging: Slowpath testing"
89451- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
89452+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
89453 select DEBUG_LOCK_ALLOC
89454 select DEBUG_SPINLOCK
89455 select DEBUG_MUTEXES
89456@@ -858,7 +858,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
89457
89458 config DEBUG_LOCK_ALLOC
89459 bool "Lock debugging: detect incorrect freeing of live locks"
89460- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
89461+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
89462 select DEBUG_SPINLOCK
89463 select DEBUG_MUTEXES
89464 select LOCKDEP
89465@@ -872,7 +872,7 @@ config DEBUG_LOCK_ALLOC
89466
89467 config PROVE_LOCKING
89468 bool "Lock debugging: prove locking correctness"
89469- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
89470+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
89471 select LOCKDEP
89472 select DEBUG_SPINLOCK
89473 select DEBUG_MUTEXES
89474@@ -923,7 +923,7 @@ config LOCKDEP
89475
89476 config LOCK_STAT
89477 bool "Lock usage statistics"
89478- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
89479+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
89480 select LOCKDEP
89481 select DEBUG_SPINLOCK
89482 select DEBUG_MUTEXES
89483@@ -1385,6 +1385,7 @@ config LATENCYTOP
89484 depends on DEBUG_KERNEL
89485 depends on STACKTRACE_SUPPORT
89486 depends on PROC_FS
89487+ depends on !GRKERNSEC_HIDESYM
89488 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
89489 select KALLSYMS
89490 select KALLSYMS_ALL
89491@@ -1401,7 +1402,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
89492 config DEBUG_STRICT_USER_COPY_CHECKS
89493 bool "Strict user copy size checks"
89494 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
89495- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
89496+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
89497 help
89498 Enabling this option turns a certain set of sanity checks for user
89499 copy operations into compile time failures.
89500@@ -1520,7 +1521,7 @@ endmenu # runtime tests
89501
89502 config PROVIDE_OHCI1394_DMA_INIT
89503 bool "Remote debugging over FireWire early on boot"
89504- depends on PCI && X86
89505+ depends on PCI && X86 && !GRKERNSEC
89506 help
89507 If you want to debug problems which hang or crash the kernel early
89508 on boot and the crashing machine has a FireWire port, you can use
89509@@ -1549,7 +1550,7 @@ config PROVIDE_OHCI1394_DMA_INIT
89510
89511 config FIREWIRE_OHCI_REMOTE_DMA
89512 bool "Remote debugging over FireWire with firewire-ohci"
89513- depends on FIREWIRE_OHCI
89514+ depends on FIREWIRE_OHCI && !GRKERNSEC
89515 help
89516 This option lets you use the FireWire bus for remote debugging
89517 with help of the firewire-ohci driver. It enables unfiltered
89518diff --git a/lib/Makefile b/lib/Makefile
89519index a459c31..3320e82 100644
89520--- a/lib/Makefile
89521+++ b/lib/Makefile
89522@@ -49,7 +49,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
89523 obj-$(CONFIG_BTREE) += btree.o
89524 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
89525 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
89526-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
89527+obj-y += list_debug.o
89528 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
89529
89530 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
89531diff --git a/lib/bitmap.c b/lib/bitmap.c
89532index 06f7e4f..f3cf2b0 100644
89533--- a/lib/bitmap.c
89534+++ b/lib/bitmap.c
89535@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
89536 {
89537 int c, old_c, totaldigits, ndigits, nchunks, nbits;
89538 u32 chunk;
89539- const char __user __force *ubuf = (const char __user __force *)buf;
89540+ const char __user *ubuf = (const char __force_user *)buf;
89541
89542 bitmap_zero(maskp, nmaskbits);
89543
89544@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
89545 {
89546 if (!access_ok(VERIFY_READ, ubuf, ulen))
89547 return -EFAULT;
89548- return __bitmap_parse((const char __force *)ubuf,
89549+ return __bitmap_parse((const char __force_kernel *)ubuf,
89550 ulen, 1, maskp, nmaskbits);
89551
89552 }
89553@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
89554 {
89555 unsigned a, b;
89556 int c, old_c, totaldigits;
89557- const char __user __force *ubuf = (const char __user __force *)buf;
89558+ const char __user *ubuf = (const char __force_user *)buf;
89559 int exp_digit, in_range;
89560
89561 totaldigits = c = 0;
89562@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
89563 {
89564 if (!access_ok(VERIFY_READ, ubuf, ulen))
89565 return -EFAULT;
89566- return __bitmap_parselist((const char __force *)ubuf,
89567+ return __bitmap_parselist((const char __force_kernel *)ubuf,
89568 ulen, 1, maskp, nmaskbits);
89569 }
89570 EXPORT_SYMBOL(bitmap_parselist_user);
89571diff --git a/lib/bug.c b/lib/bug.c
89572index 1686034..a9c00c8 100644
89573--- a/lib/bug.c
89574+++ b/lib/bug.c
89575@@ -134,6 +134,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
89576 return BUG_TRAP_TYPE_NONE;
89577
89578 bug = find_bug(bugaddr);
89579+ if (!bug)
89580+ return BUG_TRAP_TYPE_NONE;
89581
89582 file = NULL;
89583 line = 0;
89584diff --git a/lib/debugobjects.c b/lib/debugobjects.c
89585index e0731c3..ad66444 100644
89586--- a/lib/debugobjects.c
89587+++ b/lib/debugobjects.c
89588@@ -286,7 +286,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
89589 if (limit > 4)
89590 return;
89591
89592- is_on_stack = object_is_on_stack(addr);
89593+ is_on_stack = object_starts_on_stack(addr);
89594 if (is_on_stack == onstack)
89595 return;
89596
89597diff --git a/lib/devres.c b/lib/devres.c
89598index 8235331..5881053 100644
89599--- a/lib/devres.c
89600+++ b/lib/devres.c
89601@@ -81,7 +81,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
89602 void devm_iounmap(struct device *dev, void __iomem *addr)
89603 {
89604 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
89605- (void *)addr));
89606+ (void __force *)addr));
89607 iounmap(addr);
89608 }
89609 EXPORT_SYMBOL(devm_iounmap);
89610@@ -224,7 +224,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
89611 {
89612 ioport_unmap(addr);
89613 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
89614- devm_ioport_map_match, (void *)addr));
89615+ devm_ioport_map_match, (void __force *)addr));
89616 }
89617 EXPORT_SYMBOL(devm_ioport_unmap);
89618 #endif /* CONFIG_HAS_IOPORT */
89619diff --git a/lib/div64.c b/lib/div64.c
89620index 4382ad7..08aa558 100644
89621--- a/lib/div64.c
89622+++ b/lib/div64.c
89623@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
89624 EXPORT_SYMBOL(__div64_32);
89625
89626 #ifndef div_s64_rem
89627-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
89628+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
89629 {
89630 u64 quotient;
89631
89632@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
89633 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
89634 */
89635 #ifndef div64_u64
89636-u64 div64_u64(u64 dividend, u64 divisor)
89637+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
89638 {
89639 u32 high = divisor >> 32;
89640 u64 quot;
89641diff --git a/lib/dma-debug.c b/lib/dma-debug.c
89642index d87a17a..ac0d79a 100644
89643--- a/lib/dma-debug.c
89644+++ b/lib/dma-debug.c
89645@@ -768,7 +768,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
89646
89647 void dma_debug_add_bus(struct bus_type *bus)
89648 {
89649- struct notifier_block *nb;
89650+ notifier_block_no_const *nb;
89651
89652 if (global_disable)
89653 return;
89654@@ -945,7 +945,7 @@ static void check_unmap(struct dma_debug_entry *ref)
89655
89656 static void check_for_stack(struct device *dev, void *addr)
89657 {
89658- if (object_is_on_stack(addr))
89659+ if (object_starts_on_stack(addr))
89660 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
89661 "stack [addr=%p]\n", addr);
89662 }
89663diff --git a/lib/inflate.c b/lib/inflate.c
89664index 013a761..c28f3fc 100644
89665--- a/lib/inflate.c
89666+++ b/lib/inflate.c
89667@@ -269,7 +269,7 @@ static void free(void *where)
89668 malloc_ptr = free_mem_ptr;
89669 }
89670 #else
89671-#define malloc(a) kmalloc(a, GFP_KERNEL)
89672+#define malloc(a) kmalloc((a), GFP_KERNEL)
89673 #define free(a) kfree(a)
89674 #endif
89675
89676diff --git a/lib/ioremap.c b/lib/ioremap.c
89677index 0c9216c..863bd89 100644
89678--- a/lib/ioremap.c
89679+++ b/lib/ioremap.c
89680@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
89681 unsigned long next;
89682
89683 phys_addr -= addr;
89684- pmd = pmd_alloc(&init_mm, pud, addr);
89685+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
89686 if (!pmd)
89687 return -ENOMEM;
89688 do {
89689@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
89690 unsigned long next;
89691
89692 phys_addr -= addr;
89693- pud = pud_alloc(&init_mm, pgd, addr);
89694+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
89695 if (!pud)
89696 return -ENOMEM;
89697 do {
89698diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
89699index bd2bea9..6b3c95e 100644
89700--- a/lib/is_single_threaded.c
89701+++ b/lib/is_single_threaded.c
89702@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
89703 struct task_struct *p, *t;
89704 bool ret;
89705
89706+ if (!mm)
89707+ return true;
89708+
89709 if (atomic_read(&task->signal->live) != 1)
89710 return false;
89711
89712diff --git a/lib/kobject.c b/lib/kobject.c
89713index 5b4b888..c2950f7 100644
89714--- a/lib/kobject.c
89715+++ b/lib/kobject.c
89716@@ -957,9 +957,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
89717
89718
89719 static DEFINE_SPINLOCK(kobj_ns_type_lock);
89720-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
89721+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
89722
89723-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
89724+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
89725 {
89726 enum kobj_ns_type type = ops->type;
89727 int error;
89728diff --git a/lib/list_debug.c b/lib/list_debug.c
89729index c24c2f7..f0296f4 100644
89730--- a/lib/list_debug.c
89731+++ b/lib/list_debug.c
89732@@ -11,7 +11,9 @@
89733 #include <linux/bug.h>
89734 #include <linux/kernel.h>
89735 #include <linux/rculist.h>
89736+#include <linux/mm.h>
89737
89738+#ifdef CONFIG_DEBUG_LIST
89739 /*
89740 * Insert a new entry between two known consecutive entries.
89741 *
89742@@ -19,21 +21,40 @@
89743 * the prev/next entries already!
89744 */
89745
89746+static bool __list_add_debug(struct list_head *new,
89747+ struct list_head *prev,
89748+ struct list_head *next)
89749+{
89750+ if (unlikely(next->prev != prev)) {
89751+ printk(KERN_ERR "list_add corruption. next->prev should be "
89752+ "prev (%p), but was %p. (next=%p).\n",
89753+ prev, next->prev, next);
89754+ BUG();
89755+ return false;
89756+ }
89757+ if (unlikely(prev->next != next)) {
89758+ printk(KERN_ERR "list_add corruption. prev->next should be "
89759+ "next (%p), but was %p. (prev=%p).\n",
89760+ next, prev->next, prev);
89761+ BUG();
89762+ return false;
89763+ }
89764+ if (unlikely(new == prev || new == next)) {
89765+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
89766+ new, prev, next);
89767+ BUG();
89768+ return false;
89769+ }
89770+ return true;
89771+}
89772+
89773 void __list_add(struct list_head *new,
89774- struct list_head *prev,
89775- struct list_head *next)
89776+ struct list_head *prev,
89777+ struct list_head *next)
89778 {
89779- WARN(next->prev != prev,
89780- "list_add corruption. next->prev should be "
89781- "prev (%p), but was %p. (next=%p).\n",
89782- prev, next->prev, next);
89783- WARN(prev->next != next,
89784- "list_add corruption. prev->next should be "
89785- "next (%p), but was %p. (prev=%p).\n",
89786- next, prev->next, prev);
89787- WARN(new == prev || new == next,
89788- "list_add double add: new=%p, prev=%p, next=%p.\n",
89789- new, prev, next);
89790+ if (!__list_add_debug(new, prev, next))
89791+ return;
89792+
89793 next->prev = new;
89794 new->next = next;
89795 new->prev = prev;
89796@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
89797 }
89798 EXPORT_SYMBOL(__list_add);
89799
89800-void __list_del_entry(struct list_head *entry)
89801+static bool __list_del_entry_debug(struct list_head *entry)
89802 {
89803 struct list_head *prev, *next;
89804
89805 prev = entry->prev;
89806 next = entry->next;
89807
89808- if (WARN(next == LIST_POISON1,
89809- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
89810- entry, LIST_POISON1) ||
89811- WARN(prev == LIST_POISON2,
89812- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
89813- entry, LIST_POISON2) ||
89814- WARN(prev->next != entry,
89815- "list_del corruption. prev->next should be %p, "
89816- "but was %p\n", entry, prev->next) ||
89817- WARN(next->prev != entry,
89818- "list_del corruption. next->prev should be %p, "
89819- "but was %p\n", entry, next->prev))
89820+ if (unlikely(next == LIST_POISON1)) {
89821+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
89822+ entry, LIST_POISON1);
89823+ BUG();
89824+ return false;
89825+ }
89826+ if (unlikely(prev == LIST_POISON2)) {
89827+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
89828+ entry, LIST_POISON2);
89829+ BUG();
89830+ return false;
89831+ }
89832+ if (unlikely(entry->prev->next != entry)) {
89833+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
89834+ "but was %p\n", entry, prev->next);
89835+ BUG();
89836+ return false;
89837+ }
89838+ if (unlikely(entry->next->prev != entry)) {
89839+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
89840+ "but was %p\n", entry, next->prev);
89841+ BUG();
89842+ return false;
89843+ }
89844+ return true;
89845+}
89846+
89847+void __list_del_entry(struct list_head *entry)
89848+{
89849+ if (!__list_del_entry_debug(entry))
89850 return;
89851
89852- __list_del(prev, next);
89853+ __list_del(entry->prev, entry->next);
89854 }
89855 EXPORT_SYMBOL(__list_del_entry);
89856
89857@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
89858 void __list_add_rcu(struct list_head *new,
89859 struct list_head *prev, struct list_head *next)
89860 {
89861- WARN(next->prev != prev,
89862- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
89863- prev, next->prev, next);
89864- WARN(prev->next != next,
89865- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
89866- next, prev->next, prev);
89867+ if (!__list_add_debug(new, prev, next))
89868+ return;
89869+
89870 new->next = next;
89871 new->prev = prev;
89872 rcu_assign_pointer(list_next_rcu(prev), new);
89873 next->prev = new;
89874 }
89875 EXPORT_SYMBOL(__list_add_rcu);
89876+#endif
89877+
89878+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
89879+{
89880+#ifdef CONFIG_DEBUG_LIST
89881+ if (!__list_add_debug(new, prev, next))
89882+ return;
89883+#endif
89884+
89885+ pax_open_kernel();
89886+ next->prev = new;
89887+ new->next = next;
89888+ new->prev = prev;
89889+ prev->next = new;
89890+ pax_close_kernel();
89891+}
89892+EXPORT_SYMBOL(__pax_list_add);
89893+
89894+void pax_list_del(struct list_head *entry)
89895+{
89896+#ifdef CONFIG_DEBUG_LIST
89897+ if (!__list_del_entry_debug(entry))
89898+ return;
89899+#endif
89900+
89901+ pax_open_kernel();
89902+ __list_del(entry->prev, entry->next);
89903+ entry->next = LIST_POISON1;
89904+ entry->prev = LIST_POISON2;
89905+ pax_close_kernel();
89906+}
89907+EXPORT_SYMBOL(pax_list_del);
89908+
89909+void pax_list_del_init(struct list_head *entry)
89910+{
89911+ pax_open_kernel();
89912+ __list_del(entry->prev, entry->next);
89913+ INIT_LIST_HEAD(entry);
89914+ pax_close_kernel();
89915+}
89916+EXPORT_SYMBOL(pax_list_del_init);
89917+
89918+void __pax_list_add_rcu(struct list_head *new,
89919+ struct list_head *prev, struct list_head *next)
89920+{
89921+#ifdef CONFIG_DEBUG_LIST
89922+ if (!__list_add_debug(new, prev, next))
89923+ return;
89924+#endif
89925+
89926+ pax_open_kernel();
89927+ new->next = next;
89928+ new->prev = prev;
89929+ rcu_assign_pointer(list_next_rcu(prev), new);
89930+ next->prev = new;
89931+ pax_close_kernel();
89932+}
89933+EXPORT_SYMBOL(__pax_list_add_rcu);
89934+
89935+void pax_list_del_rcu(struct list_head *entry)
89936+{
89937+#ifdef CONFIG_DEBUG_LIST
89938+ if (!__list_del_entry_debug(entry))
89939+ return;
89940+#endif
89941+
89942+ pax_open_kernel();
89943+ __list_del(entry->prev, entry->next);
89944+ entry->next = LIST_POISON1;
89945+ entry->prev = LIST_POISON2;
89946+ pax_close_kernel();
89947+}
89948+EXPORT_SYMBOL(pax_list_del_rcu);
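
/*
 * Editor's note: a minimal userspace rendition of the
 * validate-then-link discipline the rewritten lib/list_debug.c
 * enforces: the neighbors are checked before any pointer is touched,
 * and on corruption nothing is modified (the kernel BUG()s; this
 * sketch merely refuses).
 */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static int list_add_checked(struct list_head *new,
			    struct list_head *prev, struct list_head *next)
{
	if (next->prev != prev || prev->next != next ||
	    new == prev || new == next) {
		fprintf(stderr, "list corruption, refusing to link\n");
		return -1;			/* kernel: BUG() */
	}
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
	return 0;
}

int main(void)
{
	struct list_head head = { &head, &head };	/* empty circular list */
	struct list_head a, b;

	list_add_checked(&a, &head, head.next);		/* fine */
	a.prev = NULL;					/* simulate corruption */
	return list_add_checked(&b, &head, head.next) ? 1 : 0;
}
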
89949diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
89950index 1a53d49..ace934c 100644
89951--- a/lib/percpu-refcount.c
89952+++ b/lib/percpu-refcount.c
89953@@ -29,7 +29,7 @@
89954 * can't hit 0 before we've added up all the percpu refs.
89955 */
89956
89957-#define PCPU_COUNT_BIAS (1U << 31)
89958+#define PCPU_COUNT_BIAS (1U << 30)
89959
89960 /**
89961 * percpu_ref_init - initialize a percpu refcount
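
/*
 * Editor's note: an arithmetic guess at why the bias was halved.
 * Under overflow-checked (signed) atomics, 1U << 31 already lies
 * outside the positive range of a 32-bit signed counter, whereas
 * 1U << 30 keeps the whole biased range representable:
 */
#include <stdio.h>
#include <limits.h>

int main(void)
{
	printf("INT_MAX  = %d\n", INT_MAX);
	printf("1U << 31 = %u (as int: %d)\n",	/* negative on the usual */
	       1U << 31, (int)(1U << 31));	/* two's-complement targets */
	printf("1U << 30 = %u (as int: %d)\n",	/* still positive */
	       1U << 30, (int)(1U << 30));
	return 0;
}
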
89962diff --git a/lib/radix-tree.c b/lib/radix-tree.c
89963index 7811ed3..f80ca19 100644
89964--- a/lib/radix-tree.c
89965+++ b/lib/radix-tree.c
89966@@ -93,7 +93,7 @@ struct radix_tree_preload {
89967 int nr;
89968 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
89969 };
89970-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
89971+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
89972
89973 static inline void *ptr_to_indirect(void *ptr)
89974 {
89975diff --git a/lib/rbtree.c b/lib/rbtree.c
89976index 65f4eff..2cfa167 100644
89977--- a/lib/rbtree.c
89978+++ b/lib/rbtree.c
89979@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
89980 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
89981
89982 static const struct rb_augment_callbacks dummy_callbacks = {
89983- dummy_propagate, dummy_copy, dummy_rotate
89984+ .propagate = dummy_propagate,
89985+ .copy = dummy_copy,
89986+ .rotate = dummy_rotate
89987 };
89988
89989 void rb_insert_color(struct rb_node *node, struct rb_root *root)
89990diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
89991index bb2b201..46abaf9 100644
89992--- a/lib/strncpy_from_user.c
89993+++ b/lib/strncpy_from_user.c
89994@@ -21,7 +21,7 @@
89995 */
89996 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
89997 {
89998- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
89999+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
90000 long res = 0;
90001
90002 /*
90003diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
90004index a28df52..3d55877 100644
90005--- a/lib/strnlen_user.c
90006+++ b/lib/strnlen_user.c
90007@@ -26,7 +26,7 @@
90008 */
90009 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
90010 {
90011- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
90012+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
90013 long align, res = 0;
90014 unsigned long c;
90015
90016diff --git a/lib/swiotlb.c b/lib/swiotlb.c
90017index e4399fa..5e8b214 100644
90018--- a/lib/swiotlb.c
90019+++ b/lib/swiotlb.c
90020@@ -668,7 +668,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
90021
90022 void
90023 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
90024- dma_addr_t dev_addr)
90025+ dma_addr_t dev_addr, struct dma_attrs *attrs)
90026 {
90027 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
90028
90029diff --git a/lib/usercopy.c b/lib/usercopy.c
90030index 4f5b1dd..7cab418 100644
90031--- a/lib/usercopy.c
90032+++ b/lib/usercopy.c
90033@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
90034 WARN(1, "Buffer overflow detected!\n");
90035 }
90036 EXPORT_SYMBOL(copy_from_user_overflow);
90037+
90038+void copy_to_user_overflow(void)
90039+{
90040+ WARN(1, "Buffer overflow detected!\n");
90041+}
90042+EXPORT_SYMBOL(copy_to_user_overflow);
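
/*
 * Editor's note: copy_to_user_overflow() added above is a warning
 * sink; a sketch of how such sinks are typically wired up with gcc's
 * __builtin_object_size(). The checked_copy() wrapper is hypothetical
 * and stands in for the kernel's size-checked uaccess wrappers.
 */
#include <stdio.h>
#include <string.h>

static void copy_overflow_detected(void)
{
	fprintf(stderr, "Buffer overflow detected!\n");	/* kernel: WARN(1, ...) */
}

#define checked_copy(dst, src, n)				\
	({							\
		size_t __sz = __builtin_object_size(dst, 0);	\
		if (__sz != (size_t)-1 && (n) > __sz)		\
			copy_overflow_detected();		\
		else						\
			memcpy(dst, src, n);			\
	})

int main(void)
{
	char small[8];

	checked_copy(small, "0123456789abcdef", 16);	/* size known: caught */
	return 0;
}
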
90043diff --git a/lib/vsprintf.c b/lib/vsprintf.c
90044index 10909c5..653e1b8 100644
90045--- a/lib/vsprintf.c
90046+++ b/lib/vsprintf.c
90047@@ -16,6 +16,9 @@
90048 * - scnprintf and vscnprintf
90049 */
90050
90051+#ifdef CONFIG_GRKERNSEC_HIDESYM
90052+#define __INCLUDED_BY_HIDESYM 1
90053+#endif
90054 #include <stdarg.h>
90055 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
90056 #include <linux/types.h>
90057@@ -1155,7 +1158,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
90058 return number(buf, end, *(const netdev_features_t *)addr, spec);
90059 }
90060
90061+#ifdef CONFIG_GRKERNSEC_HIDESYM
90062+int kptr_restrict __read_mostly = 2;
90063+#else
90064 int kptr_restrict __read_mostly;
90065+#endif
90066
90067 /*
90068 * Show a '%p' thing. A kernel extension is that the '%p' is followed
90069@@ -1168,6 +1175,7 @@ int kptr_restrict __read_mostly;
90070 * - 'f' For simple symbolic function names without offset
90071 * - 'S' For symbolic direct pointers with offset
90072 * - 's' For symbolic direct pointers without offset
90073+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
90074 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
90075 * - 'B' For backtraced symbolic direct pointers with offset
90076 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
90077@@ -1234,12 +1242,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
90078
90079 if (!ptr && *fmt != 'K') {
90080 /*
90081- * Print (null) with the same width as a pointer so it makes
90082+ * Print (nil) with the same width as a pointer so it makes
90083 * tabular output look nice.
90084 */
90085 if (spec.field_width == -1)
90086 spec.field_width = default_width;
90087- return string(buf, end, "(null)", spec);
90088+ return string(buf, end, "(nil)", spec);
90089 }
90090
90091 switch (*fmt) {
90092@@ -1249,6 +1257,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
90093 /* Fallthrough */
90094 case 'S':
90095 case 's':
90096+#ifdef CONFIG_GRKERNSEC_HIDESYM
90097+ break;
90098+#else
90099+ return symbol_string(buf, end, ptr, spec, fmt);
90100+#endif
90101+ case 'A':
90102 case 'B':
90103 return symbol_string(buf, end, ptr, spec, fmt);
90104 case 'R':
90105@@ -1304,6 +1318,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
90106 va_end(va);
90107 return buf;
90108 }
90109+ case 'P':
90110+ break;
90111 case 'K':
90112 /*
90113 * %pK cannot be used in IRQ context because its test
90114@@ -1365,6 +1381,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
90115 ((const struct file *)ptr)->f_path.dentry,
90116 spec, fmt);
90117 }
90118+
90119+#ifdef CONFIG_GRKERNSEC_HIDESYM
 90120+ /* 'P' = approved pointers to copy to userland,
 90121+ as in the /proc/kallsyms case, where we display nothing
 90122+ for non-root users and the real contents for root users.
 90123+ Also ignore 'K' pointers, since we force their NULLing
 90124+ for non-root users above.
 90125+ */
90126+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
90127+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
90128+ dump_stack();
90129+ ptr = NULL;
90130+ }
90131+#endif
90132+
90133 spec.flags |= SMALL;
90134 if (spec.field_width == -1) {
90135 spec.field_width = default_width;
90136@@ -2086,11 +2117,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
90137 typeof(type) value; \
90138 if (sizeof(type) == 8) { \
90139 args = PTR_ALIGN(args, sizeof(u32)); \
90140- *(u32 *)&value = *(u32 *)args; \
90141- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
90142+ *(u32 *)&value = *(const u32 *)args; \
90143+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
90144 } else { \
90145 args = PTR_ALIGN(args, sizeof(type)); \
90146- value = *(typeof(type) *)args; \
90147+ value = *(const typeof(type) *)args; \
90148 } \
90149 args += sizeof(type); \
90150 value; \
90151@@ -2153,7 +2184,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
90152 case FORMAT_TYPE_STR: {
90153 const char *str_arg = args;
90154 args += strlen(str_arg) + 1;
90155- str = string(str, end, (char *)str_arg, spec);
90156+ str = string(str, end, str_arg, spec);
90157 break;
90158 }
90159
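
/*
 * Editor's note: what the kptr_restrict default of 2 under HIDESYM
 * means for format users. %pK is the opt-in "may be censored"
 * pointer format: with kptr_restrict >= 2 the printed value is
 * all-zeros regardless of caller, so addresses never reach
 * unprivileged readers of logs or seq_files. A toy kernel-style use:
 */
#include <linux/printk.h>

static void show_object(const void *obj)
{
	/* prints zeros when kptr_restrict is 2, as HIDESYM now defaults */
	pr_info("obj at %pK\n", obj);
}
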
90160diff --git a/localversion-grsec b/localversion-grsec
90161new file mode 100644
90162index 0000000..7cd6065
90163--- /dev/null
90164+++ b/localversion-grsec
90165@@ -0,0 +1 @@
90166+-grsec
90167diff --git a/mm/Kconfig b/mm/Kconfig
90168index 723bbe0..ea624b1 100644
90169--- a/mm/Kconfig
90170+++ b/mm/Kconfig
90171@@ -326,10 +326,11 @@ config KSM
90172 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
90173
90174 config DEFAULT_MMAP_MIN_ADDR
90175- int "Low address space to protect from user allocation"
90176+ int "Low address space to protect from user allocation"
90177 depends on MMU
90178- default 4096
90179- help
90180+ default 32768 if ALPHA || ARM || PARISC || SPARC32
90181+ default 65536
90182+ help
90183 This is the portion of low virtual memory which should be protected
90184 from userspace allocation. Keeping a user from writing to low pages
90185 can help reduce the impact of kernel NULL pointer bugs.
90186@@ -360,7 +361,7 @@ config MEMORY_FAILURE
90187
90188 config HWPOISON_INJECT
90189 tristate "HWPoison pages injector"
90190- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
90191+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
90192 select PROC_PAGE_MONITOR
90193
90194 config NOMMU_INITIAL_TRIM_EXCESS
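
/*
 * Editor's note: a userspace probe of what the raised
 * DEFAULT_MMAP_MIN_ADDR buys. Without the relevant capability, fixed
 * mappings below vm.mmap_min_addr fail with EPERM, which blocks the
 * classic trick of mapping page zero to exploit kernel NULL
 * dereferences.
 */
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	void *p = mmap((void *)0x1000, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

	if (p == MAP_FAILED)
		printf("mmap at 0x1000 refused: %s\n", strerror(errno));
	else
		printf("mapped at %p; mmap_min_addr is below 0x1000\n", p);
	return 0;
}
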
90195diff --git a/mm/backing-dev.c b/mm/backing-dev.c
90196index ce682f7..1fb54f9 100644
90197--- a/mm/backing-dev.c
90198+++ b/mm/backing-dev.c
90199@@ -12,7 +12,7 @@
90200 #include <linux/device.h>
90201 #include <trace/events/writeback.h>
90202
90203-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
90204+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
90205
90206 struct backing_dev_info default_backing_dev_info = {
90207 .name = "default",
90208@@ -525,7 +525,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
90209 return err;
90210
90211 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
90212- atomic_long_inc_return(&bdi_seq));
90213+ atomic_long_inc_return_unchecked(&bdi_seq));
90214 if (err) {
90215 bdi_destroy(bdi);
90216 return err;
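/*
 * The opt-out pattern behind atomic_long_unchecked_t above: under PaX
 * REFCOUNT, plain atomics trap on overflow, so counters whose
 * wraparound is harmless (here, a device-name sequence number) use a
 * separately typed "unchecked" atomic with ordinary semantics. The
 * type and helper below are an illustrative reconstruction using GCC
 * builtins, not the kernel's definitions.
 */
typedef struct {
	long counter;
} atomic_long_unchecked_sketch_t;

static inline long
atomic_long_inc_return_unchecked_sketch(atomic_long_unchecked_sketch_t *v)
{
	/* no overflow trap: wraparound is deliberately permitted */
	return __atomic_add_fetch(&v->counter, 1, __ATOMIC_SEQ_CST);
}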
90217diff --git a/mm/filemap.c b/mm/filemap.c
90218index b7749a9..50d1123 100644
90219--- a/mm/filemap.c
90220+++ b/mm/filemap.c
90221@@ -1768,7 +1768,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
90222 struct address_space *mapping = file->f_mapping;
90223
90224 if (!mapping->a_ops->readpage)
90225- return -ENOEXEC;
90226+ return -ENODEV;
90227 file_accessed(file);
90228 vma->vm_ops = &generic_file_vm_ops;
90229 return 0;
90230@@ -1950,7 +1950,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
90231
90232 while (bytes) {
90233 char __user *buf = iov->iov_base + base;
90234- int copy = min(bytes, iov->iov_len - base);
90235+ size_t copy = min(bytes, iov->iov_len - base);
90236
90237 base = 0;
90238 left = __copy_from_user_inatomic(vaddr, buf, copy);
90239@@ -1979,7 +1979,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
90240 BUG_ON(!in_atomic());
90241 kaddr = kmap_atomic(page);
90242 if (likely(i->nr_segs == 1)) {
90243- int left;
90244+ size_t left;
90245 char __user *buf = i->iov->iov_base + i->iov_offset;
90246 left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
90247 copied = bytes - left;
90248@@ -2007,7 +2007,7 @@ size_t iov_iter_copy_from_user(struct page *page,
90249
90250 kaddr = kmap(page);
90251 if (likely(i->nr_segs == 1)) {
90252- int left;
90253+ size_t left;
90254 char __user *buf = i->iov->iov_base + i->iov_offset;
90255 left = __copy_from_user(kaddr + offset, buf, bytes);
90256 copied = bytes - left;
90257@@ -2037,7 +2037,7 @@ void iov_iter_advance(struct iov_iter *i, size_t bytes)
90258 	 * zero-length segments (without overrunning the iovec).
90259 */
90260 while (bytes || unlikely(i->count && !iov->iov_len)) {
90261- int copy;
90262+ size_t copy;
90263
90264 copy = min(bytes, iov->iov_len - base);
90265 BUG_ON(!i->count || i->count < copy);
90266@@ -2108,6 +2108,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
90267 *pos = i_size_read(inode);
90268
90269 if (limit != RLIM_INFINITY) {
90270+		gr_learn_resource(current, RLIMIT_FSIZE, *pos, 0);
90271 if (*pos >= limit) {
90272 send_sig(SIGXFSZ, current, 0);
90273 return -EFBIG;
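/*
 * Why the int -> size_t conversions above matter: on 64-bit, an iovec
 * segment longer than INT_MAX silently truncates (typically to a
 * negative value) when stored in an int, corrupting the copy length.
 * A two-variable demonstration:
 */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	size_t bytes = (size_t)INT_MAX + 2;	/* a >2GB segment length */
	int truncated = (int)bytes;	/* implementation-defined, usually negative */
	size_t kept = bytes;		/* full width preserved */

	printf("as int: %d, as size_t: %zu\n", truncated, kept);
	return 0;
}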
90274diff --git a/mm/fremap.c b/mm/fremap.c
90275index bbc4d66..117b798 100644
90276--- a/mm/fremap.c
90277+++ b/mm/fremap.c
90278@@ -163,6 +163,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
90279 retry:
90280 vma = find_vma(mm, start);
90281
90282+#ifdef CONFIG_PAX_SEGMEXEC
90283+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
90284+ goto out;
90285+#endif
90286+
90287 /*
90288 * Make sure the vma is shared, that it supports prefaulting,
90289 * and that the remapped range is valid and fully within
90290diff --git a/mm/highmem.c b/mm/highmem.c
90291index b32b70c..e512eb0 100644
90292--- a/mm/highmem.c
90293+++ b/mm/highmem.c
90294@@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
90295 * So no dangers, even with speculative execution.
90296 */
90297 page = pte_page(pkmap_page_table[i]);
90298+ pax_open_kernel();
90299 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
90300-
90301+ pax_close_kernel();
90302 set_page_address(page, NULL);
90303 need_flush = 1;
90304 }
90305@@ -198,9 +199,11 @@ start:
90306 }
90307 }
90308 vaddr = PKMAP_ADDR(last_pkmap_nr);
90309+
90310+ pax_open_kernel();
90311 set_pte_at(&init_mm, vaddr,
90312 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
90313-
90314+ pax_close_kernel();
90315 pkmap_count[last_pkmap_nr] = 1;
90316 set_page_address(page, (void *)vaddr);
90317
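/*
 * pax_open_kernel()/pax_close_kernel() bracket writes to otherwise
 * read-only kernel page-table pages (on x86 by briefly toggling
 * CR0.WP). A loose user-space analogy of the open/write/close window,
 * using mprotect() on an initially read-only mapping:
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, pg, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	mprotect(p, pg, PROT_READ | PROT_WRITE);	/* "open" the window */
	strcpy(p, "updated");				/* the protected write */
	mprotect(p, pg, PROT_READ);			/* "close" it again */
	printf("%s\n", p);
	return 0;
}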
90318diff --git a/mm/hugetlb.c b/mm/hugetlb.c
90319index dee6cf4..52b94f7 100644
90320--- a/mm/hugetlb.c
90321+++ b/mm/hugetlb.c
90322@@ -2077,15 +2077,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
90323 struct hstate *h = &default_hstate;
90324 unsigned long tmp;
90325 int ret;
90326+ ctl_table_no_const hugetlb_table;
90327
90328 tmp = h->max_huge_pages;
90329
90330 if (write && h->order >= MAX_ORDER)
90331 return -EINVAL;
90332
90333- table->data = &tmp;
90334- table->maxlen = sizeof(unsigned long);
90335- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
90336+ hugetlb_table = *table;
90337+ hugetlb_table.data = &tmp;
90338+ hugetlb_table.maxlen = sizeof(unsigned long);
90339+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
90340 if (ret)
90341 goto out;
90342
90343@@ -2130,15 +2132,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
90344 struct hstate *h = &default_hstate;
90345 unsigned long tmp;
90346 int ret;
90347+ ctl_table_no_const hugetlb_table;
90348
90349 tmp = h->nr_overcommit_huge_pages;
90350
90351 if (write && h->order >= MAX_ORDER)
90352 return -EINVAL;
90353
90354- table->data = &tmp;
90355- table->maxlen = sizeof(unsigned long);
90356- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
90357+ hugetlb_table = *table;
90358+ hugetlb_table.data = &tmp;
90359+ hugetlb_table.maxlen = sizeof(unsigned long);
90360+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
90361 if (ret)
90362 goto out;
90363
90364@@ -2596,6 +2600,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
90365 return 1;
90366 }
90367
90368+#ifdef CONFIG_PAX_SEGMEXEC
90369+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
90370+{
90371+ struct mm_struct *mm = vma->vm_mm;
90372+ struct vm_area_struct *vma_m;
90373+ unsigned long address_m;
90374+ pte_t *ptep_m;
90375+
90376+ vma_m = pax_find_mirror_vma(vma);
90377+ if (!vma_m)
90378+ return;
90379+
90380+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
90381+ address_m = address + SEGMEXEC_TASK_SIZE;
90382+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
90383+ get_page(page_m);
90384+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
90385+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
90386+}
90387+#endif
90388+
90389 /*
90390 * Hugetlb_cow() should be called with page lock of the original hugepage held.
90391 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
90392@@ -2712,6 +2737,11 @@ retry_avoidcopy:
90393 make_huge_pte(vma, new_page, 1));
90394 page_remove_rmap(old_page);
90395 hugepage_add_new_anon_rmap(new_page, vma, address);
90396+
90397+#ifdef CONFIG_PAX_SEGMEXEC
90398+ pax_mirror_huge_pte(vma, address, new_page);
90399+#endif
90400+
90401 /* Make the old page be freed below */
90402 new_page = old_page;
90403 }
90404@@ -2876,6 +2906,10 @@ retry:
90405 && (vma->vm_flags & VM_SHARED)));
90406 set_huge_pte_at(mm, address, ptep, new_pte);
90407
90408+#ifdef CONFIG_PAX_SEGMEXEC
90409+ pax_mirror_huge_pte(vma, address, page);
90410+#endif
90411+
90412 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
90413 /* Optimization, do the COW without a second fault */
90414 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
90415@@ -2906,6 +2940,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
90416 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
90417 struct hstate *h = hstate_vma(vma);
90418
90419+#ifdef CONFIG_PAX_SEGMEXEC
90420+ struct vm_area_struct *vma_m;
90421+#endif
90422+
90423 address &= huge_page_mask(h);
90424
90425 ptep = huge_pte_offset(mm, address);
90426@@ -2919,6 +2957,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
90427 VM_FAULT_SET_HINDEX(hstate_index(h));
90428 }
90429
90430+#ifdef CONFIG_PAX_SEGMEXEC
90431+ vma_m = pax_find_mirror_vma(vma);
90432+ if (vma_m) {
90433+ unsigned long address_m;
90434+
90435+ if (vma->vm_start > vma_m->vm_start) {
90436+ address_m = address;
90437+ address -= SEGMEXEC_TASK_SIZE;
90438+ vma = vma_m;
90439+ h = hstate_vma(vma);
90440+ } else
90441+ address_m = address + SEGMEXEC_TASK_SIZE;
90442+
90443+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
90444+ return VM_FAULT_OOM;
90445+ address_m &= HPAGE_MASK;
90446+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
90447+ }
90448+#endif
90449+
90450 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
90451 if (!ptep)
90452 return VM_FAULT_OOM;
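/*
 * The hugetlb handlers above both use the same pattern: when the shared
 * ctl_table must stay read-only (ctl_table_no_const), take a shallow
 * stack copy, point it at local scratch storage, and pass the copy to
 * the generic helper. Reduced to its essentials, with ctl_table_sketch
 * and do_handler() as hypothetical stand-ins for ctl_table and
 * proc_doulongvec_minmax():
 */
#include <stddef.h>

struct ctl_table_sketch {
	void *data;
	size_t maxlen;
};

static int do_handler(struct ctl_table_sketch *t)
{
	(void)t;
	return 0;
}

static int handler(const struct ctl_table_sketch *table, unsigned long *tmp)
{
	struct ctl_table_sketch local = *table;	/* original left untouched */

	local.data = tmp;
	local.maxlen = sizeof(*tmp);
	return do_handler(&local);
}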
90453diff --git a/mm/internal.h b/mm/internal.h
90454index 684f7aa..9eb9edc 100644
90455--- a/mm/internal.h
90456+++ b/mm/internal.h
90457@@ -97,6 +97,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
90458 * in mm/page_alloc.c
90459 */
90460 extern void __free_pages_bootmem(struct page *page, unsigned int order);
90461+extern void free_compound_page(struct page *page);
90462 extern void prep_compound_page(struct page *page, unsigned long order);
90463 #ifdef CONFIG_MEMORY_FAILURE
90464 extern bool is_free_buddy_page(struct page *page);
90465@@ -352,7 +353,7 @@ extern u32 hwpoison_filter_enable;
90466
90467 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
90468 unsigned long, unsigned long,
90469- unsigned long, unsigned long);
90470+ unsigned long, unsigned long) __intentional_overflow(-1);
90471
90472 extern void set_pageblock_order(void);
90473 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
90474diff --git a/mm/kmemleak.c b/mm/kmemleak.c
90475index 31f01c5..7015178 100644
90476--- a/mm/kmemleak.c
90477+++ b/mm/kmemleak.c
90478@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
90479
90480 for (i = 0; i < object->trace_len; i++) {
90481 void *ptr = (void *)object->trace[i];
90482- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
90483+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
90484 }
90485 }
90486
90487@@ -1853,7 +1853,7 @@ static int __init kmemleak_late_init(void)
90488 return -ENOMEM;
90489 }
90490
90491- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
90492+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
90493 &kmemleak_fops);
90494 if (!dentry)
90495 pr_warning("Failed to create the debugfs kmemleak file\n");
90496diff --git a/mm/maccess.c b/mm/maccess.c
90497index d53adf9..03a24bf 100644
90498--- a/mm/maccess.c
90499+++ b/mm/maccess.c
90500@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
90501 set_fs(KERNEL_DS);
90502 pagefault_disable();
90503 ret = __copy_from_user_inatomic(dst,
90504- (__force const void __user *)src, size);
90505+ (const void __force_user *)src, size);
90506 pagefault_enable();
90507 set_fs(old_fs);
90508
90509@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
90510
90511 set_fs(KERNEL_DS);
90512 pagefault_disable();
90513- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
90514+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
90515 pagefault_enable();
90516 set_fs(old_fs);
90517
90518diff --git a/mm/madvise.c b/mm/madvise.c
90519index 539eeb9..e24a987 100644
90520--- a/mm/madvise.c
90521+++ b/mm/madvise.c
90522@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
90523 pgoff_t pgoff;
90524 unsigned long new_flags = vma->vm_flags;
90525
90526+#ifdef CONFIG_PAX_SEGMEXEC
90527+ struct vm_area_struct *vma_m;
90528+#endif
90529+
90530 switch (behavior) {
90531 case MADV_NORMAL:
90532 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
90533@@ -126,6 +130,13 @@ success:
90534 /*
90535 * vm_flags is protected by the mmap_sem held in write mode.
90536 */
90537+
90538+#ifdef CONFIG_PAX_SEGMEXEC
90539+ vma_m = pax_find_mirror_vma(vma);
90540+ if (vma_m)
90541+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
90542+#endif
90543+
90544 vma->vm_flags = new_flags;
90545
90546 out:
90547@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct *vma,
90548 struct vm_area_struct **prev,
90549 unsigned long start, unsigned long end)
90550 {
90551+
90552+#ifdef CONFIG_PAX_SEGMEXEC
90553+ struct vm_area_struct *vma_m;
90554+#endif
90555+
90556 *prev = vma;
90557 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
90558 return -EINVAL;
90559@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct *vma,
90560 zap_page_range(vma, start, end - start, &details);
90561 } else
90562 zap_page_range(vma, start, end - start, NULL);
90563+
90564+#ifdef CONFIG_PAX_SEGMEXEC
90565+ vma_m = pax_find_mirror_vma(vma);
90566+ if (vma_m) {
90567+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
90568+ struct zap_details details = {
90569+ .nonlinear_vma = vma_m,
90570+ .last_index = ULONG_MAX,
90571+ };
90572+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
90573+ } else
90574+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
90575+ }
90576+#endif
90577+
90578 return 0;
90579 }
90580
90581@@ -491,6 +522,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
90582 if (end < start)
90583 return error;
90584
90585+#ifdef CONFIG_PAX_SEGMEXEC
90586+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
90587+ if (end > SEGMEXEC_TASK_SIZE)
90588+ return error;
90589+ } else
90590+#endif
90591+
90592+ if (end > TASK_SIZE)
90593+ return error;
90594+
90595 error = 0;
90596 if (end == start)
90597 return error;
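/*
 * The range validation added to sys_madvise() above, in isolation:
 * reject a request whose end lies beyond the usable address-space
 * ceiling, which SEGMEXEC tasks see halved. The constants are
 * illustrative, not the kernel's.
 */
#include <stdbool.h>

#define TASK_SIZE_SK		0x7fffffffffffUL
#define SEGMEXEC_TASK_SIZE_SK	(TASK_SIZE_SK / 2)

static bool madvise_range_ok(unsigned long end, bool segmexec)
{
	return end <= (segmexec ? SEGMEXEC_TASK_SIZE_SK : TASK_SIZE_SK);
}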
90598diff --git a/mm/memory-failure.c b/mm/memory-failure.c
90599index fabe550..f31b51c 100644
90600--- a/mm/memory-failure.c
90601+++ b/mm/memory-failure.c
90602@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
90603
90604 int sysctl_memory_failure_recovery __read_mostly = 1;
90605
90606-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
90607+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
90608
90609 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
90610
90611@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
90612 pfn, t->comm, t->pid);
90613 si.si_signo = SIGBUS;
90614 si.si_errno = 0;
90615- si.si_addr = (void *)addr;
90616+ si.si_addr = (void __user *)addr;
90617 #ifdef __ARCH_SI_TRAPNO
90618 si.si_trapno = trapno;
90619 #endif
90620@@ -762,7 +762,7 @@ static struct page_state {
90621 unsigned long res;
90622 char *msg;
90623 int (*action)(struct page *p, unsigned long pfn);
90624-} error_states[] = {
90625+} __do_const error_states[] = {
90626 { reserved, reserved, "reserved kernel", me_kernel },
90627 /*
90628 * free pages are specially detected outside this table:
90629@@ -1063,7 +1063,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
90630 nr_pages = 1 << compound_order(hpage);
90631 else /* normal page or thp */
90632 nr_pages = 1;
90633- atomic_long_add(nr_pages, &num_poisoned_pages);
90634+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
90635
90636 /*
90637 * We need/can do nothing about count=0 pages.
90638@@ -1093,7 +1093,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
90639 if (!PageHWPoison(hpage)
90640 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
90641 || (p != hpage && TestSetPageHWPoison(hpage))) {
90642- atomic_long_sub(nr_pages, &num_poisoned_pages);
90643+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
90644 return 0;
90645 }
90646 set_page_hwpoison_huge_page(hpage);
90647@@ -1162,7 +1162,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
90648 }
90649 if (hwpoison_filter(p)) {
90650 if (TestClearPageHWPoison(p))
90651- atomic_long_sub(nr_pages, &num_poisoned_pages);
90652+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
90653 unlock_page(hpage);
90654 put_page(hpage);
90655 return 0;
90656@@ -1380,7 +1380,7 @@ int unpoison_memory(unsigned long pfn)
90657 return 0;
90658 }
90659 if (TestClearPageHWPoison(p))
90660- atomic_long_dec(&num_poisoned_pages);
90661+ atomic_long_dec_unchecked(&num_poisoned_pages);
90662 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
90663 return 0;
90664 }
90665@@ -1394,7 +1394,7 @@ int unpoison_memory(unsigned long pfn)
90666 */
90667 if (TestClearPageHWPoison(page)) {
90668 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
90669- atomic_long_sub(nr_pages, &num_poisoned_pages);
90670+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
90671 freeit = 1;
90672 if (PageHuge(page))
90673 clear_page_hwpoison_huge_page(page);
90674@@ -1519,11 +1519,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
90675 if (PageHuge(page)) {
90676 set_page_hwpoison_huge_page(hpage);
90677 dequeue_hwpoisoned_huge_page(hpage);
90678- atomic_long_add(1 << compound_order(hpage),
90679+ atomic_long_add_unchecked(1 << compound_order(hpage),
90680 &num_poisoned_pages);
90681 } else {
90682 SetPageHWPoison(page);
90683- atomic_long_inc(&num_poisoned_pages);
90684+ atomic_long_inc_unchecked(&num_poisoned_pages);
90685 }
90686 }
90687 return ret;
90688@@ -1562,7 +1562,7 @@ static int __soft_offline_page(struct page *page, int flags)
90689 put_page(page);
90690 pr_info("soft_offline: %#lx: invalidated\n", pfn);
90691 SetPageHWPoison(page);
90692- atomic_long_inc(&num_poisoned_pages);
90693+ atomic_long_inc_unchecked(&num_poisoned_pages);
90694 return 0;
90695 }
90696
90697@@ -1607,7 +1607,7 @@ static int __soft_offline_page(struct page *page, int flags)
90698 if (!is_free_buddy_page(page))
90699 pr_info("soft offline: %#lx: page leaked\n",
90700 pfn);
90701- atomic_long_inc(&num_poisoned_pages);
90702+ atomic_long_inc_unchecked(&num_poisoned_pages);
90703 }
90704 } else {
90705 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
90706@@ -1681,11 +1681,11 @@ int soft_offline_page(struct page *page, int flags)
90707 if (PageHuge(page)) {
90708 set_page_hwpoison_huge_page(hpage);
90709 dequeue_hwpoisoned_huge_page(hpage);
90710- atomic_long_add(1 << compound_order(hpage),
90711+ atomic_long_add_unchecked(1 << compound_order(hpage),
90712 &num_poisoned_pages);
90713 } else {
90714 SetPageHWPoison(page);
90715- atomic_long_inc(&num_poisoned_pages);
90716+ atomic_long_inc_unchecked(&num_poisoned_pages);
90717 }
90718 }
90719 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
90720diff --git a/mm/memory.c b/mm/memory.c
90721index 6768ce9..4c41d69 100644
90722--- a/mm/memory.c
90723+++ b/mm/memory.c
90724@@ -402,6 +402,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
90725 free_pte_range(tlb, pmd, addr);
90726 } while (pmd++, addr = next, addr != end);
90727
90728+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
90729 start &= PUD_MASK;
90730 if (start < floor)
90731 return;
90732@@ -416,6 +417,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
90733 pmd = pmd_offset(pud, start);
90734 pud_clear(pud);
90735 pmd_free_tlb(tlb, pmd, start);
90736+#endif
90737+
90738 }
90739
90740 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
90741@@ -435,6 +438,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
90742 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
90743 } while (pud++, addr = next, addr != end);
90744
90745+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
90746 start &= PGDIR_MASK;
90747 if (start < floor)
90748 return;
90749@@ -449,6 +453,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
90750 pud = pud_offset(pgd, start);
90751 pgd_clear(pgd);
90752 pud_free_tlb(tlb, pud, start);
90753+#endif
90754+
90755 }
90756
90757 /*
90758@@ -1635,12 +1641,6 @@ no_page_table:
90759 return page;
90760 }
90761
90762-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
90763-{
90764- return stack_guard_page_start(vma, addr) ||
90765- stack_guard_page_end(vma, addr+PAGE_SIZE);
90766-}
90767-
90768 /**
90769 * __get_user_pages() - pin user pages in memory
90770 * @tsk: task_struct of target task
90771@@ -1727,10 +1727,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
90772
90773 i = 0;
90774
90775- do {
90776+ while (nr_pages) {
90777 struct vm_area_struct *vma;
90778
90779- vma = find_extend_vma(mm, start);
90780+ vma = find_vma(mm, start);
90781 if (!vma && in_gate_area(mm, start)) {
90782 unsigned long pg = start & PAGE_MASK;
90783 pgd_t *pgd;
90784@@ -1779,7 +1779,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
90785 goto next_page;
90786 }
90787
90788- if (!vma ||
90789+ if (!vma || start < vma->vm_start ||
90790 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
90791 !(vm_flags & vma->vm_flags))
90792 return i ? : -EFAULT;
90793@@ -1808,11 +1808,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
90794 int ret;
90795 unsigned int fault_flags = 0;
90796
90797- /* For mlock, just skip the stack guard page. */
90798- if (foll_flags & FOLL_MLOCK) {
90799- if (stack_guard_page(vma, start))
90800- goto next_page;
90801- }
90802 if (foll_flags & FOLL_WRITE)
90803 fault_flags |= FAULT_FLAG_WRITE;
90804 if (nonblocking)
90805@@ -1892,7 +1887,7 @@ next_page:
90806 start += page_increm * PAGE_SIZE;
90807 nr_pages -= page_increm;
90808 } while (nr_pages && start < vma->vm_end);
90809- } while (nr_pages);
90810+ }
90811 return i;
90812 }
90813 EXPORT_SYMBOL(__get_user_pages);
90814@@ -2099,6 +2094,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
90815 page_add_file_rmap(page);
90816 set_pte_at(mm, addr, pte, mk_pte(page, prot));
90817
90818+#ifdef CONFIG_PAX_SEGMEXEC
90819+ pax_mirror_file_pte(vma, addr, page, ptl);
90820+#endif
90821+
90822 retval = 0;
90823 pte_unmap_unlock(pte, ptl);
90824 return retval;
90825@@ -2143,9 +2142,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
90826 if (!page_count(page))
90827 return -EINVAL;
90828 if (!(vma->vm_flags & VM_MIXEDMAP)) {
90829+
90830+#ifdef CONFIG_PAX_SEGMEXEC
90831+ struct vm_area_struct *vma_m;
90832+#endif
90833+
90834 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
90835 BUG_ON(vma->vm_flags & VM_PFNMAP);
90836 vma->vm_flags |= VM_MIXEDMAP;
90837+
90838+#ifdef CONFIG_PAX_SEGMEXEC
90839+ vma_m = pax_find_mirror_vma(vma);
90840+ if (vma_m)
90841+ vma_m->vm_flags |= VM_MIXEDMAP;
90842+#endif
90843+
90844 }
90845 return insert_page(vma, addr, page, vma->vm_page_prot);
90846 }
90847@@ -2228,6 +2239,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
90848 unsigned long pfn)
90849 {
90850 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
90851+ BUG_ON(vma->vm_mirror);
90852
90853 if (addr < vma->vm_start || addr >= vma->vm_end)
90854 return -EFAULT;
90855@@ -2475,7 +2487,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
90856
90857 BUG_ON(pud_huge(*pud));
90858
90859- pmd = pmd_alloc(mm, pud, addr);
90860+ pmd = (mm == &init_mm) ?
90861+ pmd_alloc_kernel(mm, pud, addr) :
90862+ pmd_alloc(mm, pud, addr);
90863 if (!pmd)
90864 return -ENOMEM;
90865 do {
90866@@ -2495,7 +2509,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
90867 unsigned long next;
90868 int err;
90869
90870- pud = pud_alloc(mm, pgd, addr);
90871+ pud = (mm == &init_mm) ?
90872+ pud_alloc_kernel(mm, pgd, addr) :
90873+ pud_alloc(mm, pgd, addr);
90874 if (!pud)
90875 return -ENOMEM;
90876 do {
90877@@ -2583,6 +2599,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
90878 copy_user_highpage(dst, src, va, vma);
90879 }
90880
90881+#ifdef CONFIG_PAX_SEGMEXEC
90882+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
90883+{
90884+ struct mm_struct *mm = vma->vm_mm;
90885+ spinlock_t *ptl;
90886+ pte_t *pte, entry;
90887+
90888+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
90889+ entry = *pte;
90890+ if (!pte_present(entry)) {
90891+ if (!pte_none(entry)) {
90892+ BUG_ON(pte_file(entry));
90893+ free_swap_and_cache(pte_to_swp_entry(entry));
90894+ pte_clear_not_present_full(mm, address, pte, 0);
90895+ }
90896+ } else {
90897+ struct page *page;
90898+
90899+ flush_cache_page(vma, address, pte_pfn(entry));
90900+ entry = ptep_clear_flush(vma, address, pte);
90901+ BUG_ON(pte_dirty(entry));
90902+ page = vm_normal_page(vma, address, entry);
90903+ if (page) {
90904+ update_hiwater_rss(mm);
90905+ if (PageAnon(page))
90906+ dec_mm_counter_fast(mm, MM_ANONPAGES);
90907+ else
90908+ dec_mm_counter_fast(mm, MM_FILEPAGES);
90909+ page_remove_rmap(page);
90910+ page_cache_release(page);
90911+ }
90912+ }
90913+ pte_unmap_unlock(pte, ptl);
90914+}
90915+
90916+/* PaX: if the vma is mirrored, synchronize the mirror's PTE.
90917+ *
90918+ * The ptl of the lower mapped page is held on entry and is not released
90919+ * on exit or inside, to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc.).
90920+ */
90921+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
90922+{
90923+ struct mm_struct *mm = vma->vm_mm;
90924+ unsigned long address_m;
90925+ spinlock_t *ptl_m;
90926+ struct vm_area_struct *vma_m;
90927+ pmd_t *pmd_m;
90928+ pte_t *pte_m, entry_m;
90929+
90930+ BUG_ON(!page_m || !PageAnon(page_m));
90931+
90932+ vma_m = pax_find_mirror_vma(vma);
90933+ if (!vma_m)
90934+ return;
90935+
90936+ BUG_ON(!PageLocked(page_m));
90937+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
90938+ address_m = address + SEGMEXEC_TASK_SIZE;
90939+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
90940+ pte_m = pte_offset_map(pmd_m, address_m);
90941+ ptl_m = pte_lockptr(mm, pmd_m);
90942+ if (ptl != ptl_m) {
90943+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
90944+ if (!pte_none(*pte_m))
90945+ goto out;
90946+ }
90947+
90948+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
90949+ page_cache_get(page_m);
90950+ page_add_anon_rmap(page_m, vma_m, address_m);
90951+ inc_mm_counter_fast(mm, MM_ANONPAGES);
90952+ set_pte_at(mm, address_m, pte_m, entry_m);
90953+ update_mmu_cache(vma_m, address_m, pte_m);
90954+out:
90955+ if (ptl != ptl_m)
90956+ spin_unlock(ptl_m);
90957+ pte_unmap(pte_m);
90958+ unlock_page(page_m);
90959+}
90960+
90961+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
90962+{
90963+ struct mm_struct *mm = vma->vm_mm;
90964+ unsigned long address_m;
90965+ spinlock_t *ptl_m;
90966+ struct vm_area_struct *vma_m;
90967+ pmd_t *pmd_m;
90968+ pte_t *pte_m, entry_m;
90969+
90970+ BUG_ON(!page_m || PageAnon(page_m));
90971+
90972+ vma_m = pax_find_mirror_vma(vma);
90973+ if (!vma_m)
90974+ return;
90975+
90976+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
90977+ address_m = address + SEGMEXEC_TASK_SIZE;
90978+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
90979+ pte_m = pte_offset_map(pmd_m, address_m);
90980+ ptl_m = pte_lockptr(mm, pmd_m);
90981+ if (ptl != ptl_m) {
90982+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
90983+ if (!pte_none(*pte_m))
90984+ goto out;
90985+ }
90986+
90987+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
90988+ page_cache_get(page_m);
90989+ page_add_file_rmap(page_m);
90990+ inc_mm_counter_fast(mm, MM_FILEPAGES);
90991+ set_pte_at(mm, address_m, pte_m, entry_m);
90992+ update_mmu_cache(vma_m, address_m, pte_m);
90993+out:
90994+ if (ptl != ptl_m)
90995+ spin_unlock(ptl_m);
90996+ pte_unmap(pte_m);
90997+}
90998+
90999+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
91000+{
91001+ struct mm_struct *mm = vma->vm_mm;
91002+ unsigned long address_m;
91003+ spinlock_t *ptl_m;
91004+ struct vm_area_struct *vma_m;
91005+ pmd_t *pmd_m;
91006+ pte_t *pte_m, entry_m;
91007+
91008+ vma_m = pax_find_mirror_vma(vma);
91009+ if (!vma_m)
91010+ return;
91011+
91012+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
91013+ address_m = address + SEGMEXEC_TASK_SIZE;
91014+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
91015+ pte_m = pte_offset_map(pmd_m, address_m);
91016+ ptl_m = pte_lockptr(mm, pmd_m);
91017+ if (ptl != ptl_m) {
91018+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
91019+ if (!pte_none(*pte_m))
91020+ goto out;
91021+ }
91022+
91023+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
91024+ set_pte_at(mm, address_m, pte_m, entry_m);
91025+out:
91026+ if (ptl != ptl_m)
91027+ spin_unlock(ptl_m);
91028+ pte_unmap(pte_m);
91029+}
91030+
91031+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
91032+{
91033+ struct page *page_m;
91034+ pte_t entry;
91035+
91036+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
91037+ goto out;
91038+
91039+ entry = *pte;
91040+ page_m = vm_normal_page(vma, address, entry);
91041+ if (!page_m)
91042+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
91043+ else if (PageAnon(page_m)) {
91044+ if (pax_find_mirror_vma(vma)) {
91045+ pte_unmap_unlock(pte, ptl);
91046+ lock_page(page_m);
91047+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
91048+ if (pte_same(entry, *pte))
91049+ pax_mirror_anon_pte(vma, address, page_m, ptl);
91050+ else
91051+ unlock_page(page_m);
91052+ }
91053+ } else
91054+ pax_mirror_file_pte(vma, address, page_m, ptl);
91055+
91056+out:
91057+ pte_unmap_unlock(pte, ptl);
91058+}
91059+#endif
91060+
91061 /*
91062 * This routine handles present pages, when users try to write
91063 * to a shared page. It is done by copying the page to a new address
91064@@ -2807,6 +3003,12 @@ gotten:
91065 */
91066 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
91067 if (likely(pte_same(*page_table, orig_pte))) {
91068+
91069+#ifdef CONFIG_PAX_SEGMEXEC
91070+ if (pax_find_mirror_vma(vma))
91071+ BUG_ON(!trylock_page(new_page));
91072+#endif
91073+
91074 if (old_page) {
91075 if (!PageAnon(old_page)) {
91076 dec_mm_counter_fast(mm, MM_FILEPAGES);
91077@@ -2858,6 +3060,10 @@ gotten:
91078 page_remove_rmap(old_page);
91079 }
91080
91081+#ifdef CONFIG_PAX_SEGMEXEC
91082+ pax_mirror_anon_pte(vma, address, new_page, ptl);
91083+#endif
91084+
91085 /* Free the old page.. */
91086 new_page = old_page;
91087 ret |= VM_FAULT_WRITE;
91088@@ -3135,6 +3341,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
91089 swap_free(entry);
91090 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
91091 try_to_free_swap(page);
91092+
91093+#ifdef CONFIG_PAX_SEGMEXEC
91094+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
91095+#endif
91096+
91097 unlock_page(page);
91098 if (page != swapcache) {
91099 /*
91100@@ -3158,6 +3369,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
91101
91102 /* No need to invalidate - it was non-present before */
91103 update_mmu_cache(vma, address, page_table);
91104+
91105+#ifdef CONFIG_PAX_SEGMEXEC
91106+ pax_mirror_anon_pte(vma, address, page, ptl);
91107+#endif
91108+
91109 unlock:
91110 pte_unmap_unlock(page_table, ptl);
91111 out:
91112@@ -3177,40 +3393,6 @@ out_release:
91113 }
91114
91115 /*
91116- * This is like a special single-page "expand_{down|up}wards()",
91117- * except we must first make sure that 'address{-|+}PAGE_SIZE'
91118- * doesn't hit another vma.
91119- */
91120-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
91121-{
91122- address &= PAGE_MASK;
91123- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
91124- struct vm_area_struct *prev = vma->vm_prev;
91125-
91126- /*
91127- * Is there a mapping abutting this one below?
91128- *
91129- * That's only ok if it's the same stack mapping
91130- * that has gotten split..
91131- */
91132- if (prev && prev->vm_end == address)
91133- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
91134-
91135- expand_downwards(vma, address - PAGE_SIZE);
91136- }
91137- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
91138- struct vm_area_struct *next = vma->vm_next;
91139-
91140- /* As VM_GROWSDOWN but s/below/above/ */
91141- if (next && next->vm_start == address + PAGE_SIZE)
91142- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
91143-
91144- expand_upwards(vma, address + PAGE_SIZE);
91145- }
91146- return 0;
91147-}
91148-
91149-/*
91150 * We enter with non-exclusive mmap_sem (to exclude vma changes,
91151 * but allow concurrent faults), and pte mapped but not yet locked.
91152 * We return with mmap_sem still held, but pte unmapped and unlocked.
91153@@ -3219,27 +3401,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
91154 unsigned long address, pte_t *page_table, pmd_t *pmd,
91155 unsigned int flags)
91156 {
91157- struct page *page;
91158+ struct page *page = NULL;
91159 spinlock_t *ptl;
91160 pte_t entry;
91161
91162- pte_unmap(page_table);
91163-
91164- /* Check if we need to add a guard page to the stack */
91165- if (check_stack_guard_page(vma, address) < 0)
91166- return VM_FAULT_SIGBUS;
91167-
91168- /* Use the zero-page for reads */
91169 if (!(flags & FAULT_FLAG_WRITE)) {
91170 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
91171 vma->vm_page_prot));
91172- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
91173+ ptl = pte_lockptr(mm, pmd);
91174+ spin_lock(ptl);
91175 if (!pte_none(*page_table))
91176 goto unlock;
91177 goto setpte;
91178 }
91179
91180 /* Allocate our own private page. */
91181+ pte_unmap(page_table);
91182+
91183 if (unlikely(anon_vma_prepare(vma)))
91184 goto oom;
91185 page = alloc_zeroed_user_highpage_movable(vma, address);
91186@@ -3263,6 +3441,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
91187 if (!pte_none(*page_table))
91188 goto release;
91189
91190+#ifdef CONFIG_PAX_SEGMEXEC
91191+ if (pax_find_mirror_vma(vma))
91192+ BUG_ON(!trylock_page(page));
91193+#endif
91194+
91195 inc_mm_counter_fast(mm, MM_ANONPAGES);
91196 page_add_new_anon_rmap(page, vma, address);
91197 setpte:
91198@@ -3270,6 +3453,12 @@ setpte:
91199
91200 /* No need to invalidate - it was non-present before */
91201 update_mmu_cache(vma, address, page_table);
91202+
91203+#ifdef CONFIG_PAX_SEGMEXEC
91204+ if (page)
91205+ pax_mirror_anon_pte(vma, address, page, ptl);
91206+#endif
91207+
91208 unlock:
91209 pte_unmap_unlock(page_table, ptl);
91210 return 0;
91211@@ -3413,6 +3602,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
91212 */
91213 /* Only go through if we didn't race with anybody else... */
91214 if (likely(pte_same(*page_table, orig_pte))) {
91215+
91216+#ifdef CONFIG_PAX_SEGMEXEC
91217+ if (anon && pax_find_mirror_vma(vma))
91218+ BUG_ON(!trylock_page(page));
91219+#endif
91220+
91221 flush_icache_page(vma, page);
91222 entry = mk_pte(page, vma->vm_page_prot);
91223 if (flags & FAULT_FLAG_WRITE)
91224@@ -3434,6 +3629,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
91225
91226 /* no need to invalidate: a not-present page won't be cached */
91227 update_mmu_cache(vma, address, page_table);
91228+
91229+#ifdef CONFIG_PAX_SEGMEXEC
91230+ if (anon)
91231+ pax_mirror_anon_pte(vma, address, page, ptl);
91232+ else
91233+ pax_mirror_file_pte(vma, address, page, ptl);
91234+#endif
91235+
91236 } else {
91237 if (cow_page)
91238 mem_cgroup_uncharge_page(cow_page);
91239@@ -3681,6 +3884,12 @@ static int handle_pte_fault(struct mm_struct *mm,
91240 if (flags & FAULT_FLAG_WRITE)
91241 flush_tlb_fix_spurious_fault(vma, address);
91242 }
91243+
91244+#ifdef CONFIG_PAX_SEGMEXEC
91245+ pax_mirror_pte(vma, address, pte, pmd, ptl);
91246+ return 0;
91247+#endif
91248+
91249 unlock:
91250 pte_unmap_unlock(pte, ptl);
91251 return 0;
91252@@ -3697,9 +3906,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
91253 pmd_t *pmd;
91254 pte_t *pte;
91255
91256+#ifdef CONFIG_PAX_SEGMEXEC
91257+ struct vm_area_struct *vma_m;
91258+#endif
91259+
91260 if (unlikely(is_vm_hugetlb_page(vma)))
91261 return hugetlb_fault(mm, vma, address, flags);
91262
91263+#ifdef CONFIG_PAX_SEGMEXEC
91264+ vma_m = pax_find_mirror_vma(vma);
91265+ if (vma_m) {
91266+ unsigned long address_m;
91267+ pgd_t *pgd_m;
91268+ pud_t *pud_m;
91269+ pmd_t *pmd_m;
91270+
91271+ if (vma->vm_start > vma_m->vm_start) {
91272+ address_m = address;
91273+ address -= SEGMEXEC_TASK_SIZE;
91274+ vma = vma_m;
91275+ } else
91276+ address_m = address + SEGMEXEC_TASK_SIZE;
91277+
91278+ pgd_m = pgd_offset(mm, address_m);
91279+ pud_m = pud_alloc(mm, pgd_m, address_m);
91280+ if (!pud_m)
91281+ return VM_FAULT_OOM;
91282+ pmd_m = pmd_alloc(mm, pud_m, address_m);
91283+ if (!pmd_m)
91284+ return VM_FAULT_OOM;
91285+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
91286+ return VM_FAULT_OOM;
91287+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
91288+ }
91289+#endif
91290+
91291 retry:
91292 pgd = pgd_offset(mm, address);
91293 pud = pud_alloc(mm, pgd, address);
91294@@ -3838,6 +4079,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
91295 spin_unlock(&mm->page_table_lock);
91296 return 0;
91297 }
91298+
91299+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
91300+{
91301+ pud_t *new = pud_alloc_one(mm, address);
91302+ if (!new)
91303+ return -ENOMEM;
91304+
91305+ smp_wmb(); /* See comment in __pte_alloc */
91306+
91307+ spin_lock(&mm->page_table_lock);
91308+ if (pgd_present(*pgd)) /* Another has populated it */
91309+ pud_free(mm, new);
91310+ else
91311+ pgd_populate_kernel(mm, pgd, new);
91312+ spin_unlock(&mm->page_table_lock);
91313+ return 0;
91314+}
91315 #endif /* __PAGETABLE_PUD_FOLDED */
91316
91317 #ifndef __PAGETABLE_PMD_FOLDED
91318@@ -3868,6 +4126,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
91319 spin_unlock(&mm->page_table_lock);
91320 return 0;
91321 }
91322+
91323+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
91324+{
91325+ pmd_t *new = pmd_alloc_one(mm, address);
91326+ if (!new)
91327+ return -ENOMEM;
91328+
91329+ smp_wmb(); /* See comment in __pte_alloc */
91330+
91331+ spin_lock(&mm->page_table_lock);
91332+#ifndef __ARCH_HAS_4LEVEL_HACK
91333+ if (pud_present(*pud)) /* Another has populated it */
91334+ pmd_free(mm, new);
91335+ else
91336+ pud_populate_kernel(mm, pud, new);
91337+#else
91338+ if (pgd_present(*pud)) /* Another has populated it */
91339+ pmd_free(mm, new);
91340+ else
91341+ pgd_populate_kernel(mm, pud, new);
91342+#endif /* __ARCH_HAS_4LEVEL_HACK */
91343+ spin_unlock(&mm->page_table_lock);
91344+ return 0;
91345+}
91346 #endif /* __PAGETABLE_PMD_FOLDED */
91347
91348 #if !defined(__HAVE_ARCH_GATE_AREA)
91349@@ -3881,7 +4163,7 @@ static int __init gate_vma_init(void)
91350 gate_vma.vm_start = FIXADDR_USER_START;
91351 gate_vma.vm_end = FIXADDR_USER_END;
91352 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
91353- gate_vma.vm_page_prot = __P101;
91354+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
91355
91356 return 0;
91357 }
91358@@ -4015,8 +4297,8 @@ out:
91359 return ret;
91360 }
91361
91362-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
91363- void *buf, int len, int write)
91364+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
91365+ void *buf, size_t len, int write)
91366 {
91367 resource_size_t phys_addr;
91368 unsigned long prot = 0;
91369@@ -4042,8 +4324,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
91370 * Access another process' address space as given in mm. If non-NULL, use the
91371 * given task for page fault accounting.
91372 */
91373-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
91374- unsigned long addr, void *buf, int len, int write)
91375+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
91376+ unsigned long addr, void *buf, size_t len, int write)
91377 {
91378 struct vm_area_struct *vma;
91379 void *old_buf = buf;
91380@@ -4051,7 +4333,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
91381 down_read(&mm->mmap_sem);
91382 /* ignore errors, just check how much was successfully transferred */
91383 while (len) {
91384- int bytes, ret, offset;
91385+ ssize_t bytes, ret, offset;
91386 void *maddr;
91387 struct page *page = NULL;
91388
91389@@ -4110,8 +4392,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
91390 *
91391 * The caller must hold a reference on @mm.
91392 */
91393-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
91394- void *buf, int len, int write)
91395+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
91396+ void *buf, size_t len, int write)
91397 {
91398 return __access_remote_vm(NULL, mm, addr, buf, len, write);
91399 }
91400@@ -4121,11 +4403,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
91401 * Source/target buffer must be kernel space,
91402 * Do not walk the page table directly, use get_user_pages
91403 */
91404-int access_process_vm(struct task_struct *tsk, unsigned long addr,
91405- void *buf, int len, int write)
91406+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
91407+ void *buf, size_t len, int write)
91408 {
91409 struct mm_struct *mm;
91410- int ret;
91411+ ssize_t ret;
91412
91413 mm = get_task_mm(tsk);
91414 if (!mm)
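/*
 * The address arithmetic that the SEGMEXEC hunks above keep repeating:
 * a vma and its mirror sit exactly SEGMEXEC_TASK_SIZE apart, a fault in
 * the upper copy is normalized down to the lower vma, and the mirror
 * address is derived from the normalized one. The constant is an
 * illustrative i386-style value, not the kernel's definition.
 */
#define SEGMEXEC_TASK_SIZE_SK 0x60000000UL

static void normalize_fault(unsigned long *address, unsigned long *address_m,
			    int fault_in_upper_half)
{
	if (fault_in_upper_half) {
		*address_m = *address;
		*address -= SEGMEXEC_TASK_SIZE_SK;	/* handle the lower vma */
	} else {
		*address_m = *address + SEGMEXEC_TASK_SIZE_SK;
	}
}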
91415diff --git a/mm/mempolicy.c b/mm/mempolicy.c
91416index e1bd997..055f496 100644
91417--- a/mm/mempolicy.c
91418+++ b/mm/mempolicy.c
91419@@ -747,6 +747,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
91420 unsigned long vmstart;
91421 unsigned long vmend;
91422
91423+#ifdef CONFIG_PAX_SEGMEXEC
91424+ struct vm_area_struct *vma_m;
91425+#endif
91426+
91427 vma = find_vma(mm, start);
91428 if (!vma || vma->vm_start > start)
91429 return -EFAULT;
91430@@ -790,6 +794,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
91431 err = vma_replace_policy(vma, new_pol);
91432 if (err)
91433 goto out;
91434+
91435+#ifdef CONFIG_PAX_SEGMEXEC
91436+ vma_m = pax_find_mirror_vma(vma);
91437+ if (vma_m) {
91438+ err = vma_replace_policy(vma_m, new_pol);
91439+ if (err)
91440+ goto out;
91441+ }
91442+#endif
91443+
91444 }
91445
91446 out:
91447@@ -1255,6 +1269,17 @@ static long do_mbind(unsigned long start, unsigned long len,
91448
91449 if (end < start)
91450 return -EINVAL;
91451+
91452+#ifdef CONFIG_PAX_SEGMEXEC
91453+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
91454+ if (end > SEGMEXEC_TASK_SIZE)
91455+ return -EINVAL;
91456+ } else
91457+#endif
91458+
91459+ if (end > TASK_SIZE)
91460+ return -EINVAL;
91461+
91462 if (end == start)
91463 return 0;
91464
91465@@ -1483,8 +1508,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
91466 */
91467 tcred = __task_cred(task);
91468 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
91469- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
91470- !capable(CAP_SYS_NICE)) {
91471+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
91472 rcu_read_unlock();
91473 err = -EPERM;
91474 goto out_put;
91475@@ -1515,6 +1539,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
91476 goto out;
91477 }
91478
91479+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
91480+ if (mm != current->mm &&
91481+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
91482+ mmput(mm);
91483+ err = -EPERM;
91484+ goto out;
91485+ }
91486+#endif
91487+
91488 err = do_migrate_pages(mm, old, new,
91489 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
91490
91491diff --git a/mm/migrate.c b/mm/migrate.c
91492index 9194375..75c81e2 100644
91493--- a/mm/migrate.c
91494+++ b/mm/migrate.c
91495@@ -1464,8 +1464,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
91496 */
91497 tcred = __task_cred(task);
91498 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
91499- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
91500- !capable(CAP_SYS_NICE)) {
91501+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
91502 rcu_read_unlock();
91503 err = -EPERM;
91504 goto out;
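/*
 * The tightened credential check above (mirrored in mempolicy.c), in
 * isolation: the caller may act on the target if its euid matches the
 * target's saved or real uid, its real uid matches the target's saved
 * uid, or it holds CAP_SYS_NICE; a bare real-uid match no longer
 * suffices. The struct and plain comparisons stand in for the kernel's
 * cred and uid_eq() machinery.
 */
#include <stdbool.h>

struct cred_sketch {
	unsigned int uid, euid, suid;
};

static bool may_move_pages(const struct cred_sketch *c,
			   const struct cred_sketch *t, bool cap_sys_nice)
{
	return c->euid == t->suid || c->euid == t->uid ||
	       c->uid == t->suid || cap_sys_nice;
}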
91505diff --git a/mm/mlock.c b/mm/mlock.c
91506index 192e6ee..b044449 100644
91507--- a/mm/mlock.c
91508+++ b/mm/mlock.c
91509@@ -14,6 +14,7 @@
91510 #include <linux/pagevec.h>
91511 #include <linux/mempolicy.h>
91512 #include <linux/syscalls.h>
91513+#include <linux/security.h>
91514 #include <linux/sched.h>
91515 #include <linux/export.h>
91516 #include <linux/rmap.h>
91517@@ -588,7 +589,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
91518 {
91519 unsigned long nstart, end, tmp;
91520 struct vm_area_struct * vma, * prev;
91521- int error;
91522+ int error = 0;
91523
91524 VM_BUG_ON(start & ~PAGE_MASK);
91525 VM_BUG_ON(len != PAGE_ALIGN(len));
91526@@ -597,6 +598,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
91527 return -EINVAL;
91528 if (end == start)
91529 return 0;
91530+ if (end > TASK_SIZE)
91531+ return -EINVAL;
91532+
91533 vma = find_vma(current->mm, start);
91534 if (!vma || vma->vm_start > start)
91535 return -ENOMEM;
91536@@ -608,6 +612,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
91537 for (nstart = start ; ; ) {
91538 vm_flags_t newflags;
91539
91540+#ifdef CONFIG_PAX_SEGMEXEC
91541+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
91542+ break;
91543+#endif
91544+
91545 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
91546
91547 newflags = vma->vm_flags & ~VM_LOCKED;
91548@@ -720,6 +729,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
91549 lock_limit >>= PAGE_SHIFT;
91550
91551 /* check against resource limits */
91552+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
91553 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
91554 error = do_mlock(start, len, 1);
91555 up_write(&current->mm->mmap_sem);
91556@@ -754,6 +764,11 @@ static int do_mlockall(int flags)
91557 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
91558 vm_flags_t newflags;
91559
91560+#ifdef CONFIG_PAX_SEGMEXEC
91561+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
91562+ break;
91563+#endif
91564+
91565 newflags = vma->vm_flags & ~VM_LOCKED;
91566 if (flags & MCL_CURRENT)
91567 newflags |= VM_LOCKED;
91568@@ -787,6 +802,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
91569 lock_limit >>= PAGE_SHIFT;
91570
91571 ret = -ENOMEM;
91572+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
91573 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
91574 capable(CAP_IPC_LOCK))
91575 ret = do_mlockall(flags);
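/*
 * The RLIMIT_MEMLOCK accounting surrounding the gr_learn_resource()
 * calls above: both the limit and the request are compared in pages,
 * so the byte-denominated rlimit is shifted down by PAGE_SHIFT first.
 * A standalone restatement, with an assumed 4K page size:
 */
#include <stdbool.h>

#define PAGE_SHIFT_SK 12

static bool within_memlock_limit(unsigned long locked_bytes,
				 unsigned long limit_bytes, bool cap_ipc_lock)
{
	unsigned long locked_pages = locked_bytes >> PAGE_SHIFT_SK;
	unsigned long limit_pages = limit_bytes >> PAGE_SHIFT_SK;

	return locked_pages <= limit_pages || cap_ipc_lock;
}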
91576diff --git a/mm/mmap.c b/mm/mmap.c
91577index 834b2d7..650d1b9 100644
91578--- a/mm/mmap.c
91579+++ b/mm/mmap.c
91580@@ -36,6 +36,7 @@
91581 #include <linux/sched/sysctl.h>
91582 #include <linux/notifier.h>
91583 #include <linux/memory.h>
91584+#include <linux/random.h>
91585
91586 #include <asm/uaccess.h>
91587 #include <asm/cacheflush.h>
91588@@ -52,6 +53,16 @@
91589 #define arch_rebalance_pgtables(addr, len) (addr)
91590 #endif
91591
91592+static inline void verify_mm_writelocked(struct mm_struct *mm)
91593+{
91594+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
91595+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
91596+ up_read(&mm->mmap_sem);
91597+ BUG();
91598+ }
91599+#endif
91600+}
91601+
91602 static void unmap_region(struct mm_struct *mm,
91603 struct vm_area_struct *vma, struct vm_area_struct *prev,
91604 unsigned long start, unsigned long end);
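/*
 * verify_mm_writelocked() above asserts write ownership by probing
 * with down_read_trylock(): the probe can only succeed if no writer
 * holds mmap_sem. The same idea sketched with a POSIX rwlock, offered
 * as an analogy rather than the kernel primitive:
 */
#include <assert.h>
#include <pthread.h>

static void verify_writelocked(pthread_rwlock_t *l)
{
	if (pthread_rwlock_tryrdlock(l) == 0) {
		/* read probe succeeded, so nobody holds the write side */
		pthread_rwlock_unlock(l);
		assert(!"lock not held for writing");
	}
}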
91605@@ -71,16 +82,25 @@ static void unmap_region(struct mm_struct *mm,
91606 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
91607 *
91608 */
91609-pgprot_t protection_map[16] = {
91610+pgprot_t protection_map[16] __read_only = {
91611 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
91612 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
91613 };
91614
91615-pgprot_t vm_get_page_prot(unsigned long vm_flags)
91616+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
91617 {
91618- return __pgprot(pgprot_val(protection_map[vm_flags &
91619+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
91620 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
91621 pgprot_val(arch_vm_get_page_prot(vm_flags)));
91622+
91623+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
91624+ if (!(__supported_pte_mask & _PAGE_NX) &&
91625+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
91626+ (vm_flags & (VM_READ | VM_WRITE)))
91627+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
91628+#endif
91629+
91630+ return prot;
91631 }
91632 EXPORT_SYMBOL(vm_get_page_prot);
91633
91634@@ -89,6 +109,7 @@ int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
91635 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
91636 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
91637 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
91638+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
91639 /*
91640 * Make sure vm_committed_as in one cacheline and not cacheline shared with
91641 * other variables. It can be updated by several CPUs frequently.
91642@@ -245,6 +266,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
91643 struct vm_area_struct *next = vma->vm_next;
91644
91645 might_sleep();
91646+ BUG_ON(vma->vm_mirror);
91647 if (vma->vm_ops && vma->vm_ops->close)
91648 vma->vm_ops->close(vma);
91649 if (vma->vm_file)
91650@@ -289,6 +311,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
91651 * not page aligned -Ram Gupta
91652 */
91653 rlim = rlimit(RLIMIT_DATA);
91654+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
91655 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
91656 (mm->end_data - mm->start_data) > rlim)
91657 goto out;
91658@@ -893,7 +916,15 @@ again: remove_next = 1 + (end > next->vm_end);
91659 static inline int is_mergeable_vma(struct vm_area_struct *vma,
91660 struct file *file, unsigned long vm_flags)
91661 {
91662- if (vma->vm_flags ^ vm_flags)
91663+	/*
91664+	 * VM_SOFTDIRTY should not prevent VMA merging if we match the
91665+	 * flags except for the dirty bit -- the caller should mark the
91666+	 * merged VMA as dirty. If the dirty bit were not excluded from
91667+	 * the comparison, we would increase pressure on the memory
91668+	 * system, forcing the kernel to generate new VMAs when old ones
91669+	 * could be extended instead.
91670+	 */
91671+ if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
91672 return 0;
91673 if (vma->vm_file != file)
91674 return 0;
91675@@ -931,6 +962,12 @@ static int
91676 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
91677 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
91678 {
91679+
91680+#ifdef CONFIG_PAX_SEGMEXEC
91681+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
91682+ return 0;
91683+#endif
91684+
91685 if (is_mergeable_vma(vma, file, vm_flags) &&
91686 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
91687 if (vma->vm_pgoff == vm_pgoff)
91688@@ -950,6 +987,12 @@ static int
91689 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
91690 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
91691 {
91692+
91693+#ifdef CONFIG_PAX_SEGMEXEC
91694+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
91695+ return 0;
91696+#endif
91697+
91698 if (is_mergeable_vma(vma, file, vm_flags) &&
91699 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
91700 pgoff_t vm_pglen;
91701@@ -992,13 +1035,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
91702 struct vm_area_struct *vma_merge(struct mm_struct *mm,
91703 struct vm_area_struct *prev, unsigned long addr,
91704 unsigned long end, unsigned long vm_flags,
91705- struct anon_vma *anon_vma, struct file *file,
91706+ struct anon_vma *anon_vma, struct file *file,
91707 pgoff_t pgoff, struct mempolicy *policy)
91708 {
91709 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
91710 struct vm_area_struct *area, *next;
91711 int err;
91712
91713+#ifdef CONFIG_PAX_SEGMEXEC
91714+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
91715+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
91716+
91717+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
91718+#endif
91719+
91720 /*
91721 * We later require that vma->vm_flags == vm_flags,
91722 * so this tests vma->vm_flags & VM_SPECIAL, too.
91723@@ -1014,6 +1064,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
91724 if (next && next->vm_end == end) /* cases 6, 7, 8 */
91725 next = next->vm_next;
91726
91727+#ifdef CONFIG_PAX_SEGMEXEC
91728+ if (prev)
91729+ prev_m = pax_find_mirror_vma(prev);
91730+ if (area)
91731+ area_m = pax_find_mirror_vma(area);
91732+ if (next)
91733+ next_m = pax_find_mirror_vma(next);
91734+#endif
91735+
91736 /*
91737 * Can it merge with the predecessor?
91738 */
91739@@ -1033,9 +1092,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
91740 /* cases 1, 6 */
91741 err = vma_adjust(prev, prev->vm_start,
91742 next->vm_end, prev->vm_pgoff, NULL);
91743- } else /* cases 2, 5, 7 */
91744+
91745+#ifdef CONFIG_PAX_SEGMEXEC
91746+ if (!err && prev_m)
91747+ err = vma_adjust(prev_m, prev_m->vm_start,
91748+ next_m->vm_end, prev_m->vm_pgoff, NULL);
91749+#endif
91750+
91751+ } else { /* cases 2, 5, 7 */
91752 err = vma_adjust(prev, prev->vm_start,
91753 end, prev->vm_pgoff, NULL);
91754+
91755+#ifdef CONFIG_PAX_SEGMEXEC
91756+ if (!err && prev_m)
91757+ err = vma_adjust(prev_m, prev_m->vm_start,
91758+ end_m, prev_m->vm_pgoff, NULL);
91759+#endif
91760+
91761+ }
91762 if (err)
91763 return NULL;
91764 khugepaged_enter_vma_merge(prev);
91765@@ -1049,12 +1123,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
91766 mpol_equal(policy, vma_policy(next)) &&
91767 can_vma_merge_before(next, vm_flags,
91768 anon_vma, file, pgoff+pglen)) {
91769- if (prev && addr < prev->vm_end) /* case 4 */
91770+ if (prev && addr < prev->vm_end) { /* case 4 */
91771 err = vma_adjust(prev, prev->vm_start,
91772 addr, prev->vm_pgoff, NULL);
91773- else /* cases 3, 8 */
91774+
91775+#ifdef CONFIG_PAX_SEGMEXEC
91776+ if (!err && prev_m)
91777+ err = vma_adjust(prev_m, prev_m->vm_start,
91778+ addr_m, prev_m->vm_pgoff, NULL);
91779+#endif
91780+
91781+ } else { /* cases 3, 8 */
91782 err = vma_adjust(area, addr, next->vm_end,
91783 next->vm_pgoff - pglen, NULL);
91784+
91785+#ifdef CONFIG_PAX_SEGMEXEC
91786+ if (!err && area_m)
91787+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
91788+ next_m->vm_pgoff - pglen, NULL);
91789+#endif
91790+
91791+ }
91792 if (err)
91793 return NULL;
91794 khugepaged_enter_vma_merge(area);
91795@@ -1082,7 +1171,7 @@ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *
91796 return a->vm_end == b->vm_start &&
91797 mpol_equal(vma_policy(a), vma_policy(b)) &&
91798 a->vm_file == b->vm_file &&
91799- !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC)) &&
91800+ !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC|VM_SOFTDIRTY)) &&
91801 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
91802 }
91803
91804@@ -1163,8 +1252,10 @@ none:
91805 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
91806 struct file *file, long pages)
91807 {
91808- const unsigned long stack_flags
91809- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
91810+
91811+#ifdef CONFIG_PAX_RANDMMAP
91812+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
91813+#endif
91814
91815 mm->total_vm += pages;
91816
91817@@ -1172,7 +1263,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
91818 mm->shared_vm += pages;
91819 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
91820 mm->exec_vm += pages;
91821- } else if (flags & stack_flags)
91822+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
91823 mm->stack_vm += pages;
91824 }
91825 #endif /* CONFIG_PROC_FS */
91826@@ -1210,7 +1301,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
91827 * (the exception is when the underlying filesystem is noexec
91828 * mounted, in which case we don't add PROT_EXEC.)
91829 */
91830- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
91831+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
91832 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
91833 prot |= PROT_EXEC;
91834
91835@@ -1236,7 +1327,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
91836 /* Obtain the address to map to. we verify (or select) it and ensure
91837 * that it represents a valid section of the address space.
91838 */
91839- addr = get_unmapped_area(file, addr, len, pgoff, flags);
91840+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
91841 if (addr & ~PAGE_MASK)
91842 return addr;
91843
91844@@ -1247,6 +1338,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
91845 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
91846 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
91847
91848+#ifdef CONFIG_PAX_MPROTECT
91849+ if (mm->pax_flags & MF_PAX_MPROTECT) {
91850+
91851+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
91852+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
91853+ mm->binfmt->handle_mmap)
91854+ mm->binfmt->handle_mmap(file);
91855+#endif
91856+
91857+#ifndef CONFIG_PAX_MPROTECT_COMPAT
91858+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
91859+ gr_log_rwxmmap(file);
91860+
91861+#ifdef CONFIG_PAX_EMUPLT
91862+ vm_flags &= ~VM_EXEC;
91863+#else
91864+ return -EPERM;
91865+#endif
91866+
91867+ }
91868+
91869+ if (!(vm_flags & VM_EXEC))
91870+ vm_flags &= ~VM_MAYEXEC;
91871+#else
91872+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
91873+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
91874+#endif
91875+ else
91876+ vm_flags &= ~VM_MAYWRITE;
91877+ }
91878+#endif
91879+
91880+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
91881+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
91882+ vm_flags &= ~VM_PAGEEXEC;
91883+#endif
91884+
91885 if (flags & MAP_LOCKED)
91886 if (!can_do_mlock())
91887 return -EPERM;
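
The MPROTECT block above enforces W^X at mmap() time: a request that is both writable and executable is either refused or, with EMUPLT, demoted to non-executable, and whichever of write/exec survives loses the opposite VM_MAY* bit so a later mprotect() cannot re-add it. A minimal userspace sketch of that flag policy (the VM_* values below are stand-ins, not the kernel's):

    #include <stdio.h>

    #define VM_WRITE    0x2u
    #define VM_EXEC     0x4u
    #define VM_MAYWRITE 0x20u
    #define VM_MAYEXEC  0x40u

    /* Returns -1 for a forbidden W+X request, otherwise the demoted flags. */
    static int mprotect_policy(unsigned int vm_flags)
    {
        if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
            return -1;                   /* never writable and executable */
        if (!(vm_flags & VM_EXEC))
            vm_flags &= ~VM_MAYEXEC;     /* can never become executable */
        else
            vm_flags &= ~VM_MAYWRITE;    /* can never become writable */
        return (int)vm_flags;
    }

    int main(void)
    {
        printf("%d\n", mprotect_policy(VM_WRITE | VM_EXEC));     /* -1 */
        printf("%#x\n", mprotect_policy(VM_WRITE | VM_MAYEXEC)); /* 0x2 */
        return 0;
    }
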
91888@@ -1258,6 +1386,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
91889 locked += mm->locked_vm;
91890 lock_limit = rlimit(RLIMIT_MEMLOCK);
91891 lock_limit >>= PAGE_SHIFT;
91892+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
91893 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
91894 return -EAGAIN;
91895 }
91896@@ -1342,6 +1471,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
91897 vm_flags |= VM_NORESERVE;
91898 }
91899
91900+ if (!gr_acl_handle_mmap(file, prot))
91901+ return -EACCES;
91902+
91903 addr = mmap_region(file, addr, len, vm_flags, pgoff);
91904 if (!IS_ERR_VALUE(addr) &&
91905 ((vm_flags & VM_LOCKED) ||
91906@@ -1435,7 +1567,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
91907 vm_flags_t vm_flags = vma->vm_flags;
91908
91909 /* If it was private or non-writable, the write bit is already clear */
91910- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
91911+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
91912 return 0;
91913
91914 /* The backer wishes to know when pages are first written to? */
91915@@ -1481,7 +1613,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
91916 struct rb_node **rb_link, *rb_parent;
91917 unsigned long charged = 0;
91918
91919+#ifdef CONFIG_PAX_SEGMEXEC
91920+ struct vm_area_struct *vma_m = NULL;
91921+#endif
91922+
91923+ /*
91924+ * mm->mmap_sem is required to protect against another thread
91925+ * changing the mappings in case we sleep.
91926+ */
91927+ verify_mm_writelocked(mm);
91928+
91929 /* Check against address space limit. */
91930+
91931+#ifdef CONFIG_PAX_RANDMMAP
91932+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
91933+#endif
91934+
91935 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
91936 unsigned long nr_pages;
91937
91938@@ -1500,11 +1647,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
91939
91940 /* Clear old maps */
91941 error = -ENOMEM;
91942-munmap_back:
91943 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
91944 if (do_munmap(mm, addr, len))
91945 return -ENOMEM;
91946- goto munmap_back;
91947+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
91948 }
91949
91950 /*
91951@@ -1535,6 +1681,16 @@ munmap_back:
91952 goto unacct_error;
91953 }
91954
91955+#ifdef CONFIG_PAX_SEGMEXEC
91956+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
91957+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
91958+ if (!vma_m) {
91959+ error = -ENOMEM;
91960+ goto free_vma;
91961+ }
91962+ }
91963+#endif
91964+
91965 vma->vm_mm = mm;
91966 vma->vm_start = addr;
91967 vma->vm_end = addr + len;
91968@@ -1554,6 +1710,13 @@ munmap_back:
91969 if (error)
91970 goto unmap_and_free_vma;
91971
91972+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
91973+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
91974+ vma->vm_flags |= VM_PAGEEXEC;
91975+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
91976+ }
91977+#endif
91978+
91979 /* Can addr have changed??
91980 *
91981 * Answer: Yes, several device drivers can do it in their
91982@@ -1587,6 +1750,12 @@ munmap_back:
91983 }
91984
91985 vma_link(mm, vma, prev, rb_link, rb_parent);
91986+
91987+#ifdef CONFIG_PAX_SEGMEXEC
91988+ if (vma_m)
91989+ BUG_ON(pax_mirror_vma(vma_m, vma));
91990+#endif
91991+
91992 /* Once vma denies write, undo our temporary denial count */
91993 if (vm_flags & VM_DENYWRITE)
91994 allow_write_access(file);
91995@@ -1595,6 +1764,7 @@ out:
91996 perf_event_mmap(vma);
91997
91998 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
91999+ track_exec_limit(mm, addr, addr + len, vm_flags);
92000 if (vm_flags & VM_LOCKED) {
92001 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
92002 vma == get_gate_vma(current->mm)))
92003@@ -1627,6 +1797,12 @@ unmap_and_free_vma:
92004 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
92005 charged = 0;
92006 free_vma:
92007+
92008+#ifdef CONFIG_PAX_SEGMEXEC
92009+ if (vma_m)
92010+ kmem_cache_free(vm_area_cachep, vma_m);
92011+#endif
92012+
92013 kmem_cache_free(vm_area_cachep, vma);
92014 unacct_error:
92015 if (charged)
92016@@ -1634,7 +1810,63 @@ unacct_error:
92017 return error;
92018 }
92019
92020-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
92021+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
92022+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
92023+{
92024+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
92025+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
92026+
92027+ return 0;
92028+}
92029+#endif
92030+
92031+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
92032+{
92033+ if (!vma) {
92034+#ifdef CONFIG_STACK_GROWSUP
92035+ if (addr > sysctl_heap_stack_gap)
92036+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
92037+ else
92038+ vma = find_vma(current->mm, 0);
92039+ if (vma && (vma->vm_flags & VM_GROWSUP))
92040+ return false;
92041+#endif
92042+ return true;
92043+ }
92044+
92045+ if (addr + len > vma->vm_start)
92046+ return false;
92047+
92048+ if (vma->vm_flags & VM_GROWSDOWN)
92049+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
92050+#ifdef CONFIG_STACK_GROWSUP
92051+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
92052+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
92053+#endif
92054+ else if (offset)
92055+ return offset <= vma->vm_start - addr - len;
92056+
92057+ return true;
92058+}
92059+
92060+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
92061+{
92062+ if (vma->vm_start < len)
92063+ return -ENOMEM;
92064+
92065+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
92066+ if (offset <= vma->vm_start - len)
92067+ return vma->vm_start - len - offset;
92068+ else
92069+ return -ENOMEM;
92070+ }
92071+
92072+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
92073+ return vma->vm_start - len - sysctl_heap_stack_gap;
92074+ return -ENOMEM;
92075+}
92076+
92077+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
92078 {
92079 /*
92080 * We implement the search by looking for an rbtree node that
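
gr_rand_threadstack_offset above draws one random byte and scales it by the page size, so a MAP_STACK allocation under RANDMMAP is displaced by 1 to 256 pages (4 KiB to 1 MiB with 4 KiB pages). check_heap_stack_gap then rejects any candidate range that would land within sysctl_heap_stack_gap bytes of a stack VMA. A userspace sketch of the core gap test, reduced to the common case of one downward-growing stack above the candidate (the struct and the sysctl value are stand-ins):

    #include <stdbool.h>
    #include <stdio.h>

    struct vma { unsigned long start, end; bool grows_down; };

    static unsigned long heap_stack_gap = 64UL * 4096;  /* stand-in sysctl */

    /* May [addr, addr+len) be placed below the next VMA? */
    static bool gap_ok(const struct vma *next, unsigned long addr, unsigned long len)
    {
        if (!next)
            return true;                    /* nothing above the candidate */
        if (addr + len > next->start)
            return false;                   /* overlaps the next mapping */
        if (next->grows_down)               /* keep a guard gap below stacks */
            return heap_stack_gap <= next->start - addr - len;
        return true;
    }

    int main(void)
    {
        struct vma stack = { 0x7f0000000000UL, 0x7f0000100000UL, true };
        printf("%d\n", gap_ok(&stack, stack.start - 4096, 4096));      /* 0 */
        printf("%d\n", gap_ok(&stack, stack.start - 65 * 4096, 4096)); /* 1 */
        return 0;
    }
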
92081@@ -1682,11 +1914,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
92082 }
92083 }
92084
92085- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
92086+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
92087 check_current:
92088 /* Check if current node has a suitable gap */
92089 if (gap_start > high_limit)
92090 return -ENOMEM;
92091+
92092+ if (gap_end - gap_start > info->threadstack_offset)
92093+ gap_start += info->threadstack_offset;
92094+ else
92095+ gap_start = gap_end;
92096+
92097+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
92098+ if (gap_end - gap_start > sysctl_heap_stack_gap)
92099+ gap_start += sysctl_heap_stack_gap;
92100+ else
92101+ gap_start = gap_end;
92102+ }
92103+ if (vma->vm_flags & VM_GROWSDOWN) {
92104+ if (gap_end - gap_start > sysctl_heap_stack_gap)
92105+ gap_end -= sysctl_heap_stack_gap;
92106+ else
92107+ gap_end = gap_start;
92108+ }
92109 if (gap_end >= low_limit && gap_end - gap_start >= length)
92110 goto found;
92111
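
Both gap-search loops shrink each candidate window before the size test: the thread-stack offset is consumed from one end, sysctl_heap_stack_gap is reserved above a preceding VM_GROWSUP vma and below a VM_GROWSDOWN vma, and a window smaller than a reservation saturates to empty rather than underflowing. A condensed sketch (folding the two bottom-side reservations into one, which gives the same result because each step saturates):

    #include <stdio.h>

    /* Shrink a candidate gap, saturating to empty instead of underflowing. */
    static void shrink_gap(unsigned long *gap_start, unsigned long *gap_end,
                           unsigned long reserve_low, unsigned long reserve_high)
    {
        if (*gap_end - *gap_start > reserve_low)
            *gap_start += reserve_low;      /* offset + guard above a prev stack */
        else
            *gap_start = *gap_end;          /* window too small: collapses */

        if (*gap_end - *gap_start > reserve_high)
            *gap_end -= reserve_high;       /* guard below a following stack */
        else
            *gap_end = *gap_start;
    }

    int main(void)
    {
        unsigned long s = 0x10000, e = 0x20000;
        shrink_gap(&s, &e, 0x3000, 0x4000);
        printf("[%#lx, %#lx)\n", s, e);     /* [0x13000, 0x1c000) */
        return 0;
    }
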
92112@@ -1736,7 +1986,7 @@ found:
92113 return gap_start;
92114 }
92115
92116-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
92117+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
92118 {
92119 struct mm_struct *mm = current->mm;
92120 struct vm_area_struct *vma;
92121@@ -1790,6 +2040,24 @@ check_current:
92122 gap_end = vma->vm_start;
92123 if (gap_end < low_limit)
92124 return -ENOMEM;
92125+
92126+ if (gap_end - gap_start > info->threadstack_offset)
92127+ gap_end -= info->threadstack_offset;
92128+ else
92129+ gap_end = gap_start;
92130+
92131+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
92132+ if (gap_end - gap_start > sysctl_heap_stack_gap)
92133+ gap_start += sysctl_heap_stack_gap;
92134+ else
92135+ gap_start = gap_end;
92136+ }
92137+ if (vma->vm_flags & VM_GROWSDOWN) {
92138+ if (gap_end - gap_start > sysctl_heap_stack_gap)
92139+ gap_end -= sysctl_heap_stack_gap;
92140+ else
92141+ gap_end = gap_start;
92142+ }
92143 if (gap_start <= high_limit && gap_end - gap_start >= length)
92144 goto found;
92145
92146@@ -1853,6 +2121,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
92147 struct mm_struct *mm = current->mm;
92148 struct vm_area_struct *vma;
92149 struct vm_unmapped_area_info info;
92150+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
92151
92152 if (len > TASK_SIZE - mmap_min_addr)
92153 return -ENOMEM;
92154@@ -1860,11 +2129,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
92155 if (flags & MAP_FIXED)
92156 return addr;
92157
92158+#ifdef CONFIG_PAX_RANDMMAP
92159+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
92160+#endif
92161+
92162 if (addr) {
92163 addr = PAGE_ALIGN(addr);
92164 vma = find_vma(mm, addr);
92165 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
92166- (!vma || addr + len <= vma->vm_start))
92167+ check_heap_stack_gap(vma, addr, len, offset))
92168 return addr;
92169 }
92170
92171@@ -1873,6 +2146,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
92172 info.low_limit = mm->mmap_base;
92173 info.high_limit = TASK_SIZE;
92174 info.align_mask = 0;
92175+ info.threadstack_offset = offset;
92176 return vm_unmapped_area(&info);
92177 }
92178 #endif
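
A pattern worth noting, used here and again in the topdown variant: the #ifdef block contains only a brace-less if, so when CONFIG_PAX_RANDMMAP is set the pre-existing `if (addr) { ... }` statement becomes its body and the caller's address hint is honored only when randomization is off; with the option unset the preprocessor removes the guard and the code is unchanged. Schematically (config name and values illustrative):

    #include <stdio.h>

    #define CONFIG_RANDMMAP 1          /* toggle to see both expansions */

    static unsigned long pick(unsigned long hint, int randomize)
    {
    #ifdef CONFIG_RANDMMAP
        if (!randomize)                /* hint honored only without randomization */
    #endif
        if (hint)
            return hint;               /* the pre-existing block becomes the body */
        return 0;                      /* fall through to the allocator's search */
    }

    int main(void)
    {
        printf("%#lx %#lx\n", pick(0x1000, 1), pick(0x1000, 0)); /* 0 0x1000 */
        return 0;
    }
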
92179@@ -1891,6 +2165,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
92180 struct mm_struct *mm = current->mm;
92181 unsigned long addr = addr0;
92182 struct vm_unmapped_area_info info;
92183+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
92184
92185 /* requested length too big for entire address space */
92186 if (len > TASK_SIZE - mmap_min_addr)
92187@@ -1899,12 +2174,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
92188 if (flags & MAP_FIXED)
92189 return addr;
92190
92191+#ifdef CONFIG_PAX_RANDMMAP
92192+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
92193+#endif
92194+
92195 /* requesting a specific address */
92196 if (addr) {
92197 addr = PAGE_ALIGN(addr);
92198 vma = find_vma(mm, addr);
92199 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
92200- (!vma || addr + len <= vma->vm_start))
92201+ check_heap_stack_gap(vma, addr, len, offset))
92202 return addr;
92203 }
92204
92205@@ -1913,6 +2192,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
92206 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
92207 info.high_limit = mm->mmap_base;
92208 info.align_mask = 0;
92209+ info.threadstack_offset = offset;
92210 addr = vm_unmapped_area(&info);
92211
92212 /*
92213@@ -1925,6 +2205,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
92214 VM_BUG_ON(addr != -ENOMEM);
92215 info.flags = 0;
92216 info.low_limit = TASK_UNMAPPED_BASE;
92217+
92218+#ifdef CONFIG_PAX_RANDMMAP
92219+ if (mm->pax_flags & MF_PAX_RANDMMAP)
92220+ info.low_limit += mm->delta_mmap;
92221+#endif
92222+
92223 info.high_limit = TASK_SIZE;
92224 addr = vm_unmapped_area(&info);
92225 }
92226@@ -2026,6 +2312,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
92227 return vma;
92228 }
92229
92230+#ifdef CONFIG_PAX_SEGMEXEC
92231+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
92232+{
92233+ struct vm_area_struct *vma_m;
92234+
92235+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
92236+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
92237+ BUG_ON(vma->vm_mirror);
92238+ return NULL;
92239+ }
92240+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
92241+ vma_m = vma->vm_mirror;
92242+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
92243+ BUG_ON(vma->vm_file != vma_m->vm_file);
92244+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
92245+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
92246+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
92247+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
92248+ return vma_m;
92249+}
92250+#endif
92251+
92252 /*
92253 * Verify that the stack growth is acceptable and
92254 * update accounting. This is shared with both the
92255@@ -2042,6 +2350,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
92256 return -ENOMEM;
92257
92258 /* Stack limit test */
92259+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
92260 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
92261 return -ENOMEM;
92262
92263@@ -2052,6 +2361,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
92264 locked = mm->locked_vm + grow;
92265 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
92266 limit >>= PAGE_SHIFT;
92267+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
92268 if (locked > limit && !capable(CAP_IPC_LOCK))
92269 return -ENOMEM;
92270 }
92271@@ -2081,37 +2391,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
92272 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
92273 * vma is the last one with address > vma->vm_end. Have to extend vma.
92274 */
92275+#ifndef CONFIG_IA64
92276+static
92277+#endif
92278 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
92279 {
92280 int error;
92281+ bool locknext;
92282
92283 if (!(vma->vm_flags & VM_GROWSUP))
92284 return -EFAULT;
92285
92286+ /* Also guard against wrapping around to address 0. */
92287+ if (address < PAGE_ALIGN(address+1))
92288+ address = PAGE_ALIGN(address+1);
92289+ else
92290+ return -ENOMEM;
92291+
92292 /*
92293 * We must make sure the anon_vma is allocated
92294 * so that the anon_vma locking is not a noop.
92295 */
92296 if (unlikely(anon_vma_prepare(vma)))
92297 return -ENOMEM;
92298+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
92299+ if (locknext && anon_vma_prepare(vma->vm_next))
92300+ return -ENOMEM;
92301 vma_lock_anon_vma(vma);
92302+ if (locknext)
92303+ vma_lock_anon_vma(vma->vm_next);
92304
92305 /*
92306 * vma->vm_start/vm_end cannot change under us because the caller
92307 * is required to hold the mmap_sem in read mode. We need the
92308- * anon_vma lock to serialize against concurrent expand_stacks.
92309- * Also guard against wrapping around to address 0.
92310+ * anon_vma locks to serialize against concurrent expand_stacks
92311+ * and expand_upwards.
92312 */
92313- if (address < PAGE_ALIGN(address+4))
92314- address = PAGE_ALIGN(address+4);
92315- else {
92316- vma_unlock_anon_vma(vma);
92317- return -ENOMEM;
92318- }
92319 error = 0;
92320
92321 /* Somebody else might have raced and expanded it already */
92322- if (address > vma->vm_end) {
92323+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
92324+ error = -ENOMEM;
92325+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
92326 unsigned long size, grow;
92327
92328 size = address - vma->vm_start;
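
The rewritten guard replaces PAGE_ALIGN(address+4) with PAGE_ALIGN(address+1) and hoists it above the locking: rounding address+1 up yields the next page boundary strictly above address, and the `<` comparison catches the wrap to 0 when address sits in the last page of the address space. In isolation:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    /* Next page boundary above addr, or 0 on wrap-around (a sketch). */
    static unsigned long grow_target(unsigned long addr)
    {
        if (addr < PAGE_ALIGN(addr + 1))
            return PAGE_ALIGN(addr + 1);
        return 0;   /* addr + 1 wrapped: would "grow" to address 0 */
    }

    int main(void)
    {
        printf("%#lx\n", grow_target(0x1000));      /* 0x2000 */
        printf("%#lx\n", grow_target(~0UL - 100));  /* 0: wraps */
        return 0;
    }
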
92329@@ -2146,6 +2467,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
92330 }
92331 }
92332 }
92333+ if (locknext)
92334+ vma_unlock_anon_vma(vma->vm_next);
92335 vma_unlock_anon_vma(vma);
92336 khugepaged_enter_vma_merge(vma);
92337 validate_mm(vma->vm_mm);
92338@@ -2160,6 +2483,8 @@ int expand_downwards(struct vm_area_struct *vma,
92339 unsigned long address)
92340 {
92341 int error;
92342+ bool lockprev = false;
92343+ struct vm_area_struct *prev;
92344
92345 /*
92346 * We must make sure the anon_vma is allocated
92347@@ -2173,6 +2498,15 @@ int expand_downwards(struct vm_area_struct *vma,
92348 if (error)
92349 return error;
92350
92351+ prev = vma->vm_prev;
92352+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
92353+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
92354+#endif
92355+ if (lockprev && anon_vma_prepare(prev))
92356+ return -ENOMEM;
92357+ if (lockprev)
92358+ vma_lock_anon_vma(prev);
92359+
92360 vma_lock_anon_vma(vma);
92361
92362 /*
92363@@ -2182,9 +2516,17 @@ int expand_downwards(struct vm_area_struct *vma,
92364 */
92365
92366 /* Somebody else might have raced and expanded it already */
92367- if (address < vma->vm_start) {
92368+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
92369+ error = -ENOMEM;
92370+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
92371 unsigned long size, grow;
92372
92373+#ifdef CONFIG_PAX_SEGMEXEC
92374+ struct vm_area_struct *vma_m;
92375+
92376+ vma_m = pax_find_mirror_vma(vma);
92377+#endif
92378+
92379 size = vma->vm_end - address;
92380 grow = (vma->vm_start - address) >> PAGE_SHIFT;
92381
92382@@ -2209,13 +2551,27 @@ int expand_downwards(struct vm_area_struct *vma,
92383 vma->vm_pgoff -= grow;
92384 anon_vma_interval_tree_post_update_vma(vma);
92385 vma_gap_update(vma);
92386+
92387+#ifdef CONFIG_PAX_SEGMEXEC
92388+ if (vma_m) {
92389+ anon_vma_interval_tree_pre_update_vma(vma_m);
92390+ vma_m->vm_start -= grow << PAGE_SHIFT;
92391+ vma_m->vm_pgoff -= grow;
92392+ anon_vma_interval_tree_post_update_vma(vma_m);
92393+ vma_gap_update(vma_m);
92394+ }
92395+#endif
92396+
92397 spin_unlock(&vma->vm_mm->page_table_lock);
92398
92399+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
92400 perf_event_mmap(vma);
92401 }
92402 }
92403 }
92404 vma_unlock_anon_vma(vma);
92405+ if (lockprev)
92406+ vma_unlock_anon_vma(prev);
92407 khugepaged_enter_vma_merge(vma);
92408 validate_mm(vma->vm_mm);
92409 return error;
92410@@ -2313,6 +2669,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
92411 do {
92412 long nrpages = vma_pages(vma);
92413
92414+#ifdef CONFIG_PAX_SEGMEXEC
92415+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
92416+ vma = remove_vma(vma);
92417+ continue;
92418+ }
92419+#endif
92420+
92421 if (vma->vm_flags & VM_ACCOUNT)
92422 nr_accounted += nrpages;
92423 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
92424@@ -2357,6 +2720,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
92425 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
92426 vma->vm_prev = NULL;
92427 do {
92428+
92429+#ifdef CONFIG_PAX_SEGMEXEC
92430+ if (vma->vm_mirror) {
92431+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
92432+ vma->vm_mirror->vm_mirror = NULL;
92433+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
92434+ vma->vm_mirror = NULL;
92435+ }
92436+#endif
92437+
92438 vma_rb_erase(vma, &mm->mm_rb);
92439 mm->map_count--;
92440 tail_vma = vma;
92441@@ -2382,14 +2755,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
92442 struct vm_area_struct *new;
92443 int err = -ENOMEM;
92444
92445+#ifdef CONFIG_PAX_SEGMEXEC
92446+ struct vm_area_struct *vma_m, *new_m = NULL;
92447+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
92448+#endif
92449+
92450 if (is_vm_hugetlb_page(vma) && (addr &
92451 ~(huge_page_mask(hstate_vma(vma)))))
92452 return -EINVAL;
92453
92454+#ifdef CONFIG_PAX_SEGMEXEC
92455+ vma_m = pax_find_mirror_vma(vma);
92456+#endif
92457+
92458 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
92459 if (!new)
92460 goto out_err;
92461
92462+#ifdef CONFIG_PAX_SEGMEXEC
92463+ if (vma_m) {
92464+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
92465+ if (!new_m) {
92466+ kmem_cache_free(vm_area_cachep, new);
92467+ goto out_err;
92468+ }
92469+ }
92470+#endif
92471+
92472 /* most fields are the same, copy all, and then fixup */
92473 *new = *vma;
92474
92475@@ -2402,6 +2794,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
92476 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
92477 }
92478
92479+#ifdef CONFIG_PAX_SEGMEXEC
92480+ if (vma_m) {
92481+ *new_m = *vma_m;
92482+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
92483+ new_m->vm_mirror = new;
92484+ new->vm_mirror = new_m;
92485+
92486+ if (new_below)
92487+ new_m->vm_end = addr_m;
92488+ else {
92489+ new_m->vm_start = addr_m;
92490+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
92491+ }
92492+ }
92493+#endif
92494+
92495 err = vma_dup_policy(vma, new);
92496 if (err)
92497 goto out_free_vma;
92498@@ -2421,6 +2829,38 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
92499 else
92500 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
92501
92502+#ifdef CONFIG_PAX_SEGMEXEC
92503+ if (!err && vma_m) {
92504+ struct mempolicy *pol = vma_policy(new);
92505+
92506+ if (anon_vma_clone(new_m, vma_m))
92507+ goto out_free_mpol;
92508+
92509+ mpol_get(pol);
92510+ set_vma_policy(new_m, pol);
92511+
92512+ if (new_m->vm_file)
92513+ get_file(new_m->vm_file);
92514+
92515+ if (new_m->vm_ops && new_m->vm_ops->open)
92516+ new_m->vm_ops->open(new_m);
92517+
92518+ if (new_below)
92519+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
92520+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
92521+ else
92522+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
92523+
92524+ if (err) {
92525+ if (new_m->vm_ops && new_m->vm_ops->close)
92526+ new_m->vm_ops->close(new_m);
92527+ if (new_m->vm_file)
92528+ fput(new_m->vm_file);
92529+ mpol_put(pol);
92530+ }
92531+ }
92532+#endif
92533+
92534 /* Success. */
92535 if (!err)
92536 return 0;
92537@@ -2430,10 +2870,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
92538 new->vm_ops->close(new);
92539 if (new->vm_file)
92540 fput(new->vm_file);
92541- unlink_anon_vmas(new);
92542 out_free_mpol:
92543 mpol_put(vma_policy(new));
92544 out_free_vma:
92545+
92546+#ifdef CONFIG_PAX_SEGMEXEC
92547+ if (new_m) {
92548+ unlink_anon_vmas(new_m);
92549+ kmem_cache_free(vm_area_cachep, new_m);
92550+ }
92551+#endif
92552+
92553+ unlink_anon_vmas(new);
92554 kmem_cache_free(vm_area_cachep, new);
92555 out_err:
92556 return err;
92557@@ -2446,6 +2894,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
92558 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
92559 unsigned long addr, int new_below)
92560 {
92561+
92562+#ifdef CONFIG_PAX_SEGMEXEC
92563+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
92564+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
92565+ if (mm->map_count >= sysctl_max_map_count-1)
92566+ return -ENOMEM;
92567+ } else
92568+#endif
92569+
92570 if (mm->map_count >= sysctl_max_map_count)
92571 return -ENOMEM;
92572
92573@@ -2457,11 +2914,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
92574 * work. This now handles partial unmappings.
92575 * Jeremy Fitzhardinge <jeremy@goop.org>
92576 */
92577+#ifdef CONFIG_PAX_SEGMEXEC
92578 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
92579 {
92580+ int ret = __do_munmap(mm, start, len);
92581+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
92582+ return ret;
92583+
92584+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
92585+}
92586+
92587+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
92588+#else
92589+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
92590+#endif
92591+{
92592 unsigned long end;
92593 struct vm_area_struct *vma, *prev, *last;
92594
92595+ /*
92596+ * mm->mmap_sem is required to protect against another thread
92597+ * changing the mappings in case we sleep.
92598+ */
92599+ verify_mm_writelocked(mm);
92600+
92601 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
92602 return -EINVAL;
92603
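
Under SEGMEXEC the user address space is halved and every executable mapping owns a mirror displaced by SEGMEXEC_TASK_SIZE, so a successful unmap of the primary range has to be replayed at the mirrored address — which is all the wrapper above does. A toy model (the constant matches the usual 3 GiB/2 i386 split but is illustrative here, and the real helpers mutate the mm instead of printing):

    #include <stdio.h>

    #define SEGMEXEC_TASK_SIZE 0x60000000UL   /* illustrative: half of 3 GiB */

    static int unmap_one(unsigned long start, unsigned long len)
    {
        printf("unmap [%#lx, %#lx)\n", start, start + len);
        return 0;
    }

    static int unmap_with_mirror(unsigned long start, unsigned long len)
    {
        int ret = unmap_one(start, len);
        if (ret)
            return ret;              /* primary failed: leave mirror alone */
        return unmap_one(start + SEGMEXEC_TASK_SIZE, len);  /* drop the mirror */
    }

    int main(void) { return unmap_with_mirror(0x08048000UL, 0x1000); }
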
92604@@ -2536,6 +3012,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
92605 /* Fix up all other VM information */
92606 remove_vma_list(mm, vma);
92607
92608+ track_exec_limit(mm, start, end, 0UL);
92609+
92610 return 0;
92611 }
92612
92613@@ -2544,6 +3022,13 @@ int vm_munmap(unsigned long start, size_t len)
92614 int ret;
92615 struct mm_struct *mm = current->mm;
92616
92617+
92618+#ifdef CONFIG_PAX_SEGMEXEC
92619+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
92620+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
92621+ return -EINVAL;
92622+#endif
92623+
92624 down_write(&mm->mmap_sem);
92625 ret = do_munmap(mm, start, len);
92626 up_write(&mm->mmap_sem);
92627@@ -2557,16 +3042,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
92628 return vm_munmap(addr, len);
92629 }
92630
92631-static inline void verify_mm_writelocked(struct mm_struct *mm)
92632-{
92633-#ifdef CONFIG_DEBUG_VM
92634- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
92635- WARN_ON(1);
92636- up_read(&mm->mmap_sem);
92637- }
92638-#endif
92639-}
92640-
92641 /*
92642 * this is really a simplified "do_mmap". it only handles
92643 * anonymous maps. eventually we may be able to do some
92644@@ -2580,6 +3055,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
92645 struct rb_node ** rb_link, * rb_parent;
92646 pgoff_t pgoff = addr >> PAGE_SHIFT;
92647 int error;
92648+ unsigned long charged;
92649
92650 len = PAGE_ALIGN(len);
92651 if (!len)
92652@@ -2587,16 +3063,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
92653
92654 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
92655
92656+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
92657+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
92658+ flags &= ~VM_EXEC;
92659+
92660+#ifdef CONFIG_PAX_MPROTECT
92661+ if (mm->pax_flags & MF_PAX_MPROTECT)
92662+ flags &= ~VM_MAYEXEC;
92663+#endif
92664+
92665+ }
92666+#endif
92667+
92668 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
92669 if (error & ~PAGE_MASK)
92670 return error;
92671
92672+ charged = len >> PAGE_SHIFT;
92673+
92674 /*
92675 * mlock MCL_FUTURE?
92676 */
92677 if (mm->def_flags & VM_LOCKED) {
92678 unsigned long locked, lock_limit;
92679- locked = len >> PAGE_SHIFT;
92680+ locked = charged;
92681 locked += mm->locked_vm;
92682 lock_limit = rlimit(RLIMIT_MEMLOCK);
92683 lock_limit >>= PAGE_SHIFT;
92684@@ -2613,21 +3103,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
92685 /*
92686 * Clear old maps. this also does some error checking for us
92687 */
92688- munmap_back:
92689 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
92690 if (do_munmap(mm, addr, len))
92691 return -ENOMEM;
92692- goto munmap_back;
92693+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
92694 }
92695
92696 /* Check against address space limits *after* clearing old maps... */
92697- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
92698+ if (!may_expand_vm(mm, charged))
92699 return -ENOMEM;
92700
92701 if (mm->map_count > sysctl_max_map_count)
92702 return -ENOMEM;
92703
92704- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
92705+ if (security_vm_enough_memory_mm(mm, charged))
92706 return -ENOMEM;
92707
92708 /* Can we just expand an old private anonymous mapping? */
92709@@ -2641,7 +3130,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
92710 */
92711 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
92712 if (!vma) {
92713- vm_unacct_memory(len >> PAGE_SHIFT);
92714+ vm_unacct_memory(charged);
92715 return -ENOMEM;
92716 }
92717
92718@@ -2655,10 +3144,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
92719 vma_link(mm, vma, prev, rb_link, rb_parent);
92720 out:
92721 perf_event_mmap(vma);
92722- mm->total_vm += len >> PAGE_SHIFT;
92723+ mm->total_vm += charged;
92724 if (flags & VM_LOCKED)
92725- mm->locked_vm += (len >> PAGE_SHIFT);
92726+ mm->locked_vm += charged;
92727 vma->vm_flags |= VM_SOFTDIRTY;
92728+ track_exec_limit(mm, addr, addr + len, flags);
92729 return addr;
92730 }
92731
92732@@ -2720,6 +3210,7 @@ void exit_mmap(struct mm_struct *mm)
92733 while (vma) {
92734 if (vma->vm_flags & VM_ACCOUNT)
92735 nr_accounted += vma_pages(vma);
92736+ vma->vm_mirror = NULL;
92737 vma = remove_vma(vma);
92738 }
92739 vm_unacct_memory(nr_accounted);
92740@@ -2737,6 +3228,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
92741 struct vm_area_struct *prev;
92742 struct rb_node **rb_link, *rb_parent;
92743
92744+#ifdef CONFIG_PAX_SEGMEXEC
92745+ struct vm_area_struct *vma_m = NULL;
92746+#endif
92747+
92748+ if (security_mmap_addr(vma->vm_start))
92749+ return -EPERM;
92750+
92751 /*
92752 * The vm_pgoff of a purely anonymous vma should be irrelevant
92753 * until its first write fault, when page's anon_vma and index
92754@@ -2760,7 +3258,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
92755 security_vm_enough_memory_mm(mm, vma_pages(vma)))
92756 return -ENOMEM;
92757
92758+#ifdef CONFIG_PAX_SEGMEXEC
92759+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
92760+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
92761+ if (!vma_m)
92762+ return -ENOMEM;
92763+ }
92764+#endif
92765+
92766 vma_link(mm, vma, prev, rb_link, rb_parent);
92767+
92768+#ifdef CONFIG_PAX_SEGMEXEC
92769+ if (vma_m)
92770+ BUG_ON(pax_mirror_vma(vma_m, vma));
92771+#endif
92772+
92773 return 0;
92774 }
92775
92776@@ -2779,6 +3291,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
92777 struct rb_node **rb_link, *rb_parent;
92778 bool faulted_in_anon_vma = true;
92779
92780+ BUG_ON(vma->vm_mirror);
92781+
92782 /*
92783 * If anonymous vma has not yet been faulted, update new pgoff
92784 * to match new location, to increase its chance of merging.
92785@@ -2843,6 +3357,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
92786 return NULL;
92787 }
92788
92789+#ifdef CONFIG_PAX_SEGMEXEC
92790+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
92791+{
92792+ struct vm_area_struct *prev_m;
92793+ struct rb_node **rb_link_m, *rb_parent_m;
92794+ struct mempolicy *pol_m;
92795+
92796+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
92797+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
92798+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
92799+ *vma_m = *vma;
92800+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
92801+ if (anon_vma_clone(vma_m, vma))
92802+ return -ENOMEM;
92803+ pol_m = vma_policy(vma_m);
92804+ mpol_get(pol_m);
92805+ set_vma_policy(vma_m, pol_m);
92806+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
92807+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
92808+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
92809+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
92810+ if (vma_m->vm_file)
92811+ get_file(vma_m->vm_file);
92812+ if (vma_m->vm_ops && vma_m->vm_ops->open)
92813+ vma_m->vm_ops->open(vma_m);
92814+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
92815+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
92816+ vma_m->vm_mirror = vma;
92817+ vma->vm_mirror = vma_m;
92818+ return 0;
92819+}
92820+#endif
92821+
92822 /*
92823 * Return true if the calling process may expand its vm space by the passed
92824 * number of pages
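
pax_mirror_vma clones the vma, shifts both bounds up by SEGMEXEC_TASK_SIZE, strips the write, accounting and mlock bits from the copy, and cross-links the pair through vm_mirror — the same invariants that pax_find_mirror_vma's BUG_ONs assert earlier in this file. A compressed sketch (flag values are stand-ins; error handling and the rmap/mempolicy plumbing are omitted):

    #include <assert.h>

    #define VM_WRITE    0x2u
    #define VM_MAYWRITE 0x20u
    #define VM_ACCOUNT  0x100u
    #define VM_LOCKED   0x200u

    struct vma {
        unsigned long start, end, flags;
        struct vma *mirror;
    };

    static void mirror_vma(struct vma *m, struct vma *v, unsigned long seg_size)
    {
        *m = *v;                     /* same file, offset and layout... */
        m->start += seg_size;        /* ...displaced into the upper half */
        m->end   += seg_size;
        m->flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
        m->mirror = v;               /* cross-link the pair */
        v->mirror = m;
    }

    int main(void)
    {
        struct vma v = { 0x1000, 0x3000, VM_WRITE | VM_ACCOUNT, 0 }, m;
        mirror_vma(&m, &v, 0x60000000UL);
        assert(m.start == 0x60001000UL && m.mirror == &v && !(m.flags & VM_WRITE));
        return 0;
    }
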
92825@@ -2854,6 +3401,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
92826
92827 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
92828
92829+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
92830 if (cur + npages > lim)
92831 return 0;
92832 return 1;
92833@@ -2924,6 +3472,22 @@ int install_special_mapping(struct mm_struct *mm,
92834 vma->vm_start = addr;
92835 vma->vm_end = addr + len;
92836
92837+#ifdef CONFIG_PAX_MPROTECT
92838+ if (mm->pax_flags & MF_PAX_MPROTECT) {
92839+#ifndef CONFIG_PAX_MPROTECT_COMPAT
92840+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
92841+ return -EPERM;
92842+ if (!(vm_flags & VM_EXEC))
92843+ vm_flags &= ~VM_MAYEXEC;
92844+#else
92845+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
92846+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
92847+#endif
92848+ else
92849+ vm_flags &= ~VM_MAYWRITE;
92850+ }
92851+#endif
92852+
92853 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
92854 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
92855
92856diff --git a/mm/mprotect.c b/mm/mprotect.c
92857index bb53a65..249c052 100644
92858--- a/mm/mprotect.c
92859+++ b/mm/mprotect.c
92860@@ -23,10 +23,18 @@
92861 #include <linux/mmu_notifier.h>
92862 #include <linux/migrate.h>
92863 #include <linux/perf_event.h>
92864+#include <linux/sched/sysctl.h>
92865+
92866+#ifdef CONFIG_PAX_MPROTECT
92867+#include <linux/elf.h>
92868+#include <linux/binfmts.h>
92869+#endif
92870+
92871 #include <asm/uaccess.h>
92872 #include <asm/pgtable.h>
92873 #include <asm/cacheflush.h>
92874 #include <asm/tlbflush.h>
92875+#include <asm/mmu_context.h>
92876
92877 #ifndef pgprot_modify
92878 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
92879@@ -222,6 +230,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
92880 return pages;
92881 }
92882
92883+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
92884+/* called while holding the mmap semaphore for writing, except for stack expansion */
92885+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
92886+{
92887+ unsigned long oldlimit, newlimit = 0UL;
92888+
92889+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
92890+ return;
92891+
92892+ spin_lock(&mm->page_table_lock);
92893+ oldlimit = mm->context.user_cs_limit;
92894+ if ((prot & VM_EXEC) && oldlimit < end)
92895+ /* USER_CS limit moved up */
92896+ newlimit = end;
92897+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
92898+ /* USER_CS limit moved down */
92899+ newlimit = start;
92900+
92901+ if (newlimit) {
92902+ mm->context.user_cs_limit = newlimit;
92903+
92904+#ifdef CONFIG_SMP
92905+ wmb();
92906+ cpus_clear(mm->context.cpu_user_cs_mask);
92907+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
92908+#endif
92909+
92910+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
92911+ }
92912+ spin_unlock(&mm->page_table_lock);
92913+ if (newlimit == end) {
92914+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
92915+
92916+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
92917+ if (is_vm_hugetlb_page(vma))
92918+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
92919+ else
92920+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
92921+ }
92922+}
92923+#endif
92924+
92925 int
92926 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
92927 unsigned long start, unsigned long end, unsigned long newflags)
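
ARCH_TRACK_EXEC_LIMIT approximates NX on legacy i386 by clamping the USER_CS segment limit to the top of the highest executable mapping; track_exec_limit recomputes that watermark when a range gains or loses VM_EXEC and then refreshes protections on vmas newly covered by a raised limit. The decision itself reduces to a few comparisons (a sketch mirroring the oldlimit/newlimit logic above):

    #include <stdio.h>

    /* New USER_CS limit after [start, end) changes exec rights; 0 = unchanged. */
    static unsigned long new_cs_limit(unsigned long old, int is_exec,
                                      unsigned long start, unsigned long end)
    {
        if (is_exec && old < end)
            return end;                /* exec mapping grew past the limit */
        if (!is_exec && start < old && old <= end)
            return start;              /* exec removed from around the limit */
        return 0;                      /* limit stays where it is */
    }

    int main(void)
    {
        printf("%#lx\n", new_cs_limit(0x08050000UL, 1, 0x08050000UL, 0x08060000UL));
        printf("%#lx\n", new_cs_limit(0x08060000UL, 0, 0x08050000UL, 0x08060000UL));
        return 0;
    }
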
92928@@ -234,11 +284,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
92929 int error;
92930 int dirty_accountable = 0;
92931
92932+#ifdef CONFIG_PAX_SEGMEXEC
92933+ struct vm_area_struct *vma_m = NULL;
92934+ unsigned long start_m, end_m;
92935+
92936+ start_m = start + SEGMEXEC_TASK_SIZE;
92937+ end_m = end + SEGMEXEC_TASK_SIZE;
92938+#endif
92939+
92940 if (newflags == oldflags) {
92941 *pprev = vma;
92942 return 0;
92943 }
92944
92945+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
92946+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
92947+
92948+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
92949+ return -ENOMEM;
92950+
92951+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
92952+ return -ENOMEM;
92953+ }
92954+
92955 /*
92956 * If we make a private mapping writable we increase our commit;
92957 * but (without finer accounting) cannot reduce our commit if we
92958@@ -255,6 +323,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
92959 }
92960 }
92961
92962+#ifdef CONFIG_PAX_SEGMEXEC
92963+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
92964+ if (start != vma->vm_start) {
92965+ error = split_vma(mm, vma, start, 1);
92966+ if (error)
92967+ goto fail;
92968+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
92969+ *pprev = (*pprev)->vm_next;
92970+ }
92971+
92972+ if (end != vma->vm_end) {
92973+ error = split_vma(mm, vma, end, 0);
92974+ if (error)
92975+ goto fail;
92976+ }
92977+
92978+ if (pax_find_mirror_vma(vma)) {
92979+ error = __do_munmap(mm, start_m, end_m - start_m);
92980+ if (error)
92981+ goto fail;
92982+ } else {
92983+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
92984+ if (!vma_m) {
92985+ error = -ENOMEM;
92986+ goto fail;
92987+ }
92988+ vma->vm_flags = newflags;
92989+ error = pax_mirror_vma(vma_m, vma);
92990+ if (error) {
92991+ vma->vm_flags = oldflags;
92992+ goto fail;
92993+ }
92994+ }
92995+ }
92996+#endif
92997+
92998 /*
92999 * First try to merge with previous and/or next vma.
93000 */
93001@@ -285,9 +389,21 @@ success:
93002 * vm_flags and vm_page_prot are protected by the mmap_sem
93003 * held in write mode.
93004 */
93005+
93006+#ifdef CONFIG_PAX_SEGMEXEC
93007+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
93008+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
93009+#endif
93010+
93011 vma->vm_flags = newflags;
93012+
93013+#ifdef CONFIG_PAX_MPROTECT
93014+ if (mm->binfmt && mm->binfmt->handle_mprotect)
93015+ mm->binfmt->handle_mprotect(vma, newflags);
93016+#endif
93017+
93018 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
93019- vm_get_page_prot(newflags));
93020+ vm_get_page_prot(vma->vm_flags));
93021
93022 if (vma_wants_writenotify(vma)) {
93023 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
93024@@ -326,6 +442,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
93025 end = start + len;
93026 if (end <= start)
93027 return -ENOMEM;
93028+
93029+#ifdef CONFIG_PAX_SEGMEXEC
93030+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
93031+ if (end > SEGMEXEC_TASK_SIZE)
93032+ return -EINVAL;
93033+ } else
93034+#endif
93035+
93036+ if (end > TASK_SIZE)
93037+ return -EINVAL;
93038+
93039 if (!arch_validate_prot(prot))
93040 return -EINVAL;
93041
93042@@ -333,7 +460,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
93043 /*
93044 * Does the application expect PROT_READ to imply PROT_EXEC:
93045 */
93046- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
93047+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
93048 prot |= PROT_EXEC;
93049
93050 vm_flags = calc_vm_prot_bits(prot);
93051@@ -365,6 +492,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
93052 if (start > vma->vm_start)
93053 prev = vma;
93054
93055+#ifdef CONFIG_PAX_MPROTECT
93056+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
93057+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
93058+#endif
93059+
93060 for (nstart = start ; ; ) {
93061 unsigned long newflags;
93062
93063@@ -375,6 +507,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
93064
93065 /* newflags >> 4 shift VM_MAY% in place of VM_% */
93066 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
93067+ if (prot & (PROT_WRITE | PROT_EXEC))
93068+ gr_log_rwxmprotect(vma);
93069+
93070+ error = -EACCES;
93071+ goto out;
93072+ }
93073+
93074+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
93075 error = -EACCES;
93076 goto out;
93077 }
93078@@ -389,6 +529,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
93079 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
93080 if (error)
93081 goto out;
93082+
93083+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
93084+
93085 nstart = tmp;
93086
93087 if (nstart < prev->vm_end)
93088diff --git a/mm/mremap.c b/mm/mremap.c
93089index 0843feb..4f5b2e6 100644
93090--- a/mm/mremap.c
93091+++ b/mm/mremap.c
93092@@ -144,6 +144,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
93093 continue;
93094 pte = ptep_get_and_clear(mm, old_addr, old_pte);
93095 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
93096+
93097+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
93098+ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
93099+ pte = pte_exprotect(pte);
93100+#endif
93101+
93102 pte = move_soft_dirty_pte(pte);
93103 set_pte_at(mm, new_addr, new_pte, pte);
93104 }
93105@@ -337,6 +343,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
93106 if (is_vm_hugetlb_page(vma))
93107 goto Einval;
93108
93109+#ifdef CONFIG_PAX_SEGMEXEC
93110+ if (pax_find_mirror_vma(vma))
93111+ goto Einval;
93112+#endif
93113+
93114 /* We can't remap across vm area boundaries */
93115 if (old_len > vma->vm_end - addr)
93116 goto Efault;
93117@@ -392,20 +403,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
93118 unsigned long ret = -EINVAL;
93119 unsigned long charged = 0;
93120 unsigned long map_flags;
93121+ unsigned long pax_task_size = TASK_SIZE;
93122
93123 if (new_addr & ~PAGE_MASK)
93124 goto out;
93125
93126- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
93127+#ifdef CONFIG_PAX_SEGMEXEC
93128+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
93129+ pax_task_size = SEGMEXEC_TASK_SIZE;
93130+#endif
93131+
93132+ pax_task_size -= PAGE_SIZE;
93133+
93134+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
93135 goto out;
93136
93137 /* Check if the location we're moving into overlaps the
93138 * old location at all, and fail if it does.
93139 */
93140- if ((new_addr <= addr) && (new_addr+new_len) > addr)
93141- goto out;
93142-
93143- if ((addr <= new_addr) && (addr+old_len) > new_addr)
93144+ if (addr + old_len > new_addr && new_addr + new_len > addr)
93145 goto out;
93146
93147 ret = do_munmap(mm, new_addr, new_len);
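
The rewritten overlap test is the standard half-open interval intersection predicate: [addr, addr+old_len) and [new_addr, new_addr+new_len) intersect exactly when each range starts below the other's end. The two deleted conditions tested the two orderings separately; for non-empty ranges the forms agree, which a brute-force check over small values confirms:

    #include <assert.h>
    #include <stdio.h>

    static int overlap_new(unsigned a, unsigned la, unsigned b, unsigned lb)
    {
        return a + la > b && b + lb > a;          /* the single predicate */
    }

    static int overlap_old(unsigned a, unsigned la, unsigned b, unsigned lb)
    {
        /* a = addr, la = old_len, b = new_addr, lb = new_len */
        return (b <= a && b + lb > a) || (a <= b && a + la > b);
    }

    int main(void)
    {
        for (unsigned a = 0; a < 8; a++)
            for (unsigned la = 1; la < 8; la++)
                for (unsigned b = 0; b < 8; b++)
                    for (unsigned lb = 1; lb < 8; lb++)
                        assert(overlap_new(a, la, b, lb) == overlap_old(a, la, b, lb));
        puts("predicates agree");
        return 0;
    }
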
93148@@ -474,6 +490,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
93149 unsigned long ret = -EINVAL;
93150 unsigned long charged = 0;
93151 bool locked = false;
93152+ unsigned long pax_task_size = TASK_SIZE;
93153
93154 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
93155 return ret;
93156@@ -495,6 +512,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
93157 if (!new_len)
93158 return ret;
93159
93160+#ifdef CONFIG_PAX_SEGMEXEC
93161+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
93162+ pax_task_size = SEGMEXEC_TASK_SIZE;
93163+#endif
93164+
93165+ pax_task_size -= PAGE_SIZE;
93166+
93167+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
93168+ old_len > pax_task_size || addr > pax_task_size-old_len)
93169+ return ret;
93170+
93171 down_write(&current->mm->mmap_sem);
93172
93173 if (flags & MREMAP_FIXED) {
93174@@ -545,6 +573,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
93175 new_addr = addr;
93176 }
93177 ret = addr;
93178+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
93179 goto out;
93180 }
93181 }
93182@@ -568,7 +597,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
93183 goto out;
93184 }
93185
93186+ map_flags = vma->vm_flags;
93187 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
93188+ if (!(ret & ~PAGE_MASK)) {
93189+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
93190+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
93191+ }
93192 }
93193 out:
93194 if (ret & ~PAGE_MASK)
93195diff --git a/mm/nommu.c b/mm/nommu.c
93196index fec093a..8162f74 100644
93197--- a/mm/nommu.c
93198+++ b/mm/nommu.c
93199@@ -64,7 +64,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
93200 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
93201 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
93202 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
93203-int heap_stack_gap = 0;
93204
93205 atomic_long_t mmap_pages_allocated;
93206
93207@@ -844,15 +843,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
93208 EXPORT_SYMBOL(find_vma);
93209
93210 /*
93211- * find a VMA
93212- * - we don't extend stack VMAs under NOMMU conditions
93213- */
93214-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
93215-{
93216- return find_vma(mm, addr);
93217-}
93218-
93219-/*
93220 * expand a stack to a given address
93221 * - not supported under NOMMU conditions
93222 */
93223@@ -1563,6 +1553,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
93224
93225 /* most fields are the same, copy all, and then fixup */
93226 *new = *vma;
93227+ INIT_LIST_HEAD(&new->anon_vma_chain);
93228 *region = *vma->vm_region;
93229 new->vm_region = region;
93230
93231@@ -1992,8 +1983,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
93232 }
93233 EXPORT_SYMBOL(generic_file_remap_pages);
93234
93235-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
93236- unsigned long addr, void *buf, int len, int write)
93237+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
93238+ unsigned long addr, void *buf, size_t len, int write)
93239 {
93240 struct vm_area_struct *vma;
93241
93242@@ -2034,8 +2025,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
93243 *
93244 * The caller must hold a reference on @mm.
93245 */
93246-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
93247- void *buf, int len, int write)
93248+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
93249+ void *buf, size_t len, int write)
93250 {
93251 return __access_remote_vm(NULL, mm, addr, buf, len, write);
93252 }
93253@@ -2044,7 +2035,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
93254 * Access another process' address space.
93255 * - source/target buffer must be kernel space
93256 */
93257-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
93258+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
93259 {
93260 struct mm_struct *mm;
93261
93262diff --git a/mm/page-writeback.c b/mm/page-writeback.c
93263index 6380758..4064aec 100644
93264--- a/mm/page-writeback.c
93265+++ b/mm/page-writeback.c
93266@@ -690,7 +690,7 @@ static inline long long pos_ratio_polynom(unsigned long setpoint,
93267 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
93268 * - the bdi dirty thresh drops quickly due to change of JBOD workload
93269 */
93270-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
93271+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
93272 unsigned long thresh,
93273 unsigned long bg_thresh,
93274 unsigned long dirty,
93275diff --git a/mm/page_alloc.c b/mm/page_alloc.c
93276index 5248fe0..0f693aa 100644
93277--- a/mm/page_alloc.c
93278+++ b/mm/page_alloc.c
93279@@ -61,6 +61,7 @@
93280 #include <linux/page-debug-flags.h>
93281 #include <linux/hugetlb.h>
93282 #include <linux/sched/rt.h>
93283+#include <linux/random.h>
93284
93285 #include <asm/sections.h>
93286 #include <asm/tlbflush.h>
93287@@ -354,7 +355,7 @@ out:
93288 * This usage means that zero-order pages may not be compound.
93289 */
93290
93291-static void free_compound_page(struct page *page)
93292+void free_compound_page(struct page *page)
93293 {
93294 __free_pages_ok(page, compound_order(page));
93295 }
93296@@ -712,6 +713,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
93297 int i;
93298 int bad = 0;
93299
93300+#ifdef CONFIG_PAX_MEMORY_SANITIZE
93301+ unsigned long index = 1UL << order;
93302+#endif
93303+
93304 trace_mm_page_free(page, order);
93305 kmemcheck_free_shadow(page, order);
93306
93307@@ -728,6 +733,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
93308 debug_check_no_obj_freed(page_address(page),
93309 PAGE_SIZE << order);
93310 }
93311+
93312+#ifdef CONFIG_PAX_MEMORY_SANITIZE
93313+ for (; index; --index)
93314+ sanitize_highpage(page + index - 1);
93315+#endif
93316+
93317 arch_free_page(page, order);
93318 kernel_map_pages(page, 1 << order, 0);
93319
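
PAX_MEMORY_SANITIZE scrubs every page of an order-n block as it is freed: index counts down from 2^order, so page + index - 1 walks the compound block from its last page to its first. The matching prep_new_page hunk further down can then skip the __GFP_ZERO zeroing, since freed pages are already clean. A userspace analogue (sanitize_highpage is the kernel helper; memset stands in for it here):

    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096UL

    /* Scrub an order-n block on free, last page first. */
    static void sanitized_free(void *block, unsigned int order)
    {
        unsigned long index = 1UL << order;

        for (; index; --index)
            memset((char *)block + (index - 1) * PAGE_SIZE, 0, PAGE_SIZE);
        free(block);
    }

    int main(void)
    {
        void *p = malloc(4 * PAGE_SIZE);
        if (p)
            sanitized_free(p, 2);   /* order 2 = four pages */
        return 0;
    }
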
93320@@ -750,6 +761,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
93321 local_irq_restore(flags);
93322 }
93323
93324+#ifdef CONFIG_PAX_LATENT_ENTROPY
93325+bool __meminitdata extra_latent_entropy;
93326+
93327+static int __init setup_pax_extra_latent_entropy(char *str)
93328+{
93329+ extra_latent_entropy = true;
93330+ return 0;
93331+}
93332+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
93333+
93334+volatile u64 latent_entropy __latent_entropy;
93335+EXPORT_SYMBOL(latent_entropy);
93336+#endif
93337+
93338 void __init __free_pages_bootmem(struct page *page, unsigned int order)
93339 {
93340 unsigned int nr_pages = 1 << order;
93341@@ -765,6 +790,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
93342 __ClearPageReserved(p);
93343 set_page_count(p, 0);
93344
93345+#ifdef CONFIG_PAX_LATENT_ENTROPY
93346+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
93347+ u64 hash = 0;
93348+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
93349+ const u64 *data = lowmem_page_address(page);
93350+
93351+ for (index = 0; index < end; index++)
93352+ hash ^= hash + data[index];
93353+ latent_entropy ^= hash;
93354+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
93355+ }
93356+#endif
93357+
93358 page_zone(page)->managed_pages += nr_pages;
93359 set_page_refcounted(page);
93360 __free_pages(page, order);
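
With pax_extra_latent_entropy, this boot-time free path mixes the raw contents of every page below 4 GiB (PFN < 0x100000 with 4 KiB pages) into a running value using the cheap, order-sensitive step hash ^= hash + data[index], then credits the result to the RNG via add_device_randomness(). The mixing step in isolation:

    #include <stdint.h>
    #include <stdio.h>

    /* Fold one page's worth of u64 words into the running entropy value. */
    static uint64_t mix_page(uint64_t entropy, const uint64_t *data, size_t words)
    {
        uint64_t hash = 0;

        for (size_t i = 0; i < words; i++)
            hash ^= hash + data[i];   /* cheap, order-sensitive mixing */
        return entropy ^ hash;
    }

    int main(void)
    {
        uint64_t page[4] = { 1, 2, 3, 4 };
        printf("%llu\n", (unsigned long long)mix_page(0, page, 4));
        return 0;
    }
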
93361@@ -870,8 +908,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
93362 arch_alloc_page(page, order);
93363 kernel_map_pages(page, 1 << order, 1);
93364
93365+#ifndef CONFIG_PAX_MEMORY_SANITIZE
93366 if (gfp_flags & __GFP_ZERO)
93367 prep_zero_page(page, order, gfp_flags);
93368+#endif
93369
93370 if (order && (gfp_flags & __GFP_COMP))
93371 prep_compound_page(page, order);
93372diff --git a/mm/page_io.c b/mm/page_io.c
93373index 8c79a47..a689e0d 100644
93374--- a/mm/page_io.c
93375+++ b/mm/page_io.c
93376@@ -260,7 +260,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
93377 struct file *swap_file = sis->swap_file;
93378 struct address_space *mapping = swap_file->f_mapping;
93379 struct iovec iov = {
93380- .iov_base = kmap(page),
93381+ .iov_base = (void __force_user *)kmap(page),
93382 .iov_len = PAGE_SIZE,
93383 };
93384
93385diff --git a/mm/percpu.c b/mm/percpu.c
93386index 0d10def..6dc822d 100644
93387--- a/mm/percpu.c
93388+++ b/mm/percpu.c
93389@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
93390 static unsigned int pcpu_high_unit_cpu __read_mostly;
93391
93392 /* the address of the first chunk which starts with the kernel static area */
93393-void *pcpu_base_addr __read_mostly;
93394+void *pcpu_base_addr __read_only;
93395 EXPORT_SYMBOL_GPL(pcpu_base_addr);
93396
93397 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
93398diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
93399index fd26d04..0cea1b0 100644
93400--- a/mm/process_vm_access.c
93401+++ b/mm/process_vm_access.c
93402@@ -13,6 +13,7 @@
93403 #include <linux/uio.h>
93404 #include <linux/sched.h>
93405 #include <linux/highmem.h>
93406+#include <linux/security.h>
93407 #include <linux/ptrace.h>
93408 #include <linux/slab.h>
93409 #include <linux/syscalls.h>
93410@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
93411 size_t iov_l_curr_offset = 0;
93412 ssize_t iov_len;
93413
93414+ return -ENOSYS; // PaX: until properly audited
93415+
93416 /*
93417 * Work out how many pages of struct pages we're going to need
93418 * when eventually calling get_user_pages
93419 */
93420 for (i = 0; i < riovcnt; i++) {
93421 iov_len = rvec[i].iov_len;
93422- if (iov_len > 0) {
93423- nr_pages_iov = ((unsigned long)rvec[i].iov_base
93424- + iov_len)
93425- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
93426- / PAGE_SIZE + 1;
93427- nr_pages = max(nr_pages, nr_pages_iov);
93428- }
93429+ if (iov_len <= 0)
93430+ continue;
93431+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
93432+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
93433+ nr_pages = max(nr_pages, nr_pages_iov);
93434 }
93435
93436 if (nr_pages == 0)
93437@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
93438 goto free_proc_pages;
93439 }
93440
93441+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
93442+ rc = -EPERM;
93443+ goto put_task_struct;
93444+ }
93445+
93446 mm = mm_access(task, PTRACE_MODE_ATTACH);
93447 if (!mm || IS_ERR(mm)) {
93448 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
93449diff --git a/mm/rmap.c b/mm/rmap.c
93450index 068522d..f539f21 100644
93451--- a/mm/rmap.c
93452+++ b/mm/rmap.c
93453@@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
93454 struct anon_vma *anon_vma = vma->anon_vma;
93455 struct anon_vma_chain *avc;
93456
93457+#ifdef CONFIG_PAX_SEGMEXEC
93458+ struct anon_vma_chain *avc_m = NULL;
93459+#endif
93460+
93461 might_sleep();
93462 if (unlikely(!anon_vma)) {
93463 struct mm_struct *mm = vma->vm_mm;
93464@@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
93465 if (!avc)
93466 goto out_enomem;
93467
93468+#ifdef CONFIG_PAX_SEGMEXEC
93469+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
93470+ if (!avc_m)
93471+ goto out_enomem_free_avc;
93472+#endif
93473+
93474 anon_vma = find_mergeable_anon_vma(vma);
93475 allocated = NULL;
93476 if (!anon_vma) {
93477@@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
93478 /* page_table_lock to protect against threads */
93479 spin_lock(&mm->page_table_lock);
93480 if (likely(!vma->anon_vma)) {
93481+
93482+#ifdef CONFIG_PAX_SEGMEXEC
93483+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
93484+
93485+ if (vma_m) {
93486+ BUG_ON(vma_m->anon_vma);
93487+ vma_m->anon_vma = anon_vma;
93488+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
93489+ avc_m = NULL;
93490+ }
93491+#endif
93492+
93493 vma->anon_vma = anon_vma;
93494 anon_vma_chain_link(vma, avc, anon_vma);
93495 allocated = NULL;
93496@@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
93497
93498 if (unlikely(allocated))
93499 put_anon_vma(allocated);
93500+
93501+#ifdef CONFIG_PAX_SEGMEXEC
93502+ if (unlikely(avc_m))
93503+ anon_vma_chain_free(avc_m);
93504+#endif
93505+
93506 if (unlikely(avc))
93507 anon_vma_chain_free(avc);
93508 }
93509 return 0;
93510
93511 out_enomem_free_avc:
93512+
93513+#ifdef CONFIG_PAX_SEGMEXEC
93514+ if (avc_m)
93515+ anon_vma_chain_free(avc_m);
93516+#endif
93517+
93518 anon_vma_chain_free(avc);
93519 out_enomem:
93520 return -ENOMEM;
93521@@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
93522 * Attach the anon_vmas from src to dst.
93523 * Returns 0 on success, -ENOMEM on failure.
93524 */
93525-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
93526+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
93527 {
93528 struct anon_vma_chain *avc, *pavc;
93529 struct anon_vma *root = NULL;
93530@@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
93531 * the corresponding VMA in the parent process is attached to.
93532 * Returns 0 on success, non-zero on failure.
93533 */
93534-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
93535+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
93536 {
93537 struct anon_vma_chain *avc;
93538 struct anon_vma *anon_vma;
93539@@ -373,8 +407,10 @@ static void anon_vma_ctor(void *data)
93540 void __init anon_vma_init(void)
93541 {
93542 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
93543- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
93544- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
93545+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
93546+ anon_vma_ctor);
93547+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
93548+ SLAB_PANIC|SLAB_NO_SANITIZE);
93549 }
93550
93551 /*
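
The SEGMEXEC additions follow the same discipline the surrounding code already applies to avc: the extra anon_vma_chain for the mirror VMA is allocated with GFP_KERNEL before page_table_lock is taken (allocation may sleep; the spinlock section may not), consumed under the lock only if a mirror actually exists, and released on every exit path if it went unused. A minimal pthread sketch of that allocate-outside-the-lock pattern (names illustrative; build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *shared_slot;               /* stands in for vma_m->anon_vma */

static int attach(int have_mirror)
{
    /* 1. allocate up front: may "sleep", so it must happen unlocked */
    void *prealloc = malloc(64);
    if (!prealloc)
        return -1;

    /* 2. short critical section: only link, never allocate */
    pthread_mutex_lock(&lock);
    if (have_mirror && !shared_slot) {
        shared_slot = prealloc;
        prealloc = NULL;                /* consumed under the lock */
    }
    pthread_mutex_unlock(&lock);

    /* 3. drop the preallocation if it was not needed */
    free(prealloc);                     /* free(NULL) is a no-op */
    return 0;
}

int main(void)
{
    attach(0);                          /* no mirror: prealloc discarded */
    attach(1);                          /* mirror: prealloc linked */
    printf("slot = %p\n", shared_slot);
    free(shared_slot);
    return 0;
}
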
93552diff --git a/mm/shmem.c b/mm/shmem.c
93553index 902a148..58f9d59 100644
93554--- a/mm/shmem.c
93555+++ b/mm/shmem.c
93556@@ -33,7 +33,7 @@
93557 #include <linux/swap.h>
93558 #include <linux/aio.h>
93559
93560-static struct vfsmount *shm_mnt;
93561+struct vfsmount *shm_mnt;
93562
93563 #ifdef CONFIG_SHMEM
93564 /*
93565@@ -77,7 +77,7 @@ static struct vfsmount *shm_mnt;
93566 #define BOGO_DIRENT_SIZE 20
93567
93568 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
93569-#define SHORT_SYMLINK_LEN 128
93570+#define SHORT_SYMLINK_LEN 64
93571
93572 /*
93573 * shmem_fallocate and shmem_writepage communicate via inode->i_private
93574@@ -2232,6 +2232,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
93575 static int shmem_xattr_validate(const char *name)
93576 {
93577 struct { const char *prefix; size_t len; } arr[] = {
93578+
93579+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
93580+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
93581+#endif
93582+
93583 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
93584 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
93585 };
93586@@ -2287,6 +2292,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
93587 if (err)
93588 return err;
93589
93590+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
93591+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
93592+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
93593+ return -EOPNOTSUPP;
93594+ if (size > 8)
93595+ return -EINVAL;
93596+ }
93597+#endif
93598+
93599 return simple_xattr_set(&info->xattrs, name, value, size, flags);
93600 }
93601
93602@@ -2599,8 +2613,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
93603 int err = -ENOMEM;
93604
93605 /* Round up to L1_CACHE_BYTES to resist false sharing */
93606- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
93607- L1_CACHE_BYTES), GFP_KERNEL);
93608+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
93609 if (!sbinfo)
93610 return -ENOMEM;
93611
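
Under CONFIG_PAX_XATTR_PAX_FLAGS, tmpfs adds the user. prefix to its accepted xattr namespaces but immediately narrows it in shmem_setxattr: the only permitted user. attribute is the one carrying PaX flags, and its value may be at most 8 bytes. A userspace sketch of the same filter; the expansion of XATTR_NAME_PAX_FLAGS to "user.pax.flags" is an assumption of the demo (the patch only uses the macro):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define XATTR_USER_PREFIX     "user."
#define XATTR_USER_PREFIX_LEN (sizeof(XATTR_USER_PREFIX) - 1)
#define XATTR_NAME_PAX_FLAGS  "user.pax.flags"   /* assumed expansion */

static int pax_xattr_check(const char *name, size_t size)
{
    if (strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) == 0) {
        if (strcmp(name, XATTR_NAME_PAX_FLAGS) != 0)
            return -EOPNOTSUPP;   /* only the PaX flags attr in user.* */
        if (size > 8)
            return -EINVAL;       /* the flag string is at most 8 bytes */
    }
    return 0;
}

int main(void)
{
    printf("%d %d %d\n",
           pax_xattr_check("user.pax.flags", 4),    /* 0 */
           pax_xattr_check("user.mime_type", 4),    /* -EOPNOTSUPP */
           pax_xattr_check("user.pax.flags", 16));  /* -EINVAL */
    return 0;
}
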
93612diff --git a/mm/slab.c b/mm/slab.c
93613index eb043bf..d82f5a8 100644
93614--- a/mm/slab.c
93615+++ b/mm/slab.c
93616@@ -300,10 +300,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
93617 if ((x)->max_freeable < i) \
93618 (x)->max_freeable = i; \
93619 } while (0)
93620-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
93621-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
93622-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
93623-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
93624+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
93625+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
93626+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
93627+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
93628+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
93629+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
93630 #else
93631 #define STATS_INC_ACTIVE(x) do { } while (0)
93632 #define STATS_DEC_ACTIVE(x) do { } while (0)
93633@@ -320,6 +322,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
93634 #define STATS_INC_ALLOCMISS(x) do { } while (0)
93635 #define STATS_INC_FREEHIT(x) do { } while (0)
93636 #define STATS_INC_FREEMISS(x) do { } while (0)
93637+#define STATS_INC_SANITIZED(x) do { } while (0)
93638+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
93639 #endif
93640
93641 #if DEBUG
93642@@ -403,7 +407,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
93643 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
93644 */
93645 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
93646- const struct page *page, void *obj)
93647+ const struct page *page, const void *obj)
93648 {
93649 u32 offset = (obj - page->s_mem);
93650 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
93651@@ -1489,12 +1493,12 @@ void __init kmem_cache_init(void)
93652 */
93653
93654 kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
93655- kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
93656+ kmalloc_size(INDEX_AC), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
93657
93658 if (INDEX_AC != INDEX_NODE)
93659 kmalloc_caches[INDEX_NODE] =
93660 create_kmalloc_cache("kmalloc-node",
93661- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
93662+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
93663
93664 slab_early_init = 0;
93665
93666@@ -3428,6 +3432,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
93667 struct array_cache *ac = cpu_cache_get(cachep);
93668
93669 check_irq_off();
93670+
93671+#ifdef CONFIG_PAX_MEMORY_SANITIZE
93672+ if (pax_sanitize_slab) {
93673+ if (!(cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))) {
93674+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
93675+
93676+ if (cachep->ctor)
93677+ cachep->ctor(objp);
93678+
93679+ STATS_INC_SANITIZED(cachep);
93680+ } else
93681+ STATS_INC_NOT_SANITIZED(cachep);
93682+ }
93683+#endif
93684+
93685 kmemleak_free_recursive(objp, cachep->flags);
93686 objp = cache_free_debugcheck(cachep, objp, caller);
93687
93688@@ -3656,6 +3675,7 @@ void kfree(const void *objp)
93689
93690 if (unlikely(ZERO_OR_NULL_PTR(objp)))
93691 return;
93692+ VM_BUG_ON(!virt_addr_valid(objp));
93693 local_irq_save(flags);
93694 kfree_debugcheck(objp);
93695 c = virt_to_cache(objp);
93696@@ -4097,14 +4117,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
93697 }
93698 /* cpu stats */
93699 {
93700- unsigned long allochit = atomic_read(&cachep->allochit);
93701- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
93702- unsigned long freehit = atomic_read(&cachep->freehit);
93703- unsigned long freemiss = atomic_read(&cachep->freemiss);
93704+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
93705+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
93706+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
93707+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
93708
93709 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
93710 allochit, allocmiss, freehit, freemiss);
93711 }
93712+#ifdef CONFIG_PAX_MEMORY_SANITIZE
93713+ {
93714+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
93715+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
93716+
93717+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
93718+ }
93719+#endif
93720 #endif
93721 }
93722
93723@@ -4334,13 +4362,69 @@ static const struct file_operations proc_slabstats_operations = {
93724 static int __init slab_proc_init(void)
93725 {
93726 #ifdef CONFIG_DEBUG_SLAB_LEAK
93727- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
93728+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
93729 #endif
93730 return 0;
93731 }
93732 module_init(slab_proc_init);
93733 #endif
93734
93735+bool is_usercopy_object(const void *ptr)
93736+{
93737+ struct page *page;
93738+ struct kmem_cache *cachep;
93739+
93740+ if (ZERO_OR_NULL_PTR(ptr))
93741+ return false;
93742+
93743+ if (!slab_is_available())
93744+ return false;
93745+
93746+ if (!virt_addr_valid(ptr))
93747+ return false;
93748+
93749+ page = virt_to_head_page(ptr);
93750+
93751+ if (!PageSlab(page))
93752+ return false;
93753+
93754+ cachep = page->slab_cache;
93755+ return cachep->flags & SLAB_USERCOPY;
93756+}
93757+
93758+#ifdef CONFIG_PAX_USERCOPY
93759+const char *check_heap_object(const void *ptr, unsigned long n)
93760+{
93761+ struct page *page;
93762+ struct kmem_cache *cachep;
93763+ unsigned int objnr;
93764+ unsigned long offset;
93765+
93766+ if (ZERO_OR_NULL_PTR(ptr))
93767+ return "<null>";
93768+
93769+ if (!virt_addr_valid(ptr))
93770+ return NULL;
93771+
93772+ page = virt_to_head_page(ptr);
93773+
93774+ if (!PageSlab(page))
93775+ return NULL;
93776+
93777+ cachep = page->slab_cache;
93778+ if (!(cachep->flags & SLAB_USERCOPY))
93779+ return cachep->name;
93780+
93781+ objnr = obj_to_index(cachep, page, ptr);
93782+ BUG_ON(objnr >= cachep->num);
93783+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
93784+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
93785+ return NULL;
93786+
93787+ return cachep->name;
93788+}
93789+#endif
93790+
93791 /**
93792 * ksize - get the actual amount of memory allocated for a given object
93793 * @objp: Pointer to the object
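
check_heap_object() above is the SLAB backend of PAX_USERCOPY: for a proposed copy of n bytes at ptr, returning NULL means "allowed" and returning a cache name marks a violation — either the cache was never whitelisted with SLAB_USERCOPY, or the copy would run past the object ptr falls in. The object test is plain arithmetic: which object index does ptr land in, what is the offset inside it, and does offset + n stay within object_size. A simplified, self-contained sketch (fixed-size slots standing in for a real slab page):

#include <stdio.h>

/* NULL means the n-byte copy at ptr stays inside a single object of the
 * slab at slab_base; a name flags the violation (the patch additionally
 * refuses outright when the cache lacks SLAB_USERCOPY) */
static const char *check_obj(const char *slab_base, unsigned long obj_size,
                             unsigned long nr_objs,
                             const char *ptr, unsigned long n)
{
    unsigned long objnr  = (unsigned long)(ptr - slab_base) / obj_size;
    unsigned long offset = (unsigned long)(ptr - slab_base) - objnr * obj_size;

    if (objnr >= nr_objs)               /* like BUG_ON(objnr >= cachep->num) */
        return "demo-cache";
    if (offset <= obj_size && n <= obj_size - offset)
        return NULL;                    /* copy stays inside one object */
    return "demo-cache";
}

int main(void)
{
    static char slab[4 * 64];           /* four 64-byte objects */

    printf("%s\n", check_obj(slab, 64, 4, slab + 70, 32) ? "blocked" : "ok");
    printf("%s\n", check_obj(slab, 64, 4, slab + 70, 64) ? "blocked" : "ok");
    return 0;
}
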
93794diff --git a/mm/slab.h b/mm/slab.h
93795index 0859c42..2f7b737 100644
93796--- a/mm/slab.h
93797+++ b/mm/slab.h
93798@@ -32,6 +32,15 @@ extern struct list_head slab_caches;
93799 /* The slab cache that manages slab cache information */
93800 extern struct kmem_cache *kmem_cache;
93801
93802+#ifdef CONFIG_PAX_MEMORY_SANITIZE
93803+#ifdef CONFIG_X86_64
93804+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
93805+#else
93806+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
93807+#endif
93808+extern bool pax_sanitize_slab;
93809+#endif
93810+
93811 unsigned long calculate_alignment(unsigned long flags,
93812 unsigned long align, unsigned long size);
93813
93814@@ -67,7 +76,8 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
93815
93816 /* Legal flag mask for kmem_cache_create(), for various configurations */
93817 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
93818- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
93819+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
93820+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
93821
93822 #if defined(CONFIG_DEBUG_SLAB)
93823 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
93824@@ -233,6 +243,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
93825 return s;
93826
93827 page = virt_to_head_page(x);
93828+
93829+ BUG_ON(!PageSlab(page));
93830+
93831 cachep = page->slab_cache;
93832 if (slab_equal_or_root(cachep, s))
93833 return cachep;
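
The poison byte is architecture-dependent for a reason: freed objects are filled with 0xff everywhere except x86_64, which uses 0xfe. On x86_64, 0xffffffffffffffff is a canonical kernel-space address, while a pointer read out of a 0xfe-filled object, 0xfefefefefefefefe, is non-canonical and faults the instant it is dereferenced — turning a quiet use-after-free into an immediate oops. A small sketch of what sanitization leaves behind (userspace, illustrative):

#include <stdio.h>
#include <string.h>

#define PAX_MEMORY_SANITIZE_VALUE 0xfe   /* the x86_64 value from the patch */

struct obj {
    void *next;
    char payload[24];
};

int main(void)
{
    struct obj o = { &o, "secret" };

    /* what the free path does under pax_sanitize_slab */
    memset(&o, PAX_MEMORY_SANITIZE_VALUE, sizeof(o));

    /* any pointer fished out of the "freed" object is now non-canonical
     * on x86_64, so dereferencing it would fault immediately */
    printf("poisoned next = %p\n", o.next);  /* 0xfefefefefefefefe */
    return 0;
}
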
93834diff --git a/mm/slab_common.c b/mm/slab_common.c
93835index 0b7bb39..334c328 100644
93836--- a/mm/slab_common.c
93837+++ b/mm/slab_common.c
93838@@ -23,11 +23,22 @@
93839
93840 #include "slab.h"
93841
93842-enum slab_state slab_state;
93843+enum slab_state slab_state __read_only;
93844 LIST_HEAD(slab_caches);
93845 DEFINE_MUTEX(slab_mutex);
93846 struct kmem_cache *kmem_cache;
93847
93848+#ifdef CONFIG_PAX_MEMORY_SANITIZE
93849+bool pax_sanitize_slab __read_only = true;
93850+static int __init pax_sanitize_slab_setup(char *str)
93851+{
93852+ pax_sanitize_slab = !!simple_strtol(str, NULL, 0);
93853+ printk("%sabled PaX slab sanitization\n", pax_sanitize_slab ? "En" : "Dis");
93854+ return 1;
93855+}
93856+__setup("pax_sanitize_slab=", pax_sanitize_slab_setup);
93857+#endif
93858+
93859 #ifdef CONFIG_DEBUG_VM
93860 static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
93861 size_t size)
93862@@ -212,7 +223,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
93863
93864 err = __kmem_cache_create(s, flags);
93865 if (!err) {
93866- s->refcount = 1;
93867+ atomic_set(&s->refcount, 1);
93868 list_add(&s->list, &slab_caches);
93869 memcg_cache_list_add(memcg, s);
93870 } else {
93871@@ -258,8 +269,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
93872
93873 get_online_cpus();
93874 mutex_lock(&slab_mutex);
93875- s->refcount--;
93876- if (!s->refcount) {
93877+ if (atomic_dec_and_test(&s->refcount)) {
93878 list_del(&s->list);
93879
93880 if (!__kmem_cache_shutdown(s)) {
93881@@ -305,7 +315,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
93882 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
93883 name, size, err);
93884
93885- s->refcount = -1; /* Exempt from merging for now */
93886+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
93887 }
93888
93889 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
93890@@ -318,7 +328,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
93891
93892 create_boot_cache(s, name, size, flags);
93893 list_add(&s->list, &slab_caches);
93894- s->refcount = 1;
93895+ atomic_set(&s->refcount, 1);
93896 return s;
93897 }
93898
93899@@ -330,6 +340,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
93900 EXPORT_SYMBOL(kmalloc_dma_caches);
93901 #endif
93902
93903+#ifdef CONFIG_PAX_USERCOPY_SLABS
93904+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
93905+EXPORT_SYMBOL(kmalloc_usercopy_caches);
93906+#endif
93907+
93908 /*
93909 * Conversion table for small slabs sizes / 8 to the index in the
93910 * kmalloc array. This is necessary for slabs < 192 since we have non power
93911@@ -394,6 +409,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
93912 return kmalloc_dma_caches[index];
93913
93914 #endif
93915+
93916+#ifdef CONFIG_PAX_USERCOPY_SLABS
93917+ if (unlikely((flags & GFP_USERCOPY)))
93918+ return kmalloc_usercopy_caches[index];
93919+
93920+#endif
93921+
93922 return kmalloc_caches[index];
93923 }
93924
93925@@ -450,7 +472,7 @@ void __init create_kmalloc_caches(unsigned long flags)
93926 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
93927 if (!kmalloc_caches[i]) {
93928 kmalloc_caches[i] = create_kmalloc_cache(NULL,
93929- 1 << i, flags);
93930+ 1 << i, SLAB_USERCOPY | flags);
93931 }
93932
93933 /*
93934@@ -459,10 +481,10 @@ void __init create_kmalloc_caches(unsigned long flags)
93935 * earlier power of two caches
93936 */
93937 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
93938- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
93939+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
93940
93941 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
93942- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
93943+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
93944 }
93945
93946 /* Kmalloc array is now usable */
93947@@ -495,6 +517,23 @@ void __init create_kmalloc_caches(unsigned long flags)
93948 }
93949 }
93950 #endif
93951+
93952+#ifdef CONFIG_PAX_USERCOPY_SLABS
93953+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
93954+ struct kmem_cache *s = kmalloc_caches[i];
93955+
93956+ if (s) {
93957+ int size = kmalloc_size(i);
93958+ char *n = kasprintf(GFP_NOWAIT,
93959+ "usercopy-kmalloc-%d", size);
93960+
93961+ BUG_ON(!n);
93962+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
93963+ size, SLAB_USERCOPY | flags);
93964+ }
93965+ }
93966+#endif
93967+
93968 }
93969 #endif /* !CONFIG_SLOB */
93970
93971@@ -535,6 +574,9 @@ void print_slabinfo_header(struct seq_file *m)
93972 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
93973 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
93974 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
93975+#ifdef CONFIG_PAX_MEMORY_SANITIZE
93976+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
93977+#endif
93978 #endif
93979 seq_putc(m, '\n');
93980 }
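
The refcount conversion in this file fixes a classic check-after-decrement race on cache destruction: s->refcount--; if (!s->refcount) is a non-atomic read-modify-write followed by a separate test, so two racing callers can both skip — or both attempt — the teardown; atomic_dec_and_test() fuses the decrement and the zero check so exactly one caller sees the count hit zero. A C11 sketch of the corrected pattern (illustrative types; the kernel uses atomic_t):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct cache {
    atomic_int refcount;
    char *data;
};

static void cache_put(struct cache *c)
{
    /* fetch_sub returns the previous value: exactly one caller sees 1,
     * the atomic equivalent of atomic_dec_and_test() succeeding */
    if (atomic_fetch_sub(&c->refcount, 1) == 1) {
        free(c->data);
        free(c);
    }
}

int main(void)
{
    struct cache *c = malloc(sizeof(*c));

    atomic_init(&c->refcount, 2);
    c->data = malloc(16);

    cache_put(c);   /* 2 -> 1: no teardown */
    cache_put(c);   /* 1 -> 0: frees everything, exactly once */
    puts("done");
    return 0;
}
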
93981diff --git a/mm/slob.c b/mm/slob.c
93982index 4bf8809..98a6914 100644
93983--- a/mm/slob.c
93984+++ b/mm/slob.c
93985@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
93986 /*
93987 * Return the size of a slob block.
93988 */
93989-static slobidx_t slob_units(slob_t *s)
93990+static slobidx_t slob_units(const slob_t *s)
93991 {
93992 if (s->units > 0)
93993 return s->units;
93994@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
93995 /*
93996 * Return the next free slob block pointer after this one.
93997 */
93998-static slob_t *slob_next(slob_t *s)
93999+static slob_t *slob_next(const slob_t *s)
94000 {
94001 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
94002 slobidx_t next;
94003@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
94004 /*
94005 * Returns true if s is the last free block in its page.
94006 */
94007-static int slob_last(slob_t *s)
94008+static int slob_last(const slob_t *s)
94009 {
94010 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
94011 }
94012
94013-static void *slob_new_pages(gfp_t gfp, int order, int node)
94014+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
94015 {
94016- void *page;
94017+ struct page *page;
94018
94019 #ifdef CONFIG_NUMA
94020 if (node != NUMA_NO_NODE)
94021@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
94022 if (!page)
94023 return NULL;
94024
94025- return page_address(page);
94026+ __SetPageSlab(page);
94027+ return page;
94028 }
94029
94030-static void slob_free_pages(void *b, int order)
94031+static void slob_free_pages(struct page *sp, int order)
94032 {
94033 if (current->reclaim_state)
94034 current->reclaim_state->reclaimed_slab += 1 << order;
94035- free_pages((unsigned long)b, order);
94036+ __ClearPageSlab(sp);
94037+ page_mapcount_reset(sp);
94038+ sp->private = 0;
94039+ __free_pages(sp, order);
94040 }
94041
94042 /*
94043@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
94044
94045 /* Not enough space: must allocate a new page */
94046 if (!b) {
94047- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
94048- if (!b)
94049+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
94050+ if (!sp)
94051 return NULL;
94052- sp = virt_to_page(b);
94053- __SetPageSlab(sp);
94054+ b = page_address(sp);
94055
94056 spin_lock_irqsave(&slob_lock, flags);
94057 sp->units = SLOB_UNITS(PAGE_SIZE);
94058 sp->freelist = b;
94059+ sp->private = 0;
94060 INIT_LIST_HEAD(&sp->list);
94061 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
94062 set_slob_page_free(sp, slob_list);
94063@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
94064 if (slob_page_free(sp))
94065 clear_slob_page_free(sp);
94066 spin_unlock_irqrestore(&slob_lock, flags);
94067- __ClearPageSlab(sp);
94068- page_mapcount_reset(sp);
94069- slob_free_pages(b, 0);
94070+ slob_free_pages(sp, 0);
94071 return;
94072 }
94073
94074+#ifdef CONFIG_PAX_MEMORY_SANITIZE
94075+ if (pax_sanitize_slab)
94076+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
94077+#endif
94078+
94079 if (!slob_page_free(sp)) {
94080 /* This slob page is about to become partially free. Easy! */
94081 sp->units = units;
94082@@ -424,11 +431,10 @@ out:
94083 */
94084
94085 static __always_inline void *
94086-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
94087+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
94088 {
94089- unsigned int *m;
94090- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
94091- void *ret;
94092+ slob_t *m;
94093+ void *ret = NULL;
94094
94095 gfp &= gfp_allowed_mask;
94096
94097@@ -442,23 +448,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
94098
94099 if (!m)
94100 return NULL;
94101- *m = size;
94102+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
94103+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
94104+ m[0].units = size;
94105+ m[1].units = align;
94106 ret = (void *)m + align;
94107
94108 trace_kmalloc_node(caller, ret,
94109 size, size + align, gfp, node);
94110 } else {
94111 unsigned int order = get_order(size);
94112+ struct page *page;
94113
94114 if (likely(order))
94115 gfp |= __GFP_COMP;
94116- ret = slob_new_pages(gfp, order, node);
94117+ page = slob_new_pages(gfp, order, node);
94118+ if (page) {
94119+ ret = page_address(page);
94120+ page->private = size;
94121+ }
94122
94123 trace_kmalloc_node(caller, ret,
94124 size, PAGE_SIZE << order, gfp, node);
94125 }
94126
94127- kmemleak_alloc(ret, size, 1, gfp);
94128+ return ret;
94129+}
94130+
94131+static __always_inline void *
94132+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
94133+{
94134+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
94135+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
94136+
94137+ if (!ZERO_OR_NULL_PTR(ret))
94138+ kmemleak_alloc(ret, size, 1, gfp);
94139 return ret;
94140 }
94141
94142@@ -493,34 +517,112 @@ void kfree(const void *block)
94143 return;
94144 kmemleak_free(block);
94145
94146+ VM_BUG_ON(!virt_addr_valid(block));
94147 sp = virt_to_page(block);
94148- if (PageSlab(sp)) {
94149+ VM_BUG_ON(!PageSlab(sp));
94150+ if (!sp->private) {
94151 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
94152- unsigned int *m = (unsigned int *)(block - align);
94153- slob_free(m, *m + align);
94154- } else
94155+ slob_t *m = (slob_t *)(block - align);
94156+ slob_free(m, m[0].units + align);
94157+ } else {
94158+ __ClearPageSlab(sp);
94159+ page_mapcount_reset(sp);
94160+ sp->private = 0;
94161 __free_pages(sp, compound_order(sp));
94162+ }
94163 }
94164 EXPORT_SYMBOL(kfree);
94165
94166+bool is_usercopy_object(const void *ptr)
94167+{
94168+ if (!slab_is_available())
94169+ return false;
94170+
94171+ // PAX: TODO
94172+
94173+ return false;
94174+}
94175+
94176+#ifdef CONFIG_PAX_USERCOPY
94177+const char *check_heap_object(const void *ptr, unsigned long n)
94178+{
94179+ struct page *page;
94180+ const slob_t *free;
94181+ const void *base;
94182+ unsigned long flags;
94183+
94184+ if (ZERO_OR_NULL_PTR(ptr))
94185+ return "<null>";
94186+
94187+ if (!virt_addr_valid(ptr))
94188+ return NULL;
94189+
94190+ page = virt_to_head_page(ptr);
94191+ if (!PageSlab(page))
94192+ return NULL;
94193+
94194+ if (page->private) {
94195+ base = page;
94196+ if (base <= ptr && n <= page->private - (ptr - base))
94197+ return NULL;
94198+ return "<slob>";
94199+ }
94200+
94201+	/* double walk: follow the free list, and between consecutive free blocks step through the allocated chunks via their stored size and align headers, until the chunk containing ptr is found */
94202+ spin_lock_irqsave(&slob_lock, flags);
94203+ base = (void *)((unsigned long)ptr & PAGE_MASK);
94204+ free = page->freelist;
94205+
94206+ while (!slob_last(free) && (void *)free <= ptr) {
94207+ base = free + slob_units(free);
94208+ free = slob_next(free);
94209+ }
94210+
94211+ while (base < (void *)free) {
94212+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
94213+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
94214+ int offset;
94215+
94216+ if (ptr < base + align)
94217+ break;
94218+
94219+ offset = ptr - base - align;
94220+ if (offset >= m) {
94221+ base += size;
94222+ continue;
94223+ }
94224+
94225+ if (n > m - offset)
94226+ break;
94227+
94228+ spin_unlock_irqrestore(&slob_lock, flags);
94229+ return NULL;
94230+ }
94231+
94232+ spin_unlock_irqrestore(&slob_lock, flags);
94233+ return "<slob>";
94234+}
94235+#endif
94236+
94237 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
94238 size_t ksize(const void *block)
94239 {
94240 struct page *sp;
94241 int align;
94242- unsigned int *m;
94243+ slob_t *m;
94244
94245 BUG_ON(!block);
94246 if (unlikely(block == ZERO_SIZE_PTR))
94247 return 0;
94248
94249 sp = virt_to_page(block);
94250- if (unlikely(!PageSlab(sp)))
94251- return PAGE_SIZE << compound_order(sp);
94252+ VM_BUG_ON(!PageSlab(sp));
94253+ if (sp->private)
94254+ return sp->private;
94255
94256 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
94257- m = (unsigned int *)(block - align);
94258- return SLOB_UNITS(*m) * SLOB_UNIT;
94259+ m = (slob_t *)(block - align);
94260+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
94261 }
94262 EXPORT_SYMBOL(ksize);
94263
94264@@ -536,23 +638,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
94265
94266 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
94267 {
94268- void *b;
94269+ void *b = NULL;
94270
94271 flags &= gfp_allowed_mask;
94272
94273 lockdep_trace_alloc(flags);
94274
94275+#ifdef CONFIG_PAX_USERCOPY_SLABS
94276+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
94277+#else
94278 if (c->size < PAGE_SIZE) {
94279 b = slob_alloc(c->size, flags, c->align, node);
94280 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
94281 SLOB_UNITS(c->size) * SLOB_UNIT,
94282 flags, node);
94283 } else {
94284- b = slob_new_pages(flags, get_order(c->size), node);
94285+ struct page *sp;
94286+
94287+ sp = slob_new_pages(flags, get_order(c->size), node);
94288+ if (sp) {
94289+ b = page_address(sp);
94290+ sp->private = c->size;
94291+ }
94292 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
94293 PAGE_SIZE << get_order(c->size),
94294 flags, node);
94295 }
94296+#endif
94297
94298 if (b && c->ctor)
94299 c->ctor(b);
94300@@ -584,10 +696,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
94301
94302 static void __kmem_cache_free(void *b, int size)
94303 {
94304- if (size < PAGE_SIZE)
94305+ struct page *sp;
94306+
94307+ sp = virt_to_page(b);
94308+ BUG_ON(!PageSlab(sp));
94309+ if (!sp->private)
94310 slob_free(b, size);
94311 else
94312- slob_free_pages(b, get_order(size));
94313+ slob_free_pages(sp, get_order(size));
94314 }
94315
94316 static void kmem_rcu_free(struct rcu_head *head)
94317@@ -600,17 +716,31 @@ static void kmem_rcu_free(struct rcu_head *head)
94318
94319 void kmem_cache_free(struct kmem_cache *c, void *b)
94320 {
94321+ int size = c->size;
94322+
94323+#ifdef CONFIG_PAX_USERCOPY_SLABS
94324+ if (size + c->align < PAGE_SIZE) {
94325+ size += c->align;
94326+ b -= c->align;
94327+ }
94328+#endif
94329+
94330 kmemleak_free_recursive(b, c->flags);
94331 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
94332 struct slob_rcu *slob_rcu;
94333- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
94334- slob_rcu->size = c->size;
94335+ slob_rcu = b + (size - sizeof(struct slob_rcu));
94336+ slob_rcu->size = size;
94337 call_rcu(&slob_rcu->head, kmem_rcu_free);
94338 } else {
94339- __kmem_cache_free(b, c->size);
94340+ __kmem_cache_free(b, size);
94341 }
94342
94343+#ifdef CONFIG_PAX_USERCOPY_SLABS
94344+ trace_kfree(_RET_IP_, b);
94345+#else
94346 trace_kmem_cache_free(_RET_IP_, b);
94347+#endif
94348+
94349 }
94350 EXPORT_SYMBOL(kmem_cache_free);
94351
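
The SLOB rework replaces the single unsigned int size header with two slob_t units — m[0].units holds the size, m[1].units the alignment — and the caller still receives (void *)m + align. Recording the alignment is what lets the new check_heap_object() above re-derive each chunk's payload bounds while walking a page. A malloc-backed userspace sketch of the size+align header layout (align fixed at 16, which must cover the header; all names illustrative):

#include <stdio.h>
#include <stdlib.h>

/* the header lives in the first 'align' bytes, the payload follows --
 * the SLOB hunk stores size in m[0].units and align in m[1].units */
struct hdr {
    size_t size;
    size_t align;
};

static void *demo_alloc(size_t size, size_t align)
{
    char *m = malloc(align + size);

    if (!m)
        return NULL;
    ((struct hdr *)m)->size  = size;
    ((struct hdr *)m)->align = align;
    return m + align;                   /* like SLOB's (void *)m + align */
}

static size_t demo_ksize(const void *p)
{
    /* with a fixed align we can step back to the header, as ksize() does */
    const struct hdr *h = (const struct hdr *)((const char *)p - 16);
    return h->size;
}

int main(void)
{
    void *p = demo_alloc(100, 16);      /* 16 >= sizeof(struct hdr) here */

    if (!p)
        return 1;
    printf("ksize = %zu\n", demo_ksize(p));   /* 100 */
    free((char *)p - 16);
    return 0;
}
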
94352diff --git a/mm/slub.c b/mm/slub.c
94353index 545a170..a086226 100644
94354--- a/mm/slub.c
94355+++ b/mm/slub.c
94356@@ -207,7 +207,7 @@ struct track {
94357
94358 enum track_item { TRACK_ALLOC, TRACK_FREE };
94359
94360-#ifdef CONFIG_SYSFS
94361+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
94362 static int sysfs_slab_add(struct kmem_cache *);
94363 static int sysfs_slab_alias(struct kmem_cache *, const char *);
94364 static void sysfs_slab_remove(struct kmem_cache *);
94365@@ -530,7 +530,7 @@ static void print_track(const char *s, struct track *t)
94366 if (!t->addr)
94367 return;
94368
94369- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
94370+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
94371 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
94372 #ifdef CONFIG_STACKTRACE
94373 {
94374@@ -2643,6 +2643,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
94375
94376 slab_free_hook(s, x);
94377
94378+#ifdef CONFIG_PAX_MEMORY_SANITIZE
94379+ if (pax_sanitize_slab && !(s->flags & SLAB_NO_SANITIZE)) {
94380+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
94381+ if (s->ctor)
94382+ s->ctor(x);
94383+ }
94384+#endif
94385+
94386 redo:
94387 /*
94388 * Determine the currently cpus per cpu slab.
94389@@ -2710,7 +2718,7 @@ static int slub_min_objects;
94390 * Merge control. If this is set then no merging of slab caches will occur.
94391 * (Could be removed. This was introduced to pacify the merge skeptics.)
94392 */
94393-static int slub_nomerge;
94394+static int slub_nomerge = 1;
94395
94396 /*
94397 * Calculate the order of allocation given an slab object size.
94398@@ -2987,6 +2995,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
94399 s->inuse = size;
94400
94401 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
94402+#ifdef CONFIG_PAX_MEMORY_SANITIZE
94403+ (pax_sanitize_slab && !(flags & SLAB_NO_SANITIZE)) ||
94404+#endif
94405 s->ctor)) {
94406 /*
94407 * Relocate free pointer after the object if it is not
94408@@ -3332,6 +3343,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
94409 EXPORT_SYMBOL(__kmalloc_node);
94410 #endif
94411
94412+bool is_usercopy_object(const void *ptr)
94413+{
94414+ struct page *page;
94415+ struct kmem_cache *s;
94416+
94417+ if (ZERO_OR_NULL_PTR(ptr))
94418+ return false;
94419+
94420+ if (!slab_is_available())
94421+ return false;
94422+
94423+ if (!virt_addr_valid(ptr))
94424+ return false;
94425+
94426+ page = virt_to_head_page(ptr);
94427+
94428+ if (!PageSlab(page))
94429+ return false;
94430+
94431+ s = page->slab_cache;
94432+ return s->flags & SLAB_USERCOPY;
94433+}
94434+
94435+#ifdef CONFIG_PAX_USERCOPY
94436+const char *check_heap_object(const void *ptr, unsigned long n)
94437+{
94438+ struct page *page;
94439+ struct kmem_cache *s;
94440+ unsigned long offset;
94441+
94442+ if (ZERO_OR_NULL_PTR(ptr))
94443+ return "<null>";
94444+
94445+ if (!virt_addr_valid(ptr))
94446+ return NULL;
94447+
94448+ page = virt_to_head_page(ptr);
94449+
94450+ if (!PageSlab(page))
94451+ return NULL;
94452+
94453+ s = page->slab_cache;
94454+ if (!(s->flags & SLAB_USERCOPY))
94455+ return s->name;
94456+
94457+ offset = (ptr - page_address(page)) % s->size;
94458+ if (offset <= s->object_size && n <= s->object_size - offset)
94459+ return NULL;
94460+
94461+ return s->name;
94462+}
94463+#endif
94464+
94465 size_t ksize(const void *object)
94466 {
94467 struct page *page;
94468@@ -3360,6 +3424,7 @@ void kfree(const void *x)
94469 if (unlikely(ZERO_OR_NULL_PTR(x)))
94470 return;
94471
94472+ VM_BUG_ON(!virt_addr_valid(x));
94473 page = virt_to_head_page(x);
94474 if (unlikely(!PageSlab(page))) {
94475 BUG_ON(!PageCompound(page));
94476@@ -3665,7 +3730,7 @@ static int slab_unmergeable(struct kmem_cache *s)
94477 /*
94478 * We may have set a slab to be unmergeable during bootstrap.
94479 */
94480- if (s->refcount < 0)
94481+ if (atomic_read(&s->refcount) < 0)
94482 return 1;
94483
94484 return 0;
94485@@ -3723,7 +3788,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
94486
94487 s = find_mergeable(memcg, size, align, flags, name, ctor);
94488 if (s) {
94489- s->refcount++;
94490+ atomic_inc(&s->refcount);
94491 /*
94492 * Adjust the object sizes so that we clear
94493 * the complete object on kzalloc.
94494@@ -3732,7 +3797,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
94495 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
94496
94497 if (sysfs_slab_alias(s, name)) {
94498- s->refcount--;
94499+ atomic_dec(&s->refcount);
94500 s = NULL;
94501 }
94502 }
94503@@ -3852,7 +3917,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
94504 }
94505 #endif
94506
94507-#ifdef CONFIG_SYSFS
94508+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
94509 static int count_inuse(struct page *page)
94510 {
94511 return page->inuse;
94512@@ -4241,12 +4306,12 @@ static void resiliency_test(void)
94513 validate_slab_cache(kmalloc_caches[9]);
94514 }
94515 #else
94516-#ifdef CONFIG_SYSFS
94517+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
94518 static void resiliency_test(void) {};
94519 #endif
94520 #endif
94521
94522-#ifdef CONFIG_SYSFS
94523+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
94524 enum slab_stat_type {
94525 SL_ALL, /* All slabs */
94526 SL_PARTIAL, /* Only partially allocated slabs */
94527@@ -4486,7 +4551,7 @@ SLAB_ATTR_RO(ctor);
94528
94529 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
94530 {
94531- return sprintf(buf, "%d\n", s->refcount - 1);
94532+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
94533 }
94534 SLAB_ATTR_RO(aliases);
94535
94536@@ -4574,6 +4639,14 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
94537 SLAB_ATTR_RO(cache_dma);
94538 #endif
94539
94540+#ifdef CONFIG_PAX_USERCOPY_SLABS
94541+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
94542+{
94543+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
94544+}
94545+SLAB_ATTR_RO(usercopy);
94546+#endif
94547+
94548 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
94549 {
94550 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
94551@@ -4908,6 +4981,9 @@ static struct attribute *slab_attrs[] = {
94552 #ifdef CONFIG_ZONE_DMA
94553 &cache_dma_attr.attr,
94554 #endif
94555+#ifdef CONFIG_PAX_USERCOPY_SLABS
94556+ &usercopy_attr.attr,
94557+#endif
94558 #ifdef CONFIG_NUMA
94559 &remote_node_defrag_ratio_attr.attr,
94560 #endif
94561@@ -5140,6 +5216,7 @@ static char *create_unique_id(struct kmem_cache *s)
94562 return name;
94563 }
94564
94565+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
94566 static int sysfs_slab_add(struct kmem_cache *s)
94567 {
94568 int err;
94569@@ -5163,7 +5240,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
94570 }
94571
94572 s->kobj.kset = slab_kset;
94573- err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
94574+ err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
94575 if (err) {
94576 kobject_put(&s->kobj);
94577 return err;
94578@@ -5197,6 +5274,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
94579 kobject_del(&s->kobj);
94580 kobject_put(&s->kobj);
94581 }
94582+#endif
94583
94584 /*
94585 * Need to buffer aliases during bootup until sysfs becomes
94586@@ -5210,6 +5288,7 @@ struct saved_alias {
94587
94588 static struct saved_alias *alias_list;
94589
94590+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
94591 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
94592 {
94593 struct saved_alias *al;
94594@@ -5232,6 +5311,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
94595 alias_list = al;
94596 return 0;
94597 }
94598+#endif
94599
94600 static int __init slab_sysfs_init(void)
94601 {
94602diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
94603index 27eeab3..7c3f7f2 100644
94604--- a/mm/sparse-vmemmap.c
94605+++ b/mm/sparse-vmemmap.c
94606@@ -130,7 +130,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
94607 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
94608 if (!p)
94609 return NULL;
94610- pud_populate(&init_mm, pud, p);
94611+ pud_populate_kernel(&init_mm, pud, p);
94612 }
94613 return pud;
94614 }
94615@@ -142,7 +142,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
94616 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
94617 if (!p)
94618 return NULL;
94619- pgd_populate(&init_mm, pgd, p);
94620+ pgd_populate_kernel(&init_mm, pgd, p);
94621 }
94622 return pgd;
94623 }
94624diff --git a/mm/sparse.c b/mm/sparse.c
94625index 8cc7be0..d0f7d7a 100644
94626--- a/mm/sparse.c
94627+++ b/mm/sparse.c
94628@@ -745,7 +745,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
94629
94630 for (i = 0; i < PAGES_PER_SECTION; i++) {
94631 if (PageHWPoison(&memmap[i])) {
94632- atomic_long_sub(1, &num_poisoned_pages);
94633+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
94634 ClearPageHWPoison(&memmap[i]);
94635 }
94636 }
94637diff --git a/mm/swap.c b/mm/swap.c
94638index 84b26aa..ce39899 100644
94639--- a/mm/swap.c
94640+++ b/mm/swap.c
94641@@ -77,6 +77,8 @@ static void __put_compound_page(struct page *page)
94642
94643 __page_cache_release(page);
94644 dtor = get_compound_page_dtor(page);
94645+ if (!PageHuge(page))
94646+ BUG_ON(dtor != free_compound_page);
94647 (*dtor)(page);
94648 }
94649
94650diff --git a/mm/swapfile.c b/mm/swapfile.c
94651index 612a7c9..66b0f5a 100644
94652--- a/mm/swapfile.c
94653+++ b/mm/swapfile.c
94654@@ -66,7 +66,7 @@ static DEFINE_MUTEX(swapon_mutex);
94655
94656 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
94657 /* Activity counter to indicate that a swapon or swapoff has occurred */
94658-static atomic_t proc_poll_event = ATOMIC_INIT(0);
94659+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
94660
94661 static inline unsigned char swap_count(unsigned char ent)
94662 {
94663@@ -1949,7 +1949,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
94664 }
94665 filp_close(swap_file, NULL);
94666 err = 0;
94667- atomic_inc(&proc_poll_event);
94668+ atomic_inc_unchecked(&proc_poll_event);
94669 wake_up_interruptible(&proc_poll_wait);
94670
94671 out_dput:
94672@@ -1966,8 +1966,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
94673
94674 poll_wait(file, &proc_poll_wait, wait);
94675
94676- if (seq->poll_event != atomic_read(&proc_poll_event)) {
94677- seq->poll_event = atomic_read(&proc_poll_event);
94678+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
94679+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
94680 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
94681 }
94682
94683@@ -2065,7 +2065,7 @@ static int swaps_open(struct inode *inode, struct file *file)
94684 return ret;
94685
94686 seq = file->private_data;
94687- seq->poll_event = atomic_read(&proc_poll_event);
94688+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
94689 return 0;
94690 }
94691
94692@@ -2524,7 +2524,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
94693 (frontswap_map) ? "FS" : "");
94694
94695 mutex_unlock(&swapon_mutex);
94696- atomic_inc(&proc_poll_event);
94697+ atomic_inc_unchecked(&proc_poll_event);
94698 wake_up_interruptible(&proc_poll_wait);
94699
94700 if (S_ISREG(inode->i_mode))
94701diff --git a/mm/util.c b/mm/util.c
94702index 808f375..e4764b5 100644
94703--- a/mm/util.c
94704+++ b/mm/util.c
94705@@ -297,6 +297,12 @@ done:
94706 void arch_pick_mmap_layout(struct mm_struct *mm)
94707 {
94708 mm->mmap_base = TASK_UNMAPPED_BASE;
94709+
94710+#ifdef CONFIG_PAX_RANDMMAP
94711+ if (mm->pax_flags & MF_PAX_RANDMMAP)
94712+ mm->mmap_base += mm->delta_mmap;
94713+#endif
94714+
94715 mm->get_unmapped_area = arch_get_unmapped_area;
94716 }
94717 #endif
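
The arch_pick_mmap_layout() hunk is PAX_RANDMMAP's hook for the generic layout: tasks flagged MF_PAX_RANDMMAP get TASK_UNMAPPED_BASE shifted by delta_mmap, a per-mm random, page-aligned offset chosen at exec time, so every exec sees a different mmap base. A toy sketch of the computation (the base address and 16-bit entropy budget are invented for the demo; the real delta width is arch-specific):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define TASK_UNMAPPED_BASE 0x40000000UL
#define PAGE_SHIFT 12
#define RAND_BITS  16               /* illustrative entropy budget */

int main(void)
{
    srand((unsigned)time(NULL));

    /* page-aligned random delta, as if picked once per exec */
    unsigned long delta_mmap =
        ((unsigned long)rand() & ((1UL << RAND_BITS) - 1)) << PAGE_SHIFT;

    unsigned long mmap_base = TASK_UNMAPPED_BASE + delta_mmap;

    printf("mmap_base = %#lx (delta %#lx)\n", mmap_base, delta_mmap);
    return 0;
}
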
94718diff --git a/mm/vmalloc.c b/mm/vmalloc.c
94719index 0fdf968..d6686e8 100644
94720--- a/mm/vmalloc.c
94721+++ b/mm/vmalloc.c
94722@@ -59,8 +59,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
94723
94724 pte = pte_offset_kernel(pmd, addr);
94725 do {
94726- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
94727- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
94728+
94729+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
94730+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
94731+ BUG_ON(!pte_exec(*pte));
94732+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
94733+ continue;
94734+ }
94735+#endif
94736+
94737+ {
94738+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
94739+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
94740+ }
94741 } while (pte++, addr += PAGE_SIZE, addr != end);
94742 }
94743
94744@@ -120,16 +131,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
94745 pte = pte_alloc_kernel(pmd, addr);
94746 if (!pte)
94747 return -ENOMEM;
94748+
94749+ pax_open_kernel();
94750 do {
94751 struct page *page = pages[*nr];
94752
94753- if (WARN_ON(!pte_none(*pte)))
94754+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
94755+ if (pgprot_val(prot) & _PAGE_NX)
94756+#endif
94757+
94758+ if (!pte_none(*pte)) {
94759+ pax_close_kernel();
94760+ WARN_ON(1);
94761 return -EBUSY;
94762- if (WARN_ON(!page))
94763+ }
94764+ if (!page) {
94765+ pax_close_kernel();
94766+ WARN_ON(1);
94767 return -ENOMEM;
94768+ }
94769 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
94770 (*nr)++;
94771 } while (pte++, addr += PAGE_SIZE, addr != end);
94772+ pax_close_kernel();
94773 return 0;
94774 }
94775
94776@@ -139,7 +163,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
94777 pmd_t *pmd;
94778 unsigned long next;
94779
94780- pmd = pmd_alloc(&init_mm, pud, addr);
94781+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
94782 if (!pmd)
94783 return -ENOMEM;
94784 do {
94785@@ -156,7 +180,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
94786 pud_t *pud;
94787 unsigned long next;
94788
94789- pud = pud_alloc(&init_mm, pgd, addr);
94790+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
94791 if (!pud)
94792 return -ENOMEM;
94793 do {
94794@@ -216,6 +240,12 @@ int is_vmalloc_or_module_addr(const void *x)
94795 if (addr >= MODULES_VADDR && addr < MODULES_END)
94796 return 1;
94797 #endif
94798+
94799+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
94800+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
94801+ return 1;
94802+#endif
94803+
94804 return is_vmalloc_addr(x);
94805 }
94806
94807@@ -236,8 +266,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
94808
94809 if (!pgd_none(*pgd)) {
94810 pud_t *pud = pud_offset(pgd, addr);
94811+#ifdef CONFIG_X86
94812+ if (!pud_large(*pud))
94813+#endif
94814 if (!pud_none(*pud)) {
94815 pmd_t *pmd = pmd_offset(pud, addr);
94816+#ifdef CONFIG_X86
94817+ if (!pmd_large(*pmd))
94818+#endif
94819 if (!pmd_none(*pmd)) {
94820 pte_t *ptep, pte;
94821
94822@@ -1309,6 +1345,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
94823 struct vm_struct *area;
94824
94825 BUG_ON(in_interrupt());
94826+
94827+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
94828+ if (flags & VM_KERNEXEC) {
94829+ if (start != VMALLOC_START || end != VMALLOC_END)
94830+ return NULL;
94831+ start = (unsigned long)MODULES_EXEC_VADDR;
94832+ end = (unsigned long)MODULES_EXEC_END;
94833+ }
94834+#endif
94835+
94836 if (flags & VM_IOREMAP)
94837 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
94838
94839@@ -1534,6 +1580,11 @@ void *vmap(struct page **pages, unsigned int count,
94840 if (count > totalram_pages)
94841 return NULL;
94842
94843+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
94844+ if (!(pgprot_val(prot) & _PAGE_NX))
94845+ flags |= VM_KERNEXEC;
94846+#endif
94847+
94848 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
94849 __builtin_return_address(0));
94850 if (!area)
94851@@ -1634,6 +1685,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
94852 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
94853 goto fail;
94854
94855+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
94856+ if (!(pgprot_val(prot) & _PAGE_NX))
94857+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | VM_KERNEXEC,
94858+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
94859+ else
94860+#endif
94861+
94862 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
94863 start, end, node, gfp_mask, caller);
94864 if (!area)
94865@@ -1810,10 +1868,9 @@ EXPORT_SYMBOL(vzalloc_node);
94866 * For tight control over page level allocator and protection flags
94867 * use __vmalloc() instead.
94868 */
94869-
94870 void *vmalloc_exec(unsigned long size)
94871 {
94872- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
94873+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
94874 NUMA_NO_NODE, __builtin_return_address(0));
94875 }
94876
94877@@ -2120,6 +2177,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
94878 {
94879 struct vm_struct *area;
94880
94881+ BUG_ON(vma->vm_mirror);
94882+
94883 size = PAGE_ALIGN(size);
94884
94885 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
94886@@ -2602,7 +2661,11 @@ static int s_show(struct seq_file *m, void *p)
94887 v->addr, v->addr + v->size, v->size);
94888
94889 if (v->caller)
94890+#ifdef CONFIG_GRKERNSEC_HIDESYM
94891+ seq_printf(m, " %pK", v->caller);
94892+#else
94893 seq_printf(m, " %pS", v->caller);
94894+#endif
94895
94896 if (v->nr_pages)
94897 seq_printf(m, " pages=%d", v->nr_pages);
94898diff --git a/mm/vmstat.c b/mm/vmstat.c
94899index 7249614..2639fc7 100644
94900--- a/mm/vmstat.c
94901+++ b/mm/vmstat.c
94902@@ -20,6 +20,7 @@
94903 #include <linux/writeback.h>
94904 #include <linux/compaction.h>
94905 #include <linux/mm_inline.h>
94906+#include <linux/grsecurity.h>
94907
94908 #include "internal.h"
94909
94910@@ -79,7 +80,7 @@ void vm_events_fold_cpu(int cpu)
94911 *
94912 * vm_stat contains the global counters
94913 */
94914-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
94915+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
94916 EXPORT_SYMBOL(vm_stat);
94917
94918 #ifdef CONFIG_SMP
94919@@ -423,7 +424,7 @@ static inline void fold_diff(int *diff)
94920
94921 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
94922 if (diff[i])
94923- atomic_long_add(diff[i], &vm_stat[i]);
94924+ atomic_long_add_unchecked(diff[i], &vm_stat[i]);
94925 }
94926
94927 /*
94928@@ -455,7 +456,7 @@ static void refresh_cpu_vm_stats(void)
94929 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
94930 if (v) {
94931
94932- atomic_long_add(v, &zone->vm_stat[i]);
94933+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
94934 global_diff[i] += v;
94935 #ifdef CONFIG_NUMA
94936 /* 3 seconds idle till flush */
94937@@ -517,7 +518,7 @@ void cpu_vm_stats_fold(int cpu)
94938
94939 v = p->vm_stat_diff[i];
94940 p->vm_stat_diff[i] = 0;
94941- atomic_long_add(v, &zone->vm_stat[i]);
94942+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
94943 global_diff[i] += v;
94944 }
94945 }
94946@@ -537,8 +538,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
94947 if (pset->vm_stat_diff[i]) {
94948 int v = pset->vm_stat_diff[i];
94949 pset->vm_stat_diff[i] = 0;
94950- atomic_long_add(v, &zone->vm_stat[i]);
94951- atomic_long_add(v, &vm_stat[i]);
94952+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
94953+ atomic_long_add_unchecked(v, &vm_stat[i]);
94954 }
94955 }
94956 #endif
94957@@ -1148,10 +1149,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
94958 stat_items_size += sizeof(struct vm_event_state);
94959 #endif
94960
94961- v = kmalloc(stat_items_size, GFP_KERNEL);
94962+ v = kzalloc(stat_items_size, GFP_KERNEL);
94963 m->private = v;
94964 if (!v)
94965 return ERR_PTR(-ENOMEM);
94966+
94967+#ifdef CONFIG_GRKERNSEC_PROC_ADD
94968+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
94969+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
94970+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
94971+ && !in_group_p(grsec_proc_gid)
94972+#endif
94973+ )
94974+ return (unsigned long *)m->private + *pos;
94975+#endif
94976+#endif
94977+
94978 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
94979 v[i] = global_page_state(i);
94980 v += NR_VM_ZONE_STAT_ITEMS;
94981@@ -1300,10 +1313,16 @@ static int __init setup_vmstat(void)
94982 put_online_cpus();
94983 #endif
94984 #ifdef CONFIG_PROC_FS
94985- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
94986- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
94987- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
94988- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
94989+ {
94990+ mode_t gr_mode = S_IRUGO;
94991+#ifdef CONFIG_GRKERNSEC_PROC_ADD
94992+ gr_mode = S_IRUSR;
94993+#endif
94994+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
94995+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
94996+ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
94997+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
94998+ }
94999 #endif
95000 return 0;
95001 }
95002diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
95003index b3d17d1..e8e4cdd 100644
95004--- a/net/8021q/vlan.c
95005+++ b/net/8021q/vlan.c
95006@@ -472,7 +472,7 @@ out:
95007 return NOTIFY_DONE;
95008 }
95009
95010-static struct notifier_block vlan_notifier_block __read_mostly = {
95011+static struct notifier_block vlan_notifier_block = {
95012 .notifier_call = vlan_device_event,
95013 };
95014
95015@@ -547,8 +547,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
95016 err = -EPERM;
95017 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
95018 break;
95019- if ((args.u.name_type >= 0) &&
95020- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
95021+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
95022 struct vlan_net *vn;
95023
95024 vn = net_generic(net, vlan_net_id);
95025diff --git a/net/9p/client.c b/net/9p/client.c
95026index ee8fd6b..0469d50 100644
95027--- a/net/9p/client.c
95028+++ b/net/9p/client.c
95029@@ -588,7 +588,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
95030 len - inline_len);
95031 } else {
95032 err = copy_from_user(ename + inline_len,
95033- uidata, len - inline_len);
95034+ (char __force_user *)uidata, len - inline_len);
95035 if (err) {
95036 err = -EFAULT;
95037 goto out_err;
95038@@ -1563,7 +1563,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
95039 kernel_buf = 1;
95040 indata = data;
95041 } else
95042- indata = (__force char *)udata;
95043+ indata = (__force_kernel char *)udata;
95044 /*
95045 * response header len is 11
95046 * PDU Header(7) + IO Size (4)
95047@@ -1638,7 +1638,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
95048 kernel_buf = 1;
95049 odata = data;
95050 } else
95051- odata = (char *)udata;
95052+ odata = (char __force_kernel *)udata;
95053 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
95054 P9_ZC_HDR_SZ, kernel_buf, "dqd",
95055 fid->fid, offset, rsize);
95056diff --git a/net/9p/mod.c b/net/9p/mod.c
95057index 6ab36ae..6f1841b 100644
95058--- a/net/9p/mod.c
95059+++ b/net/9p/mod.c
95060@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
95061 void v9fs_register_trans(struct p9_trans_module *m)
95062 {
95063 spin_lock(&v9fs_trans_lock);
95064- list_add_tail(&m->list, &v9fs_trans_list);
95065+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
95066 spin_unlock(&v9fs_trans_lock);
95067 }
95068 EXPORT_SYMBOL(v9fs_register_trans);
95069@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
95070 void v9fs_unregister_trans(struct p9_trans_module *m)
95071 {
95072 spin_lock(&v9fs_trans_lock);
95073- list_del_init(&m->list);
95074+ pax_list_del_init((struct list_head *)&m->list);
95075 spin_unlock(&v9fs_trans_lock);
95076 }
95077 EXPORT_SYMBOL(v9fs_unregister_trans);
95078diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
95079index 9321a77..ed2f256 100644
95080--- a/net/9p/trans_fd.c
95081+++ b/net/9p/trans_fd.c
95082@@ -432,7 +432,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
95083 oldfs = get_fs();
95084 set_fs(get_ds());
95085 /* The cast to a user pointer is valid due to the set_fs() */
95086- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
95087+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
95088 set_fs(oldfs);
95089
95090 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
95091diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
95092index 876fbe8..8bbea9f 100644
95093--- a/net/atm/atm_misc.c
95094+++ b/net/atm/atm_misc.c
95095@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
95096 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
95097 return 1;
95098 atm_return(vcc, truesize);
95099- atomic_inc(&vcc->stats->rx_drop);
95100+ atomic_inc_unchecked(&vcc->stats->rx_drop);
95101 return 0;
95102 }
95103 EXPORT_SYMBOL(atm_charge);
95104@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
95105 }
95106 }
95107 atm_return(vcc, guess);
95108- atomic_inc(&vcc->stats->rx_drop);
95109+ atomic_inc_unchecked(&vcc->stats->rx_drop);
95110 return NULL;
95111 }
95112 EXPORT_SYMBOL(atm_alloc_charge);
95113@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
95114
95115 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
95116 {
95117-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
95118+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
95119 __SONET_ITEMS
95120 #undef __HANDLE_ITEM
95121 }
95122@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
95123
95124 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
95125 {
95126-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
95127+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
95128 __SONET_ITEMS
95129 #undef __HANDLE_ITEM
95130 }
95131diff --git a/net/atm/lec.c b/net/atm/lec.c
95132index f23916b..dd4d26b 100644
95133--- a/net/atm/lec.c
95134+++ b/net/atm/lec.c
95135@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
95136 }
95137
95138 static struct lane2_ops lane2_ops = {
95139- lane2_resolve, /* resolve, spec 3.1.3 */
95140- lane2_associate_req, /* associate_req, spec 3.1.4 */
95141- NULL /* associate indicator, spec 3.1.5 */
95142+ .resolve = lane2_resolve,
95143+ .associate_req = lane2_associate_req,
95144+ .associate_indicator = NULL
95145 };
95146
95147 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
95148diff --git a/net/atm/lec.h b/net/atm/lec.h
95149index 4149db1..f2ab682 100644
95150--- a/net/atm/lec.h
95151+++ b/net/atm/lec.h
95152@@ -48,7 +48,7 @@ struct lane2_ops {
95153 const u8 *tlvs, u32 sizeoftlvs);
95154 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
95155 const u8 *tlvs, u32 sizeoftlvs);
95156-};
95157+} __no_const;
95158
95159 /*
95160 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
95161diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
95162index d1b2d9a..d549f7f 100644
95163--- a/net/atm/mpoa_caches.c
95164+++ b/net/atm/mpoa_caches.c
95165@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
95166
95167
95168 static struct in_cache_ops ingress_ops = {
95169- in_cache_add_entry, /* add_entry */
95170- in_cache_get, /* get */
95171- in_cache_get_with_mask, /* get_with_mask */
95172- in_cache_get_by_vcc, /* get_by_vcc */
95173- in_cache_put, /* put */
95174- in_cache_remove_entry, /* remove_entry */
95175- cache_hit, /* cache_hit */
95176- clear_count_and_expired, /* clear_count */
95177- check_resolving_entries, /* check_resolving */
95178- refresh_entries, /* refresh */
95179- in_destroy_cache /* destroy_cache */
95180+ .add_entry = in_cache_add_entry,
95181+ .get = in_cache_get,
95182+ .get_with_mask = in_cache_get_with_mask,
95183+ .get_by_vcc = in_cache_get_by_vcc,
95184+ .put = in_cache_put,
95185+ .remove_entry = in_cache_remove_entry,
95186+ .cache_hit = cache_hit,
95187+ .clear_count = clear_count_and_expired,
95188+ .check_resolving = check_resolving_entries,
95189+ .refresh = refresh_entries,
95190+ .destroy_cache = in_destroy_cache
95191 };
95192
95193 static struct eg_cache_ops egress_ops = {
95194- eg_cache_add_entry, /* add_entry */
95195- eg_cache_get_by_cache_id, /* get_by_cache_id */
95196- eg_cache_get_by_tag, /* get_by_tag */
95197- eg_cache_get_by_vcc, /* get_by_vcc */
95198- eg_cache_get_by_src_ip, /* get_by_src_ip */
95199- eg_cache_put, /* put */
95200- eg_cache_remove_entry, /* remove_entry */
95201- update_eg_cache_entry, /* update */
95202- clear_expired, /* clear_expired */
95203- eg_destroy_cache /* destroy_cache */
95204+ .add_entry = eg_cache_add_entry,
95205+ .get_by_cache_id = eg_cache_get_by_cache_id,
95206+ .get_by_tag = eg_cache_get_by_tag,
95207+ .get_by_vcc = eg_cache_get_by_vcc,
95208+ .get_by_src_ip = eg_cache_get_by_src_ip,
95209+ .put = eg_cache_put,
95210+ .remove_entry = eg_cache_remove_entry,
95211+ .update = update_eg_cache_entry,
95212+ .clear_expired = clear_expired,
95213+ .destroy_cache = eg_destroy_cache
95214 };
95215
95216
95217diff --git a/net/atm/proc.c b/net/atm/proc.c
95218index bbb6461..cf04016 100644
95219--- a/net/atm/proc.c
95220+++ b/net/atm/proc.c
95221@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
95222 const struct k_atm_aal_stats *stats)
95223 {
95224 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
95225- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
95226- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
95227- atomic_read(&stats->rx_drop));
95228+ atomic_read_unchecked(&stats->tx), atomic_read_unchecked(&stats->tx_err),
95229+ atomic_read_unchecked(&stats->rx), atomic_read_unchecked(&stats->rx_err),
95230+ atomic_read_unchecked(&stats->rx_drop));
95231 }
95232
95233 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
95234diff --git a/net/atm/resources.c b/net/atm/resources.c
95235index 0447d5d..3cf4728 100644
95236--- a/net/atm/resources.c
95237+++ b/net/atm/resources.c
95238@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
95239 static void copy_aal_stats(struct k_atm_aal_stats *from,
95240 struct atm_aal_stats *to)
95241 {
95242-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
95243+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
95244 __AAL_STAT_ITEMS
95245 #undef __HANDLE_ITEM
95246 }
95247@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
95248 static void subtract_aal_stats(struct k_atm_aal_stats *from,
95249 struct atm_aal_stats *to)
95250 {
95251-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
95252+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
95253 __AAL_STAT_ITEMS
95254 #undef __HANDLE_ITEM
95255 }
95256diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
95257index 919a5ce..cc6b444 100644
95258--- a/net/ax25/sysctl_net_ax25.c
95259+++ b/net/ax25/sysctl_net_ax25.c
95260@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
95261 {
95262 char path[sizeof("net/ax25/") + IFNAMSIZ];
95263 int k;
95264- struct ctl_table *table;
95265+ ctl_table_no_const *table;
95266
95267 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
95268 if (!table)
95269diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
95270index b9c8a6e..ed0f711 100644
95271--- a/net/batman-adv/bat_iv_ogm.c
95272+++ b/net/batman-adv/bat_iv_ogm.c
95273@@ -297,7 +297,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
95274
95275 /* randomize initial seqno to avoid collision */
95276 get_random_bytes(&random_seqno, sizeof(random_seqno));
95277- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
95278+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
95279
95280 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
95281 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
95282@@ -884,9 +884,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
95283 batadv_ogm_packet->tvlv_len = htons(tvlv_len);
95284
95285 /* change sequence number to network order */
95286- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
95287+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
95288 batadv_ogm_packet->seqno = htonl(seqno);
95289- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
95290+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
95291
95292 batadv_iv_ogm_slide_own_bcast_window(hard_iface);
95293 batadv_iv_ogm_queue_add(bat_priv, hard_iface->bat_iv.ogm_buff,
95294@@ -1251,7 +1251,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
95295 return;
95296
95297 /* could be changed by schedule_own_packet() */
95298- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
95299+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
95300
95301 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
95302 has_directlink_flag = 1;
95303diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
95304index 6ddb614..ca7e886 100644
95305--- a/net/batman-adv/fragmentation.c
95306+++ b/net/batman-adv/fragmentation.c
95307@@ -447,7 +447,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
95308 frag_header.packet_type = BATADV_UNICAST_FRAG;
95309 frag_header.version = BATADV_COMPAT_VERSION;
95310 frag_header.ttl = BATADV_TTL;
95311- frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
95312+ frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
95313 frag_header.reserved = 0;
95314 frag_header.no = 0;
95315 frag_header.total_size = htons(skb->len);
95316diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
95317index a8f99d1..11797ef 100644
95318--- a/net/batman-adv/soft-interface.c
95319+++ b/net/batman-adv/soft-interface.c
95320@@ -278,7 +278,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
95321 primary_if->net_dev->dev_addr, ETH_ALEN);
95322
95323 /* set broadcast sequence number */
95324- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
95325+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
95326 bcast_packet->seqno = htonl(seqno);
95327
95328 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
95329@@ -688,7 +688,7 @@ static int batadv_softif_init_late(struct net_device *dev)
95330 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
95331
95332 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
95333- atomic_set(&bat_priv->bcast_seqno, 1);
95334+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
95335 atomic_set(&bat_priv->tt.vn, 0);
95336 atomic_set(&bat_priv->tt.local_changes, 0);
95337 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
95338@@ -700,7 +700,7 @@ static int batadv_softif_init_late(struct net_device *dev)
95339
95340 /* randomize initial seqno to avoid collision */
95341 get_random_bytes(&random_seqno, sizeof(random_seqno));
95342- atomic_set(&bat_priv->frag_seqno, random_seqno);
95343+ atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
95344
95345 bat_priv->primary_if = NULL;
95346 bat_priv->num_ifaces = 0;
95347diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
95348index 91dd369..9c25750 100644
95349--- a/net/batman-adv/types.h
95350+++ b/net/batman-adv/types.h
95351@@ -56,7 +56,7 @@
95352 struct batadv_hard_iface_bat_iv {
95353 unsigned char *ogm_buff;
95354 int ogm_buff_len;
95355- atomic_t ogm_seqno;
95356+ atomic_unchecked_t ogm_seqno;
95357 };
95358
95359 /**
95360@@ -673,7 +673,7 @@ struct batadv_priv {
95361 atomic_t bonding;
95362 atomic_t fragmentation;
95363 atomic_t packet_size_max;
95364- atomic_t frag_seqno;
95365+ atomic_unchecked_t frag_seqno;
95366 #ifdef CONFIG_BATMAN_ADV_BLA
95367 atomic_t bridge_loop_avoidance;
95368 #endif
95369@@ -687,7 +687,7 @@ struct batadv_priv {
95370 #ifdef CONFIG_BATMAN_ADV_DEBUG
95371 atomic_t log_level;
95372 #endif
95373- atomic_t bcast_seqno;
95374+ atomic_unchecked_t bcast_seqno;
95375 atomic_t bcast_queue_left;
95376 atomic_t batman_queue_left;
95377 char num_ifaces;
95378diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
95379index 7552f9e..074ce29 100644
95380--- a/net/bluetooth/hci_sock.c
95381+++ b/net/bluetooth/hci_sock.c
95382@@ -1052,7 +1052,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
95383 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
95384 }
95385
95386- len = min_t(unsigned int, len, sizeof(uf));
95387+ len = min((size_t)len, sizeof(uf));
95388 if (copy_from_user(&uf, optval, len)) {
95389 err = -EFAULT;
95390 break;
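
This hci_sock.c change, and the setsockopt changes to l2cap and rfcomm just below, all retype option-length handling as size_t end to end: the clamp becomes min() over size_t values rather than min_t(unsigned int, ...) applied to an int, which removes the signed/unsigned conversions this style of code has historically gotten wrong. A small sketch of the general conversion hazard, with made-up names:

#include <stdio.h>
#include <string.h>

static void consume(void *dst, const void *src, size_t n) /* like copy_from_user */
{
        if (n > 64) {                 /* stand-in for the fault a bad n causes */
                puts("oversized copy rejected");
                return;
        }
        memcpy(dst, src, n);
}

int main(void)
{
        char dst[64], src[64] = "payload";
        int len = -1;                 /* attacker-influenced signed length */

        consume(dst, src, len);       /* -1 silently becomes SIZE_MAX */

        size_t slen = sizeof(src);    /* size_t end to end: no conversion */
        if (slen > sizeof(dst))
                slen = sizeof(dst);   /* the min(sizeof(x), len) idiom above */
        consume(dst, src, slen);
        printf("%s\n", dst);
        return 0;
}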
95391diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
95392index 4af3821..f2ba46c 100644
95393--- a/net/bluetooth/l2cap_core.c
95394+++ b/net/bluetooth/l2cap_core.c
95395@@ -3500,8 +3500,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
95396 break;
95397
95398 case L2CAP_CONF_RFC:
95399- if (olen == sizeof(rfc))
95400- memcpy(&rfc, (void *)val, olen);
95401+ if (olen != sizeof(rfc))
95402+ break;
95403+
95404+ memcpy(&rfc, (void *)val, olen);
95405
95406 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
95407 rfc.mode != chan->mode)
95408diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
95409index 7cc24d2..e83f531 100644
95410--- a/net/bluetooth/l2cap_sock.c
95411+++ b/net/bluetooth/l2cap_sock.c
95412@@ -545,7 +545,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
95413 struct sock *sk = sock->sk;
95414 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
95415 struct l2cap_options opts;
95416- int len, err = 0;
95417+ int err = 0;
95418+ size_t len = optlen;
95419 u32 opt;
95420
95421 BT_DBG("sk %p", sk);
95422@@ -567,7 +568,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
95423 opts.max_tx = chan->max_tx;
95424 opts.txwin_size = chan->tx_win;
95425
95426- len = min_t(unsigned int, sizeof(opts), optlen);
95427+ len = min(sizeof(opts), len);
95428 if (copy_from_user((char *) &opts, optval, len)) {
95429 err = -EFAULT;
95430 break;
95431@@ -647,7 +648,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
95432 struct bt_security sec;
95433 struct bt_power pwr;
95434 struct l2cap_conn *conn;
95435- int len, err = 0;
95436+ int err = 0;
95437+ size_t len = optlen;
95438 u32 opt;
95439
95440 BT_DBG("sk %p", sk);
95441@@ -670,7 +672,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
95442
95443 sec.level = BT_SECURITY_LOW;
95444
95445- len = min_t(unsigned int, sizeof(sec), optlen);
95446+ len = min(sizeof(sec), len);
95447 if (copy_from_user((char *) &sec, optval, len)) {
95448 err = -EFAULT;
95449 break;
95450@@ -770,7 +772,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
95451
95452 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
95453
95454- len = min_t(unsigned int, sizeof(pwr), optlen);
95455+ len = min(sizeof(pwr), len);
95456 if (copy_from_user((char *) &pwr, optval, len)) {
95457 err = -EFAULT;
95458 break;
95459diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
95460index 3c2d3e4..884855a 100644
95461--- a/net/bluetooth/rfcomm/sock.c
95462+++ b/net/bluetooth/rfcomm/sock.c
95463@@ -672,7 +672,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
95464 struct sock *sk = sock->sk;
95465 struct bt_security sec;
95466 int err = 0;
95467- size_t len;
95468+ size_t len = optlen;
95469 u32 opt;
95470
95471 BT_DBG("sk %p", sk);
95472@@ -694,7 +694,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
95473
95474 sec.level = BT_SECURITY_LOW;
95475
95476- len = min_t(unsigned int, sizeof(sec), optlen);
95477+ len = min(sizeof(sec), len);
95478 if (copy_from_user((char *) &sec, optval, len)) {
95479 err = -EFAULT;
95480 break;
95481diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
95482index 84fcf9f..e389b27 100644
95483--- a/net/bluetooth/rfcomm/tty.c
95484+++ b/net/bluetooth/rfcomm/tty.c
95485@@ -684,7 +684,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
95486 BT_DBG("tty %p id %d", tty, tty->index);
95487
95488 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
95489- dev->channel, dev->port.count);
95490+ dev->channel, atomic_read(&dev->port.count));
95491
95492 err = tty_port_open(&dev->port, tty, filp);
95493 if (err)
95494@@ -707,7 +707,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
95495 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
95496
95497 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
95498- dev->port.count);
95499+ atomic_read(&dev->port.count));
95500
95501 tty_port_close(&dev->port, tty, filp);
95502 }
95503diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
95504index ac78024..161a80c 100644
95505--- a/net/bridge/netfilter/ebtables.c
95506+++ b/net/bridge/netfilter/ebtables.c
95507@@ -1525,7 +1525,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
95508 tmp.valid_hooks = t->table->valid_hooks;
95509 }
95510 mutex_unlock(&ebt_mutex);
95511- if (copy_to_user(user, &tmp, *len) != 0){
95512+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
95513 BUGPRINT("c2u Didn't work\n");
95514 ret = -EFAULT;
95515 break;
95516@@ -2331,7 +2331,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
95517 goto out;
95518 tmp.valid_hooks = t->valid_hooks;
95519
95520- if (copy_to_user(user, &tmp, *len) != 0) {
95521+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
95522 ret = -EFAULT;
95523 break;
95524 }
95525@@ -2342,7 +2342,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
95526 tmp.entries_size = t->table->entries_size;
95527 tmp.valid_hooks = t->table->valid_hooks;
95528
95529- if (copy_to_user(user, &tmp, *len) != 0) {
95530+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
95531 ret = -EFAULT;
95532 break;
95533 }
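
All three ebtables hunks above add the same guard: *len is a user-controlled length, and copy_to_user(user, &tmp, *len) with *len larger than sizeof(tmp) would copy kernel stack memory beyond tmp to userspace, an information leak. Bounding the length by the object size first is the generic fix. A compact sketch of the rule, with stand-in names in place of the kernel primitives:

#include <stdio.h>
#include <string.h>

struct reply { int valid_hooks; int entries_size; };

static int copy_out(void *dst, const void *src, size_t n) /* like copy_to_user */
{
        memcpy(dst, src, n);
        return 0;
}

static int get_info(void *user, size_t *len)
{
        struct reply tmp = { 3, 128 };

        /* The added check: never let a user-supplied length read past
         * the kernel object, or adjacent stack memory leaks out. */
        if (*len > sizeof(tmp) || copy_out(user, &tmp, *len) != 0)
                return -1;      /* -EFAULT in the kernel */
        return 0;
}

int main(void)
{
        char user[256];
        size_t len = sizeof(struct reply);
        printf("ok=%d\n", get_info(user, &len));
        len = 200;                       /* would over-read tmp */
        printf("rejected=%d\n", get_info(user, &len));
        return 0;
}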
95534diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
95535index 0f45522..dab651f 100644
95536--- a/net/caif/cfctrl.c
95537+++ b/net/caif/cfctrl.c
95538@@ -10,6 +10,7 @@
95539 #include <linux/spinlock.h>
95540 #include <linux/slab.h>
95541 #include <linux/pkt_sched.h>
95542+#include <linux/sched.h>
95543 #include <net/caif/caif_layer.h>
95544 #include <net/caif/cfpkt.h>
95545 #include <net/caif/cfctrl.h>
95546@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
95547 memset(&dev_info, 0, sizeof(dev_info));
95548 dev_info.id = 0xff;
95549 cfsrvl_init(&this->serv, 0, &dev_info, false);
95550- atomic_set(&this->req_seq_no, 1);
95551- atomic_set(&this->rsp_seq_no, 1);
95552+ atomic_set_unchecked(&this->req_seq_no, 1);
95553+ atomic_set_unchecked(&this->rsp_seq_no, 1);
95554 this->serv.layer.receive = cfctrl_recv;
95555 sprintf(this->serv.layer.name, "ctrl");
95556 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
95557@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
95558 struct cfctrl_request_info *req)
95559 {
95560 spin_lock_bh(&ctrl->info_list_lock);
95561- atomic_inc(&ctrl->req_seq_no);
95562- req->sequence_no = atomic_read(&ctrl->req_seq_no);
95563+ atomic_inc_unchecked(&ctrl->req_seq_no);
95564+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
95565 list_add_tail(&req->list, &ctrl->list);
95566 spin_unlock_bh(&ctrl->info_list_lock);
95567 }
95568@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
95569 if (p != first)
95570 pr_warn("Requests are not received in order\n");
95571
95572- atomic_set(&ctrl->rsp_seq_no,
95573+ atomic_set_unchecked(&ctrl->rsp_seq_no,
95574 p->sequence_no);
95575 list_del(&p->list);
95576 goto out;
95577diff --git a/net/can/af_can.c b/net/can/af_can.c
95578index d249874..99e197b 100644
95579--- a/net/can/af_can.c
95580+++ b/net/can/af_can.c
95581@@ -862,7 +862,7 @@ static const struct net_proto_family can_family_ops = {
95582 };
95583
95584 /* notifier block for netdevice event */
95585-static struct notifier_block can_netdev_notifier __read_mostly = {
95586+static struct notifier_block can_netdev_notifier = {
95587 .notifier_call = can_notifier,
95588 };
95589
95590diff --git a/net/can/gw.c b/net/can/gw.c
95591index 3f9b0f3..fc6d4fa 100644
95592--- a/net/can/gw.c
95593+++ b/net/can/gw.c
95594@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
95595 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
95596
95597 static HLIST_HEAD(cgw_list);
95598-static struct notifier_block notifier;
95599
95600 static struct kmem_cache *cgw_cache __read_mostly;
95601
95602@@ -954,6 +953,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
95603 return err;
95604 }
95605
95606+static struct notifier_block notifier = {
95607+ .notifier_call = cgw_notifier
95608+};
95609+
95610 static __init int cgw_module_init(void)
95611 {
95612 /* sanitize given module parameter */
95613@@ -969,7 +972,6 @@ static __init int cgw_module_init(void)
95614 return -ENOMEM;
95615
95616 /* set notifier */
95617- notifier.notifier_call = cgw_notifier;
95618 register_netdevice_notifier(&notifier);
95619
95620 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
95621diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
95622index 4a5df7b..9ad1f1d 100644
95623--- a/net/ceph/messenger.c
95624+++ b/net/ceph/messenger.c
95625@@ -186,7 +186,7 @@ static void con_fault(struct ceph_connection *con);
95626 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
95627
95628 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
95629-static atomic_t addr_str_seq = ATOMIC_INIT(0);
95630+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
95631
95632 static struct page *zero_page; /* used in certain error cases */
95633
95634@@ -197,7 +197,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
95635 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
95636 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
95637
95638- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
95639+ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
95640 s = addr_str[i];
95641
95642 switch (ss->ss_family) {
95643diff --git a/net/compat.c b/net/compat.c
95644index f50161f..94fa415 100644
95645--- a/net/compat.c
95646+++ b/net/compat.c
95647@@ -73,9 +73,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
95648 return -EFAULT;
95649 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
95650 kmsg->msg_namelen = sizeof(struct sockaddr_storage);
95651- kmsg->msg_name = compat_ptr(tmp1);
95652- kmsg->msg_iov = compat_ptr(tmp2);
95653- kmsg->msg_control = compat_ptr(tmp3);
95654+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
95655+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
95656+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
95657 return 0;
95658 }
95659
95660@@ -87,7 +87,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
95661
95662 if (kern_msg->msg_namelen) {
95663 if (mode == VERIFY_READ) {
95664- int err = move_addr_to_kernel(kern_msg->msg_name,
95665+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
95666 kern_msg->msg_namelen,
95667 kern_address);
95668 if (err < 0)
95669@@ -99,7 +99,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
95670 kern_msg->msg_name = NULL;
95671
95672 tot_len = iov_from_user_compat_to_kern(kern_iov,
95673- (struct compat_iovec __user *)kern_msg->msg_iov,
95674+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
95675 kern_msg->msg_iovlen);
95676 if (tot_len >= 0)
95677 kern_msg->msg_iov = kern_iov;
95678@@ -119,20 +119,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
95679
95680 #define CMSG_COMPAT_FIRSTHDR(msg) \
95681 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
95682- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
95683+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
95684 (struct compat_cmsghdr __user *)NULL)
95685
95686 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
95687 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
95688 (ucmlen) <= (unsigned long) \
95689 ((mhdr)->msg_controllen - \
95690- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
95691+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
95692
95693 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
95694 struct compat_cmsghdr __user *cmsg, int cmsg_len)
95695 {
95696 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
95697- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
95698+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
95699 msg->msg_controllen)
95700 return NULL;
95701 return (struct compat_cmsghdr __user *)ptr;
95702@@ -222,7 +222,7 @@ Efault:
95703
95704 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
95705 {
95706- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
95707+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
95708 struct compat_cmsghdr cmhdr;
95709 struct compat_timeval ctv;
95710 struct compat_timespec cts[3];
95711@@ -278,7 +278,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
95712
95713 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
95714 {
95715- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
95716+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
95717 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
95718 int fdnum = scm->fp->count;
95719 struct file **fp = scm->fp->fp;
95720@@ -366,7 +366,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
95721 return -EFAULT;
95722 old_fs = get_fs();
95723 set_fs(KERNEL_DS);
95724- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
95725+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
95726 set_fs(old_fs);
95727
95728 return err;
95729@@ -427,7 +427,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
95730 len = sizeof(ktime);
95731 old_fs = get_fs();
95732 set_fs(KERNEL_DS);
95733- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
95734+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
95735 set_fs(old_fs);
95736
95737 if (!err) {
95738@@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
95739 case MCAST_JOIN_GROUP:
95740 case MCAST_LEAVE_GROUP:
95741 {
95742- struct compat_group_req __user *gr32 = (void *)optval;
95743+ struct compat_group_req __user *gr32 = (void __user *)optval;
95744 struct group_req __user *kgr =
95745 compat_alloc_user_space(sizeof(struct group_req));
95746 u32 interface;
95747@@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
95748 case MCAST_BLOCK_SOURCE:
95749 case MCAST_UNBLOCK_SOURCE:
95750 {
95751- struct compat_group_source_req __user *gsr32 = (void *)optval;
95752+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
95753 struct group_source_req __user *kgsr = compat_alloc_user_space(
95754 sizeof(struct group_source_req));
95755 u32 interface;
95756@@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
95757 }
95758 case MCAST_MSFILTER:
95759 {
95760- struct compat_group_filter __user *gf32 = (void *)optval;
95761+ struct compat_group_filter __user *gf32 = (void __user *)optval;
95762 struct group_filter __user *kgf;
95763 u32 interface, fmode, numsrc;
95764
95765@@ -650,7 +650,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
95766 char __user *optval, int __user *optlen,
95767 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
95768 {
95769- struct compat_group_filter __user *gf32 = (void *)optval;
95770+ struct compat_group_filter __user *gf32 = (void __user *)optval;
95771 struct group_filter __user *kgf;
95772 int __user *koptlen;
95773 u32 interface, fmode, numsrc;
95774@@ -803,7 +803,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
95775
95776 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
95777 return -EINVAL;
95778- if (copy_from_user(a, args, nas[call]))
95779+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
95780 return -EFAULT;
95781 a0 = a[0];
95782 a1 = a[1];
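
Most of the net/compat.c hunks above are annotation-only: under grsecurity's stricter sparse checking of __user pointers, casts that deliberately move a pointer between the kernel and user address spaces must say so with __force_user or __force_kernel rather than a bare cast, so that accidental mixing still produces a warning while the generated code is unchanged. A sketch of how such annotations reduce to no-ops outside a sparse run, modeled on the kernel's compiler.h conventions (the exact grsecurity macro definitions may differ):

#ifdef __CHECKER__
# define __user        __attribute__((noderef, address_space(1)))
# define __force_user  __attribute__((force)) __user
#else
# define __user
# define __force_user
#endif

struct msg { void *msg_name; };

/* A compat path that knows msg_name currently holds a user pointer
 * makes the address-space transfer explicit instead of casting it away: */
static void __user *name_as_user(struct msg *m)
{
        return (void __force_user *)m->msg_name;
}

int main(void)
{
        struct msg m = { 0 };
        return name_as_user(&m) == 0;
}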
95783diff --git a/net/core/datagram.c b/net/core/datagram.c
95784index a16ed7b..eb44d17 100644
95785--- a/net/core/datagram.c
95786+++ b/net/core/datagram.c
95787@@ -301,7 +301,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
95788 }
95789
95790 kfree_skb(skb);
95791- atomic_inc(&sk->sk_drops);
95792+ atomic_inc_unchecked(&sk->sk_drops);
95793 sk_mem_reclaim_partial(sk);
95794
95795 return err;
95796diff --git a/net/core/dev.c b/net/core/dev.c
95797index 0ce469e..dfb53d2 100644
95798--- a/net/core/dev.c
95799+++ b/net/core/dev.c
95800@@ -1684,14 +1684,14 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
95801 {
95802 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
95803 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
95804- atomic_long_inc(&dev->rx_dropped);
95805+ atomic_long_inc_unchecked(&dev->rx_dropped);
95806 kfree_skb(skb);
95807 return NET_RX_DROP;
95808 }
95809 }
95810
95811 if (unlikely(!is_skb_forwardable(dev, skb))) {
95812- atomic_long_inc(&dev->rx_dropped);
95813+ atomic_long_inc_unchecked(&dev->rx_dropped);
95814 kfree_skb(skb);
95815 return NET_RX_DROP;
95816 }
95817@@ -2434,7 +2434,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
95818
95819 struct dev_gso_cb {
95820 void (*destructor)(struct sk_buff *skb);
95821-};
95822+} __no_const;
95823
95824 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
95825
95826@@ -3222,7 +3222,7 @@ enqueue:
95827
95828 local_irq_restore(flags);
95829
95830- atomic_long_inc(&skb->dev->rx_dropped);
95831+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
95832 kfree_skb(skb);
95833 return NET_RX_DROP;
95834 }
95835@@ -3294,7 +3294,7 @@ int netif_rx_ni(struct sk_buff *skb)
95836 }
95837 EXPORT_SYMBOL(netif_rx_ni);
95838
95839-static void net_tx_action(struct softirq_action *h)
95840+static __latent_entropy void net_tx_action(void)
95841 {
95842 struct softnet_data *sd = &__get_cpu_var(softnet_data);
95843
95844@@ -3628,7 +3628,7 @@ ncls:
95845 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
95846 } else {
95847 drop:
95848- atomic_long_inc(&skb->dev->rx_dropped);
95849+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
95850 kfree_skb(skb);
95851 /* Jamal, now you will not able to escape explaining
95852 * me how you were going to use this. :-)
95853@@ -4288,7 +4288,7 @@ void netif_napi_del(struct napi_struct *napi)
95854 }
95855 EXPORT_SYMBOL(netif_napi_del);
95856
95857-static void net_rx_action(struct softirq_action *h)
95858+static __latent_entropy void net_rx_action(void)
95859 {
95860 struct softnet_data *sd = &__get_cpu_var(softnet_data);
95861 unsigned long time_limit = jiffies + 2;
95862@@ -6177,7 +6177,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
95863 } else {
95864 netdev_stats_to_stats64(storage, &dev->stats);
95865 }
95866- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
95867+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
95868 return storage;
95869 }
95870 EXPORT_SYMBOL(dev_get_stats);
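
Two of the dev.c changes above retype net_tx_action and net_rx_action with __latent_entropy and drop the unused softirq_action argument (the softirq handler prototype is adjusted to match elsewhere in this patch). The latent_entropy GCC plugin instruments functions marked this way so their control flow perturbs an entropy pool, feeding the RNG early in boot when little other entropy exists. A rough model of the accumulate-per-call idea, not the plugin's actual instrumentation:

#include <stdint.h>
#include <stdio.h>

static uint64_t latent_entropy_pool;

/* The plugin injects updates like this into every marked function,
 * mixing compile-time random constants with runtime control flow. */
#define MIX_ENTROPY(val) (latent_entropy_pool ^= (latent_entropy_pool << 7) ^ (val))

static void net_rx_action_model(int work)
{
        MIX_ENTROPY(0x9e3779b97f4a7c15ull);  /* per-function random constant */
        for (int i = 0; i < work; i++)
                MIX_ENTROPY(i);              /* per-block mixing */
}

int main(void)
{
        net_rx_action_model(3);
        net_rx_action_model(5);
        printf("pool=%llx\n", (unsigned long long)latent_entropy_pool);
        return 0;
}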
95871diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
95872index 5b7d0e1..cb960fc 100644
95873--- a/net/core/dev_ioctl.c
95874+++ b/net/core/dev_ioctl.c
95875@@ -365,9 +365,13 @@ void dev_load(struct net *net, const char *name)
95876 if (no_module && capable(CAP_NET_ADMIN))
95877 no_module = request_module("netdev-%s", name);
95878 if (no_module && capable(CAP_SYS_MODULE)) {
95879+#ifdef CONFIG_GRKERNSEC_MODHARDEN
95880+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
95881+#else
95882 if (!request_module("%s", name))
95883 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
95884 name);
95885+#endif
95886 }
95887 }
95888 EXPORT_SYMBOL(dev_load);
95889diff --git a/net/core/filter.c b/net/core/filter.c
95890index ad30d62..c2757df 100644
95891--- a/net/core/filter.c
95892+++ b/net/core/filter.c
95893@@ -679,7 +679,7 @@ int sk_unattached_filter_create(struct sk_filter **pfp,
95894 fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
95895 if (!fp)
95896 return -ENOMEM;
95897- memcpy(fp->insns, fprog->filter, fsize);
95898+ memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
95899
95900 atomic_set(&fp->refcnt, 1);
95901 fp->len = fprog->len;
95902diff --git a/net/core/flow.c b/net/core/flow.c
95903index dfa602c..3103d88 100644
95904--- a/net/core/flow.c
95905+++ b/net/core/flow.c
95906@@ -61,7 +61,7 @@ struct flow_cache {
95907 struct timer_list rnd_timer;
95908 };
95909
95910-atomic_t flow_cache_genid = ATOMIC_INIT(0);
95911+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
95912 EXPORT_SYMBOL(flow_cache_genid);
95913 static struct flow_cache flow_cache_global;
95914 static struct kmem_cache *flow_cachep __read_mostly;
95915@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
95916
95917 static int flow_entry_valid(struct flow_cache_entry *fle)
95918 {
95919- if (atomic_read(&flow_cache_genid) != fle->genid)
95920+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
95921 return 0;
95922 if (fle->object && !fle->object->ops->check(fle->object))
95923 return 0;
95924@@ -258,7 +258,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
95925 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
95926 fcp->hash_count++;
95927 }
95928- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
95929+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
95930 flo = fle->object;
95931 if (!flo)
95932 goto ret_object;
95933@@ -279,7 +279,7 @@ nocache:
95934 }
95935 flo = resolver(net, key, family, dir, flo, ctx);
95936 if (fle) {
95937- fle->genid = atomic_read(&flow_cache_genid);
95938+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
95939 if (!IS_ERR(flo))
95940 fle->object = flo;
95941 else
95942diff --git a/net/core/iovec.c b/net/core/iovec.c
95943index b618694..192bbba 100644
95944--- a/net/core/iovec.c
95945+++ b/net/core/iovec.c
95946@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
95947 if (m->msg_namelen) {
95948 if (mode == VERIFY_READ) {
95949 void __user *namep;
95950- namep = (void __user __force *) m->msg_name;
95951+ namep = (void __force_user *) m->msg_name;
95952 err = move_addr_to_kernel(namep, m->msg_namelen,
95953 address);
95954 if (err < 0)
95955@@ -55,7 +55,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
95956 }
95957
95958 size = m->msg_iovlen * sizeof(struct iovec);
95959- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
95960+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
95961 return -EFAULT;
95962
95963 m->msg_iov = iov;
95964diff --git a/net/core/neighbour.c b/net/core/neighbour.c
95965index 932c6d7..7c7aa10 100644
95966--- a/net/core/neighbour.c
95967+++ b/net/core/neighbour.c
95968@@ -2775,7 +2775,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
95969 void __user *buffer, size_t *lenp, loff_t *ppos)
95970 {
95971 int size, ret;
95972- struct ctl_table tmp = *ctl;
95973+ ctl_table_no_const tmp = *ctl;
95974
95975 tmp.extra1 = &zero;
95976 tmp.extra2 = &unres_qlen_max;
95977diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
95978index 2bf8329..7960607 100644
95979--- a/net/core/net-procfs.c
95980+++ b/net/core/net-procfs.c
95981@@ -283,8 +283,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
95982 else
95983 seq_printf(seq, "%04x", ntohs(pt->type));
95984
95985+#ifdef CONFIG_GRKERNSEC_HIDESYM
95986+ seq_printf(seq, " %-8s %pf\n",
95987+ pt->dev ? pt->dev->name : "", NULL);
95988+#else
95989 seq_printf(seq, " %-8s %pf\n",
95990 pt->dev ? pt->dev->name : "", pt->func);
95991+#endif
95992 }
95993
95994 return 0;
95995diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
95996index f3edf96..3cd8b40 100644
95997--- a/net/core/net-sysfs.c
95998+++ b/net/core/net-sysfs.c
95999@@ -1358,7 +1358,7 @@ void netdev_class_remove_file_ns(struct class_attribute *class_attr,
96000 }
96001 EXPORT_SYMBOL(netdev_class_remove_file_ns);
96002
96003-int netdev_kobject_init(void)
96004+int __init netdev_kobject_init(void)
96005 {
96006 kobj_ns_type_register(&net_ns_type_operations);
96007 return class_register(&net_class);
96008diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
96009index 81d3a9a..a0bd7a8 100644
96010--- a/net/core/net_namespace.c
96011+++ b/net/core/net_namespace.c
96012@@ -443,7 +443,7 @@ static int __register_pernet_operations(struct list_head *list,
96013 int error;
96014 LIST_HEAD(net_exit_list);
96015
96016- list_add_tail(&ops->list, list);
96017+ pax_list_add_tail((struct list_head *)&ops->list, list);
96018 if (ops->init || (ops->id && ops->size)) {
96019 for_each_net(net) {
96020 error = ops_init(ops, net);
96021@@ -456,7 +456,7 @@ static int __register_pernet_operations(struct list_head *list,
96022
96023 out_undo:
96024 /* If I have an error cleanup all namespaces I initialized */
96025- list_del(&ops->list);
96026+ pax_list_del((struct list_head *)&ops->list);
96027 ops_exit_list(ops, &net_exit_list);
96028 ops_free_list(ops, &net_exit_list);
96029 return error;
96030@@ -467,7 +467,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
96031 struct net *net;
96032 LIST_HEAD(net_exit_list);
96033
96034- list_del(&ops->list);
96035+ pax_list_del((struct list_head *)&ops->list);
96036 for_each_net(net)
96037 list_add_tail(&net->exit_list, &net_exit_list);
96038 ops_exit_list(ops, &net_exit_list);
96039@@ -601,7 +601,7 @@ int register_pernet_device(struct pernet_operations *ops)
96040 mutex_lock(&net_mutex);
96041 error = register_pernet_operations(&pernet_list, ops);
96042 if (!error && (first_device == &pernet_list))
96043- first_device = &ops->list;
96044+ first_device = (struct list_head *)&ops->list;
96045 mutex_unlock(&net_mutex);
96046 return error;
96047 }
96048diff --git a/net/core/netpoll.c b/net/core/netpoll.c
96049index 19fe9c7..b6bb620 100644
96050--- a/net/core/netpoll.c
96051+++ b/net/core/netpoll.c
96052@@ -435,7 +435,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
96053 struct udphdr *udph;
96054 struct iphdr *iph;
96055 struct ethhdr *eth;
96056- static atomic_t ip_ident;
96057+ static atomic_unchecked_t ip_ident;
96058 struct ipv6hdr *ip6h;
96059
96060 udp_len = len + sizeof(*udph);
96061@@ -506,7 +506,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
96062 put_unaligned(0x45, (unsigned char *)iph);
96063 iph->tos = 0;
96064 put_unaligned(htons(ip_len), &(iph->tot_len));
96065- iph->id = htons(atomic_inc_return(&ip_ident));
96066+ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
96067 iph->frag_off = 0;
96068 iph->ttl = 64;
96069 iph->protocol = IPPROTO_UDP;
96070diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
96071index cf67144..12bf94c 100644
96072--- a/net/core/rtnetlink.c
96073+++ b/net/core/rtnetlink.c
96074@@ -58,7 +58,7 @@ struct rtnl_link {
96075 rtnl_doit_func doit;
96076 rtnl_dumpit_func dumpit;
96077 rtnl_calcit_func calcit;
96078-};
96079+} __no_const;
96080
96081 static DEFINE_MUTEX(rtnl_mutex);
96082
96083@@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
96084 if (rtnl_link_ops_get(ops->kind))
96085 return -EEXIST;
96086
96087- if (!ops->dellink)
96088- ops->dellink = unregister_netdevice_queue;
96089+ if (!ops->dellink) {
96090+ pax_open_kernel();
96091+ *(void **)&ops->dellink = unregister_netdevice_queue;
96092+ pax_close_kernel();
96093+ }
96094
96095- list_add_tail(&ops->list, &link_ops);
96096+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
96097 return 0;
96098 }
96099 EXPORT_SYMBOL_GPL(__rtnl_link_register);
96100@@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
96101 for_each_net(net) {
96102 __rtnl_kill_links(net, ops);
96103 }
96104- list_del(&ops->list);
96105+ pax_list_del((struct list_head *)&ops->list);
96106 }
96107 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
96108
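
The rtnetlink hunk above shows the pattern grsecurity uses whenever it must write to a structure its constify plugin has made read-only: rtnl_link_ops lands in read-only memory under this patch, so assigning ops->dellink is bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection on the current CPU, and the store goes through a *(void **)& cast to get past the const qualifier. A userspace approximation of the open/write/close discipline using mprotect, purely illustrative and assuming 4 KiB pages:

#include <stdio.h>
#include <sys/mman.h>

static void *ro_page;

static void pax_open_kernel_model(void)  { mprotect(ro_page, 4096, PROT_READ | PROT_WRITE); }
static void pax_close_kernel_model(void) { mprotect(ro_page, 4096, PROT_READ); }

int main(void)
{
        /* A "constified" ops slot living in read-only memory. */
        ro_page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ro_page == MAP_FAILED)
                return 1;
        void (**dellink)(void) = ro_page;
        mprotect(ro_page, 4096, PROT_READ);

        pax_open_kernel_model();     /* briefly writable... */
        *dellink = 0;                /* ...perform the one write... */
        pax_close_kernel_model();    /* ...and seal it again */

        printf("slot=%p\n", (void *)*dellink);
        return 0;
}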
96109diff --git a/net/core/scm.c b/net/core/scm.c
96110index b442e7e..6f5b5a2 100644
96111--- a/net/core/scm.c
96112+++ b/net/core/scm.c
96113@@ -210,7 +210,7 @@ EXPORT_SYMBOL(__scm_send);
96114 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
96115 {
96116 struct cmsghdr __user *cm
96117- = (__force struct cmsghdr __user *)msg->msg_control;
96118+ = (struct cmsghdr __force_user *)msg->msg_control;
96119 struct cmsghdr cmhdr;
96120 int cmlen = CMSG_LEN(len);
96121 int err;
96122@@ -233,7 +233,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
96123 err = -EFAULT;
96124 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
96125 goto out;
96126- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
96127+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
96128 goto out;
96129 cmlen = CMSG_SPACE(len);
96130 if (msg->msg_controllen < cmlen)
96131@@ -249,7 +249,7 @@ EXPORT_SYMBOL(put_cmsg);
96132 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
96133 {
96134 struct cmsghdr __user *cm
96135- = (__force struct cmsghdr __user*)msg->msg_control;
96136+ = (struct cmsghdr __force_user *)msg->msg_control;
96137
96138 int fdmax = 0;
96139 int fdnum = scm->fp->count;
96140@@ -269,7 +269,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
96141 if (fdnum < fdmax)
96142 fdmax = fdnum;
96143
96144- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
96145+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
96146 i++, cmfptr++)
96147 {
96148 struct socket *sock;
96149diff --git a/net/core/skbuff.c b/net/core/skbuff.c
96150index 06e72d3..19dfa7b 100644
96151--- a/net/core/skbuff.c
96152+++ b/net/core/skbuff.c
96153@@ -2034,7 +2034,7 @@ EXPORT_SYMBOL(__skb_checksum);
96154 __wsum skb_checksum(const struct sk_buff *skb, int offset,
96155 int len, __wsum csum)
96156 {
96157- const struct skb_checksum_ops ops = {
96158+ static const struct skb_checksum_ops ops = {
96159 .update = csum_partial_ext,
96160 .combine = csum_block_add_ext,
96161 };
96162@@ -3147,13 +3147,15 @@ void __init skb_init(void)
96163 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
96164 sizeof(struct sk_buff),
96165 0,
96166- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
96167+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
96168+ SLAB_NO_SANITIZE,
96169 NULL);
96170 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
96171 (2*sizeof(struct sk_buff)) +
96172 sizeof(atomic_t),
96173 0,
96174- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
96175+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
96176+ SLAB_NO_SANITIZE,
96177 NULL);
96178 }
96179
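
The skb_init() change adds SLAB_NO_SANITIZE to both skbuff caches: grsecurity's slab sanitization scrubs objects on free so stale kernel data cannot leak through later allocations, and exempting the extremely hot sk_buff caches trades that wipe for performance. A toy model of the free-time wipe the flag opts out of, with invented names and sizes:

#include <stdlib.h>
#include <string.h>

#define SLAB_NO_SANITIZE 0x1u

struct toy_cache {
        size_t   size;
        unsigned flags;
};

static void toy_cache_free(struct toy_cache *c, void *obj)
{
        /* Sanitization scrubs freed objects unless the cache opted out. */
        if (!(c->flags & SLAB_NO_SANITIZE))
                memset(obj, 0xfe, c->size);
        free(obj);
}

int main(void)
{
        struct toy_cache skbuff_head_cache = { 232, SLAB_NO_SANITIZE };
        struct toy_cache ordinary_cache    = { 232, 0 };
        void *a = malloc(skbuff_head_cache.size);
        void *b = malloc(ordinary_cache.size);

        if (a) toy_cache_free(&skbuff_head_cache, a); /* hot path: no wipe */
        if (b) toy_cache_free(&ordinary_cache, b);    /* default: wiped */
        return 0;
}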
96180diff --git a/net/core/sock.c b/net/core/sock.c
96181index 5393b4b..997c88b 100644
96182--- a/net/core/sock.c
96183+++ b/net/core/sock.c
96184@@ -393,7 +393,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
96185 struct sk_buff_head *list = &sk->sk_receive_queue;
96186
96187 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
96188- atomic_inc(&sk->sk_drops);
96189+ atomic_inc_unchecked(&sk->sk_drops);
96190 trace_sock_rcvqueue_full(sk, skb);
96191 return -ENOMEM;
96192 }
96193@@ -403,7 +403,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
96194 return err;
96195
96196 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
96197- atomic_inc(&sk->sk_drops);
96198+ atomic_inc_unchecked(&sk->sk_drops);
96199 return -ENOBUFS;
96200 }
96201
96202@@ -423,7 +423,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
96203 skb_dst_force(skb);
96204
96205 spin_lock_irqsave(&list->lock, flags);
96206- skb->dropcount = atomic_read(&sk->sk_drops);
96207+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
96208 __skb_queue_tail(list, skb);
96209 spin_unlock_irqrestore(&list->lock, flags);
96210
96211@@ -443,7 +443,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
96212 skb->dev = NULL;
96213
96214 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
96215- atomic_inc(&sk->sk_drops);
96216+ atomic_inc_unchecked(&sk->sk_drops);
96217 goto discard_and_relse;
96218 }
96219 if (nested)
96220@@ -461,7 +461,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
96221 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
96222 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
96223 bh_unlock_sock(sk);
96224- atomic_inc(&sk->sk_drops);
96225+ atomic_inc_unchecked(&sk->sk_drops);
96226 goto discard_and_relse;
96227 }
96228
96229@@ -950,12 +950,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
96230 struct timeval tm;
96231 } v;
96232
96233- int lv = sizeof(int);
96234- int len;
96235+ unsigned int lv = sizeof(int);
96236+ unsigned int len;
96237
96238 if (get_user(len, optlen))
96239 return -EFAULT;
96240- if (len < 0)
96241+ if (len > INT_MAX)
96242 return -EINVAL;
96243
96244 memset(&v, 0, sizeof(v));
96245@@ -1107,11 +1107,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
96246
96247 case SO_PEERNAME:
96248 {
96249- char address[128];
96250+ char address[_K_SS_MAXSIZE];
96251
96252 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
96253 return -ENOTCONN;
96254- if (lv < len)
96255+ if (lv < len || sizeof address < len)
96256 return -EINVAL;
96257 if (copy_to_user(optval, address, len))
96258 return -EFAULT;
96259@@ -1188,7 +1188,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
96260
96261 if (len > lv)
96262 len = lv;
96263- if (copy_to_user(optval, &v, len))
96264+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
96265 return -EFAULT;
96266 lenout:
96267 if (put_user(len, optlen))
96268@@ -2351,7 +2351,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
96269 */
96270 smp_wmb();
96271 atomic_set(&sk->sk_refcnt, 1);
96272- atomic_set(&sk->sk_drops, 0);
96273+ atomic_set_unchecked(&sk->sk_drops, 0);
96274 }
96275 EXPORT_SYMBOL(sock_init_data);
96276
96277@@ -2476,6 +2476,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
96278 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
96279 int level, int type)
96280 {
96281+ struct sock_extended_err ee;
96282 struct sock_exterr_skb *serr;
96283 struct sk_buff *skb, *skb2;
96284 int copied, err;
96285@@ -2497,7 +2498,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
96286 sock_recv_timestamp(msg, sk, skb);
96287
96288 serr = SKB_EXT_ERR(skb);
96289- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
96290+ ee = serr->ee;
96291+ put_cmsg(msg, level, type, sizeof ee, &ee);
96292
96293 msg->msg_flags |= MSG_ERRQUEUE;
96294 err = copied;
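
The last sock.c hunk replaces put_cmsg(..., sizeof(serr->ee), &serr->ee) with a copy through a stack local: serr->ee is embedded inside a larger structure in the skb control buffer, and copying it into a fresh local of exactly struct sock_extended_err means the user-copy path is handed a complete stack object of the right size rather than a pointer into the middle of the skb, which the patch's usercopy checking would otherwise have to special-case. The SO_PEERNAME and final copy_to_user hunks earlier in the same file are the same bound-the-copy-by-the-object discipline as the ebtables changes. A sketch of the copy-to-exact-local idiom, with simplified types:

#include <stdio.h>
#include <string.h>

struct extended_err { int errno_val; int origin; };

struct skb_cb {                         /* larger containing buffer */
        char pad_before[16];
        struct extended_err ee;
        char pad_after[16];
};

/* Stand-in for put_cmsg()/copy_to_user(); the real hardened copy
 * validates that src is a complete, permitted object of size n. */
static void put_cmsg_model(void *user, const void *src, size_t n)
{
        memcpy(user, src, n);
}

int main(void)
{
        struct skb_cb cb = { {0}, { 111, 2 }, {0} };
        struct extended_err user_copy;

        /* Patched form: copy into an exact-sized stack object first,
         * then hand that object to the user-copy path. */
        struct extended_err ee = cb.ee;
        put_cmsg_model(&user_copy, &ee, sizeof ee);

        printf("errno=%d origin=%d\n", user_copy.errno_val, user_copy.origin);
        return 0;
}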
96295diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
96296index a0e9cf6..ef7f9ed 100644
96297--- a/net/core/sock_diag.c
96298+++ b/net/core/sock_diag.c
96299@@ -9,26 +9,33 @@
96300 #include <linux/inet_diag.h>
96301 #include <linux/sock_diag.h>
96302
96303-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
96304+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
96305 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
96306 static DEFINE_MUTEX(sock_diag_table_mutex);
96307
96308 int sock_diag_check_cookie(void *sk, __u32 *cookie)
96309 {
96310+#ifndef CONFIG_GRKERNSEC_HIDESYM
96311 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
96312 cookie[1] != INET_DIAG_NOCOOKIE) &&
96313 ((u32)(unsigned long)sk != cookie[0] ||
96314 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
96315 return -ESTALE;
96316 else
96317+#endif
96318 return 0;
96319 }
96320 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
96321
96322 void sock_diag_save_cookie(void *sk, __u32 *cookie)
96323 {
96324+#ifdef CONFIG_GRKERNSEC_HIDESYM
96325+ cookie[0] = 0;
96326+ cookie[1] = 0;
96327+#else
96328 cookie[0] = (u32)(unsigned long)sk;
96329 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
96330+#endif
96331 }
96332 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
96333
96334@@ -113,8 +120,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
96335 mutex_lock(&sock_diag_table_mutex);
96336 if (sock_diag_handlers[hndl->family])
96337 err = -EBUSY;
96338- else
96339+ else {
96340+ pax_open_kernel();
96341 sock_diag_handlers[hndl->family] = hndl;
96342+ pax_close_kernel();
96343+ }
96344 mutex_unlock(&sock_diag_table_mutex);
96345
96346 return err;
96347@@ -130,7 +140,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
96348
96349 mutex_lock(&sock_diag_table_mutex);
96350 BUG_ON(sock_diag_handlers[family] != hnld);
96351+ pax_open_kernel();
96352 sock_diag_handlers[family] = NULL;
96353+ pax_close_kernel();
96354 mutex_unlock(&sock_diag_table_mutex);
96355 }
96356 EXPORT_SYMBOL_GPL(sock_diag_unregister);
96357diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
96358index cca4441..5e616de 100644
96359--- a/net/core/sysctl_net_core.c
96360+++ b/net/core/sysctl_net_core.c
96361@@ -32,7 +32,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
96362 {
96363 unsigned int orig_size, size;
96364 int ret, i;
96365- struct ctl_table tmp = {
96366+ ctl_table_no_const tmp = {
96367 .data = &size,
96368 .maxlen = sizeof(size),
96369 .mode = table->mode
96370@@ -199,7 +199,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
96371 void __user *buffer, size_t *lenp, loff_t *ppos)
96372 {
96373 char id[IFNAMSIZ];
96374- struct ctl_table tbl = {
96375+ ctl_table_no_const tbl = {
96376 .data = id,
96377 .maxlen = IFNAMSIZ,
96378 };
96379@@ -378,13 +378,12 @@ static struct ctl_table netns_core_table[] = {
96380
96381 static __net_init int sysctl_core_net_init(struct net *net)
96382 {
96383- struct ctl_table *tbl;
96384+ ctl_table_no_const *tbl = NULL;
96385
96386 net->core.sysctl_somaxconn = SOMAXCONN;
96387
96388- tbl = netns_core_table;
96389 if (!net_eq(net, &init_net)) {
96390- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
96391+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
96392 if (tbl == NULL)
96393 goto err_dup;
96394
96395@@ -394,17 +393,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
96396 if (net->user_ns != &init_user_ns) {
96397 tbl[0].procname = NULL;
96398 }
96399- }
96400-
96401- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
96402+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
96403+ } else
96404+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
96405 if (net->core.sysctl_hdr == NULL)
96406 goto err_reg;
96407
96408 return 0;
96409
96410 err_reg:
96411- if (tbl != netns_core_table)
96412- kfree(tbl);
96413+ kfree(tbl);
96414 err_dup:
96415 return -ENOMEM;
96416 }
96417@@ -419,7 +417,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
96418 kfree(tbl);
96419 }
96420
96421-static __net_initdata struct pernet_operations sysctl_core_ops = {
96422+static __net_initconst struct pernet_operations sysctl_core_ops = {
96423 .init = sysctl_core_net_init,
96424 .exit = sysctl_core_net_exit,
96425 };
96426diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
96427index dd4d506..fb2fb87 100644
96428--- a/net/decnet/af_decnet.c
96429+++ b/net/decnet/af_decnet.c
96430@@ -465,6 +465,7 @@ static struct proto dn_proto = {
96431 .sysctl_rmem = sysctl_decnet_rmem,
96432 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
96433 .obj_size = sizeof(struct dn_sock),
96434+ .slab_flags = SLAB_USERCOPY,
96435 };
96436
96437 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
96438diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
96439index dd0dfb2..fdbc764 100644
96440--- a/net/decnet/dn_dev.c
96441+++ b/net/decnet/dn_dev.c
96442@@ -200,7 +200,7 @@ static struct dn_dev_sysctl_table {
96443 .extra1 = &min_t3,
96444 .extra2 = &max_t3
96445 },
96446- {0}
96447+ { }
96448 },
96449 };
96450
96451diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
96452index 5325b54..a0d4d69 100644
96453--- a/net/decnet/sysctl_net_decnet.c
96454+++ b/net/decnet/sysctl_net_decnet.c
96455@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
96456
96457 if (len > *lenp) len = *lenp;
96458
96459- if (copy_to_user(buffer, addr, len))
96460+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
96461 return -EFAULT;
96462
96463 *lenp = len;
96464@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
96465
96466 if (len > *lenp) len = *lenp;
96467
96468- if (copy_to_user(buffer, devname, len))
96469+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
96470 return -EFAULT;
96471
96472 *lenp = len;
96473diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
96474index 1865fdf..581a595 100644
96475--- a/net/ieee802154/dgram.c
96476+++ b/net/ieee802154/dgram.c
96477@@ -315,8 +315,9 @@ static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk,
96478 if (saddr) {
96479 saddr->family = AF_IEEE802154;
96480 saddr->addr = mac_cb(skb)->sa;
96481+ }
96482+ if (addr_len)
96483 *addr_len = sizeof(*saddr);
96484- }
96485
96486 if (flags & MSG_TRUNC)
96487 copied = skb->len;
96488diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
96489index 70011e0..454ca6a 100644
96490--- a/net/ipv4/af_inet.c
96491+++ b/net/ipv4/af_inet.c
96492@@ -1683,13 +1683,9 @@ static int __init inet_init(void)
96493
96494 BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
96495
96496- sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
96497- if (!sysctl_local_reserved_ports)
96498- goto out;
96499-
96500 rc = proto_register(&tcp_prot, 1);
96501 if (rc)
96502- goto out_free_reserved_ports;
96503+ goto out;
96504
96505 rc = proto_register(&udp_prot, 1);
96506 if (rc)
96507@@ -1796,8 +1792,6 @@ out_unregister_udp_proto:
96508 proto_unregister(&udp_prot);
96509 out_unregister_tcp_proto:
96510 proto_unregister(&tcp_prot);
96511-out_free_reserved_ports:
96512- kfree(sysctl_local_reserved_ports);
96513 goto out;
96514 }
96515
96516diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
96517index a1b5bcb..62ec5c6 100644
96518--- a/net/ipv4/devinet.c
96519+++ b/net/ipv4/devinet.c
96520@@ -1533,7 +1533,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
96521 idx = 0;
96522 head = &net->dev_index_head[h];
96523 rcu_read_lock();
96524- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
96525+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
96526 net->dev_base_seq;
96527 hlist_for_each_entry_rcu(dev, head, index_hlist) {
96528 if (idx < s_idx)
96529@@ -1844,7 +1844,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
96530 idx = 0;
96531 head = &net->dev_index_head[h];
96532 rcu_read_lock();
96533- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
96534+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
96535 net->dev_base_seq;
96536 hlist_for_each_entry_rcu(dev, head, index_hlist) {
96537 if (idx < s_idx)
96538@@ -2069,7 +2069,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
96539 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
96540 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
96541
96542-static struct devinet_sysctl_table {
96543+static const struct devinet_sysctl_table {
96544 struct ctl_table_header *sysctl_header;
96545 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
96546 } devinet_sysctl = {
96547@@ -2191,7 +2191,7 @@ static __net_init int devinet_init_net(struct net *net)
96548 int err;
96549 struct ipv4_devconf *all, *dflt;
96550 #ifdef CONFIG_SYSCTL
96551- struct ctl_table *tbl = ctl_forward_entry;
96552+ ctl_table_no_const *tbl = NULL;
96553 struct ctl_table_header *forw_hdr;
96554 #endif
96555
96556@@ -2209,7 +2209,7 @@ static __net_init int devinet_init_net(struct net *net)
96557 goto err_alloc_dflt;
96558
96559 #ifdef CONFIG_SYSCTL
96560- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
96561+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
96562 if (tbl == NULL)
96563 goto err_alloc_ctl;
96564
96565@@ -2229,7 +2229,10 @@ static __net_init int devinet_init_net(struct net *net)
96566 goto err_reg_dflt;
96567
96568 err = -ENOMEM;
96569- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
96570+ if (!net_eq(net, &init_net))
96571+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
96572+ else
96573+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
96574 if (forw_hdr == NULL)
96575 goto err_reg_ctl;
96576 net->ipv4.forw_hdr = forw_hdr;
96577@@ -2245,8 +2248,7 @@ err_reg_ctl:
96578 err_reg_dflt:
96579 __devinet_sysctl_unregister(all);
96580 err_reg_all:
96581- if (tbl != ctl_forward_entry)
96582- kfree(tbl);
96583+ kfree(tbl);
96584 err_alloc_ctl:
96585 #endif
96586 if (dflt != &ipv4_devconf_dflt)
96587diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
96588index c7539e2..b455e51 100644
96589--- a/net/ipv4/fib_frontend.c
96590+++ b/net/ipv4/fib_frontend.c
96591@@ -1015,12 +1015,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
96592 #ifdef CONFIG_IP_ROUTE_MULTIPATH
96593 fib_sync_up(dev);
96594 #endif
96595- atomic_inc(&net->ipv4.dev_addr_genid);
96596+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
96597 rt_cache_flush(dev_net(dev));
96598 break;
96599 case NETDEV_DOWN:
96600 fib_del_ifaddr(ifa, NULL);
96601- atomic_inc(&net->ipv4.dev_addr_genid);
96602+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
96603 if (ifa->ifa_dev->ifa_list == NULL) {
96604 /* Last address was deleted from this interface.
96605 * Disable IP.
96606@@ -1058,7 +1058,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
96607 #ifdef CONFIG_IP_ROUTE_MULTIPATH
96608 fib_sync_up(dev);
96609 #endif
96610- atomic_inc(&net->ipv4.dev_addr_genid);
96611+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
96612 rt_cache_flush(net);
96613 break;
96614 case NETDEV_DOWN:
96615diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
96616index e63f47a..e5c531d 100644
96617--- a/net/ipv4/fib_semantics.c
96618+++ b/net/ipv4/fib_semantics.c
96619@@ -766,7 +766,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
96620 nh->nh_saddr = inet_select_addr(nh->nh_dev,
96621 nh->nh_gw,
96622 nh->nh_parent->fib_scope);
96623- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
96624+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
96625
96626 return nh->nh_saddr;
96627 }
96628diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
96629index fc0e649..febfa65 100644
96630--- a/net/ipv4/inet_connection_sock.c
96631+++ b/net/ipv4/inet_connection_sock.c
96632@@ -29,7 +29,7 @@ const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
96633 EXPORT_SYMBOL(inet_csk_timer_bug_msg);
96634 #endif
96635
96636-unsigned long *sysctl_local_reserved_ports;
96637+unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
96638 EXPORT_SYMBOL(sysctl_local_reserved_ports);
96639
96640 void inet_get_local_port_range(struct net *net, int *low, int *high)
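
The af_inet.c and inet_connection_sock.c changes pair up: instead of kzalloc'ing the 65536-bit reserved-ports bitmap at init time and freeing it on the error paths, the bitmap becomes a static array, which removes an allocation failure path entirely and gives the object a fixed, compiler-visible size. The sizing expression packs exactly one bit per port into unsigned longs; a quick check of the arithmetic:

#include <stdio.h>

/* 65536 ports, one bit each, packed into unsigned longs: */
static unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];

int main(void)
{
        printf("entries=%zu bytes=%zu bits=%zu\n",
               sizeof(sysctl_local_reserved_ports) / sizeof(unsigned long),
               sizeof(sysctl_local_reserved_ports),
               sizeof(sysctl_local_reserved_ports) * 8);
        /* on LP64: entries=1024 bytes=8192 bits=65536 */
        return 0;
}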
96641diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
96642index 8b9cf27..0d8d592 100644
96643--- a/net/ipv4/inet_hashtables.c
96644+++ b/net/ipv4/inet_hashtables.c
96645@@ -18,6 +18,7 @@
96646 #include <linux/sched.h>
96647 #include <linux/slab.h>
96648 #include <linux/wait.h>
96649+#include <linux/security.h>
96650
96651 #include <net/inet_connection_sock.h>
96652 #include <net/inet_hashtables.h>
96653@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
96654 return inet_ehashfn(net, laddr, lport, faddr, fport);
96655 }
96656
96657+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
96658+
96659 /*
96660 * Allocate and initialize a new local port bind bucket.
96661 * The bindhash mutex for snum's hash chain must be held here.
96662@@ -554,6 +557,8 @@ ok:
96663 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
96664 spin_unlock(&head->lock);
96665
96666+ gr_update_task_in_ip_table(current, inet_sk(sk));
96667+
96668 if (tw) {
96669 inet_twsk_deschedule(tw, death_row);
96670 while (twrefcnt) {
96671diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
96672index 33d5537..da337a4 100644
96673--- a/net/ipv4/inetpeer.c
96674+++ b/net/ipv4/inetpeer.c
96675@@ -503,8 +503,8 @@ relookup:
96676 if (p) {
96677 p->daddr = *daddr;
96678 atomic_set(&p->refcnt, 1);
96679- atomic_set(&p->rid, 0);
96680- atomic_set(&p->ip_id_count,
96681+ atomic_set_unchecked(&p->rid, 0);
96682+ atomic_set_unchecked(&p->ip_id_count,
96683 (daddr->family == AF_INET) ?
96684 secure_ip_id(daddr->addr.a4) :
96685 secure_ipv6_id(daddr->addr.a6));
96686diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
96687index 2481993..2d9a7a7 100644
96688--- a/net/ipv4/ip_fragment.c
96689+++ b/net/ipv4/ip_fragment.c
96690@@ -283,7 +283,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
96691 return 0;
96692
96693 start = qp->rid;
96694- end = atomic_inc_return(&peer->rid);
96695+ end = atomic_inc_return_unchecked(&peer->rid);
96696 qp->rid = end;
96697
96698 rc = qp->q.fragments && (end - start) > max;
96699@@ -760,12 +760,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
96700
96701 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
96702 {
96703- struct ctl_table *table;
96704+ ctl_table_no_const *table = NULL;
96705 struct ctl_table_header *hdr;
96706
96707- table = ip4_frags_ns_ctl_table;
96708 if (!net_eq(net, &init_net)) {
96709- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
96710+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
96711 if (table == NULL)
96712 goto err_alloc;
96713
96714@@ -776,9 +775,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
96715 /* Don't export sysctls to unprivileged users */
96716 if (net->user_ns != &init_user_ns)
96717 table[0].procname = NULL;
96718- }
96719+ hdr = register_net_sysctl(net, "net/ipv4", table);
96720+ } else
96721+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
96722
96723- hdr = register_net_sysctl(net, "net/ipv4", table);
96724 if (hdr == NULL)
96725 goto err_reg;
96726
96727@@ -786,8 +786,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
96728 return 0;
96729
96730 err_reg:
96731- if (!net_eq(net, &init_net))
96732- kfree(table);
96733+ kfree(table);
96734 err_alloc:
96735 return -ENOMEM;
96736 }
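This is the first of several identical reworks in this patch (route.c, sysctl_net_ipv4.c, xfrm4_policy.c, and the ipv6 counterparts below): constification makes the template ctl_table const, so only non-init namespaces take a mutable kmemdup() copy while init_net registers the template directly, and the error path may call kfree(table) unconditionally because table remains NULL on the init_net branch and kfree(NULL) is a no-op. A condensed sketch of the pattern; template_table, per_net_value, and example_hdr are illustrative names, and ctl_table_no_const is the non-const alias this patch introduces elsewhere:

static int __net_init example_sysctl_register(struct net *net)
{
        ctl_table_no_const *table = NULL;       /* stays NULL for init_net */
        struct ctl_table_header *hdr;

        if (!net_eq(net, &init_net)) {
                table = kmemdup(template_table, sizeof(template_table),
                                GFP_KERNEL);
                if (table == NULL)
                        return -ENOMEM;
                table[0].data = &net->per_net_value;    /* per-netns fixup */
                hdr = register_net_sysctl(net, "net/example", table);
        } else {
                /* init_net uses the (now const) template as-is */
                hdr = register_net_sysctl(net, "net/example", template_table);
        }
        if (hdr == NULL) {
                kfree(table);                   /* kfree(NULL) is a no-op */
                return -ENOMEM;
        }
        net->example_hdr = hdr;
        return 0;
}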
96737diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
96738index d306360..1c1a1f1 100644
96739--- a/net/ipv4/ip_gre.c
96740+++ b/net/ipv4/ip_gre.c
96741@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
96742 module_param(log_ecn_error, bool, 0644);
96743 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
96744
96745-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
96746+static struct rtnl_link_ops ipgre_link_ops;
96747 static int ipgre_tunnel_init(struct net_device *dev);
96748
96749 static int ipgre_net_id __read_mostly;
96750@@ -732,7 +732,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
96751 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
96752 };
96753
96754-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
96755+static struct rtnl_link_ops ipgre_link_ops = {
96756 .kind = "gre",
96757 .maxtype = IFLA_GRE_MAX,
96758 .policy = ipgre_policy,
96759@@ -746,7 +746,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
96760 .fill_info = ipgre_fill_info,
96761 };
96762
96763-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
96764+static struct rtnl_link_ops ipgre_tap_ops = {
96765 .kind = "gretap",
96766 .maxtype = IFLA_GRE_MAX,
96767 .policy = ipgre_policy,
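Dropping __read_mostly from ipgre_link_ops and its siblings (ip_vti.c, ipip.c, sit.c, and the ip6 tunnel drivers below) is a knock-on effect of constification: a statically initialized, annotated ops structure would be placed in a read-only section, but rtnl_link_register() writes to the object when it threads it onto the global link_ops list, so it must stay writable. A kernel-context sketch of the mutation that rules out read-only placement (rtnl_link_ops_like and pseudo_link_ops are illustrative stand-ins):

#include <linux/list.h>

struct rtnl_link_ops_like {
        struct list_head list;          /* written at registration time */
        const char *kind;
};

static LIST_HEAD(pseudo_link_ops);

static void register_link_ops_sketch(struct rtnl_link_ops_like *ops)
{
        /* This store into *ops is why the object cannot be const. */
        list_add_tail(&ops->list, &pseudo_link_ops);
}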
96768diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
96769index ddf32a6..3fdeea9 100644
96770--- a/net/ipv4/ip_sockglue.c
96771+++ b/net/ipv4/ip_sockglue.c
96772@@ -1172,7 +1172,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
96773 len = min_t(unsigned int, len, opt->optlen);
96774 if (put_user(len, optlen))
96775 return -EFAULT;
96776- if (copy_to_user(optval, opt->__data, len))
96777+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
96778+ copy_to_user(optval, opt->__data, len))
96779 return -EFAULT;
96780 return 0;
96781 }
96782@@ -1303,7 +1304,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
96783 if (sk->sk_type != SOCK_STREAM)
96784 return -ENOPROTOOPT;
96785
96786- msg.msg_control = optval;
96787+ msg.msg_control = (void __force_kernel *)optval;
96788 msg.msg_controllen = len;
96789 msg.msg_flags = flags;
96790
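The added comparison in do_ip_getsockopt pins len, which derives from opt->optlen, to what the on-stack optbuf can hold past its struct ip_options header, so a corrupted or oversized optlen fails with -EFAULT instead of over-reading adjacent stack into userland. The guard's general shape, with dst_capacity standing in for sizeof(optbuf) - sizeof(struct ip_options):

/* Refuse rather than over-read: bound the copy by the real buffer. */
if (len > dst_capacity || copy_to_user(optval, opt->__data, len))
        return -EFAULT;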
96791diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
96792index 52b802a..b725179 100644
96793--- a/net/ipv4/ip_vti.c
96794+++ b/net/ipv4/ip_vti.c
96795@@ -44,7 +44,7 @@
96796 #include <net/net_namespace.h>
96797 #include <net/netns/generic.h>
96798
96799-static struct rtnl_link_ops vti_link_ops __read_mostly;
96800+static struct rtnl_link_ops vti_link_ops;
96801
96802 static int vti_net_id __read_mostly;
96803 static int vti_tunnel_init(struct net_device *dev);
96804@@ -360,7 +360,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
96805 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
96806 };
96807
96808-static struct rtnl_link_ops vti_link_ops __read_mostly = {
96809+static struct rtnl_link_ops vti_link_ops = {
96810 .kind = "vti",
96811 .maxtype = IFLA_VTI_MAX,
96812 .policy = vti_policy,
96813diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
96814index efa1138..20dbba0 100644
96815--- a/net/ipv4/ipconfig.c
96816+++ b/net/ipv4/ipconfig.c
96817@@ -334,7 +334,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
96818
96819 mm_segment_t oldfs = get_fs();
96820 set_fs(get_ds());
96821- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
96822+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
96823 set_fs(oldfs);
96824 return res;
96825 }
96826@@ -345,7 +345,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
96827
96828 mm_segment_t oldfs = get_fs();
96829 set_fs(get_ds());
96830- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
96831+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
96832 set_fs(oldfs);
96833 return res;
96834 }
96835@@ -356,7 +356,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
96836
96837 mm_segment_t oldfs = get_fs();
96838 set_fs(get_ds());
96839- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
96840+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
96841 set_fs(oldfs);
96842 return res;
96843 }
96844diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
96845index fe3e9f7..4956990 100644
96846--- a/net/ipv4/ipip.c
96847+++ b/net/ipv4/ipip.c
96848@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
96849 static int ipip_net_id __read_mostly;
96850
96851 static int ipip_tunnel_init(struct net_device *dev);
96852-static struct rtnl_link_ops ipip_link_ops __read_mostly;
96853+static struct rtnl_link_ops ipip_link_ops;
96854
96855 static int ipip_err(struct sk_buff *skb, u32 info)
96856 {
96857@@ -409,7 +409,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
96858 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
96859 };
96860
96861-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
96862+static struct rtnl_link_ops ipip_link_ops = {
96863 .kind = "ipip",
96864 .maxtype = IFLA_IPTUN_MAX,
96865 .policy = ipip_policy,
96866diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
96867index 59da7cd..e318de1 100644
96868--- a/net/ipv4/netfilter/arp_tables.c
96869+++ b/net/ipv4/netfilter/arp_tables.c
96870@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
96871 #endif
96872
96873 static int get_info(struct net *net, void __user *user,
96874- const int *len, int compat)
96875+ int len, int compat)
96876 {
96877 char name[XT_TABLE_MAXNAMELEN];
96878 struct xt_table *t;
96879 int ret;
96880
96881- if (*len != sizeof(struct arpt_getinfo)) {
96882- duprintf("length %u != %Zu\n", *len,
96883+ if (len != sizeof(struct arpt_getinfo)) {
96884+ duprintf("length %u != %Zu\n", len,
96885 sizeof(struct arpt_getinfo));
96886 return -EINVAL;
96887 }
96888@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
96889 info.size = private->size;
96890 strcpy(info.name, name);
96891
96892- if (copy_to_user(user, &info, *len) != 0)
96893+ if (copy_to_user(user, &info, len) != 0)
96894 ret = -EFAULT;
96895 else
96896 ret = 0;
96897@@ -1688,7 +1688,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
96898
96899 switch (cmd) {
96900 case ARPT_SO_GET_INFO:
96901- ret = get_info(sock_net(sk), user, len, 1);
96902+ ret = get_info(sock_net(sk), user, *len, 1);
96903 break;
96904 case ARPT_SO_GET_ENTRIES:
96905 ret = compat_get_entries(sock_net(sk), user, len);
96906@@ -1733,7 +1733,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
96907
96908 switch (cmd) {
96909 case ARPT_SO_GET_INFO:
96910- ret = get_info(sock_net(sk), user, len, 0);
96911+ ret = get_info(sock_net(sk), user, *len, 0);
96912 break;
96913
96914 case ARPT_SO_GET_ENTRIES:
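Changing get_info() to take len by value closes a classic double-fetch: with the const int * form, the length is dereferenced once for the validity check and again for the copy_to_user() size, and anything that changes the pointee between the two reads defeats the check. The caller already snapshots the value with get_user(), so dereferencing at the call site (*len) hands the callee one immutable copy. The same conversion is applied to ip_tables.c and ip6_tables.c below. A self-contained illustration of the two shapes:

#include <string.h>

struct info { int size; };

/* Hazardous: two reads of a length that may change in between. */
static int get_info_racy(struct info *out, const struct info *src,
                         const volatile int *len)
{
        if (*len != (int)sizeof(*src))          /* fetch #1: validate */
                return -1;
        memcpy(out, src, *len);                 /* fetch #2: use */
        return 0;
}

/* Fixed: the caller snapshots once; check and use see one value. */
static int get_info_fixed(struct info *out, const struct info *src, int len)
{
        if (len != (int)sizeof(*src))
                return -1;
        memcpy(out, src, len);
        return 0;
}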
96915diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
96916index 718dfbd..cef4152 100644
96917--- a/net/ipv4/netfilter/ip_tables.c
96918+++ b/net/ipv4/netfilter/ip_tables.c
96919@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
96920 #endif
96921
96922 static int get_info(struct net *net, void __user *user,
96923- const int *len, int compat)
96924+ int len, int compat)
96925 {
96926 char name[XT_TABLE_MAXNAMELEN];
96927 struct xt_table *t;
96928 int ret;
96929
96930- if (*len != sizeof(struct ipt_getinfo)) {
96931- duprintf("length %u != %zu\n", *len,
96932+ if (len != sizeof(struct ipt_getinfo)) {
96933+ duprintf("length %u != %zu\n", len,
96934 sizeof(struct ipt_getinfo));
96935 return -EINVAL;
96936 }
96937@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
96938 info.size = private->size;
96939 strcpy(info.name, name);
96940
96941- if (copy_to_user(user, &info, *len) != 0)
96942+ if (copy_to_user(user, &info, len) != 0)
96943 ret = -EFAULT;
96944 else
96945 ret = 0;
96946@@ -1971,7 +1971,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
96947
96948 switch (cmd) {
96949 case IPT_SO_GET_INFO:
96950- ret = get_info(sock_net(sk), user, len, 1);
96951+ ret = get_info(sock_net(sk), user, *len, 1);
96952 break;
96953 case IPT_SO_GET_ENTRIES:
96954 ret = compat_get_entries(sock_net(sk), user, len);
96955@@ -2018,7 +2018,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
96956
96957 switch (cmd) {
96958 case IPT_SO_GET_INFO:
96959- ret = get_info(sock_net(sk), user, len, 0);
96960+ ret = get_info(sock_net(sk), user, *len, 0);
96961 break;
96962
96963 case IPT_SO_GET_ENTRIES:
96964diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
96965index 242e7f4..a084e95 100644
96966--- a/net/ipv4/ping.c
96967+++ b/net/ipv4/ping.c
96968@@ -55,7 +55,7 @@
96969
96970
96971 struct ping_table ping_table;
96972-struct pingv6_ops pingv6_ops;
96973+struct pingv6_ops *pingv6_ops;
96974 EXPORT_SYMBOL_GPL(pingv6_ops);
96975
96976 static u16 ping_port_rover;
96977@@ -334,7 +334,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
96978 return -ENODEV;
96979 }
96980 }
96981- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
96982+ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
96983 scoped);
96984 rcu_read_unlock();
96985
96986@@ -542,7 +542,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
96987 }
96988 #if IS_ENABLED(CONFIG_IPV6)
96989 } else if (skb->protocol == htons(ETH_P_IPV6)) {
96990- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
96991+ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
96992 #endif
96993 }
96994
96995@@ -560,7 +560,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
96996 info, (u8 *)icmph);
96997 #if IS_ENABLED(CONFIG_IPV6)
96998 } else if (family == AF_INET6) {
96999- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
97000+ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
97001 info, (u8 *)icmph);
97002 #endif
97003 }
97004@@ -830,6 +830,8 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
97005 {
97006 struct inet_sock *isk = inet_sk(sk);
97007 int family = sk->sk_family;
97008+ struct sockaddr_in *sin;
97009+ struct sockaddr_in6 *sin6;
97010 struct sk_buff *skb;
97011 int copied, err;
97012
97013@@ -839,12 +841,19 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
97014 if (flags & MSG_OOB)
97015 goto out;
97016
97017+ if (addr_len) {
97018+ if (family == AF_INET)
97019+ *addr_len = sizeof(*sin);
97020+	else if (family == AF_INET6)
97021+ *addr_len = sizeof(*sin6);
97022+ }
97023+
97024 if (flags & MSG_ERRQUEUE) {
97025 if (family == AF_INET) {
97026 return ip_recv_error(sk, msg, len, addr_len);
97027 #if IS_ENABLED(CONFIG_IPV6)
97028 } else if (family == AF_INET6) {
97029- return pingv6_ops.ipv6_recv_error(sk, msg, len,
97030+ return pingv6_ops->ipv6_recv_error(sk, msg, len,
97031 addr_len);
97032 #endif
97033 }
97034@@ -876,7 +885,6 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
97035 sin->sin_port = 0 /* skb->h.uh->source */;
97036 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
97037 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
97038- *addr_len = sizeof(*sin);
97039 }
97040
97041 if (isk->cmsg_flags)
97042@@ -899,11 +907,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
97043 sin6->sin6_scope_id =
97044 ipv6_iface_scope_id(&sin6->sin6_addr,
97045 IP6CB(skb)->iif);
97046- *addr_len = sizeof(*sin6);
97047 }
97048
97049 if (inet6_sk(sk)->rxopt.all)
97050- pingv6_ops.ip6_datagram_recv_ctl(sk, msg, skb);
97051+ pingv6_ops->ip6_datagram_recv_ctl(sk, msg, skb);
97052 #endif
97053 } else {
97054 BUG();
97055@@ -1093,7 +1100,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
97056 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
97057 0, sock_i_ino(sp),
97058 atomic_read(&sp->sk_refcnt), sp,
97059- atomic_read(&sp->sk_drops));
97060+ atomic_read_unchecked(&sp->sk_drops));
97061 }
97062
97063 static int ping_v4_seq_show(struct seq_file *seq, void *v)
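Hoisting the *addr_len assignment to the top of ping_recvmsg (and of raw_recvmsg, udp_recvmsg, rawv6_recvmsg, and udpv6_recvmsg below) follows the kernel's recvmsg infoleak fix: paths that return before populating msg_name previously left *addr_len untouched, and recvfrom() would copy that stale kernel value back to userland. The recurring shape, sketched as a kernel-style fragment with an illustrative function name:

static int example_recvmsg_sketch(struct sock *sk, struct msghdr *msg,
                                  size_t len, int flags, int *addr_len)
{
        /* Set before any early return so no exit path reports a stale
         * or uninitialized address length to userland. */
        if (addr_len)
                *addr_len = sizeof(struct sockaddr_in);

        if (flags & MSG_ERRQUEUE)
                return ip_recv_error(sk, msg, len, addr_len);

        /* ... normal receive path ... */
        return 0;
}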
97064diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
97065index 23c3e5b..cdb8b36 100644
97066--- a/net/ipv4/raw.c
97067+++ b/net/ipv4/raw.c
97068@@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
97069 int raw_rcv(struct sock *sk, struct sk_buff *skb)
97070 {
97071 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
97072- atomic_inc(&sk->sk_drops);
97073+ atomic_inc_unchecked(&sk->sk_drops);
97074 kfree_skb(skb);
97075 return NET_RX_DROP;
97076 }
97077@@ -696,6 +696,9 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
97078 if (flags & MSG_OOB)
97079 goto out;
97080
97081+ if (addr_len)
97082+ *addr_len = sizeof(*sin);
97083+
97084 if (flags & MSG_ERRQUEUE) {
97085 err = ip_recv_error(sk, msg, len, addr_len);
97086 goto out;
97087@@ -723,7 +726,6 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
97088 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
97089 sin->sin_port = 0;
97090 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
97091- *addr_len = sizeof(*sin);
97092 }
97093 if (inet->cmsg_flags)
97094 ip_cmsg_recv(msg, skb);
97095@@ -748,16 +750,20 @@ static int raw_init(struct sock *sk)
97096
97097 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
97098 {
97099+ struct icmp_filter filter;
97100+
97101 if (optlen > sizeof(struct icmp_filter))
97102 optlen = sizeof(struct icmp_filter);
97103- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
97104+ if (copy_from_user(&filter, optval, optlen))
97105 return -EFAULT;
97106+ raw_sk(sk)->filter = filter;
97107 return 0;
97108 }
97109
97110 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
97111 {
97112 int len, ret = -EFAULT;
97113+ struct icmp_filter filter;
97114
97115 if (get_user(len, optlen))
97116 goto out;
97117@@ -767,8 +773,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
97118 if (len > sizeof(struct icmp_filter))
97119 len = sizeof(struct icmp_filter);
97120 ret = -EFAULT;
97121- if (put_user(len, optlen) ||
97122- copy_to_user(optval, &raw_sk(sk)->filter, len))
97123+ filter = raw_sk(sk)->filter;
97124+	if (put_user(len, optlen) || len > sizeof(filter) || copy_to_user(optval, &filter, len))
97125 goto out;
97126 ret = 0;
97127 out: return ret;
97128@@ -997,7 +1003,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
97129 0, 0L, 0,
97130 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
97131 0, sock_i_ino(sp),
97132- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
97133+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
97134 }
97135
97136 static int raw_seq_show(struct seq_file *seq, void *v)
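raw_seticmpfilter() and raw_geticmpfilter() now stage the ICMP filter in a stack bounce buffer instead of pointing copy_from_user()/copy_to_user() at raw_sk(sk)->filter directly: a faulting or partial user copy can no longer leave the live socket filter half-written, and the get side re-bounds len against the local copy before anything goes out. The pattern in isolation:

struct icmp_filter tmp;

if (optlen > sizeof(tmp))
        optlen = sizeof(tmp);
if (copy_from_user(&tmp, optval, optlen))
        return -EFAULT;                 /* socket state untouched on fault */
raw_sk(sk)->filter = tmp;               /* one coherent store */
return 0;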
97137diff --git a/net/ipv4/route.c b/net/ipv4/route.c
97138index f8da282..133a1c7 100644
97139--- a/net/ipv4/route.c
97140+++ b/net/ipv4/route.c
97141@@ -2621,34 +2621,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
97142 .maxlen = sizeof(int),
97143 .mode = 0200,
97144 .proc_handler = ipv4_sysctl_rtcache_flush,
97145+ .extra1 = &init_net,
97146 },
97147 { },
97148 };
97149
97150 static __net_init int sysctl_route_net_init(struct net *net)
97151 {
97152- struct ctl_table *tbl;
97153+ ctl_table_no_const *tbl = NULL;
97154
97155- tbl = ipv4_route_flush_table;
97156 if (!net_eq(net, &init_net)) {
97157- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
97158+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
97159 if (tbl == NULL)
97160 goto err_dup;
97161
97162 /* Don't export sysctls to unprivileged users */
97163 if (net->user_ns != &init_user_ns)
97164 tbl[0].procname = NULL;
97165- }
97166- tbl[0].extra1 = net;
97167+ tbl[0].extra1 = net;
97168+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
97169+ } else
97170+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
97171
97172- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
97173 if (net->ipv4.route_hdr == NULL)
97174 goto err_reg;
97175 return 0;
97176
97177 err_reg:
97178- if (tbl != ipv4_route_flush_table)
97179- kfree(tbl);
97180+ kfree(tbl);
97181 err_dup:
97182 return -ENOMEM;
97183 }
97184@@ -2671,8 +2671,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
97185
97186 static __net_init int rt_genid_init(struct net *net)
97187 {
97188- atomic_set(&net->ipv4.rt_genid, 0);
97189- atomic_set(&net->fnhe_genid, 0);
97190+ atomic_set_unchecked(&net->ipv4.rt_genid, 0);
97191+ atomic_set_unchecked(&net->fnhe_genid, 0);
97192 get_random_bytes(&net->ipv4.dev_addr_genid,
97193 sizeof(net->ipv4.dev_addr_genid));
97194 return 0;
97195diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
97196index 3d69ec8..57207b4 100644
97197--- a/net/ipv4/sysctl_net_ipv4.c
97198+++ b/net/ipv4/sysctl_net_ipv4.c
97199@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
97200 container_of(table->data, struct net, ipv4.sysctl_local_ports.range);
97201 int ret;
97202 int range[2];
97203- struct ctl_table tmp = {
97204+ ctl_table_no_const tmp = {
97205 .data = &range,
97206 .maxlen = sizeof(range),
97207 .mode = table->mode,
97208@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
97209 int ret;
97210 gid_t urange[2];
97211 kgid_t low, high;
97212- struct ctl_table tmp = {
97213+ ctl_table_no_const tmp = {
97214 .data = &urange,
97215 .maxlen = sizeof(urange),
97216 .mode = table->mode,
97217@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
97218 void __user *buffer, size_t *lenp, loff_t *ppos)
97219 {
97220 char val[TCP_CA_NAME_MAX];
97221- struct ctl_table tbl = {
97222+ ctl_table_no_const tbl = {
97223 .data = val,
97224 .maxlen = TCP_CA_NAME_MAX,
97225 };
97226@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
97227 void __user *buffer, size_t *lenp,
97228 loff_t *ppos)
97229 {
97230- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
97231+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
97232 int ret;
97233
97234 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
97235@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
97236 void __user *buffer, size_t *lenp,
97237 loff_t *ppos)
97238 {
97239- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
97240+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
97241 int ret;
97242
97243 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
97244@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
97245 void __user *buffer, size_t *lenp,
97246 loff_t *ppos)
97247 {
97248- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
97249+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
97250 struct tcp_fastopen_context *ctxt;
97251 int ret;
97252 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
97253@@ -445,7 +445,7 @@ static struct ctl_table ipv4_table[] = {
97254 },
97255 {
97256 .procname = "ip_local_reserved_ports",
97257- .data = NULL, /* initialized in sysctl_ipv4_init */
97258+ .data = sysctl_local_reserved_ports,
97259 .maxlen = 65536,
97260 .mode = 0644,
97261 .proc_handler = proc_do_large_bitmap,
97262@@ -827,13 +827,12 @@ static struct ctl_table ipv4_net_table[] = {
97263
97264 static __net_init int ipv4_sysctl_init_net(struct net *net)
97265 {
97266- struct ctl_table *table;
97267+ ctl_table_no_const *table = NULL;
97268
97269- table = ipv4_net_table;
97270 if (!net_eq(net, &init_net)) {
97271 int i;
97272
97273- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
97274+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
97275 if (table == NULL)
97276 goto err_alloc;
97277
97278@@ -856,15 +855,17 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
97279 net->ipv4.sysctl_local_ports.range[0] = 32768;
97280 net->ipv4.sysctl_local_ports.range[1] = 61000;
97281
97282- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
97283+ if (!net_eq(net, &init_net))
97284+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
97285+ else
97286+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
97287 if (net->ipv4.ipv4_hdr == NULL)
97288 goto err_reg;
97289
97290 return 0;
97291
97292 err_reg:
97293- if (!net_eq(net, &init_net))
97294- kfree(table);
97295+ kfree(table);
97296 err_alloc:
97297 return -ENOMEM;
97298 }
97299@@ -886,16 +887,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
97300 static __init int sysctl_ipv4_init(void)
97301 {
97302 struct ctl_table_header *hdr;
97303- struct ctl_table *i;
97304-
97305- for (i = ipv4_table; i->procname; i++) {
97306- if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
97307- i->data = sysctl_local_reserved_ports;
97308- break;
97309- }
97310- }
97311- if (!i->procname)
97312- return -EINVAL;
97313
97314 hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
97315 if (hdr == NULL)
97316diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
97317index c53b7f3..a89aadd 100644
97318--- a/net/ipv4/tcp_input.c
97319+++ b/net/ipv4/tcp_input.c
97320@@ -759,7 +759,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
97321 * without any lock. We want to make sure compiler wont store
97322 * intermediate values in this location.
97323 */
97324- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
97325+ ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
97326 sk->sk_max_pacing_rate);
97327 }
97328
97329@@ -4482,7 +4482,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
97330 * simplifies code)
97331 */
97332 static void
97333-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
97334+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
97335 struct sk_buff *head, struct sk_buff *tail,
97336 u32 start, u32 end)
97337 {
97338@@ -5559,6 +5559,7 @@ discard:
97339 tcp_paws_reject(&tp->rx_opt, 0))
97340 goto discard_and_undo;
97341
97342+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
97343 if (th->syn) {
97344 /* We see SYN without ACK. It is attempt of
97345 * simultaneous connect with crossed SYNs.
97346@@ -5609,6 +5610,7 @@ discard:
97347 goto discard;
97348 #endif
97349 }
97350+#endif
97351 /* "fifth, if neither of the SYN or RST bits is set then
97352 * drop the segment and return."
97353 */
97354@@ -5655,7 +5657,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
97355 goto discard;
97356
97357 if (th->syn) {
97358- if (th->fin)
97359+ if (th->fin || th->urg || th->psh)
97360 goto discard;
97361 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
97362 return 1;
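Broadening the guard from th->fin to th->fin || th->urg || th->psh makes the listener drop SYNs that arrive with URG or PSH set as well; real connection attempts never combine these with SYN, so the extra flags mark crafted probes (Xmas-style stealth scans) that are now silently discarded. The #ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT block above it similarly compiles out RFC 793 simultaneous-open handling when that option is set, since crossed-SYN connects are vanishingly rare and the code path has been a recurring bug source. The flag test as a predicate:

#include <linux/tcp.h>

/* A SYN worth processing carries none of these companion flags. */
static inline int syn_flags_bogus(const struct tcphdr *th)
{
        return th->fin || th->urg || th->psh;
}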
97363diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
97364index 0672139..cacc17d 100644
97365--- a/net/ipv4/tcp_ipv4.c
97366+++ b/net/ipv4/tcp_ipv4.c
97367@@ -91,6 +91,10 @@ int sysctl_tcp_low_latency __read_mostly;
97368 EXPORT_SYMBOL(sysctl_tcp_low_latency);
97369
97370
97371+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
97372+extern int grsec_enable_blackhole;
97373+#endif
97374+
97375 #ifdef CONFIG_TCP_MD5SIG
97376 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
97377 __be32 daddr, __be32 saddr, const struct tcphdr *th);
97378@@ -1830,6 +1834,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
97379 return 0;
97380
97381 reset:
97382+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
97383+ if (!grsec_enable_blackhole)
97384+#endif
97385 tcp_v4_send_reset(rsk, skb);
97386 discard:
97387 kfree_skb(skb);
97388@@ -1975,12 +1982,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
97389 TCP_SKB_CB(skb)->sacked = 0;
97390
97391 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
97392- if (!sk)
97393+ if (!sk) {
97394+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
97395+ ret = 1;
97396+#endif
97397 goto no_tcp_socket;
97398-
97399+ }
97400 process:
97401- if (sk->sk_state == TCP_TIME_WAIT)
97402+ if (sk->sk_state == TCP_TIME_WAIT) {
97403+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
97404+ ret = 2;
97405+#endif
97406 goto do_time_wait;
97407+ }
97408
97409 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
97410 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
97411@@ -2034,6 +2048,10 @@ csum_error:
97412 bad_packet:
97413 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
97414 } else {
97415+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
97416+ if (!grsec_enable_blackhole || (ret == 1 &&
97417+ (skb->dev->flags & IFF_LOOPBACK)))
97418+#endif
97419 tcp_v4_send_reset(NULL, skb);
97420 }
97421
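Both RST emission points in tcp_v4_do_rcv()/tcp_v4_rcv() are now gated on grsec_enable_blackhole (as is the embryonic_reset path in tcp_minisocks.c below): with the sysctl enabled, segments aimed at closed or unknown ports are dropped without a reset, so a remote scanner cannot distinguish "closed" from "filtered", while the ret == 1 && IFF_LOOPBACK escape keeps resets flowing on loopback for local tooling. The decision, condensed into one helper (illustrative, not from the patch):

/* grsec_enable_blackhole is the extern sysctl toggle declared above. */
static bool should_send_reset(int lookup_result, const struct sk_buff *skb)
{
        if (!grsec_enable_blackhole)
                return true;            /* stock RFC behaviour */
        /* Loopback stays honest so local diagnostics keep working. */
        return lookup_result == 1 && (skb->dev->flags & IFF_LOOPBACK);
}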
97422diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
97423index 97b6841..0893357 100644
97424--- a/net/ipv4/tcp_minisocks.c
97425+++ b/net/ipv4/tcp_minisocks.c
97426@@ -27,6 +27,10 @@
97427 #include <net/inet_common.h>
97428 #include <net/xfrm.h>
97429
97430+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
97431+extern int grsec_enable_blackhole;
97432+#endif
97433+
97434 int sysctl_tcp_syncookies __read_mostly = 1;
97435 EXPORT_SYMBOL(sysctl_tcp_syncookies);
97436
97437@@ -708,7 +712,10 @@ embryonic_reset:
97438 * avoid becoming vulnerable to outside attack aiming at
97439 * resetting legit local connections.
97440 */
97441- req->rsk_ops->send_reset(sk, skb);
97442+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
97443+ if (!grsec_enable_blackhole)
97444+#endif
97445+ req->rsk_ops->send_reset(sk, skb);
97446 } else if (fastopen) { /* received a valid RST pkt */
97447 reqsk_fastopen_remove(sk, req, true);
97448 tcp_reset(sk);
97449diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
97450index 8b97d71..9d7ccf5 100644
97451--- a/net/ipv4/tcp_probe.c
97452+++ b/net/ipv4/tcp_probe.c
97453@@ -238,7 +238,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
97454 if (cnt + width >= len)
97455 break;
97456
97457- if (copy_to_user(buf + cnt, tbuf, width))
97458+	if (width > sizeof(tbuf) || copy_to_user(buf + cnt, tbuf, width))
97459 return -EFAULT;
97460 cnt += width;
97461 }
97462diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
97463index 64f0354..a81b39d 100644
97464--- a/net/ipv4/tcp_timer.c
97465+++ b/net/ipv4/tcp_timer.c
97466@@ -22,6 +22,10 @@
97467 #include <linux/gfp.h>
97468 #include <net/tcp.h>
97469
97470+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
97471+extern int grsec_lastack_retries;
97472+#endif
97473+
97474 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
97475 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
97476 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
97477@@ -189,6 +193,13 @@ static int tcp_write_timeout(struct sock *sk)
97478 }
97479 }
97480
97481+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
97482+ if ((sk->sk_state == TCP_LAST_ACK) &&
97483+ (grsec_lastack_retries > 0) &&
97484+ (grsec_lastack_retries < retry_until))
97485+ retry_until = grsec_lastack_retries;
97486+#endif
97487+
97488 if (retransmits_timed_out(sk, retry_until,
97489 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
97490 /* Has it gone just too far? */
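The LAST_ACK clamp bounds how many FIN retransmissions a dying socket gets before tcp_write_timeout() kills it: grsec_lastack_retries only ever lowers retry_until (the > 0 test makes 0 mean "disabled", the < test prevents raising the stock limit), which stops remote peers from pinning sockets in LAST_ACK for the full retries2 schedule. The arithmetic:

/* Retry budget in LAST_ACK, clamped by the grsec knob. */
int retry_until = sysctl_tcp_retries2;          /* stock: e.g. 15 */

if (sk->sk_state == TCP_LAST_ACK &&
    grsec_lastack_retries > 0 &&
    grsec_lastack_retries < retry_until)
        retry_until = grsec_lastack_retries;    /* e.g. clamp to 4 */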
97491diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
97492index a7e4729..2758946 100644
97493--- a/net/ipv4/udp.c
97494+++ b/net/ipv4/udp.c
97495@@ -87,6 +87,7 @@
97496 #include <linux/types.h>
97497 #include <linux/fcntl.h>
97498 #include <linux/module.h>
97499+#include <linux/security.h>
97500 #include <linux/socket.h>
97501 #include <linux/sockios.h>
97502 #include <linux/igmp.h>
97503@@ -113,6 +114,10 @@
97504 #include <net/busy_poll.h>
97505 #include "udp_impl.h"
97506
97507+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
97508+extern int grsec_enable_blackhole;
97509+#endif
97510+
97511 struct udp_table udp_table __read_mostly;
97512 EXPORT_SYMBOL(udp_table);
97513
97514@@ -615,6 +620,9 @@ found:
97515 return s;
97516 }
97517
97518+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
97519+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
97520+
97521 /*
97522 * This routine is called by the ICMP module when it gets some
97523 * sort of error condition. If err < 0 then the socket should
97524@@ -914,9 +922,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
97525 dport = usin->sin_port;
97526 if (dport == 0)
97527 return -EINVAL;
97528+
97529+ err = gr_search_udp_sendmsg(sk, usin);
97530+ if (err)
97531+ return err;
97532 } else {
97533 if (sk->sk_state != TCP_ESTABLISHED)
97534 return -EDESTADDRREQ;
97535+
97536+ err = gr_search_udp_sendmsg(sk, NULL);
97537+ if (err)
97538+ return err;
97539+
97540 daddr = inet->inet_daddr;
97541 dport = inet->inet_dport;
97542 /* Open fast path for connected socket.
97543@@ -1163,7 +1180,7 @@ static unsigned int first_packet_length(struct sock *sk)
97544 IS_UDPLITE(sk));
97545 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
97546 IS_UDPLITE(sk));
97547- atomic_inc(&sk->sk_drops);
97548+ atomic_inc_unchecked(&sk->sk_drops);
97549 __skb_unlink(skb, rcvq);
97550 __skb_queue_tail(&list_kill, skb);
97551 }
97552@@ -1234,6 +1251,12 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
97553 int is_udplite = IS_UDPLITE(sk);
97554 bool slow;
97555
97556+ /*
97557+ * Check any passed addresses
97558+ */
97559+ if (addr_len)
97560+ *addr_len = sizeof(*sin);
97561+
97562 if (flags & MSG_ERRQUEUE)
97563 return ip_recv_error(sk, msg, len, addr_len);
97564
97565@@ -1243,6 +1266,10 @@ try_again:
97566 if (!skb)
97567 goto out;
97568
97569+ err = gr_search_udp_recvmsg(sk, skb);
97570+ if (err)
97571+ goto out_free;
97572+
97573 ulen = skb->len - sizeof(struct udphdr);
97574 copied = len;
97575 if (copied > ulen)
97576@@ -1276,7 +1303,7 @@ try_again:
97577 if (unlikely(err)) {
97578 trace_kfree_skb(skb, udp_recvmsg);
97579 if (!peeked) {
97580- atomic_inc(&sk->sk_drops);
97581+ atomic_inc_unchecked(&sk->sk_drops);
97582 UDP_INC_STATS_USER(sock_net(sk),
97583 UDP_MIB_INERRORS, is_udplite);
97584 }
97585@@ -1295,7 +1322,6 @@ try_again:
97586 sin->sin_port = udp_hdr(skb)->source;
97587 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
97588 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
97589- *addr_len = sizeof(*sin);
97590 }
97591 if (inet->cmsg_flags)
97592 ip_cmsg_recv(msg, skb);
97593@@ -1566,7 +1592,7 @@ csum_error:
97594 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
97595 drop:
97596 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
97597- atomic_inc(&sk->sk_drops);
97598+ atomic_inc_unchecked(&sk->sk_drops);
97599 kfree_skb(skb);
97600 return -1;
97601 }
97602@@ -1585,7 +1611,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
97603 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
97604
97605 if (!skb1) {
97606- atomic_inc(&sk->sk_drops);
97607+ atomic_inc_unchecked(&sk->sk_drops);
97608 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
97609 IS_UDPLITE(sk));
97610 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
97611@@ -1786,6 +1812,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
97612 goto csum_error;
97613
97614 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
97615+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
97616+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
97617+#endif
97618 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
97619
97620 /*
97621@@ -2350,7 +2379,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
97622 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
97623 0, sock_i_ino(sp),
97624 atomic_read(&sp->sk_refcnt), sp,
97625- atomic_read(&sp->sk_drops));
97626+ atomic_read_unchecked(&sp->sk_drops));
97627 }
97628
97629 int udp4_seq_show(struct seq_file *seq, void *v)
97630diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
97631index e1a6393..f634ce5 100644
97632--- a/net/ipv4/xfrm4_policy.c
97633+++ b/net/ipv4/xfrm4_policy.c
97634@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
97635 fl4->flowi4_tos = iph->tos;
97636 }
97637
97638-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
97639+static int xfrm4_garbage_collect(struct dst_ops *ops)
97640 {
97641 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
97642
97643- xfrm4_policy_afinfo.garbage_collect(net);
97644+ xfrm_garbage_collect_deferred(net);
97645 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
97646 }
97647
97648@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
97649
97650 static int __net_init xfrm4_net_init(struct net *net)
97651 {
97652- struct ctl_table *table;
97653+ ctl_table_no_const *table = NULL;
97654 struct ctl_table_header *hdr;
97655
97656- table = xfrm4_policy_table;
97657 if (!net_eq(net, &init_net)) {
97658- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
97659+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
97660 if (!table)
97661 goto err_alloc;
97662
97663 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
97664- }
97665-
97666- hdr = register_net_sysctl(net, "net/ipv4", table);
97667+ hdr = register_net_sysctl(net, "net/ipv4", table);
97668+ } else
97669+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
97670 if (!hdr)
97671 goto err_reg;
97672
97673@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
97674 return 0;
97675
97676 err_reg:
97677- if (!net_eq(net, &init_net))
97678- kfree(table);
97679+ kfree(table);
97680 err_alloc:
97681 return -ENOMEM;
97682 }
97683diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
97684index 4b6b720..272c0c5 100644
97685--- a/net/ipv6/addrconf.c
97686+++ b/net/ipv6/addrconf.c
97687@@ -589,7 +589,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
97688 idx = 0;
97689 head = &net->dev_index_head[h];
97690 rcu_read_lock();
97691- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
97692+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
97693 net->dev_base_seq;
97694 hlist_for_each_entry_rcu(dev, head, index_hlist) {
97695 if (idx < s_idx)
97696@@ -2334,7 +2334,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
97697 p.iph.ihl = 5;
97698 p.iph.protocol = IPPROTO_IPV6;
97699 p.iph.ttl = 64;
97700- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
97701+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
97702
97703 if (ops->ndo_do_ioctl) {
97704 mm_segment_t oldfs = get_fs();
97705@@ -3962,7 +3962,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
97706 s_ip_idx = ip_idx = cb->args[2];
97707
97708 rcu_read_lock();
97709- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
97710+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
97711 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
97712 idx = 0;
97713 head = &net->dev_index_head[h];
97714@@ -4569,7 +4569,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
97715 dst_free(&ifp->rt->dst);
97716 break;
97717 }
97718- atomic_inc(&net->ipv6.dev_addr_genid);
97719+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
97720 rt_genid_bump_ipv6(net);
97721 }
97722
97723@@ -4590,7 +4590,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
97724 int *valp = ctl->data;
97725 int val = *valp;
97726 loff_t pos = *ppos;
97727- struct ctl_table lctl;
97728+ ctl_table_no_const lctl;
97729 int ret;
97730
97731 /*
97732@@ -4675,7 +4675,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
97733 int *valp = ctl->data;
97734 int val = *valp;
97735 loff_t pos = *ppos;
97736- struct ctl_table lctl;
97737+ ctl_table_no_const lctl;
97738 int ret;
97739
97740 /*
97741diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
97742index 4fbdb70..f6411f2 100644
97743--- a/net/ipv6/af_inet6.c
97744+++ b/net/ipv6/af_inet6.c
97745@@ -776,7 +776,7 @@ static int __net_init inet6_net_init(struct net *net)
97746
97747 net->ipv6.sysctl.bindv6only = 0;
97748 net->ipv6.sysctl.icmpv6_time = 1*HZ;
97749- atomic_set(&net->ipv6.rt_genid, 0);
97750+ atomic_set_unchecked(&net->ipv6.rt_genid, 0);
97751
97752 err = ipv6_init_mibs(net);
97753 if (err)
97754diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
97755index 93b1aa3..e902855 100644
97756--- a/net/ipv6/datagram.c
97757+++ b/net/ipv6/datagram.c
97758@@ -906,5 +906,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
97759 0,
97760 sock_i_ino(sp),
97761 atomic_read(&sp->sk_refcnt), sp,
97762- atomic_read(&sp->sk_drops));
97763+ atomic_read_unchecked(&sp->sk_drops));
97764 }
97765diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
97766index eef8d94..cfa1852 100644
97767--- a/net/ipv6/icmp.c
97768+++ b/net/ipv6/icmp.c
97769@@ -997,7 +997,7 @@ struct ctl_table ipv6_icmp_table_template[] = {
97770
97771 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
97772 {
97773- struct ctl_table *table;
97774+ ctl_table_no_const *table;
97775
97776 table = kmemdup(ipv6_icmp_table_template,
97777 sizeof(ipv6_icmp_table_template),
97778diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
97779index 8acb286..840dd06 100644
97780--- a/net/ipv6/ip6_gre.c
97781+++ b/net/ipv6/ip6_gre.c
97782@@ -74,7 +74,7 @@ struct ip6gre_net {
97783 struct net_device *fb_tunnel_dev;
97784 };
97785
97786-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
97787+static struct rtnl_link_ops ip6gre_link_ops;
97788 static int ip6gre_tunnel_init(struct net_device *dev);
97789 static void ip6gre_tunnel_setup(struct net_device *dev);
97790 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
97791@@ -1294,7 +1294,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
97792 }
97793
97794
97795-static struct inet6_protocol ip6gre_protocol __read_mostly = {
97796+static struct inet6_protocol ip6gre_protocol = {
97797 .handler = ip6gre_rcv,
97798 .err_handler = ip6gre_err,
97799 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
97800@@ -1637,7 +1637,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
97801 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
97802 };
97803
97804-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
97805+static struct rtnl_link_ops ip6gre_link_ops = {
97806 .kind = "ip6gre",
97807 .maxtype = IFLA_GRE_MAX,
97808 .policy = ip6gre_policy,
97809@@ -1650,7 +1650,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
97810 .fill_info = ip6gre_fill_info,
97811 };
97812
97813-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
97814+static struct rtnl_link_ops ip6gre_tap_ops = {
97815 .kind = "ip6gretap",
97816 .maxtype = IFLA_GRE_MAX,
97817 .policy = ip6gre_policy,
97818diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
97819index 7881965..9cf62c4 100644
97820--- a/net/ipv6/ip6_tunnel.c
97821+++ b/net/ipv6/ip6_tunnel.c
97822@@ -89,7 +89,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
97823
97824 static int ip6_tnl_dev_init(struct net_device *dev);
97825 static void ip6_tnl_dev_setup(struct net_device *dev);
97826-static struct rtnl_link_ops ip6_link_ops __read_mostly;
97827+static struct rtnl_link_ops ip6_link_ops;
97828
97829 static int ip6_tnl_net_id __read_mostly;
97830 struct ip6_tnl_net {
97831@@ -1717,7 +1717,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
97832 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
97833 };
97834
97835-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
97836+static struct rtnl_link_ops ip6_link_ops = {
97837 .kind = "ip6tnl",
97838 .maxtype = IFLA_IPTUN_MAX,
97839 .policy = ip6_tnl_policy,
97840diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
97841index 7b42d5e..1eff693 100644
97842--- a/net/ipv6/ip6_vti.c
97843+++ b/net/ipv6/ip6_vti.c
97844@@ -63,7 +63,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
97845
97846 static int vti6_dev_init(struct net_device *dev);
97847 static void vti6_dev_setup(struct net_device *dev);
97848-static struct rtnl_link_ops vti6_link_ops __read_mostly;
97849+static struct rtnl_link_ops vti6_link_ops;
97850
97851 static int vti6_net_id __read_mostly;
97852 struct vti6_net {
97853@@ -902,7 +902,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
97854 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
97855 };
97856
97857-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
97858+static struct rtnl_link_ops vti6_link_ops = {
97859 .kind = "vti6",
97860 .maxtype = IFLA_VTI_MAX,
97861 .policy = vti6_policy,
97862diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
97863index 1c6ce31..299e566 100644
97864--- a/net/ipv6/ipv6_sockglue.c
97865+++ b/net/ipv6/ipv6_sockglue.c
97866@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
97867 if (sk->sk_type != SOCK_STREAM)
97868 return -ENOPROTOOPT;
97869
97870- msg.msg_control = optval;
97871+ msg.msg_control = (void __force_kernel *)optval;
97872 msg.msg_controllen = len;
97873 msg.msg_flags = flags;
97874
97875diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
97876index 710238f..0fd1816 100644
97877--- a/net/ipv6/netfilter/ip6_tables.c
97878+++ b/net/ipv6/netfilter/ip6_tables.c
97879@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
97880 #endif
97881
97882 static int get_info(struct net *net, void __user *user,
97883- const int *len, int compat)
97884+ int len, int compat)
97885 {
97886 char name[XT_TABLE_MAXNAMELEN];
97887 struct xt_table *t;
97888 int ret;
97889
97890- if (*len != sizeof(struct ip6t_getinfo)) {
97891- duprintf("length %u != %zu\n", *len,
97892+ if (len != sizeof(struct ip6t_getinfo)) {
97893+ duprintf("length %u != %zu\n", len,
97894 sizeof(struct ip6t_getinfo));
97895 return -EINVAL;
97896 }
97897@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
97898 info.size = private->size;
97899 strcpy(info.name, name);
97900
97901- if (copy_to_user(user, &info, *len) != 0)
97902+ if (copy_to_user(user, &info, len) != 0)
97903 ret = -EFAULT;
97904 else
97905 ret = 0;
97906@@ -1981,7 +1981,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
97907
97908 switch (cmd) {
97909 case IP6T_SO_GET_INFO:
97910- ret = get_info(sock_net(sk), user, len, 1);
97911+ ret = get_info(sock_net(sk), user, *len, 1);
97912 break;
97913 case IP6T_SO_GET_ENTRIES:
97914 ret = compat_get_entries(sock_net(sk), user, len);
97915@@ -2028,7 +2028,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
97916
97917 switch (cmd) {
97918 case IP6T_SO_GET_INFO:
97919- ret = get_info(sock_net(sk), user, len, 0);
97920+ ret = get_info(sock_net(sk), user, *len, 0);
97921 break;
97922
97923 case IP6T_SO_GET_ENTRIES:
97924diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
97925index 767ab8d..c5ec70a 100644
97926--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
97927+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
97928@@ -90,12 +90,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
97929
97930 static int nf_ct_frag6_sysctl_register(struct net *net)
97931 {
97932- struct ctl_table *table;
97933+ ctl_table_no_const *table = NULL;
97934 struct ctl_table_header *hdr;
97935
97936- table = nf_ct_frag6_sysctl_table;
97937 if (!net_eq(net, &init_net)) {
97938- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
97939+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
97940 GFP_KERNEL);
97941 if (table == NULL)
97942 goto err_alloc;
97943@@ -103,9 +102,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
97944 table[0].data = &net->nf_frag.frags.timeout;
97945 table[1].data = &net->nf_frag.frags.low_thresh;
97946 table[2].data = &net->nf_frag.frags.high_thresh;
97947- }
97948-
97949- hdr = register_net_sysctl(net, "net/netfilter", table);
97950+ hdr = register_net_sysctl(net, "net/netfilter", table);
97951+ } else
97952+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
97953 if (hdr == NULL)
97954 goto err_reg;
97955
97956@@ -113,8 +112,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
97957 return 0;
97958
97959 err_reg:
97960- if (!net_eq(net, &init_net))
97961- kfree(table);
97962+ kfree(table);
97963 err_alloc:
97964 return -ENOMEM;
97965 }
97966diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
97967index 827f795..7e28e82 100644
97968--- a/net/ipv6/output_core.c
97969+++ b/net/ipv6/output_core.c
97970@@ -9,8 +9,8 @@
97971
97972 void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
97973 {
97974- static atomic_t ipv6_fragmentation_id;
97975- int old, new;
97976+ static atomic_unchecked_t ipv6_fragmentation_id;
97977+ int id;
97978
97979 #if IS_ENABLED(CONFIG_IPV6)
97980 if (rt && !(rt->dst.flags & DST_NOPEER)) {
97981@@ -26,13 +26,10 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
97982 }
97983 }
97984 #endif
97985- do {
97986- old = atomic_read(&ipv6_fragmentation_id);
97987- new = old + 1;
97988- if (!new)
97989- new = 1;
97990- } while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
97991- fhdr->identification = htonl(new);
97992+ id = atomic_inc_return_unchecked(&ipv6_fragmentation_id);
97993+ if (!id)
97994+ id = atomic_inc_return_unchecked(&ipv6_fragmentation_id);
97995+ fhdr->identification = htonl(id);
97996 }
97997 EXPORT_SYMBOL(ipv6_select_ident);
97998
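The rewrite trades the cmpxchg loop, which remapped a wrapped counter to 1 so the fragment ID was provably nonzero, for a plain unchecked increment that simply rolls once more if it lands on 0. There is a sliver of a semantic gap: under a concurrent wrap the second increment could in principle also return 0, which the old loop excluded, but fragment IDs only need to be unlikely to collide, not strictly nonzero. Side by side:

/* Old: CAS loop mapping wrap to 1, result provably nonzero. */
do {
        old = atomic_read(&ipv6_fragmentation_id);
        new = old + 1;
        if (!new)
                new = 1;
} while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);

/* New: one lock-free increment; a zero is re-rolled once. */
id = atomic_inc_return_unchecked(&ipv6_fragmentation_id);
if (!id)
        id = atomic_inc_return_unchecked(&ipv6_fragmentation_id);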
97999diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
98000index a83243c..a1ca589 100644
98001--- a/net/ipv6/ping.c
98002+++ b/net/ipv6/ping.c
98003@@ -246,6 +246,22 @@ static struct pernet_operations ping_v6_net_ops = {
98004 };
98005 #endif
98006
98007+static struct pingv6_ops real_pingv6_ops = {
98008+ .ipv6_recv_error = ipv6_recv_error,
98009+ .ip6_datagram_recv_ctl = ip6_datagram_recv_ctl,
98010+ .icmpv6_err_convert = icmpv6_err_convert,
98011+ .ipv6_icmp_error = ipv6_icmp_error,
98012+ .ipv6_chk_addr = ipv6_chk_addr,
98013+};
98014+
98015+static struct pingv6_ops dummy_pingv6_ops = {
98016+ .ipv6_recv_error = dummy_ipv6_recv_error,
98017+ .ip6_datagram_recv_ctl = dummy_ip6_datagram_recv_ctl,
98018+ .icmpv6_err_convert = dummy_icmpv6_err_convert,
98019+ .ipv6_icmp_error = dummy_ipv6_icmp_error,
98020+ .ipv6_chk_addr = dummy_ipv6_chk_addr,
98021+};
98022+
98023 int __init pingv6_init(void)
98024 {
98025 #ifdef CONFIG_PROC_FS
98026@@ -253,11 +269,7 @@ int __init pingv6_init(void)
98027 if (ret)
98028 return ret;
98029 #endif
98030- pingv6_ops.ipv6_recv_error = ipv6_recv_error;
98031- pingv6_ops.ip6_datagram_recv_ctl = ip6_datagram_recv_ctl;
98032- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
98033- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
98034- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
98035+ pingv6_ops = &real_pingv6_ops;
98036 return inet6_register_protosw(&pingv6_protosw);
98037 }
98038
98039@@ -266,11 +278,7 @@ int __init pingv6_init(void)
98040 */
98041 void pingv6_exit(void)
98042 {
98043- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
98044- pingv6_ops.ip6_datagram_recv_ctl = dummy_ip6_datagram_recv_ctl;
98045- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
98046- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
98047- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
98048+ pingv6_ops = &dummy_pingv6_ops;
98049 #ifdef CONFIG_PROC_FS
98050 unregister_pernet_subsys(&ping_v6_net_ops);
98051 #endif
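Turning pingv6_ops from a struct whose five members were patched one by one into a pointer flipped between real_pingv6_ops and dummy_pingv6_ops means both tables are fully initialized at compile time (so they can be constified), and init/exit become a single pointer store instead of a window where the table is half real, half dummy. A self-contained sketch of the idiom with illustrative names:

static int real_recv_error(void)  { return 0; }
static int real_chk_addr(void)    { return 1; }
static int dummy_recv_error(void) { return -1; }        /* module absent */
static int dummy_chk_addr(void)   { return 0; }

struct pingv6_ops_like {
        int (*recv_error)(void);
        int (*chk_addr)(void);
};

static struct pingv6_ops_like real_ops  = { real_recv_error,  real_chk_addr };
static struct pingv6_ops_like dummy_ops = { dummy_recv_error, dummy_chk_addr };

/* Consumers always indirect through the pointer. */
static struct pingv6_ops_like *ops_ptr = &dummy_ops;

static void mod_init_sketch(void) { ops_ptr = &real_ops;  }
static void mod_exit_sketch(void) { ops_ptr = &dummy_ops; }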
98052diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
98053index b6bb87e..06cc9ed 100644
98054--- a/net/ipv6/raw.c
98055+++ b/net/ipv6/raw.c
98056@@ -384,7 +384,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
98057 {
98058 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
98059 skb_checksum_complete(skb)) {
98060- atomic_inc(&sk->sk_drops);
98061+ atomic_inc_unchecked(&sk->sk_drops);
98062 kfree_skb(skb);
98063 return NET_RX_DROP;
98064 }
98065@@ -412,7 +412,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
98066 struct raw6_sock *rp = raw6_sk(sk);
98067
98068 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
98069- atomic_inc(&sk->sk_drops);
98070+ atomic_inc_unchecked(&sk->sk_drops);
98071 kfree_skb(skb);
98072 return NET_RX_DROP;
98073 }
98074@@ -436,7 +436,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
98075
98076 if (inet->hdrincl) {
98077 if (skb_checksum_complete(skb)) {
98078- atomic_inc(&sk->sk_drops);
98079+ atomic_inc_unchecked(&sk->sk_drops);
98080 kfree_skb(skb);
98081 return NET_RX_DROP;
98082 }
98083@@ -465,6 +465,9 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
98084 if (flags & MSG_OOB)
98085 return -EOPNOTSUPP;
98086
98087+ if (addr_len)
98088+	if (addr_len)
98089+		*addr_len = sizeof(*sin6);
98089+
98090 if (flags & MSG_ERRQUEUE)
98091 return ipv6_recv_error(sk, msg, len, addr_len);
98092
98093@@ -503,7 +506,6 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
98094 sin6->sin6_flowinfo = 0;
98095 sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
98096 IP6CB(skb)->iif);
98097- *addr_len = sizeof(*sin6);
98098 }
98099
98100 sock_recv_ts_and_drops(msg, sk, skb);
98101@@ -606,7 +608,7 @@ out:
98102 return err;
98103 }
98104
98105-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
98106+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
98107 struct flowi6 *fl6, struct dst_entry **dstp,
98108 unsigned int flags)
98109 {
98110@@ -918,12 +920,15 @@ do_confirm:
98111 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
98112 char __user *optval, int optlen)
98113 {
98114+ struct icmp6_filter filter;
98115+
98116 switch (optname) {
98117 case ICMPV6_FILTER:
98118 if (optlen > sizeof(struct icmp6_filter))
98119 optlen = sizeof(struct icmp6_filter);
98120- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
98121+ if (copy_from_user(&filter, optval, optlen))
98122 return -EFAULT;
98123+ raw6_sk(sk)->filter = filter;
98124 return 0;
98125 default:
98126 return -ENOPROTOOPT;
98127@@ -936,6 +941,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
98128 char __user *optval, int __user *optlen)
98129 {
98130 int len;
98131+ struct icmp6_filter filter;
98132
98133 switch (optname) {
98134 case ICMPV6_FILTER:
98135@@ -947,7 +953,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
98136 len = sizeof(struct icmp6_filter);
98137 if (put_user(len, optlen))
98138 return -EFAULT;
98139- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
98140+ filter = raw6_sk(sk)->filter;
98141+	if (len > sizeof(filter) || copy_to_user(optval, &filter, len))
98142 return -EFAULT;
98143 return 0;
98144 default:
98145diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
98146index cc85a9b..526a133 100644
98147--- a/net/ipv6/reassembly.c
98148+++ b/net/ipv6/reassembly.c
98149@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
98150
98151 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
98152 {
98153- struct ctl_table *table;
98154+ ctl_table_no_const *table = NULL;
98155 struct ctl_table_header *hdr;
98156
98157- table = ip6_frags_ns_ctl_table;
98158 if (!net_eq(net, &init_net)) {
98159- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
98160+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
98161 if (table == NULL)
98162 goto err_alloc;
98163
98164@@ -642,9 +641,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
98165 /* Don't export sysctls to unprivileged users */
98166 if (net->user_ns != &init_user_ns)
98167 table[0].procname = NULL;
98168- }
98169+ hdr = register_net_sysctl(net, "net/ipv6", table);
98170+ } else
98171+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
98172
98173- hdr = register_net_sysctl(net, "net/ipv6", table);
98174 if (hdr == NULL)
98175 goto err_reg;
98176
98177@@ -652,8 +652,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
98178 return 0;
98179
98180 err_reg:
98181- if (!net_eq(net, &init_net))
98182- kfree(table);
98183+ kfree(table);
98184 err_alloc:
98185 return -ENOMEM;
98186 }
98187diff --git a/net/ipv6/route.c b/net/ipv6/route.c
98188index 4b4944c..4580b91 100644
98189--- a/net/ipv6/route.c
98190+++ b/net/ipv6/route.c
98191@@ -2954,7 +2954,7 @@ struct ctl_table ipv6_route_table_template[] = {
98192
98193 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
98194 {
98195- struct ctl_table *table;
98196+ ctl_table_no_const *table;
98197
98198 table = kmemdup(ipv6_route_table_template,
98199 sizeof(ipv6_route_table_template),
98200diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
98201index d3005b3..b36df4a 100644
98202--- a/net/ipv6/sit.c
98203+++ b/net/ipv6/sit.c
98204@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
98205 static void ipip6_dev_free(struct net_device *dev);
98206 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
98207 __be32 *v4dst);
98208-static struct rtnl_link_ops sit_link_ops __read_mostly;
98209+static struct rtnl_link_ops sit_link_ops;
98210
98211 static int sit_net_id __read_mostly;
98212 struct sit_net {
98213@@ -1664,7 +1664,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
98214 unregister_netdevice_queue(dev, head);
98215 }
98216
98217-static struct rtnl_link_ops sit_link_ops __read_mostly = {
98218+static struct rtnl_link_ops sit_link_ops = {
98219 .kind = "sit",
98220 .maxtype = IFLA_IPTUN_MAX,
98221 .policy = ipip6_policy,
98222diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
98223index 107b2f1..72741a9 100644
98224--- a/net/ipv6/sysctl_net_ipv6.c
98225+++ b/net/ipv6/sysctl_net_ipv6.c
98226@@ -40,7 +40,7 @@ static struct ctl_table ipv6_rotable[] = {
98227
98228 static int __net_init ipv6_sysctl_net_init(struct net *net)
98229 {
98230- struct ctl_table *ipv6_table;
98231+ ctl_table_no_const *ipv6_table;
98232 struct ctl_table *ipv6_route_table;
98233 struct ctl_table *ipv6_icmp_table;
98234 int err;
98235diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
98236index f67033b..6f974fc 100644
98237--- a/net/ipv6/tcp_ipv6.c
98238+++ b/net/ipv6/tcp_ipv6.c
98239@@ -104,6 +104,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
98240 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
98241 }
98242
98243+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98244+extern int grsec_enable_blackhole;
98245+#endif
98246+
98247 static void tcp_v6_hash(struct sock *sk)
98248 {
98249 if (sk->sk_state != TCP_CLOSE) {
98250@@ -1397,6 +1401,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
98251 return 0;
98252
98253 reset:
98254+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98255+ if (!grsec_enable_blackhole)
98256+#endif
98257 tcp_v6_send_reset(sk, skb);
98258 discard:
98259 if (opt_skb)
98260@@ -1479,12 +1486,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
98261 TCP_SKB_CB(skb)->sacked = 0;
98262
98263 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
98264- if (!sk)
98265+ if (!sk) {
98266+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98267+ ret = 1;
98268+#endif
98269 goto no_tcp_socket;
98270+ }
98271
98272 process:
98273- if (sk->sk_state == TCP_TIME_WAIT)
98274+ if (sk->sk_state == TCP_TIME_WAIT) {
98275+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98276+ ret = 2;
98277+#endif
98278 goto do_time_wait;
98279+ }
98280
98281 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
98282 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
98283@@ -1536,6 +1551,10 @@ csum_error:
98284 bad_packet:
98285 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
98286 } else {
98287+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98288+ if (!grsec_enable_blackhole || (ret == 1 &&
98289+ (skb->dev->flags & IFF_LOOPBACK)))
98290+#endif
98291 tcp_v6_send_reset(NULL, skb);
98292 }
98293
98294diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
98295index 089c741..cfee117 100644
98296--- a/net/ipv6/udp.c
98297+++ b/net/ipv6/udp.c
98298@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
98299 udp_ipv6_hash_secret + net_hash_mix(net));
98300 }
98301
98302+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98303+extern int grsec_enable_blackhole;
98304+#endif
98305+
98306 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
98307 {
98308 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
98309@@ -392,6 +396,9 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
98310 int is_udp4;
98311 bool slow;
98312
98313+ if (addr_len)
98314+ *addr_len = sizeof(struct sockaddr_in6);
98315+
98316 if (flags & MSG_ERRQUEUE)
98317 return ipv6_recv_error(sk, msg, len, addr_len);
98318
98319@@ -435,7 +442,7 @@ try_again:
98320 if (unlikely(err)) {
98321 trace_kfree_skb(skb, udpv6_recvmsg);
98322 if (!peeked) {
98323- atomic_inc(&sk->sk_drops);
98324+ atomic_inc_unchecked(&sk->sk_drops);
98325 if (is_udp4)
98326 UDP_INC_STATS_USER(sock_net(sk),
98327 UDP_MIB_INERRORS,
98328@@ -477,7 +484,7 @@ try_again:
98329 ipv6_iface_scope_id(&sin6->sin6_addr,
98330 IP6CB(skb)->iif);
98331 }
98332- *addr_len = sizeof(*sin6);
98333+
98334 }
98335 if (is_udp4) {
98336 if (inet->cmsg_flags)
98337@@ -685,7 +692,7 @@ csum_error:
98338 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
98339 drop:
98340 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
98341- atomic_inc(&sk->sk_drops);
98342+ atomic_inc_unchecked(&sk->sk_drops);
98343 kfree_skb(skb);
98344 return -1;
98345 }
98346@@ -742,7 +749,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
98347 if (likely(skb1 == NULL))
98348 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
98349 if (!skb1) {
98350- atomic_inc(&sk->sk_drops);
98351+ atomic_inc_unchecked(&sk->sk_drops);
98352 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
98353 IS_UDPLITE(sk));
98354 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
98355@@ -881,6 +888,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
98356 goto csum_error;
98357
98358 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
98359+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98360+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
98361+#endif
98362 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
98363
98364 kfree_skb(skb);
98365diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
98366index 5f8e128..865d38e 100644
98367--- a/net/ipv6/xfrm6_policy.c
98368+++ b/net/ipv6/xfrm6_policy.c
98369@@ -212,11 +212,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
98370 }
98371 }
98372
98373-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
98374+static int xfrm6_garbage_collect(struct dst_ops *ops)
98375 {
98376 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
98377
98378- xfrm6_policy_afinfo.garbage_collect(net);
98379+ xfrm_garbage_collect_deferred(net);
98380 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
98381 }
98382
98383@@ -329,19 +329,19 @@ static struct ctl_table xfrm6_policy_table[] = {
98384
98385 static int __net_init xfrm6_net_init(struct net *net)
98386 {
98387- struct ctl_table *table;
98388+ ctl_table_no_const *table = NULL;
98389 struct ctl_table_header *hdr;
98390
98391- table = xfrm6_policy_table;
98392 if (!net_eq(net, &init_net)) {
98393- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
98394+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
98395 if (!table)
98396 goto err_alloc;
98397
98398 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
98399- }
98400+ hdr = register_net_sysctl(net, "net/ipv6", table);
98401+ } else
98402+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
98403
98404- hdr = register_net_sysctl(net, "net/ipv6", table);
98405 if (!hdr)
98406 goto err_reg;
98407
98408@@ -349,8 +349,7 @@ static int __net_init xfrm6_net_init(struct net *net)
98409 return 0;
98410
98411 err_reg:
98412- if (!net_eq(net, &init_net))
98413- kfree(table);
98414+ kfree(table);
98415 err_alloc:
98416 return -ENOMEM;
98417 }
98418diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
98419index 41ac7938..75e3bb1 100644
98420--- a/net/irda/ircomm/ircomm_tty.c
98421+++ b/net/irda/ircomm/ircomm_tty.c
98422@@ -319,11 +319,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
98423 add_wait_queue(&port->open_wait, &wait);
98424
98425 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
98426- __FILE__, __LINE__, tty->driver->name, port->count);
98427+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
98428
98429 spin_lock_irqsave(&port->lock, flags);
98430 if (!tty_hung_up_p(filp))
98431- port->count--;
98432+ atomic_dec(&port->count);
98433 port->blocked_open++;
98434 spin_unlock_irqrestore(&port->lock, flags);
98435
98436@@ -358,7 +358,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
98437 }
98438
98439 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
98440- __FILE__, __LINE__, tty->driver->name, port->count);
98441+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
98442
98443 schedule();
98444 }
98445@@ -368,12 +368,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
98446
98447 spin_lock_irqsave(&port->lock, flags);
98448 if (!tty_hung_up_p(filp))
98449- port->count++;
98450+ atomic_inc(&port->count);
98451 port->blocked_open--;
98452 spin_unlock_irqrestore(&port->lock, flags);
98453
98454 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
98455- __FILE__, __LINE__, tty->driver->name, port->count);
98456+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
98457
98458 if (!retval)
98459 port->flags |= ASYNC_NORMAL_ACTIVE;
98460@@ -447,12 +447,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
98461
98462 /* ++ is not atomic, so this should be protected - Jean II */
98463 spin_lock_irqsave(&self->port.lock, flags);
98464- self->port.count++;
98465+ atomic_inc(&self->port.count);
98466 spin_unlock_irqrestore(&self->port.lock, flags);
98467 tty_port_tty_set(&self->port, tty);
98468
98469 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
98470- self->line, self->port.count);
98471+ self->line, atomic_read(&self->port.count));
98472
98473 /* Not really used by us, but lets do it anyway */
98474 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
98475@@ -989,7 +989,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
98476 tty_kref_put(port->tty);
98477 }
98478 port->tty = NULL;
98479- port->count = 0;
98480+ atomic_set(&port->count, 0);
98481 spin_unlock_irqrestore(&port->lock, flags);
98482
98483 wake_up_interruptible(&port->open_wait);
98484@@ -1346,7 +1346,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
98485 seq_putc(m, '\n');
98486
98487 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
98488- seq_printf(m, "Open count: %d\n", self->port.count);
98489+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
98490 seq_printf(m, "Max data size: %d\n", self->max_data_size);
98491 seq_printf(m, "Max header size: %d\n", self->max_header_size);
98492
98493diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
98494index c4b7218..3e83259 100644
98495--- a/net/iucv/af_iucv.c
98496+++ b/net/iucv/af_iucv.c
98497@@ -773,10 +773,10 @@ static int iucv_sock_autobind(struct sock *sk)
98498
98499 write_lock_bh(&iucv_sk_list.lock);
98500
98501- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
98502+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
98503 while (__iucv_get_sock_by_name(name)) {
98504 sprintf(name, "%08x",
98505- atomic_inc_return(&iucv_sk_list.autobind_name));
98506+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
98507 }
98508
98509 write_unlock_bh(&iucv_sk_list.lock);
98510diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
98511index cd5b8ec..f205e6b 100644
98512--- a/net/iucv/iucv.c
98513+++ b/net/iucv/iucv.c
98514@@ -690,7 +690,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
98515 return NOTIFY_OK;
98516 }
98517
98518-static struct notifier_block __refdata iucv_cpu_notifier = {
98519+static struct notifier_block iucv_cpu_notifier = {
98520 .notifier_call = iucv_cpu_notify,
98521 };
98522
98523diff --git a/net/key/af_key.c b/net/key/af_key.c
98524index 545f047..9757a9d 100644
98525--- a/net/key/af_key.c
98526+++ b/net/key/af_key.c
98527@@ -3041,10 +3041,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
98528 static u32 get_acqseq(void)
98529 {
98530 u32 res;
98531- static atomic_t acqseq;
98532+ static atomic_unchecked_t acqseq;
98533
98534 do {
98535- res = atomic_inc_return(&acqseq);
98536+ res = atomic_inc_return_unchecked(&acqseq);
98537 } while (!res);
98538 return res;
98539 }
98540diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
98541index da1a1ce..571db8d 100644
98542--- a/net/l2tp/l2tp_ip.c
98543+++ b/net/l2tp/l2tp_ip.c
98544@@ -518,6 +518,9 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
98545 if (flags & MSG_OOB)
98546 goto out;
98547
98548+ if (addr_len)
98549+ *addr_len = sizeof(*sin);
98550+
98551 skb = skb_recv_datagram(sk, flags, noblock, &err);
98552 if (!skb)
98553 goto out;
98554@@ -540,7 +543,6 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
98555 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
98556 sin->sin_port = 0;
98557 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
98558- *addr_len = sizeof(*sin);
98559 }
98560 if (inet->cmsg_flags)
98561 ip_cmsg_recv(msg, skb);
98562diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
98563index 364ce0c..3ebb5a4 100644
98564--- a/net/mac80211/cfg.c
98565+++ b/net/mac80211/cfg.c
98566@@ -826,7 +826,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
98567 ret = ieee80211_vif_use_channel(sdata, chandef,
98568 IEEE80211_CHANCTX_EXCLUSIVE);
98569 }
98570- } else if (local->open_count == local->monitors) {
98571+ } else if (local_read(&local->open_count) == local->monitors) {
98572 local->_oper_chandef = *chandef;
98573 ieee80211_hw_config(local, 0);
98574 }
98575@@ -3308,7 +3308,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
98576 else
98577 local->probe_req_reg--;
98578
98579- if (!local->open_count)
98580+ if (!local_read(&local->open_count))
98581 break;
98582
98583 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
98584@@ -3771,8 +3771,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
98585 if (chanctx_conf) {
98586 *chandef = chanctx_conf->def;
98587 ret = 0;
98588- } else if (local->open_count > 0 &&
98589- local->open_count == local->monitors &&
98590+ } else if (local_read(&local->open_count) > 0 &&
98591+ local_read(&local->open_count) == local->monitors &&
98592 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
98593 if (local->use_chanctx)
98594 *chandef = local->monitor_chandef;
98595diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
98596index 4aea4e7..9e698d1 100644
98597--- a/net/mac80211/ieee80211_i.h
98598+++ b/net/mac80211/ieee80211_i.h
98599@@ -28,6 +28,7 @@
98600 #include <net/ieee80211_radiotap.h>
98601 #include <net/cfg80211.h>
98602 #include <net/mac80211.h>
98603+#include <asm/local.h>
98604 #include "key.h"
98605 #include "sta_info.h"
98606 #include "debug.h"
98607@@ -961,7 +962,7 @@ struct ieee80211_local {
98608 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
98609 spinlock_t queue_stop_reason_lock;
98610
98611- int open_count;
98612+ local_t open_count;
98613 int monitors, cooked_mntrs;
98614 /* number of interfaces with corresponding FIF_ flags */
98615 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
98616diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
98617index a075791..1d0027f 100644
98618--- a/net/mac80211/iface.c
98619+++ b/net/mac80211/iface.c
98620@@ -519,7 +519,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
98621 break;
98622 }
98623
98624- if (local->open_count == 0) {
98625+ if (local_read(&local->open_count) == 0) {
98626 res = drv_start(local);
98627 if (res)
98628 goto err_del_bss;
98629@@ -566,7 +566,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
98630 res = drv_add_interface(local, sdata);
98631 if (res)
98632 goto err_stop;
98633- } else if (local->monitors == 0 && local->open_count == 0) {
98634+ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
98635 res = ieee80211_add_virtual_monitor(local);
98636 if (res)
98637 goto err_stop;
98638@@ -675,7 +675,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
98639 atomic_inc(&local->iff_promiscs);
98640
98641 if (coming_up)
98642- local->open_count++;
98643+ local_inc(&local->open_count);
98644
98645 if (hw_reconf_flags)
98646 ieee80211_hw_config(local, hw_reconf_flags);
98647@@ -713,7 +713,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
98648 err_del_interface:
98649 drv_remove_interface(local, sdata);
98650 err_stop:
98651- if (!local->open_count)
98652+ if (!local_read(&local->open_count))
98653 drv_stop(local);
98654 err_del_bss:
98655 sdata->bss = NULL;
98656@@ -856,7 +856,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
98657 }
98658
98659 if (going_down)
98660- local->open_count--;
98661+ local_dec(&local->open_count);
98662
98663 switch (sdata->vif.type) {
98664 case NL80211_IFTYPE_AP_VLAN:
98665@@ -923,7 +923,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
98666 }
98667 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
98668
98669- if (local->open_count == 0)
98670+ if (local_read(&local->open_count) == 0)
98671 ieee80211_clear_tx_pending(local);
98672
98673 /*
98674@@ -963,7 +963,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
98675
98676 ieee80211_recalc_ps(local, -1);
98677
98678- if (local->open_count == 0) {
98679+ if (local_read(&local->open_count) == 0) {
98680 ieee80211_stop_device(local);
98681
98682 /* no reconfiguring after stop! */
98683@@ -974,7 +974,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
98684 ieee80211_configure_filter(local);
98685 ieee80211_hw_config(local, hw_reconf_flags);
98686
98687- if (local->monitors == local->open_count)
98688+ if (local->monitors == local_read(&local->open_count))
98689 ieee80211_add_virtual_monitor(local);
98690 }
98691
98692diff --git a/net/mac80211/main.c b/net/mac80211/main.c
98693index 7d1c3ac..b62dd29 100644
98694--- a/net/mac80211/main.c
98695+++ b/net/mac80211/main.c
98696@@ -172,7 +172,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
98697 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
98698 IEEE80211_CONF_CHANGE_POWER);
98699
98700- if (changed && local->open_count) {
98701+ if (changed && local_read(&local->open_count)) {
98702 ret = drv_config(local, changed);
98703 /*
98704 * Goal:
98705diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
98706index 3401262..d5cd68d 100644
98707--- a/net/mac80211/pm.c
98708+++ b/net/mac80211/pm.c
98709@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
98710 struct ieee80211_sub_if_data *sdata;
98711 struct sta_info *sta;
98712
98713- if (!local->open_count)
98714+ if (!local_read(&local->open_count))
98715 goto suspend;
98716
98717 ieee80211_scan_cancel(local);
98718@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
98719 cancel_work_sync(&local->dynamic_ps_enable_work);
98720 del_timer_sync(&local->dynamic_ps_timer);
98721
98722- local->wowlan = wowlan && local->open_count;
98723+ local->wowlan = wowlan && local_read(&local->open_count);
98724 if (local->wowlan) {
98725 int err = drv_suspend(local, wowlan);
98726 if (err < 0) {
98727@@ -116,7 +116,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
98728 WARN_ON(!list_empty(&local->chanctx_list));
98729
98730 /* stop hardware - this must stop RX */
98731- if (local->open_count)
98732+ if (local_read(&local->open_count))
98733 ieee80211_stop_device(local);
98734
98735 suspend:
98736diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
98737index 22b223f..ab70070 100644
98738--- a/net/mac80211/rate.c
98739+++ b/net/mac80211/rate.c
98740@@ -734,7 +734,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
98741
98742 ASSERT_RTNL();
98743
98744- if (local->open_count)
98745+ if (local_read(&local->open_count))
98746 return -EBUSY;
98747
98748 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
98749diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
98750index 6ff1346..936ca9a 100644
98751--- a/net/mac80211/rc80211_pid_debugfs.c
98752+++ b/net/mac80211/rc80211_pid_debugfs.c
98753@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
98754
98755 spin_unlock_irqrestore(&events->lock, status);
98756
98757- if (copy_to_user(buf, pb, p))
98758+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
98759 return -EFAULT;
98760
98761 return p;
98762diff --git a/net/mac80211/util.c b/net/mac80211/util.c
98763index 9f9b9bd..d6fcf59 100644
98764--- a/net/mac80211/util.c
98765+++ b/net/mac80211/util.c
98766@@ -1474,7 +1474,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
98767 }
98768 #endif
98769 /* everything else happens only if HW was up & running */
98770- if (!local->open_count)
98771+ if (!local_read(&local->open_count))
98772 goto wake_up;
98773
98774 /*
98775@@ -1699,7 +1699,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
98776 local->in_reconfig = false;
98777 barrier();
98778
98779- if (local->monitors == local->open_count && local->monitors > 0)
98780+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
98781 ieee80211_add_virtual_monitor(local);
98782
98783 /*
98784diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
98785index c3398cd..98ad3b4 100644
98786--- a/net/netfilter/Kconfig
98787+++ b/net/netfilter/Kconfig
98788@@ -1002,6 +1002,16 @@ config NETFILTER_XT_MATCH_ESP
98789
98790 To compile it as a module, choose M here. If unsure, say N.
98791
98792+config NETFILTER_XT_MATCH_GRADM
98793+ tristate '"gradm" match support'
98794+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
98795+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
98796+ ---help---
98797+ The gradm match allows matching on whether grsecurity RBAC is enabled.
98798+ It is useful when iptables rules are applied early during boot to
98799+ prevent connections to the machine (except from a trusted host)
98800+ while the RBAC system is still disabled.
98801+
98802 config NETFILTER_XT_MATCH_HASHLIMIT
98803 tristate '"hashlimit" match support'
98804 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
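For illustration only, a boot-time firewall rule using the gradm match could
look like the sketch below; the --disabled option name and the trusted-host
address are assumptions about the gradm userland xtables extension, not
something this patch itself defines:

	# Reject all inbound traffic except from one trusted host until
	# the RBAC policy has been enabled with gradm.
	iptables -A INPUT ! -s 192.168.1.10 -m gradm --disabled -j DROP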
98805diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
98806index 394483b..ed51f2d 100644
98807--- a/net/netfilter/Makefile
98808+++ b/net/netfilter/Makefile
98809@@ -130,6 +130,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
98810 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
98811 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
98812 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
98813+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
98814 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
98815 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
98816 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
98817diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
98818index bac7e01..1d7a31a 100644
98819--- a/net/netfilter/ipset/ip_set_core.c
98820+++ b/net/netfilter/ipset/ip_set_core.c
98821@@ -1950,7 +1950,7 @@ done:
98822 return ret;
98823 }
98824
98825-static struct nf_sockopt_ops so_set __read_mostly = {
98826+static struct nf_sockopt_ops so_set = {
98827 .pf = PF_INET,
98828 .get_optmin = SO_IP_SET,
98829 .get_optmax = SO_IP_SET + 1,
98830diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
98831index 4c8e5c0..5a79b4d 100644
98832--- a/net/netfilter/ipvs/ip_vs_conn.c
98833+++ b/net/netfilter/ipvs/ip_vs_conn.c
98834@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
98835 /* Increase the refcnt counter of the dest */
98836 ip_vs_dest_hold(dest);
98837
98838- conn_flags = atomic_read(&dest->conn_flags);
98839+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
98840 if (cp->protocol != IPPROTO_UDP)
98841 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
98842 flags = cp->flags;
98843@@ -900,7 +900,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
98844
98845 cp->control = NULL;
98846 atomic_set(&cp->n_control, 0);
98847- atomic_set(&cp->in_pkts, 0);
98848+ atomic_set_unchecked(&cp->in_pkts, 0);
98849
98850 cp->packet_xmit = NULL;
98851 cp->app = NULL;
98852@@ -1188,7 +1188,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
98853
98854 /* Don't drop the entry if its number of incoming packets is not
98855 located in [0, 8] */
98856- i = atomic_read(&cp->in_pkts);
98857+ i = atomic_read_unchecked(&cp->in_pkts);
98858 if (i > 8 || i < 0) return 0;
98859
98860 if (!todrop_rate[i]) return 0;
98861diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
98862index 4f26ee4..6a9d7c3 100644
98863--- a/net/netfilter/ipvs/ip_vs_core.c
98864+++ b/net/netfilter/ipvs/ip_vs_core.c
98865@@ -567,7 +567,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
98866 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
98867 /* do not touch skb anymore */
98868
98869- atomic_inc(&cp->in_pkts);
98870+ atomic_inc_unchecked(&cp->in_pkts);
98871 ip_vs_conn_put(cp);
98872 return ret;
98873 }
98874@@ -1706,7 +1706,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
98875 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
98876 pkts = sysctl_sync_threshold(ipvs);
98877 else
98878- pkts = atomic_add_return(1, &cp->in_pkts);
98879+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
98880
98881 if (ipvs->sync_state & IP_VS_STATE_MASTER)
98882 ip_vs_sync_conn(net, cp, pkts);
98883diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
98884index 35be035..dad174b 100644
98885--- a/net/netfilter/ipvs/ip_vs_ctl.c
98886+++ b/net/netfilter/ipvs/ip_vs_ctl.c
98887@@ -794,7 +794,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
98888 */
98889 ip_vs_rs_hash(ipvs, dest);
98890 }
98891- atomic_set(&dest->conn_flags, conn_flags);
98892+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
98893
98894 /* bind the service */
98895 old_svc = rcu_dereference_protected(dest->svc, 1);
98896@@ -1654,7 +1654,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
98897 * align with netns init in ip_vs_control_net_init()
98898 */
98899
98900-static struct ctl_table vs_vars[] = {
98901+static ctl_table_no_const vs_vars[] __read_only = {
98902 {
98903 .procname = "amemthresh",
98904 .maxlen = sizeof(int),
98905@@ -2075,7 +2075,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
98906 " %-7s %-6d %-10d %-10d\n",
98907 &dest->addr.in6,
98908 ntohs(dest->port),
98909- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
98910+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
98911 atomic_read(&dest->weight),
98912 atomic_read(&dest->activeconns),
98913 atomic_read(&dest->inactconns));
98914@@ -2086,7 +2086,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
98915 "%-7s %-6d %-10d %-10d\n",
98916 ntohl(dest->addr.ip),
98917 ntohs(dest->port),
98918- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
98919+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
98920 atomic_read(&dest->weight),
98921 atomic_read(&dest->activeconns),
98922 atomic_read(&dest->inactconns));
98923@@ -2564,7 +2564,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
98924
98925 entry.addr = dest->addr.ip;
98926 entry.port = dest->port;
98927- entry.conn_flags = atomic_read(&dest->conn_flags);
98928+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
98929 entry.weight = atomic_read(&dest->weight);
98930 entry.u_threshold = dest->u_threshold;
98931 entry.l_threshold = dest->l_threshold;
98932@@ -3107,7 +3107,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
98933 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
98934 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
98935 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
98936- (atomic_read(&dest->conn_flags) &
98937+ (atomic_read_unchecked(&dest->conn_flags) &
98938 IP_VS_CONN_F_FWD_MASK)) ||
98939 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
98940 atomic_read(&dest->weight)) ||
98941@@ -3697,7 +3697,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
98942 {
98943 int idx;
98944 struct netns_ipvs *ipvs = net_ipvs(net);
98945- struct ctl_table *tbl;
98946+ ctl_table_no_const *tbl;
98947
98948 atomic_set(&ipvs->dropentry, 0);
98949 spin_lock_init(&ipvs->dropentry_lock);
98950diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
98951index ca056a3..9cf01ef 100644
98952--- a/net/netfilter/ipvs/ip_vs_lblc.c
98953+++ b/net/netfilter/ipvs/ip_vs_lblc.c
98954@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
98955 * IPVS LBLC sysctl table
98956 */
98957 #ifdef CONFIG_SYSCTL
98958-static struct ctl_table vs_vars_table[] = {
98959+static ctl_table_no_const vs_vars_table[] __read_only = {
98960 {
98961 .procname = "lblc_expiration",
98962 .data = NULL,
98963diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
98964index 3f21a2f..a112e85 100644
98965--- a/net/netfilter/ipvs/ip_vs_lblcr.c
98966+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
98967@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
98968 * IPVS LBLCR sysctl table
98969 */
98970
98971-static struct ctl_table vs_vars_table[] = {
98972+static ctl_table_no_const vs_vars_table[] __read_only = {
98973 {
98974 .procname = "lblcr_expiration",
98975 .data = NULL,
98976diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
98977index f63c238..1b87f8a 100644
98978--- a/net/netfilter/ipvs/ip_vs_sync.c
98979+++ b/net/netfilter/ipvs/ip_vs_sync.c
98980@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
98981 cp = cp->control;
98982 if (cp) {
98983 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
98984- pkts = atomic_add_return(1, &cp->in_pkts);
98985+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
98986 else
98987 pkts = sysctl_sync_threshold(ipvs);
98988 ip_vs_sync_conn(net, cp->control, pkts);
98989@@ -771,7 +771,7 @@ control:
98990 if (!cp)
98991 return;
98992 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
98993- pkts = atomic_add_return(1, &cp->in_pkts);
98994+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
98995 else
98996 pkts = sysctl_sync_threshold(ipvs);
98997 goto sloop;
98998@@ -895,7 +895,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
98999
99000 if (opt)
99001 memcpy(&cp->in_seq, opt, sizeof(*opt));
99002- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
99003+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
99004 cp->state = state;
99005 cp->old_state = cp->state;
99006 /*
99007diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
99008index c47444e..b0961c6 100644
99009--- a/net/netfilter/ipvs/ip_vs_xmit.c
99010+++ b/net/netfilter/ipvs/ip_vs_xmit.c
99011@@ -1102,7 +1102,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
99012 else
99013 rc = NF_ACCEPT;
99014 /* do not touch skb anymore */
99015- atomic_inc(&cp->in_pkts);
99016+ atomic_inc_unchecked(&cp->in_pkts);
99017 goto out;
99018 }
99019
99020@@ -1194,7 +1194,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
99021 else
99022 rc = NF_ACCEPT;
99023 /* do not touch skb anymore */
99024- atomic_inc(&cp->in_pkts);
99025+ atomic_inc_unchecked(&cp->in_pkts);
99026 goto out;
99027 }
99028
99029diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
99030index a4b5e2a..13b1de3 100644
99031--- a/net/netfilter/nf_conntrack_acct.c
99032+++ b/net/netfilter/nf_conntrack_acct.c
99033@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
99034 #ifdef CONFIG_SYSCTL
99035 static int nf_conntrack_acct_init_sysctl(struct net *net)
99036 {
99037- struct ctl_table *table;
99038+ ctl_table_no_const *table;
99039
99040 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
99041 GFP_KERNEL);
99042diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
99043index 43549eb..0bbeace 100644
99044--- a/net/netfilter/nf_conntrack_core.c
99045+++ b/net/netfilter/nf_conntrack_core.c
99046@@ -1605,6 +1605,10 @@ void nf_conntrack_init_end(void)
99047 #define DYING_NULLS_VAL ((1<<30)+1)
99048 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
99049
99050+#ifdef CONFIG_GRKERNSEC_HIDESYM
99051+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
99052+#endif
99053+
99054 int nf_conntrack_init_net(struct net *net)
99055 {
99056 int ret;
99057@@ -1619,7 +1623,11 @@ int nf_conntrack_init_net(struct net *net)
99058 goto err_stat;
99059 }
99060
99061+#ifdef CONFIG_GRKERNSEC_HIDESYM
99062+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
99063+#else
99064 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
99065+#endif
99066 if (!net->ct.slabname) {
99067 ret = -ENOMEM;
99068 goto err_slabname;
99069diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
99070index 1df1761..ce8b88a 100644
99071--- a/net/netfilter/nf_conntrack_ecache.c
99072+++ b/net/netfilter/nf_conntrack_ecache.c
99073@@ -188,7 +188,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
99074 #ifdef CONFIG_SYSCTL
99075 static int nf_conntrack_event_init_sysctl(struct net *net)
99076 {
99077- struct ctl_table *table;
99078+ ctl_table_no_const *table;
99079
99080 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
99081 GFP_KERNEL);
99082diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
99083index 974a2a4..52cc6ff 100644
99084--- a/net/netfilter/nf_conntrack_helper.c
99085+++ b/net/netfilter/nf_conntrack_helper.c
99086@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
99087
99088 static int nf_conntrack_helper_init_sysctl(struct net *net)
99089 {
99090- struct ctl_table *table;
99091+ ctl_table_no_const *table;
99092
99093 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
99094 GFP_KERNEL);
99095diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
99096index ce30041..3861b5d 100644
99097--- a/net/netfilter/nf_conntrack_proto.c
99098+++ b/net/netfilter/nf_conntrack_proto.c
99099@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
99100
99101 static void
99102 nf_ct_unregister_sysctl(struct ctl_table_header **header,
99103- struct ctl_table **table,
99104+ ctl_table_no_const **table,
99105 unsigned int users)
99106 {
99107 if (users > 0)
99108diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
99109index a99b6c3..cb372f9 100644
99110--- a/net/netfilter/nf_conntrack_proto_dccp.c
99111+++ b/net/netfilter/nf_conntrack_proto_dccp.c
99112@@ -428,7 +428,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
99113 const char *msg;
99114 u_int8_t state;
99115
99116- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
99117+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
99118 BUG_ON(dh == NULL);
99119
99120 state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
99121@@ -457,7 +457,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
99122 out_invalid:
99123 if (LOG_INVALID(net, IPPROTO_DCCP))
99124 nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL,
99125- NULL, msg);
99126+ NULL, "%s", msg);
99127 return false;
99128 }
99129
99130@@ -486,7 +486,7 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
99131 u_int8_t type, old_state, new_state;
99132 enum ct_dccp_roles role;
99133
99134- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
99135+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
99136 BUG_ON(dh == NULL);
99137 type = dh->dccph_type;
99138
99139@@ -577,7 +577,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
99140 unsigned int cscov;
99141 const char *msg;
99142
99143- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
99144+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
99145 if (dh == NULL) {
99146 msg = "nf_ct_dccp: short packet ";
99147 goto out_invalid;
99148@@ -614,7 +614,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
99149
99150 out_invalid:
99151 if (LOG_INVALID(net, IPPROTO_DCCP))
99152- nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, msg);
99153+ nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", msg);
99154 return -NF_ACCEPT;
99155 }
99156
99157diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
99158index f641751..d3c5b51 100644
99159--- a/net/netfilter/nf_conntrack_standalone.c
99160+++ b/net/netfilter/nf_conntrack_standalone.c
99161@@ -471,7 +471,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
99162
99163 static int nf_conntrack_standalone_init_sysctl(struct net *net)
99164 {
99165- struct ctl_table *table;
99166+ ctl_table_no_const *table;
99167
99168 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
99169 GFP_KERNEL);
99170diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
99171index 7a394df..bd91a8a 100644
99172--- a/net/netfilter/nf_conntrack_timestamp.c
99173+++ b/net/netfilter/nf_conntrack_timestamp.c
99174@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
99175 #ifdef CONFIG_SYSCTL
99176 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
99177 {
99178- struct ctl_table *table;
99179+ ctl_table_no_const *table;
99180
99181 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
99182 GFP_KERNEL);
99183diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
99184index 85296d4..8becdec 100644
99185--- a/net/netfilter/nf_log.c
99186+++ b/net/netfilter/nf_log.c
99187@@ -243,7 +243,7 @@ static const struct file_operations nflog_file_ops = {
99188
99189 #ifdef CONFIG_SYSCTL
99190 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
99191-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
99192+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
99193
99194 static int nf_log_proc_dostring(struct ctl_table *table, int write,
99195 void __user *buffer, size_t *lenp, loff_t *ppos)
99196@@ -274,14 +274,16 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
99197 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
99198 mutex_unlock(&nf_log_mutex);
99199 } else {
99200+ ctl_table_no_const nf_log_table = *table;
99201+
99202 mutex_lock(&nf_log_mutex);
99203 logger = rcu_dereference_protected(net->nf.nf_loggers[tindex],
99204 lockdep_is_held(&nf_log_mutex));
99205 if (!logger)
99206- table->data = "NONE";
99207+ nf_log_table.data = "NONE";
99208 else
99209- table->data = logger->name;
99210- r = proc_dostring(table, write, buffer, lenp, ppos);
99211+ nf_log_table.data = logger->name;
99212+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
99213 mutex_unlock(&nf_log_mutex);
99214 }
99215
99216diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
99217index f042ae5..30ea486 100644
99218--- a/net/netfilter/nf_sockopt.c
99219+++ b/net/netfilter/nf_sockopt.c
99220@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
99221 }
99222 }
99223
99224- list_add(&reg->list, &nf_sockopts);
99225+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
99226 out:
99227 mutex_unlock(&nf_sockopt_mutex);
99228 return ret;
99229@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
99230 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
99231 {
99232 mutex_lock(&nf_sockopt_mutex);
99233- list_del(&reg->list);
99234+ pax_list_del((struct list_head *)&reg->list);
99235 mutex_unlock(&nf_sockopt_mutex);
99236 }
99237 EXPORT_SYMBOL(nf_unregister_sockopt);
99238diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
99239index a155d19..726b0f2 100644
99240--- a/net/netfilter/nfnetlink_log.c
99241+++ b/net/netfilter/nfnetlink_log.c
99242@@ -82,7 +82,7 @@ static int nfnl_log_net_id __read_mostly;
99243 struct nfnl_log_net {
99244 spinlock_t instances_lock;
99245 struct hlist_head instance_table[INSTANCE_BUCKETS];
99246- atomic_t global_seq;
99247+ atomic_unchecked_t global_seq;
99248 };
99249
99250 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
99251@@ -564,7 +564,7 @@ __build_packet_message(struct nfnl_log_net *log,
99252 /* global sequence number */
99253 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
99254 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
99255- htonl(atomic_inc_return(&log->global_seq))))
99256+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
99257 goto nla_put_failure;
99258
99259 if (data_len) {
99260diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
99261index da0c1f4..f79737a 100644
99262--- a/net/netfilter/nft_compat.c
99263+++ b/net/netfilter/nft_compat.c
99264@@ -216,7 +216,7 @@ target_dump_info(struct sk_buff *skb, const struct xt_target *t, const void *in)
99265 /* We want to reuse existing compat_to_user */
99266 old_fs = get_fs();
99267 set_fs(KERNEL_DS);
99268- t->compat_to_user(out, in);
99269+ t->compat_to_user((void __force_user *)out, in);
99270 set_fs(old_fs);
99271 ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), out);
99272 kfree(out);
99273@@ -403,7 +403,7 @@ match_dump_info(struct sk_buff *skb, const struct xt_match *m, const void *in)
99274 /* We want to reuse existing compat_to_user */
99275 old_fs = get_fs();
99276 set_fs(KERNEL_DS);
99277- m->compat_to_user(out, in);
99278+ m->compat_to_user((void __force_user *)out, in);
99279 set_fs(old_fs);
99280 ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), out);
99281 kfree(out);
99282diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
99283new file mode 100644
99284index 0000000..c566332
99285--- /dev/null
99286+++ b/net/netfilter/xt_gradm.c
99287@@ -0,0 +1,51 @@
99288+/*
99289+ * gradm match for netfilter
99290